repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
noam09/deluge-telegramer
telegramer/include/telegram/ext/handler.py
1
5772
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2018 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains the base class for handlers as used by the Dispatcher.""" class Handler(object): """The base class for all update handlers. Create custom handlers by inheriting from it. Attributes: callback (:obj:`callable`): The callback function for this handler. pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be passed to the callback function. pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to the callback function. pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to the callback function. pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to the callback function. Note: :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you can use to keep any data in will be sent to the :attr:`callback` function. Related to either the user or the chat that the update was sent in. For each update from the same user or in the same chat, it will be the same ``dict``. Args: callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments. 
It will be called when the :attr:`check_update` has determined that an update should be processed by this handler. pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called ``update_queue`` will be passed to the callback function. It will be the ``Queue`` instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher` that contains new updates which can be used to insert updates. Default is ``False``. pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called ``job_queue`` will be passed to the callback function. It will be a :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater` which can be used to schedule new jobs. Default is ``False``. pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called ``user_data`` will be passed to the callback function. Default is ``False``. pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called ``chat_data`` will be passed to the callback function. Default is ``False``. """ def __init__(self, callback, pass_update_queue=False, pass_job_queue=False, pass_user_data=False, pass_chat_data=False): self.callback = callback self.pass_update_queue = pass_update_queue self.pass_job_queue = pass_job_queue self.pass_user_data = pass_user_data self.pass_chat_data = pass_chat_data def check_update(self, update): """ This method is called to determine if an update should be handled by this handler instance. It should always be overridden. Args: update (:obj:`str` | :class:`telegram.Update`): The update to be tested. Returns: :obj:`bool` """ raise NotImplementedError def handle_update(self, update, dispatcher): """ This method is called if it was determined that an update should indeed be handled by this instance. It should also be overridden, but in most cases call ``self.callback(dispatcher.bot, update)``, possibly along with optional arguments. 
To work with the ``ConversationHandler``, this method should return the value returned from ``self.callback`` Args: update (:obj:`str` | :class:`telegram.Update`): The update to be handled. dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher to collect optional args. """ raise NotImplementedError def collect_optional_args(self, dispatcher, update=None): """Prepares the optional arguments that are the same for all types of handlers. Args: dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher. """ optional_args = dict() if self.pass_update_queue: optional_args['update_queue'] = dispatcher.update_queue if self.pass_job_queue: optional_args['job_queue'] = dispatcher.job_queue if self.pass_user_data or self.pass_chat_data: chat = update.effective_chat user = update.effective_user if self.pass_user_data: optional_args['user_data'] = dispatcher.user_data[user.id if user else None] if self.pass_chat_data: optional_args['chat_data'] = dispatcher.chat_data[chat.id if chat else None] return optional_args
gpl-3.0
ltowarek/budget-supervisor
third_party/saltedge/swagger_client/models/created_customer_response.py
1
3095
# coding: utf-8 """ Salt Edge Account Information API API Reference for services # noqa: E501 OpenAPI spec version: 5.0.0 Contact: support@saltedge.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class CreatedCustomerResponse(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'Customer' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """CreatedCustomerResponse - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None if data is not None: self.data = data @property def data(self): """Gets the data of this CreatedCustomerResponse. # noqa: E501 :return: The data of this CreatedCustomerResponse. # noqa: E501 :rtype: Customer """ return self._data @data.setter def data(self, data): """Sets the data of this CreatedCustomerResponse. :param data: The data of this CreatedCustomerResponse. 
# noqa: E501 :type: Customer """ self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(CreatedCustomerResponse, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, CreatedCustomerResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
mit
Juniper/ceilometer
ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py
4
2715
# # Copyright 2013 eNovance SAS <licensing@enovance.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). 
execute()) orig_col.drop() new_col.alter(name=col) if index: sa.Index('ix_%s_%s' % (table.name, col), new_col).create() def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) _convert_data_type(event, 'generated', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) trait = sa.Table('trait', meta, autoload=True) _convert_data_type(trait, 't_datetime', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) def downgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) _convert_data_type(event, 'generated', models.PreciseTimestamp(), sa.Float(), pk_attr='id', index=True) trait = sa.Table('trait', meta, autoload=True) _convert_data_type(trait, 't_datetime', models.PreciseTimestamp(), sa.Float(), pk_attr='id', index=True)
apache-2.0
tempesta-tech/mariadb
storage/tokudb/mysql-test/tokudb/t/change_column_int.py
18
1914
#!/usr/bin/env python2 import sys def gen_test(types, values): for a in range(len(types)): for b in range(len(types)): print print "CREATE TABLE t (a %s);" % (types[a]) if a > b: print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/" print "--error ER_UNSUPPORTED_EXTENSION" else: for x in values[a]: print "INSERT INTO t VALUES (", x, ");" print "CREATE TABLE ti LIKE t;" print "ALTER TABLE ti ENGINE=myisam;" print "INSERT INTO ti SELECT * from t;" print "ALTER TABLE ti CHANGE COLUMN a a %s;" % (types[b]) print "ALTER TABLE t CHANGE COLUMN a a %s;" % (types[b]) if a > b: pass else: print "let $diff_tables = test.t, test.ti;" print "source include/diff_tables.inc;" print "DROP TABLE ti;" print "DROP TABLE t;" def main(): print "source include/have_tokudb.inc;" print "# this test is generated by change_int.py" print "# test int expansion is hot" print "--disable_warnings" print "DROP TABLE IF EXISTS t, ti;" print "--enable_warnings" print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";" print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;" gen_test( [ "TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT" ], [ [ -128, -1, 0, 1, 127 ], [ -32768, -1, 0, 1, 32767], [-8388608, -1, 0, 1, 8388607], [-2147483648, 0, 1, 2147483647], [-9223372036854775808, 0, 1, 9223372036854775807] ] ) gen_test( [ "TINYINT UNSIGNED", "SMALLINT UNSIGNED", "MEDIUMINT UNSIGNED", "INT UNSIGNED", "BIGINT UNSIGNED" ], [ [ 0, 1, 255 ], [ 0, 1, 65535], [0, 1, 16777215], [0, 1, 4294967295], [0, 1, 18446744073709551615] ] ) return 0 sys.exit(main())
gpl-2.0
zhangkaixuan/ExtendedCloudStorage_V2.0
ecs/client/httpclient.py
1
1455
__author__ = 'zhangkaixuan' import httplib from ecs.util.params import PROXY_SERVER_URL, CDMI_VERSION from ecs.util.tools import md5, encrypt class ECSClient: def __init__(self, conn): self.conn = conn self.heads = {"X-CDMI-Specification-Version":CDMI_VERSION} self.body = None self.params = {} self.method = "GET" self.host = "cloud.ecust.edu.cn" self.path = "/" def setAcceptType(self, accepttype): self.heads.setdefault("Accept", accepttype) def setMethod(self, method): self.method = method def setHost(self, host): self.host = host def setPath(self, path): self.path = path def setUser(self, user): u = encrypt(10,user) self.heads.setdefault("X-Auth-User", u) def setPassWord(self, password): ciphertext = md5(password) self.heads.setdefault("X-Auth-Key", ciphertext) def setBody(self, body): self.body = body def sendRequest(self): self.conn.request(self.method, self.path, self.body, self.heads) response = self.conn.getresponse(buffering = False) data = response.read() print data if __name__ == '__main__': conn = httplib.HTTPConnection(PROXY_SERVER_URL) client = ECSClient(conn) client.setUser("045130160") client.setPassWord("oooooo") client.setAcceptType("text/plain") client.sendRequest() conn.close()
mit
LeoNineStudios/nano4
main.py
1
1823
#!/usr/bin/env python """ main.py -- Udacity conference server-side Python App Engine HTTP controller handlers for memcache & task queue access $Id$ created by wesc on 2014 may 24 """ __author__ = 'wesc+api@google.com (Wesley Chun)' import webapp2 from google.appengine.api import app_identity from google.appengine.api import mail from google.appengine.api import memcache from conference import ConferenceApi class SetAnnouncementHandler(webapp2.RequestHandler): def get(self): """Set Announcement in Memcache.""" ConferenceApi._cacheAnnouncement() self.response.set_status(204) class SendConfirmationEmailHandler(webapp2.RequestHandler): def post(self): """Send email confirming Conference creation.""" mail.send_mail( 'noreply@%s.appspotmail.com' % ( app_identity.get_application_id()), # from self.request.get('email'), # to 'You created a new Conference!', # subj 'Hi, you have created a following ' # body 'conference:\r\n\r\n%s' % self.request.get( 'conferenceInfo') ) class SetFeaturedSpeakerHandler(webapp2.RequestHandler): def post(self): """ Features the speaker and session names.""" speakerName = self.request.get('speaker') conferenceKey = self.request.get('conferenceKey') ConferenceApi._cacheFeaturedSpeaker(speakerName, conferenceKey) self.response.set_status(204) app = webapp2.WSGIApplication([ ('/crons/set_announcement', SetAnnouncementHandler), ('/tasks/send_confirmation_email', SendConfirmationEmailHandler), ('/tasks/set_featured_speaker', SetFeaturedSpeakerHandler), ], debug=True)
apache-2.0
bitreserve/bitcoin
qa/rpc-tests/test_framework/coverage.py
27
2939
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for doing coverage analysis on the RPC interface. Provides a way to track which RPC commands are exercised during testing. """ import os REFERENCE_FILENAME = 'rpc_interface.txt' class AuthServiceProxyWrapper(object): """ An object that wraps AuthServiceProxy to record specific RPC calls. """ def __init__(self, auth_service_proxy_instance, coverage_logfile=None): """ Kwargs: auth_service_proxy_instance (AuthServiceProxy): the instance being wrapped. coverage_logfile (str): if specified, write each service_name out to a file when called. """ self.auth_service_proxy_instance = auth_service_proxy_instance self.coverage_logfile = coverage_logfile def __getattr__(self, *args, **kwargs): return_val = self.auth_service_proxy_instance.__getattr__( *args, **kwargs) return AuthServiceProxyWrapper(return_val, self.coverage_logfile) def __call__(self, *args, **kwargs): """ Delegates to AuthServiceProxy, then writes the particular RPC method called to a file. """ return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs) rpc_method = self.auth_service_proxy_instance._service_name if self.coverage_logfile: with open(self.coverage_logfile, 'a+', encoding='utf8') as f: f.write("%s\n" % rpc_method) return return_val @property def url(self): return self.auth_service_proxy_instance.url def get_filename(dirname, n_node): """ Get a filename unique to the test process ID and node. This file will contain a list of RPC commands covered. """ pid = str(os.getpid()) return os.path.join( dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node))) def write_all_rpc_commands(dirname, node): """ Write out a list of all RPC functions available in `bitcoin-cli` for coverage comparison. This will only happen once per coverage directory. 
Args: dirname (str): temporary test dir node (AuthServiceProxy): client Returns: bool. if the RPC interface file was written. """ filename = os.path.join(dirname, REFERENCE_FILENAME) if os.path.isfile(filename): return False help_output = node.help().split('\n') commands = set() for line in help_output: line = line.strip() # Ignore blanks and headers if line and not line.startswith('='): commands.add("%s\n" % line.split()[0]) with open(filename, 'w', encoding='utf8') as f: f.writelines(list(commands)) return True
mit
precedenceguo/mxnet
example/gluon/tree_lstm/scripts/download.py
44
3654
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Downloads the following: - Stanford parser - Stanford POS tagger - Glove vectors - SICK dataset (semantic relatedness task) """ from __future__ import print_function import urllib2 import sys import os import shutil import zipfile import gzip from mxnet.test_utils import download def unzip(filepath): print("Extracting: " + filepath) dirpath = os.path.dirname(filepath) with zipfile.ZipFile(filepath) as zf: zf.extractall(dirpath) os.remove(filepath) def download_tagger(dirpath): tagger_dir = 'stanford-tagger' if os.path.exists(os.path.join(dirpath, tagger_dir)): print('Found Stanford POS Tagger - skip') return url = 'http://nlp.stanford.edu/software/stanford-postagger-2015-01-29.zip' filepath = download(url, dirname=dirpath) zip_dir = '' with zipfile.ZipFile(filepath) as zf: zip_dir = zf.namelist()[0] zf.extractall(dirpath) os.remove(filepath) os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, tagger_dir)) def download_parser(dirpath): parser_dir = 'stanford-parser' if os.path.exists(os.path.join(dirpath, parser_dir)): print('Found Stanford Parser - skip') return url = 'http://nlp.stanford.edu/software/stanford-parser-full-2015-01-29.zip' filepath = download(url, dirname=dirpath) 
zip_dir = '' with zipfile.ZipFile(filepath) as zf: zip_dir = zf.namelist()[0] zf.extractall(dirpath) os.remove(filepath) os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, parser_dir)) def download_wordvecs(dirpath): if os.path.exists(dirpath): print('Found Glove vectors - skip') return else: os.makedirs(dirpath) url = 'http://www-nlp.stanford.edu/data/glove.840B.300d.zip' unzip(download(url, dirname=dirpath)) def download_sick(dirpath): if os.path.exists(dirpath): print('Found SICK dataset - skip') return else: os.makedirs(dirpath) train_url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/sick_train.zip' trial_url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/sick_trial.zip' test_url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/sick_test_annotated.zip' unzip(download(train_url, dirname=dirpath)) unzip(download(trial_url, dirname=dirpath)) unzip(download(test_url, dirname=dirpath)) if __name__ == '__main__': base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) # data data_dir = os.path.join(base_dir, 'data') wordvec_dir = os.path.join(data_dir, 'glove') sick_dir = os.path.join(data_dir, 'sick') # libraries lib_dir = os.path.join(base_dir, 'lib') # download dependencies download_tagger(lib_dir) download_parser(lib_dir) download_wordvecs(wordvec_dir) download_sick(sick_dir)
apache-2.0
AlexGrig/GPy
GPy/kern/_src/todo/finite_dimensional.py
19
2621
# Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) from kernpart import Kernpart import numpy as np from ...util.linalg import pdinv,mdot class FiniteDimensional(Kernpart): def __init__(self, input_dim, F, G, variance=1., weights=None): """ Argumnents ---------- input_dim: int - the number of input dimensions F: np.array of functions with shape (n,) - the n basis functions G: np.array with shape (n,n) - the Gram matrix associated to F weights : np.ndarray with shape (n,) """ self.input_dim = input_dim self.F = F self.G = G self.G_1 ,L,Li,logdet = pdinv(G) self.n = F.shape[0] if weights is not None: assert weights.shape==(self.n,) else: weights = np.ones(self.n) self.num_params = self.n + 1 self.name = 'finite_dim' self._set_params(np.hstack((variance,weights))) def _get_params(self): return np.hstack((self.variance,self.weights)) def _set_params(self,x): assert x.size == (self.num_params) self.variance = x[0] self.weights = x[1:] def _get_param_names(self): if self.n==1: return ['variance','weight'] else: return ['variance']+['weight_%i'%i for i in range(self.weights.size)] def K(self,X,X2,target): if X2 is None: X2 = X FX = np.column_stack([f(X) for f in self.F]) FX2 = np.column_stack([f(X2) for f in self.F]) product = self.variance * mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T) np.add(product,target,target) def Kdiag(self,X,target): product = np.diag(self.K(X, X)) np.add(target,product,target) def _param_grad_helper(self,X,X2,target): """Return shape is NxMx(Ntheta)""" if X2 is None: X2 = X FX = np.column_stack([f(X) for f in self.F]) FX2 = np.column_stack([f(X2) for f in self.F]) DER = np.zeros((self.n,self.n,self.n)) for i in range(self.n): DER[i,i,i] = np.sqrt(self.weights[i]) dw = self.variance * mdot(FX,DER,self.G_1,np.diag(np.sqrt(self.weights)),FX2.T) dv = mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T) 
np.add(target[:,:,0],np.transpose(dv,(0,2,1)), target[:,:,0]) np.add(target[:,:,1:],np.transpose(dw,(0,2,1)), target[:,:,1:]) def dKdiag_dtheta(self,X,target): np.add(target[:,0],1.,target[:,0])
bsd-3-clause
looker/sentry
src/sentry/tagstore/v2/models/eventtag.py
2
1403
""" sentry.tagstore.v2.models.eventtag ~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from django.db import models, router, connections from django.utils import timezone from sentry.db.models import (Model, BoundedBigIntegerField, FlexibleForeignKey, sane_repr) class EventTag(Model): __core__ = False project_id = BoundedBigIntegerField() group_id = BoundedBigIntegerField() event_id = BoundedBigIntegerField() key = FlexibleForeignKey('tagstore.TagKey', db_column='key_id') value = FlexibleForeignKey('tagstore.TagValue', db_column='value_id') date_added = models.DateTimeField(default=timezone.now, db_index=True) class Meta: app_label = 'tagstore' unique_together = (('project_id', 'event_id', 'key', 'value'), ) index_together = ( ('project_id', 'key', 'value'), ('group_id', 'key', 'value'), ) __repr__ = sane_repr('event_id', 'key_id', 'value_id') def delete(self): using = router.db_for_read(EventTag) cursor = connections[using].cursor() cursor.execute( """ DELETE FROM tagstore_eventtag WHERE project_id = %s AND id = %s """, [self.project_id, self.id] )
bsd-3-clause
linjoahow/2015cdaa-w11
static/Brython3.1.0-20150301-090019/Lib/site-packages/highlight.py
617
2518
import keyword import _jsre as re from browser import html letters = 'abcdefghijklmnopqrstuvwxyz' letters += letters.upper()+'_' digits = '0123456789' builtin_funcs = ("abs|divmod|input|open|staticmethod|all|enumerate|int|ord|str|any|" + "eval|isinstance|pow|sum|basestring|execfile|issubclass|print|super|" + "binfile|iter|property|tuple|bool|filter|len|range|type|bytearray|" + "float|list|raw_input|unichr|callable|format|locals|reduce|unicode|" + "chr|frozenset|long|reload|vars|classmethod|getattr|map|repr|xrange|" + "cmp|globals|max|reversed|zip|compile|hasattr|memoryview|round|" + "__import__|complex|hash|min|set|apply|delattr|help|next|setattr|" + "buffer|dict|hex|object|slice|coerce|dir|id|oct|sorted|intern") kw_pattern = '^('+'|'.join(keyword.kwlist)+')$' bf_pattern = '^('+builtin_funcs+')$' def highlight(txt, string_color="blue", comment_color="green", keyword_color="purple"): res = html.PRE() i = 0 name = '' while i<len(txt): car = txt[i] if car in ["'",'"']: k = i+1 while k<len(txt): if txt[k]==car: nb_as = 0 j = k-1 while True: if txt[j]=='\\': nb_as+=1 j -= 1 else: break if nb_as % 2 == 0: res <= html.SPAN(txt[i:k+1], style=dict(color=string_color)) i = k break k += 1 elif car == '#': # comment end = txt.find('\n', i) if end== -1: res <= html.SPAN(txt[i:],style=dict(color=comment_color)) break else: res <= html.SPAN(txt[i:end],style=dict(color=comment_color)) i = end-1 elif car in letters: name += car elif car in digits and name: name += car else: if name: if re.search(kw_pattern,name): res <= html.SPAN(name,style=dict(color=keyword_color)) elif re.search(bf_pattern,name): res <= html.SPAN(name,style=dict(color=keyword_color)) else: res <= name name = '' res <= car i += 1 res <= name return res
gpl-3.0
ajjl/ITK
Wrapping/Generators/Python/Tests/wrappingCoverage.py
10
2846
#========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ from __future__ import print_function import sys import re import itk import os from optparse import OptionParser parser = OptionParser(usage='wrappingCoverage.py paths') parser.add_option( "-b", "--base", dest="base", default="Filter", help="Base string used to search for the classes (default: Filter).") parser.add_option( "-e", "--exclude", dest="exclude", default=None, help="Path of a file with one class to exclude per line (default: None).") parser.add_option( "-E", "--no-error", action="store_true", dest="noError", help="Don't generate an error code if all the classes are not wrapped.") opts, args = parser.parse_args() # declares classes which will not be wrapped excluded = set([]) if opts.exclude: map(excluded.add, [c.strip() for c in file(opts.exclude).readlines()]) # get classes from sources headers = [] for d in args: headers += sum([f for p, d, f in os.walk(d) if "Deprecated" not in p and "TestKernel" not in p], []) classes = set([f[len('itk'):-len('.h')] for f in headers if f.startswith("itk") and not f.startswith("itkv3") and f.endswith(opts.base + ".h")]) - excluded # get filter from wrapper files # remove classes which are not in the toolkit (external projects, # PyImageFilter, ...) 
wrapped = set([a for a in dir(itk) if a.endswith(opts.base)] ).intersection(classes) nonWrapped = classes - wrapped # print non wrapped classes without much text to stdout, so they can be # easily reused for f in sorted(nonWrapped): print(f) # and print stats in stderr to avoid poluting the list above print("", file=sys.stderr) print('%i %s' % (len(classes), opts.base), file=sys.stderr) print('%i wrapped %s' % (len(wrapped), opts.base), file=sys.stderr) print('%i non wrapped %s' % (len(nonWrapped), opts.base), file=sys.stderr) covered = len(wrapped) / float(len(classes)) print('%f%% covered' % (covered * 100), file=sys.stderr) print("", file=sys.stderr) if not opts.noError: sys.exit(len(nonWrapped))
apache-2.0
arank/mxnet
example/fcn-xs/data.py
14
5282
# pylint: skip-file """ file iterator for pasval voc 2012""" import mxnet as mx import numpy as np import sys, os from mxnet.io import DataIter from PIL import Image class FileIter(DataIter): """FileIter object in fcn-xs example. Taking a file list file to get dataiter. in this example, we use the whole image training for fcn-xs, that is to say we do not need resize/crop the image to the same size, so the batch_size is set to 1 here Parameters ---------- root_dir : string the root dir of image/label lie in flist_name : string the list file of iamge and label, every line owns the form: index \t image_data_path \t image_label_path cut_off_size : int if the maximal size of one image is larger than cut_off_size, then it will crop the image with the minimal size of that image data_name : string the data name used in symbol data(default data name) label_name : string the label name used in symbol softmax_label(default label name) """ def __init__(self, root_dir, flist_name, rgb_mean = (117, 117, 117), cut_off_size = None, data_name = "data", label_name = "softmax_label"): super(FileIter, self).__init__() self.root_dir = root_dir self.flist_name = os.path.join(self.root_dir, flist_name) self.mean = np.array(rgb_mean) # (R, G, B) self.cut_off_size = cut_off_size self.data_name = data_name self.label_name = label_name self.num_data = len(open(self.flist_name, 'r').readlines()) self.f = open(self.flist_name, 'r') self.data, self.label = self._read() self.cursor = -1 def _read(self): """get two list, each list contains two elements: name and nd.array value""" _, data_img_name, label_img_name = self.f.readline().strip('\n').split("\t") data = {} label = {} data[self.data_name], label[self.label_name] = self._read_img(data_img_name, label_img_name) return list(data.items()), list(label.items()) def _read_img(self, img_name, label_name): img = Image.open(os.path.join(self.root_dir, img_name)) label = Image.open(os.path.join(self.root_dir, label_name)) assert img.size == 
label.size img = np.array(img, dtype=np.float32) # (h, w, c) label = np.array(label) # (h, w) if self.cut_off_size is not None: max_hw = max(img.shape[0], img.shape[1]) min_hw = min(img.shape[0], img.shape[1]) if min_hw > self.cut_off_size: rand_start_max = int(np.random.uniform(0, max_hw - self.cut_off_size - 1)) rand_start_min = int(np.random.uniform(0, min_hw - self.cut_off_size - 1)) if img.shape[0] == max_hw : img = img[rand_start_max : rand_start_max + self.cut_off_size, rand_start_min : rand_start_min + self.cut_off_size] label = label[rand_start_max : rand_start_max + self.cut_off_size, rand_start_min : rand_start_min + self.cut_off_size] else : img = img[rand_start_min : rand_start_min + self.cut_off_size, rand_start_max : rand_start_max + self.cut_off_size] label = label[rand_start_min : rand_start_min + self.cut_off_size, rand_start_max : rand_start_max + self.cut_off_size] elif max_hw > self.cut_off_size: rand_start = int(np.random.uniform(0, max_hw - min_hw - 1)) if img.shape[0] == max_hw : img = img[rand_start : rand_start + min_hw, :] label = label[rand_start : rand_start + min_hw, :] else : img = img[:, rand_start : rand_start + min_hw] label = label[:, rand_start : rand_start + min_hw] reshaped_mean = self.mean.reshape(1, 1, 3) img = img - reshaped_mean img = np.swapaxes(img, 0, 2) img = np.swapaxes(img, 1, 2) # (c, h, w) img = np.expand_dims(img, axis=0) # (1, c, h, w) label = np.array(label) # (h, w) label = np.expand_dims(label, axis=0) # (1, h, w) return (img, label) @property def provide_data(self): """The name and shape of data provided by this iterator""" return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.data] @property def provide_label(self): """The name and shape of label provided by this iterator""" return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.label] def get_batch_size(self): return 1 def reset(self): self.cursor = -1 self.f.close() self.f = open(self.flist_name, 'r') def iter_next(self): self.cursor += 1 
if(self.cursor < self.num_data-1): return True else: return False def next(self): """return one dict which contains "data" and "label" """ if self.iter_next(): self.data, self.label = self._read() return {self.data_name : self.data[0][1], self.label_name : self.label[0][1]} else: raise StopIteration
apache-2.0
tjlaboss/openmc
openmc/data/resonance_covariance.py
10
27136
from collections.abc import MutableSequence import warnings import io import copy import numpy as np import pandas as pd from . import endf import openmc.checkvalue as cv from .resonance import Resonances def _add_file2_contributions(file32params, file2params): """Function for aiding in adding resonance parameters from File 2 that are not always present in File 32. Uses already imported resonance data. Paramaters ---------- file32params : pandas.Dataframe Incomplete set of resonance parameters contained in File 32. file2params : pandas.Dataframe Resonance parameters from File 2. Ordered by energy. Returns ------- parameters : pandas.Dataframe Complete set of parameters ordered by L-values and then energy """ # Use l-values and competitiveWidth from File 2 data # Re-sort File 2 by energy to match File 32 file2params = file2params.sort_values(by=['energy']) file2params.reset_index(drop=True, inplace=True) # Sort File 32 parameters by energy as well (maintaining index) file32params.sort_values(by=['energy'], inplace=True) # Add in values (.values converts to array first to ignore index) file32params['L'] = file2params['L'].values if 'competitiveWidth' in file2params.columns: file32params['competitiveWidth'] = file2params['competitiveWidth'].values # Resort to File 32 order (by L then by E) for use with covariance file32params.sort_index(inplace=True) return file32params class ResonanceCovariances(Resonances): """Resolved resonance covariance data Parameters ---------- ranges : list of openmc.data.ResonanceCovarianceRange Distinct energy ranges for resonance data Attributes ---------- ranges : list of openmc.data.ResonanceCovarianceRange Distinct energy ranges for resonance data """ @property def ranges(self): return self._ranges @ranges.setter def ranges(self, ranges): cv.check_type('resonance ranges', ranges, MutableSequence) self._ranges = cv.CheckedList(ResonanceCovarianceRange, 'resonance range', ranges) @classmethod def from_endf(cls, ev, resonances): """Generate 
resonance covariance data from an ENDF evaluation. Parameters ---------- ev : openmc.data.endf.Evaluation ENDF evaluation resonances : openmc.data.Resonance object openmc.data.Resonanance object generated from the same evaluation used to import values not contained in File 32 Returns ------- openmc.data.ResonanceCovariances Resonance covariance data """ file_obj = io.StringIO(ev.section[32, 151]) # Determine whether discrete or continuous representation items = endf.get_head_record(file_obj) n_isotope = items[4] # Number of isotopes ranges = [] for iso in range(n_isotope): items = endf.get_cont_record(file_obj) abundance = items[1] fission_widths = (items[3] == 1) # Flag for fission widths n_ranges = items[4] # Number of resonance energy ranges for j in range(n_ranges): items = endf.get_cont_record(file_obj) # Unresolved flags - 0: only scattering radius given # 1: resolved parameters given # 2: unresolved parameters given unresolved_flag = items[2] formalism = items[3] # resonance formalism # Throw error for unsupported formalisms if formalism in [0, 7]: error = 'LRF='+str(formalism)+' covariance not supported '\ 'for this formalism' raise NotImplementedError(error) if unresolved_flag in (0, 1): # Resolved resonance region resonance = resonances.ranges[j] erange = _FORMALISMS[formalism].from_endf(ev, file_obj, items, resonance) ranges.append(erange) elif unresolved_flag == 2: warn = 'Unresolved resonance not supported. Covariance '\ 'values for the unresolved region not imported.' warnings.warn(warn) return cls(ranges) class ResonanceCovarianceRange: """Resonace covariance range. Base class for different formalisms. 
Parameters ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV Attributes ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV parameters : pandas.DataFrame Resonance parameters covariance : numpy.array The covariance matrix contained within the ENDF evaluation lcomp : int Flag indicating format of the covariance matrix within the ENDF file file2res : openmc.data.ResonanceRange object Corresponding resonance range with File 2 data. mpar : int Number of parameters in covariance matrix for each individual resonance formalism : str String descriptor of formalism """ def __init__(self, energy_min, energy_max): self.energy_min = energy_min self.energy_max = energy_max def subset(self, parameter_str, bounds): """Produce a subset of resonance parameters and the corresponding covariance matrix to an IncidentNeutron object. Parameters ---------- parameter_str : str parameter to be discriminated (i.e. 'energy', 'captureWidth', 'fissionWidthA'...) 
bounds : np.array [low numerical bound, high numerical bound] Returns ------- res_cov_range : openmc.data.ResonanceCovarianceRange ResonanceCovarianceRange object that contains a subset of the covariance matrix (upper triangular) as well as a subset parameters within self.file2params """ # Copy range and prevent change of original res_cov_range = copy.deepcopy(self) parameters = self.file2res.parameters cov = res_cov_range.covariance mpar = res_cov_range.mpar # Create mask mask1 = parameters[parameter_str] >= bounds[0] mask2 = parameters[parameter_str] <= bounds[1] mask = mask1 & mask2 res_cov_range.parameters = parameters[mask] indices = res_cov_range.parameters.index.values # Build subset of covariance sub_cov_dim = len(indices)*mpar cov_subset_vals = [] for index1 in indices: for i in range(mpar): for index2 in indices: for j in range(mpar): if index2*mpar+j >= index1*mpar+i: cov_subset_vals.append(cov[index1*mpar+i, index2*mpar+j]) cov_subset = np.zeros([sub_cov_dim, sub_cov_dim]) tri_indices = np.triu_indices(sub_cov_dim) cov_subset[tri_indices] = cov_subset_vals res_cov_range.file2res.parameters = parameters[mask] res_cov_range.covariance = cov_subset return res_cov_range def sample(self, n_samples): """Sample resonance parameters based on the covariances provided within an ENDF evaluation. Parameters ---------- n_samples : int The number of samples to produce Returns ------- samples : list of openmc.data.ResonanceCovarianceRange objects List of samples size `n_samples` """ warn_str = 'Sampling routine does not guarantee positive values for '\ 'parameters. This can lead to undefined behavior in the '\ 'reconstruction routine.' 
warnings.warn(warn_str) parameters = self.parameters cov = self.covariance # Symmetrizing covariance matrix cov = cov + cov.T - np.diag(cov.diagonal()) formalism = self.formalism mpar = self.mpar samples = [] # Handling MLBW/SLBW sampling if formalism == 'mlbw' or formalism == 'slbw': params = ['energy', 'neutronWidth', 'captureWidth', 'fissionWidth', 'competitiveWidth'] param_list = params[:mpar] mean_array = parameters[param_list].values mean = mean_array.flatten() par_samples = np.random.multivariate_normal(mean, cov, size=n_samples) spin = parameters['J'].values l_value = parameters['L'].values for sample in par_samples: energy = sample[0::mpar] gn = sample[1::mpar] gg = sample[2::mpar] gf = sample[3::mpar] if mpar > 3 else parameters['fissionWidth'].values gx = sample[4::mpar] if mpar > 4 else parameters['competitiveWidth'].values gt = gn + gg + gf + gx records = [] for j, E in enumerate(energy): records.append([energy[j], l_value[j], spin[j], gt[j], gn[j], gg[j], gf[j], gx[j]]) columns = ['energy', 'L', 'J', 'totalWidth', 'neutronWidth', 'captureWidth', 'fissionWidth', 'competitiveWidth'] sample_params = pd.DataFrame.from_records(records, columns=columns) # Copy ResonanceRange object res_range = copy.copy(self.file2res) res_range.parameters = sample_params samples.append(res_range) # Handling RM sampling elif formalism == 'rm': params = ['energy', 'neutronWidth', 'captureWidth', 'fissionWidthA', 'fissionWidthB'] param_list = params[:mpar] mean_array = parameters[param_list].values mean = mean_array.flatten() par_samples = np.random.multivariate_normal(mean, cov, size=n_samples) spin = parameters['J'].values l_value = parameters['L'].values for sample in par_samples: energy = sample[0::mpar] gn = sample[1::mpar] gg = sample[2::mpar] gfa = sample[3::mpar] if mpar > 3 else parameters['fissionWidthA'].values gfb = sample[4::mpar] if mpar > 3 else parameters['fissionWidthB'].values records = [] for j, E in enumerate(energy): records.append([energy[j], l_value[j], 
spin[j], gn[j], gg[j], gfa[j], gfb[j]]) columns = ['energy', 'L', 'J', 'neutronWidth', 'captureWidth', 'fissionWidthA', 'fissionWidthB'] sample_params = pd.DataFrame.from_records(records, columns=columns) # Copy ResonanceRange object res_range = copy.copy(self.file2res) res_range.parameters = sample_params samples.append(res_range) return samples class MultiLevelBreitWignerCovariance(ResonanceCovarianceRange): """Multi-level Breit-Wigner resolved resonance formalism covariance data. Parameters ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV Attributes ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV parameters : pandas.DataFrame Resonance parameters covariance : numpy.array The covariance matrix contained within the ENDF evaluation mpar : int Number of parameters in covariance matrix for each individual resonance lcomp : int Flag indicating format of the covariance matrix within the ENDF file file2res : openmc.data.ResonanceRange object Corresponding resonance range with File 2 data. formalism : str String descriptor of formalism """ def __init__(self, energy_min, energy_max, parameters, covariance, mpar, lcomp, file2res): super().__init__(energy_min, energy_max) self.parameters = parameters self.covariance = covariance self.mpar = mpar self.lcomp = lcomp self.file2res = copy.copy(file2res) self.formalism = 'mlbw' @classmethod def from_endf(cls, ev, file_obj, items, resonance): """Create MLBW covariance data from an ENDF evaluation. 
Parameters ---------- ev : openmc.data.endf.Evaluation ENDF evaluation file_obj : file-like object ENDF file positioned at the second record of a resonance range subsection in MF=32, MT=151 items : list Items from the CONT record at the start of the resonance range subsection resonance : openmc.data.ResonanceRange object Corresponding resonance range with File 2 data. Returns ------- openmc.data.MultiLevelBreitWignerCovariance Multi-level Breit-Wigner resonance covariance parameters """ # Read energy-dependent scattering radius if present energy_min, energy_max = items[0:2] nro, naps = items[4:6] if nro != 0: params, ape = endf.get_tab1_record(file_obj) # Other scatter radius parameters items = endf.get_cont_record(file_obj) target_spin = items[0] lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form nls = items[4] # number of l-values # Build covariance matrix for General Resolved Resonance Formats if lcomp == 1: items = endf.get_cont_record(file_obj) # Number of short range type resonance covariances num_short_range = items[4] # Number of long range type resonance covariances num_long_range = items[5] # Read resonance widths, J values, etc records = [] for i in range(num_short_range): items, values = endf.get_list_record(file_obj) mpar = items[2] num_res = items[5] num_par_vals = num_res*6 res_values = values[:num_par_vals] cov_values = values[num_par_vals:] energy = res_values[0::6] spin = res_values[1::6] gt = res_values[2::6] gn = res_values[3::6] gg = res_values[4::6] gf = res_values[5::6] for i, E in enumerate(energy): records.append([energy[i], spin[i], gt[i], gn[i], gg[i], gf[i]]) # Build the upper-triangular covariance matrix cov_dim = mpar*num_res cov = np.zeros([cov_dim, cov_dim]) indices = np.triu_indices(cov_dim) cov[indices] = cov_values # Compact format - Resonances and individual uncertainties followed by # compact correlations elif lcomp == 2: items, values = endf.get_list_record(file_obj) mean = items num_res = items[5] energy = 
values[0::12] spin = values[1::12] gt = values[2::12] gn = values[3::12] gg = values[4::12] gf = values[5::12] par_unc = [] for i in range(num_res): res_unc = values[i*12+6 : i*12+12] # Delete 0 values (not provided, no fission width) # DAJ/DGT always zero, DGF sometimes nonzero [1, 2, 5] res_unc_nonzero = [] for j in range(6): if j in [1, 2, 5] and res_unc[j] != 0.0: res_unc_nonzero.append(res_unc[j]) elif j in [0, 3, 4]: res_unc_nonzero.append(res_unc[j]) par_unc.extend(res_unc_nonzero) records = [] for i, E in enumerate(energy): records.append([energy[i], spin[i], gt[i], gn[i], gg[i], gf[i]]) corr = endf.get_intg_record(file_obj) cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc)) # Compatible resolved resonance format elif lcomp == 0: cov = np.zeros([4, 4]) records = [] cov_index = 0 for i in range(nls): items, values = endf.get_list_record(file_obj) num_res = items[5] for j in range(num_res): one_res = values[18*j:18*(j+1)] res_values = one_res[:6] cov_values = one_res[6:] records.append(list(res_values)) # Populate the coviariance matrix for this resonance # There are no covariances between resonances in lcomp=0 cov[cov_index, cov_index] = cov_values[0] cov[cov_index+1, cov_index+1 : cov_index+2] = cov_values[1:2] cov[cov_index+1, cov_index+3] = cov_values[4] cov[cov_index+2, cov_index+2] = cov_values[3] cov[cov_index+2, cov_index+3] = cov_values[5] cov[cov_index+3, cov_index+3] = cov_values[6] cov_index += 4 if j < num_res-1: # Pad matrix for additional values cov = np.pad(cov, ((0, 4), (0, 4)), 'constant', constant_values=0) # Create pandas DataFrame with resonance data, currently # redundant with data.IncidentNeutron.resonance columns = ['energy', 'J', 'totalWidth', 'neutronWidth', 'captureWidth', 'fissionWidth'] parameters = pd.DataFrame.from_records(records, columns=columns) # Determine mpar (number of parameters for each resonance in # covariance matrix) nparams, params = parameters.shape covsize = cov.shape[0] mpar = int(covsize/nparams) # Add 
parameters from File 2 parameters = _add_file2_contributions(parameters, resonance.parameters) # Create instance of class mlbw = cls(energy_min, energy_max, parameters, cov, mpar, lcomp, resonance) return mlbw class SingleLevelBreitWignerCovariance(MultiLevelBreitWignerCovariance): """Single-level Breit-Wigner resolved resonance formalism covariance data. Single-level Breit-Wigner resolved resonance data is is identified by LRF=1 in the ENDF-6 format. Parameters ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV Attributes ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV parameters : pandas.DataFrame Resonance parameters covariance : numpy.array The covariance matrix contained within the ENDF evaluation mpar : int Number of parameters in covariance matrix for each individual resonance formalism : str String descriptor of formalism lcomp : int Flag indicating format of the covariance matrix within the ENDF file file2res : openmc.data.ResonanceRange object Corresponding resonance range with File 2 data. """ def __init__(self, energy_min, energy_max, parameters, covariance, mpar, lcomp, file2res): super().__init__(energy_min, energy_max, parameters, covariance, mpar, lcomp, file2res) self.formalism = 'slbw' class ReichMooreCovariance(ResonanceCovarianceRange): """Reich-Moore resolved resonance formalism covariance data. Reich-Moore resolved resonance data is identified by LRF=3 in the ENDF-6 format. 
Parameters ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV Attributes ---------- energy_min : float Minimum energy of the resolved resonance range in eV energy_max : float Maximum energy of the resolved resonance range in eV parameters : pandas.DataFrame Resonance parameters covariance : numpy.array The covariance matrix contained within the ENDF evaluation lcomp : int Flag indicating format of the covariance matrix within the ENDF file mpar : int Number of parameters in covariance matrix for each individual resonance file2res : openmc.data.ResonanceRange object Corresponding resonance range with File 2 data. formalism : str String descriptor of formalism """ def __init__(self, energy_min, energy_max, parameters, covariance, mpar, lcomp, file2res): super().__init__(energy_min, energy_max) self.parameters = parameters self.covariance = covariance self.mpar = mpar self.lcomp = lcomp self.file2res = copy.copy(file2res) self.formalism = 'rm' @classmethod def from_endf(cls, ev, file_obj, items, resonance): """Create Reich-Moore resonance covariance data from an ENDF evaluation. Includes the resonance parameters contained separately in File 32. 
Parameters ---------- ev : openmc.data.endf.Evaluation ENDF evaluation file_obj : file-like object ENDF file positioned at the second record of a resonance range subsection in MF=2, MT=151 items : list Items from the CONT record at the start of the resonance range subsection resonance : openmc.data.Resonance object openmc.data.Resonanance object generated from the same evaluation used to import values not contained in File 32 Returns ------- openmc.data.ReichMooreCovariance Reich-Moore resonance covariance parameters """ # Read energy-dependent scattering radius if present energy_min, energy_max = items[0:2] nro, naps = items[4:6] if nro != 0: params, ape = endf.get_tab1_record(file_obj) # Other scatter radius parameters items = endf.get_cont_record(file_obj) target_spin = items[0] lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form nls = items[4] # Number of l-values # Build covariance matrix for General Resolved Resonance Formats if lcomp == 1: items = endf.get_cont_record(file_obj) # Number of short range type resonance covariances num_short_range = items[4] # Number of long range type resonance covariances num_long_range = items[5] # Read resonance widths, J values, etc channel_radius = {} scattering_radius = {} records = [] for i in range(num_short_range): items, values = endf.get_list_record(file_obj) mpar = items[2] num_res = items[5] num_par_vals = num_res*6 res_values = values[:num_par_vals] cov_values = values[num_par_vals:] energy = res_values[0::6] spin = res_values[1::6] gn = res_values[2::6] gg = res_values[3::6] gfa = res_values[4::6] gfb = res_values[5::6] for i, E in enumerate(energy): records.append([energy[i], spin[i], gn[i], gg[i], gfa[i], gfb[i]]) # Build the upper-triangular covariance matrix cov_dim = mpar*num_res cov = np.zeros([cov_dim, cov_dim]) indices = np.triu_indices(cov_dim) cov[indices] = cov_values # Compact format - Resonances and individual uncertainties followed by # compact correlations elif lcomp == 2: items, 
values = endf.get_list_record(file_obj) num_res = items[5] energy = values[0::12] spin = values[1::12] gn = values[2::12] gg = values[3::12] gfa = values[4::12] gfb = values[5::12] par_unc = [] for i in range(num_res): res_unc = values[i*12+6 : i*12+12] # Delete 0 values (not provided in evaluation) res_unc = [x for x in res_unc if x != 0.0] par_unc.extend(res_unc) records = [] for i, E in enumerate(energy): records.append([energy[i], spin[i], gn[i], gg[i], gfa[i], gfb[i]]) corr = endf.get_intg_record(file_obj) cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc)) # Create pandas DataFrame with resonacne data columns = ['energy', 'J', 'neutronWidth', 'captureWidth', 'fissionWidthA', 'fissionWidthB'] parameters = pd.DataFrame.from_records(records, columns=columns) # Determine mpar (number of parameters for each resonance in # covariance matrix) nparams, params = parameters.shape covsize = cov.shape[0] mpar = int(covsize/nparams) # Add parameters from File 2 parameters = _add_file2_contributions(parameters, resonance.parameters) # Create instance of ReichMooreCovariance rmc = cls(energy_min, energy_max, parameters, cov, mpar, lcomp, resonance) return rmc _FORMALISMS = { 0: ResonanceCovarianceRange, 1: SingleLevelBreitWignerCovariance, 2: MultiLevelBreitWignerCovariance, 3: ReichMooreCovariance # 7: RMatrixLimitedCovariance }
mit
Kazade/NeHe-Website
google_appengine/lib/django-1.5/django/contrib/comments/views/moderation.py
210
5204
from __future__ import absolute_import from django import template from django.conf import settings from django.contrib import comments from django.contrib.auth.decorators import login_required, permission_required from django.contrib.comments import signals from django.contrib.comments.views.utils import next_redirect, confirmation_view from django.shortcuts import get_object_or_404, render_to_response from django.views.decorators.csrf import csrf_protect @csrf_protect @login_required def flag(request, comment_id, next=None): """ Flags a comment. Confirmation on GET, action on POST. Templates: :template:`comments/flag.html`, Context: comment the flagged `comments.comment` object """ comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID) # Flag on POST if request.method == 'POST': perform_flag(request, comment) return next_redirect(request, fallback=next or 'comments-flag-done', c=comment.pk) # Render a form on GET else: return render_to_response('comments/flag.html', {'comment': comment, "next": next}, template.RequestContext(request) ) @csrf_protect @permission_required("comments.can_moderate") def delete(request, comment_id, next=None): """ Deletes a comment. Confirmation on GET, action on POST. Requires the "can moderate comments" permission. Templates: :template:`comments/delete.html`, Context: comment the flagged `comments.comment` object """ comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID) # Delete on POST if request.method == 'POST': # Flag the comment as deleted instead of actually deleting it. 
perform_delete(request, comment) return next_redirect(request, fallback=next or 'comments-delete-done', c=comment.pk) # Render a form on GET else: return render_to_response('comments/delete.html', {'comment': comment, "next": next}, template.RequestContext(request) ) @csrf_protect @permission_required("comments.can_moderate") def approve(request, comment_id, next=None): """ Approve a comment (that is, mark it as public and non-removed). Confirmation on GET, action on POST. Requires the "can moderate comments" permission. Templates: :template:`comments/approve.html`, Context: comment the `comments.comment` object for approval """ comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID) # Delete on POST if request.method == 'POST': # Flag the comment as approved. perform_approve(request, comment) return next_redirect(request, fallback=next or 'comments-approve-done', c=comment.pk) # Render a form on GET else: return render_to_response('comments/approve.html', {'comment': comment, "next": next}, template.RequestContext(request) ) # The following functions actually perform the various flag/aprove/delete # actions. They've been broken out into separate functions to that they # may be called from admin actions. def perform_flag(request, comment): """ Actually perform the flagging of a comment from a request. 
""" flag, created = comments.models.CommentFlag.objects.get_or_create( comment = comment, user = request.user, flag = comments.models.CommentFlag.SUGGEST_REMOVAL ) signals.comment_was_flagged.send( sender = comment.__class__, comment = comment, flag = flag, created = created, request = request, ) def perform_delete(request, comment): flag, created = comments.models.CommentFlag.objects.get_or_create( comment = comment, user = request.user, flag = comments.models.CommentFlag.MODERATOR_DELETION ) comment.is_removed = True comment.save() signals.comment_was_flagged.send( sender = comment.__class__, comment = comment, flag = flag, created = created, request = request, ) def perform_approve(request, comment): flag, created = comments.models.CommentFlag.objects.get_or_create( comment = comment, user = request.user, flag = comments.models.CommentFlag.MODERATOR_APPROVAL, ) comment.is_removed = False comment.is_public = True comment.save() signals.comment_was_flagged.send( sender = comment.__class__, comment = comment, flag = flag, created = created, request = request, ) # Confirmation views. flag_done = confirmation_view( template = "comments/flagged.html", doc = 'Displays a "comment was flagged" success page.' ) delete_done = confirmation_view( template = "comments/deleted.html", doc = 'Displays a "comment was deleted" success page.' ) approve_done = confirmation_view( template = "comments/approved.html", doc = 'Displays a "comment was approved" success page.' )
bsd-3-clause
SOKP/external_chromium_org
chrome/common/extensions/docs/server2/persistent_object_store_test.py
117
1737
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from persistent_object_store import PersistentObjectStore import unittest class PersistentObjectStoreTest(unittest.TestCase): '''Tests for PersistentObjectStore. These are all a bit contrived because ultimately it comes down to our use of the appengine datastore API, and we mock it out for tests anyway. Who knows whether it's correct. ''' def testPersistence(self): # First object store. object_store = PersistentObjectStore('test') object_store.Set('key', 'value') self.assertEqual('value', object_store.Get('key').Get()) # Other object store should have it too. another_object_store = PersistentObjectStore('test') self.assertEqual('value', another_object_store.Get('key').Get()) # Setting in the other store should set in both. mapping = {'key2': 'value2', 'key3': 'value3'} another_object_store.SetMulti(mapping) self.assertEqual(mapping, object_store.GetMulti(mapping.keys()).Get()) self.assertEqual(mapping, another_object_store.GetMulti(mapping.keys()).Get()) # And delete. object_store.DelMulti(mapping.keys()) self.assertEqual({}, object_store.GetMulti(mapping.keys()).Get()) self.assertEqual({}, another_object_store.GetMulti(mapping.keys()).Get()) def testNamespaceIsolation(self): object_store = PersistentObjectStore('test') another_object_store = PersistentObjectStore('another') object_store.Set('key', 'value') self.assertEqual(None, another_object_store.Get('key').Get()) if __name__ == '__main__': unittest.main()
bsd-3-clause
tvalacarta/tvalacarta
python/main-classic/servers/telemadrid.py
1
2312
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for telemadrid
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------

import urlparse,urllib2,urllib,re
import os

from core import scrapertools
from core import logger
from core import config
from lib import youtube_dl

def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the playable stream URLs for a telemadrid page.

    Scrapes the Brightcove player attributes (data-account, data-player,
    data-video-id) out of the page HTML, builds the Brightcove embed URL
    and lets youtube_dl extract the final stream URL(s).

    Returns a list of [label, url] pairs (label is always "(.m3u8)"),
    matching the contract of the other server connectors in this package.
    The premium/user/password arguments are unused but kept for interface
    compatibility with the generic connector signature.
    """
    logger.info("tvalacarta.servers.telemadrid get_video_url(page_url='%s')" % page_url)

    # Example of the markup being scraped:
    '''
    <video id="5c04f64ee88f4"
        data-video-id="5971764264001"
        data-account="104403117001"
        data-player="SkevQBitbl"
        data-embed="default"
        class="video-js" controls></video>
    '''
    #http://players.brightcove.net/104403117001/SkevQBitbl_default/index.html?videoId=5971764264001
    data = scrapertools.cache_page(page_url)

    account = scrapertools.find_single_match(data,'data-account="([^"]+)"')
    logger.info("account="+account)

    player = scrapertools.find_single_match(data,'data-player="([^"]+)"')
    # Fixed copy-paste bug: this line previously logged `account` under the
    # "player" label, which made the log useless for debugging bad scrapes.
    logger.info("player="+player)

    video_id = scrapertools.find_single_match(data,'data-video-id="([^"]+)"')
    logger.info("video_id="+video_id)

    api_url = "http://players.brightcove.net/"+account+"/"+player+"_default/index.html?videoId="+video_id

    ydl = youtube_dl.YoutubeDL({'outtmpl': u'%(id)s%(ext)s'})
    result = ydl.extract_info(api_url, download=False)
    logger.info("tvalacarta.servers.telemadrid get_video_url result="+repr(result))

    video_urls = []
    if "ext" in result and "url" in result:
        # Single-format result: one direct stream URL.
        video_urls.append(["(.m3u8)", scrapertools.safe_unicode(result['url']).encode('utf-8')])
    else:
        # Playlist-style result: one URL per entry.
        if "entries" in result:
            for entry in result["entries"]:
                video_urls.append(["(.m3u8)", scrapertools.safe_unicode(entry['url']).encode('utf-8')])

    for video_url in video_urls:
        logger.info("tvalacarta.servers.telemadrid %s - %s" % (video_url[0],video_url[1]))

    return video_urls

# Find videos from this server inside the given text.
def find_videos(data):
    # telemadrid streams are resolved directly from the page URL by
    # get_video_url(), so there is no embedded-URL pattern to scan for:
    # this connector deliberately never matches anything in free text.
    devuelve = []

    return devuelve
gpl-3.0
openqt/algorithms
leetcode/python/lc547-friend-circles.py
1
1972
# coding=utf-8
import unittest

"""547. Friend Circles
https://leetcode.com/problems/friend-circles/description/

There are **N** students in a class. Some of them are friends, while some
are not. Their friendship is transitive in nature. For example, if A is a
**direct** friend of B, and B is a **direct** friend of C, then A is an
**indirect** friend of C. And we defined a friend circle is a group of
students who are direct or indirect friends.

Given a **N*N** matrix **M** representing the friend relationship between
students in the class. If M[i][j] = 1, then the ith and jth students are
**direct** friends with each other, otherwise not. And you have to output
the total number of friend circles among all the students.

**Example 1:**

    **Input:**
    [[1,1,0],
     [1,1,0],
     [0,0,1]]

    **Output:** 2

    **Explanation:** The 0th and 1st students are direct friends, so they
    are in a friend circle. The 2nd student himself is in a friend circle.
    So return 2.

**Example 2:**

    **Input:**
    [[1,1,0],
     [1,1,1],
     [0,1,1]]

    **Output:** 1

    **Explanation:** The 0th and 1st students are direct friends, the 1st
    and 2nd students are direct friends, so the 0th and 2nd students are
    indirect friends. All of them are in the same friend circle, so return 1.

**Note:**

    1. N is in range [1,200].
    2. M[i][i] = 1 for all students.
    3. If M[i][j] = 1, then M[j][i] = 1.

Similar Questions:
    Number of Connected Components in an Undirected Graph
    (number-of-connected-components-in-an-undirected-graph)
    Judge Route Circle (judge-route-circle)
    Sentence Similarity (sentence-similarity)
    Sentence Similarity II (sentence-similarity-ii)
"""


class Solution(object):
    def findCircleNum(self, M):
        """Return the number of friend circles (connected components) in M.

        The original stub had no implementation (it fell through returning
        None). Each unvisited student seeds a new circle, which is then
        flood-filled with an iterative DFS — iterative so that a friendship
        chain of up to 200 students cannot overflow the recursion stack.

        :type M: List[List[int]]
        :rtype: int
        """
        if not M:
            # Defensive: the problem guarantees N >= 1, but an empty matrix
            # trivially has zero circles.
            return 0
        n = len(M)
        seen = [False] * n
        circles = 0
        for start in range(n):
            if seen[start]:
                continue
            # New component discovered.
            circles += 1
            seen[start] = True
            stack = [start]
            while stack:
                i = stack.pop()
                for j in range(n):
                    if M[i][j] == 1 and not seen[j]:
                        seen[j] = True
                        stack.append(j)
        return circles

    def test(self):
        # Smoke test covering both worked examples from the statement.
        assert self.findCircleNum([[1, 1, 0], [1, 1, 0], [0, 0, 1]]) == 2
        assert self.findCircleNum([[1, 1, 0], [1, 1, 1], [0, 1, 1]]) == 1


if __name__ == "__main__":
    unittest.main()
gpl-3.0
sss/calibre-at-bzr
src/calibre/gui2/store/stores/bookoteka_plugin.py
3
2754
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

store_version = 1  # Needed for dynamic plugin loading

__license__ = 'GPL 3'
__copyright__ = '2011, Tomasz Długosz <tomek3d@gmail.com>'
__docformat__ = 'restructuredtext en'

import urllib
from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog


class BookotekaStore(BasicStoreConfig, StorePlugin):
    """Calibre store plugin for bookoteka.pl (Polish ebook store)."""

    def open(self, parent=None, detail_item=None, external=False):
        """Open the store front page, or a specific item's detail page.

        Depending on the 'open_external' config flag (or the *external*
        argument), the page is shown either in the system browser or in
        calibre's embedded web-store dialog.
        """
        url = 'http://bookoteka.pl/ebooki'
        detail_url = detail_item if detail_item else None

        if external or self.config.get('open_external', False):
            # System browser: prefer the detail page when one was given.
            open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
        else:
            dialog = WebStoreDialog(self.gui, url, parent, detail_url)
            dialog.setWindowTitle(self.name)
            dialog.set_tags(self.config.get('tags', ''))
            dialog.exec_()

    def search(self, query, max_results=10, timeout=60):
        """Yield up to *max_results* :class:`SearchResult` objects for *query*.

        Scrapes the bookoteka.pl ebook listing page (cat=1&hp=1&type=1
        restricts the listing to ebooks) and extracts title, author, price,
        cover and available formats from each result row.
        """
        url = 'http://bookoteka.pl/list?search=' + urllib.quote_plus(query) + '&cat=1&hp=1&type=1'
        br = browser()

        remaining = max_results
        with closing(br.open(url, timeout=timeout)) as response:
            doc = html.fromstring(response.read())

            for node in doc.xpath('//li[@class="EBOOK"]'):
                if remaining <= 0:
                    break

                # Relative link to the item's detail page; rows without one
                # are not real results and are skipped.
                detail_href = ''.join(node.xpath('.//a[@class="item_link"]/@href'))
                if not detail_href:
                    continue

                cover_url = ''.join(node.xpath('.//a[@class="item_link"]/img/@src'))
                title = ''.join(node.xpath('.//div[@class="shelf_title"]/a/text()'))
                author = ''.join(node.xpath('.//div[@class="shelf_authors"][1]/text()'))
                # Polish locale uses a comma as the decimal separator.
                price = ''.join(node.xpath('.//span[@class="EBOOK"]/text()')).replace('.', ',')
                formats = ', '.join(node.xpath('.//a[@class="fancybox protected"]/text()'))

                remaining -= 1

                result = SearchResult()
                result.cover_url = 'http://bookoteka.pl' + cover_url
                result.title = title.strip()
                result.author = author.strip()
                result.price = price
                result.detail_item = 'http://bookoteka.pl' + detail_href.strip()
                result.drm = SearchResult.DRM_UNLOCKED
                result.formats = formats.strip()
                yield result
gpl-3.0
repotvsupertuga/tvsupertuga.repository
instal/script.module.requests/lib/requests/packages/idna/idnadata.py
155
35172
# This file is automatically generated by build-idnadata.py scripts = { 'Greek': ( (0x370, 0x374), (0x375, 0x378), (0x37a, 0x37e), (0x384, 0x385), (0x386, 0x387), (0x388, 0x38b), (0x38c, 0x38d), (0x38e, 0x3a2), (0x3a3, 0x3e2), (0x3f0, 0x400), (0x1d26, 0x1d2b), (0x1d5d, 0x1d62), (0x1d66, 0x1d6b), (0x1dbf, 0x1dc0), (0x1f00, 0x1f16), (0x1f18, 0x1f1e), (0x1f20, 0x1f46), (0x1f48, 0x1f4e), (0x1f50, 0x1f58), (0x1f59, 0x1f5a), (0x1f5b, 0x1f5c), (0x1f5d, 0x1f5e), (0x1f5f, 0x1f7e), (0x1f80, 0x1fb5), (0x1fb6, 0x1fc5), (0x1fc6, 0x1fd4), (0x1fd6, 0x1fdc), (0x1fdd, 0x1ff0), (0x1ff2, 0x1ff5), (0x1ff6, 0x1fff), (0x2126, 0x2127), (0x10140, 0x1018b), (0x1d200, 0x1d246), ), 'Han': ( (0x2e80, 0x2e9a), (0x2e9b, 0x2ef4), (0x2f00, 0x2fd6), (0x3005, 0x3006), (0x3007, 0x3008), (0x3021, 0x302a), (0x3038, 0x303c), (0x3400, 0x4db6), (0x4e00, 0x9fcd), (0xf900, 0xfa6e), (0xfa70, 0xfada), (0x20000, 0x2a6d7), (0x2a700, 0x2b735), (0x2b740, 0x2b81e), (0x2f800, 0x2fa1e), ), 'Hebrew': ( (0x591, 0x5c8), (0x5d0, 0x5eb), (0x5f0, 0x5f5), (0xfb1d, 0xfb37), (0xfb38, 0xfb3d), (0xfb3e, 0xfb3f), (0xfb40, 0xfb42), (0xfb43, 0xfb45), (0xfb46, 0xfb50), ), 'Hiragana': ( (0x3041, 0x3097), (0x309d, 0x30a0), (0x1b001, 0x1b002), (0x1f200, 0x1f201), ), 'Katakana': ( (0x30a1, 0x30fb), (0x30fd, 0x3100), (0x31f0, 0x3200), (0x32d0, 0x32ff), (0x3300, 0x3358), (0xff66, 0xff70), (0xff71, 0xff9e), (0x1b000, 0x1b001), ), } joining_types = { 0x600: 'U', 0x601: 'U', 0x602: 'U', 0x603: 'U', 0x604: 'U', 0x608: 'U', 0x60b: 'U', 0x620: 'D', 0x621: 'U', 0x622: 'R', 0x623: 'R', 0x624: 'R', 0x625: 'R', 0x626: 'D', 0x627: 'R', 0x628: 'D', 0x629: 'R', 0x62a: 'D', 0x62b: 'D', 0x62c: 'D', 0x62d: 'D', 0x62e: 'D', 0x62f: 'R', 0x630: 'R', 0x631: 'R', 0x632: 'R', 0x633: 'D', 0x634: 'D', 0x635: 'D', 0x636: 'D', 0x637: 'D', 0x638: 'D', 0x639: 'D', 0x63a: 'D', 0x63b: 'D', 0x63c: 'D', 0x63d: 'D', 0x63e: 'D', 0x63f: 'D', 0x640: 'C', 0x641: 'D', 0x642: 'D', 0x643: 'D', 0x644: 'D', 0x645: 'D', 0x646: 'D', 0x647: 'D', 0x648: 'R', 0x649: 'D', 0x64a: 
'D', 0x66e: 'D', 0x66f: 'D', 0x671: 'R', 0x672: 'R', 0x673: 'R', 0x674: 'U', 0x675: 'R', 0x676: 'R', 0x677: 'R', 0x678: 'D', 0x679: 'D', 0x67a: 'D', 0x67b: 'D', 0x67c: 'D', 0x67d: 'D', 0x67e: 'D', 0x67f: 'D', 0x680: 'D', 0x681: 'D', 0x682: 'D', 0x683: 'D', 0x684: 'D', 0x685: 'D', 0x686: 'D', 0x687: 'D', 0x688: 'R', 0x689: 'R', 0x68a: 'R', 0x68b: 'R', 0x68c: 'R', 0x68d: 'R', 0x68e: 'R', 0x68f: 'R', 0x690: 'R', 0x691: 'R', 0x692: 'R', 0x693: 'R', 0x694: 'R', 0x695: 'R', 0x696: 'R', 0x697: 'R', 0x698: 'R', 0x699: 'R', 0x69a: 'D', 0x69b: 'D', 0x69c: 'D', 0x69d: 'D', 0x69e: 'D', 0x69f: 'D', 0x6a0: 'D', 0x6a1: 'D', 0x6a2: 'D', 0x6a3: 'D', 0x6a4: 'D', 0x6a5: 'D', 0x6a6: 'D', 0x6a7: 'D', 0x6a8: 'D', 0x6a9: 'D', 0x6aa: 'D', 0x6ab: 'D', 0x6ac: 'D', 0x6ad: 'D', 0x6ae: 'D', 0x6af: 'D', 0x6b0: 'D', 0x6b1: 'D', 0x6b2: 'D', 0x6b3: 'D', 0x6b4: 'D', 0x6b5: 'D', 0x6b6: 'D', 0x6b7: 'D', 0x6b8: 'D', 0x6b9: 'D', 0x6ba: 'D', 0x6bb: 'D', 0x6bc: 'D', 0x6bd: 'D', 0x6be: 'D', 0x6bf: 'D', 0x6c0: 'R', 0x6c1: 'D', 0x6c2: 'D', 0x6c3: 'R', 0x6c4: 'R', 0x6c5: 'R', 0x6c6: 'R', 0x6c7: 'R', 0x6c8: 'R', 0x6c9: 'R', 0x6ca: 'R', 0x6cb: 'R', 0x6cc: 'D', 0x6cd: 'R', 0x6ce: 'D', 0x6cf: 'R', 0x6d0: 'D', 0x6d1: 'D', 0x6d2: 'R', 0x6d3: 'R', 0x6d5: 'R', 0x6dd: 'U', 0x6ee: 'R', 0x6ef: 'R', 0x6fa: 'D', 0x6fb: 'D', 0x6fc: 'D', 0x6ff: 'D', 0x710: 'R', 0x712: 'D', 0x713: 'D', 0x714: 'D', 0x715: 'R', 0x716: 'R', 0x717: 'R', 0x718: 'R', 0x719: 'R', 0x71a: 'D', 0x71b: 'D', 0x71c: 'D', 0x71d: 'D', 0x71e: 'R', 0x71f: 'D', 0x720: 'D', 0x721: 'D', 0x722: 'D', 0x723: 'D', 0x724: 'D', 0x725: 'D', 0x726: 'D', 0x727: 'D', 0x728: 'R', 0x729: 'D', 0x72a: 'R', 0x72b: 'D', 0x72c: 'R', 0x72d: 'D', 0x72e: 'D', 0x72f: 'R', 0x74d: 'R', 0x74e: 'D', 0x74f: 'D', 0x750: 'D', 0x751: 'D', 0x752: 'D', 0x753: 'D', 0x754: 'D', 0x755: 'D', 0x756: 'D', 0x757: 'D', 0x758: 'D', 0x759: 'R', 0x75a: 'R', 0x75b: 'R', 0x75c: 'D', 0x75d: 'D', 0x75e: 'D', 0x75f: 'D', 0x760: 'D', 0x761: 'D', 0x762: 'D', 0x763: 'D', 0x764: 'D', 0x765: 'D', 0x766: 'D', 
0x767: 'D', 0x768: 'D', 0x769: 'D', 0x76a: 'D', 0x76b: 'R', 0x76c: 'R', 0x76d: 'D', 0x76e: 'D', 0x76f: 'D', 0x770: 'D', 0x771: 'R', 0x772: 'D', 0x773: 'R', 0x774: 'R', 0x775: 'D', 0x776: 'D', 0x777: 'D', 0x778: 'R', 0x779: 'R', 0x77a: 'D', 0x77b: 'D', 0x77c: 'D', 0x77d: 'D', 0x77e: 'D', 0x77f: 'D', 0x7ca: 'D', 0x7cb: 'D', 0x7cc: 'D', 0x7cd: 'D', 0x7ce: 'D', 0x7cf: 'D', 0x7d0: 'D', 0x7d1: 'D', 0x7d2: 'D', 0x7d3: 'D', 0x7d4: 'D', 0x7d5: 'D', 0x7d6: 'D', 0x7d7: 'D', 0x7d8: 'D', 0x7d9: 'D', 0x7da: 'D', 0x7db: 'D', 0x7dc: 'D', 0x7dd: 'D', 0x7de: 'D', 0x7df: 'D', 0x7e0: 'D', 0x7e1: 'D', 0x7e2: 'D', 0x7e3: 'D', 0x7e4: 'D', 0x7e5: 'D', 0x7e6: 'D', 0x7e7: 'D', 0x7e8: 'D', 0x7e9: 'D', 0x7ea: 'D', 0x7fa: 'C', 0x840: 'R', 0x841: 'D', 0x842: 'D', 0x843: 'D', 0x844: 'D', 0x845: 'D', 0x846: 'R', 0x847: 'D', 0x848: 'D', 0x849: 'R', 0x84a: 'D', 0x84b: 'D', 0x84c: 'D', 0x84d: 'D', 0x84e: 'D', 0x84f: 'R', 0x850: 'D', 0x851: 'D', 0x852: 'D', 0x853: 'D', 0x854: 'R', 0x855: 'D', 0x856: 'U', 0x857: 'U', 0x858: 'U', 0x8a0: 'D', 0x8a2: 'D', 0x8a3: 'D', 0x8a4: 'D', 0x8a5: 'D', 0x8a6: 'D', 0x8a7: 'D', 0x8a8: 'D', 0x8a9: 'D', 0x8aa: 'R', 0x8ab: 'R', 0x8ac: 'R', 0x1806: 'U', 0x1807: 'D', 0x180a: 'C', 0x180e: 'U', 0x1820: 'D', 0x1821: 'D', 0x1822: 'D', 0x1823: 'D', 0x1824: 'D', 0x1825: 'D', 0x1826: 'D', 0x1827: 'D', 0x1828: 'D', 0x1829: 'D', 0x182a: 'D', 0x182b: 'D', 0x182c: 'D', 0x182d: 'D', 0x182e: 'D', 0x182f: 'D', 0x1830: 'D', 0x1831: 'D', 0x1832: 'D', 0x1833: 'D', 0x1834: 'D', 0x1835: 'D', 0x1836: 'D', 0x1837: 'D', 0x1838: 'D', 0x1839: 'D', 0x183a: 'D', 0x183b: 'D', 0x183c: 'D', 0x183d: 'D', 0x183e: 'D', 0x183f: 'D', 0x1840: 'D', 0x1841: 'D', 0x1842: 'D', 0x1843: 'D', 0x1844: 'D', 0x1845: 'D', 0x1846: 'D', 0x1847: 'D', 0x1848: 'D', 0x1849: 'D', 0x184a: 'D', 0x184b: 'D', 0x184c: 'D', 0x184d: 'D', 0x184e: 'D', 0x184f: 'D', 0x1850: 'D', 0x1851: 'D', 0x1852: 'D', 0x1853: 'D', 0x1854: 'D', 0x1855: 'D', 0x1856: 'D', 0x1857: 'D', 0x1858: 'D', 0x1859: 'D', 0x185a: 'D', 0x185b: 'D', 0x185c: 'D', 
0x185d: 'D', 0x185e: 'D', 0x185f: 'D', 0x1860: 'D', 0x1861: 'D', 0x1862: 'D', 0x1863: 'D', 0x1864: 'D', 0x1865: 'D', 0x1866: 'D', 0x1867: 'D', 0x1868: 'D', 0x1869: 'D', 0x186a: 'D', 0x186b: 'D', 0x186c: 'D', 0x186d: 'D', 0x186e: 'D', 0x186f: 'D', 0x1870: 'D', 0x1871: 'D', 0x1872: 'D', 0x1873: 'D', 0x1874: 'D', 0x1875: 'D', 0x1876: 'D', 0x1877: 'D', 0x1880: 'U', 0x1881: 'U', 0x1882: 'U', 0x1883: 'U', 0x1884: 'U', 0x1885: 'U', 0x1886: 'U', 0x1887: 'D', 0x1888: 'D', 0x1889: 'D', 0x188a: 'D', 0x188b: 'D', 0x188c: 'D', 0x188d: 'D', 0x188e: 'D', 0x188f: 'D', 0x1890: 'D', 0x1891: 'D', 0x1892: 'D', 0x1893: 'D', 0x1894: 'D', 0x1895: 'D', 0x1896: 'D', 0x1897: 'D', 0x1898: 'D', 0x1899: 'D', 0x189a: 'D', 0x189b: 'D', 0x189c: 'D', 0x189d: 'D', 0x189e: 'D', 0x189f: 'D', 0x18a0: 'D', 0x18a1: 'D', 0x18a2: 'D', 0x18a3: 'D', 0x18a4: 'D', 0x18a5: 'D', 0x18a6: 'D', 0x18a7: 'D', 0x18a8: 'D', 0x18aa: 'D', 0x200c: 'U', 0x200d: 'C', 0x2066: 'U', 0x2067: 'U', 0x2068: 'U', 0x2069: 'U', 0xa840: 'D', 0xa841: 'D', 0xa842: 'D', 0xa843: 'D', 0xa844: 'D', 0xa845: 'D', 0xa846: 'D', 0xa847: 'D', 0xa848: 'D', 0xa849: 'D', 0xa84a: 'D', 0xa84b: 'D', 0xa84c: 'D', 0xa84d: 'D', 0xa84e: 'D', 0xa84f: 'D', 0xa850: 'D', 0xa851: 'D', 0xa852: 'D', 0xa853: 'D', 0xa854: 'D', 0xa855: 'D', 0xa856: 'D', 0xa857: 'D', 0xa858: 'D', 0xa859: 'D', 0xa85a: 'D', 0xa85b: 'D', 0xa85c: 'D', 0xa85d: 'D', 0xa85e: 'D', 0xa85f: 'D', 0xa860: 'D', 0xa861: 'D', 0xa862: 'D', 0xa863: 'D', 0xa864: 'D', 0xa865: 'D', 0xa866: 'D', 0xa867: 'D', 0xa868: 'D', 0xa869: 'D', 0xa86a: 'D', 0xa86b: 'D', 0xa86c: 'D', 0xa86d: 'D', 0xa86e: 'D', 0xa86f: 'D', 0xa870: 'D', 0xa871: 'D', 0xa872: 'L', 0xa873: 'U', } codepoint_classes = { 'PVALID': ( (0x2d, 0x2e), (0x30, 0x3a), (0x61, 0x7b), (0xdf, 0xf7), (0xf8, 0x100), (0x101, 0x102), (0x103, 0x104), (0x105, 0x106), (0x107, 0x108), (0x109, 0x10a), (0x10b, 0x10c), (0x10d, 0x10e), (0x10f, 0x110), (0x111, 0x112), (0x113, 0x114), (0x115, 0x116), (0x117, 0x118), (0x119, 0x11a), (0x11b, 0x11c), (0x11d, 0x11e), 
(0x11f, 0x120), (0x121, 0x122), (0x123, 0x124), (0x125, 0x126), (0x127, 0x128), (0x129, 0x12a), (0x12b, 0x12c), (0x12d, 0x12e), (0x12f, 0x130), (0x131, 0x132), (0x135, 0x136), (0x137, 0x139), (0x13a, 0x13b), (0x13c, 0x13d), (0x13e, 0x13f), (0x142, 0x143), (0x144, 0x145), (0x146, 0x147), (0x148, 0x149), (0x14b, 0x14c), (0x14d, 0x14e), (0x14f, 0x150), (0x151, 0x152), (0x153, 0x154), (0x155, 0x156), (0x157, 0x158), (0x159, 0x15a), (0x15b, 0x15c), (0x15d, 0x15e), (0x15f, 0x160), (0x161, 0x162), (0x163, 0x164), (0x165, 0x166), (0x167, 0x168), (0x169, 0x16a), (0x16b, 0x16c), (0x16d, 0x16e), (0x16f, 0x170), (0x171, 0x172), (0x173, 0x174), (0x175, 0x176), (0x177, 0x178), (0x17a, 0x17b), (0x17c, 0x17d), (0x17e, 0x17f), (0x180, 0x181), (0x183, 0x184), (0x185, 0x186), (0x188, 0x189), (0x18c, 0x18e), (0x192, 0x193), (0x195, 0x196), (0x199, 0x19c), (0x19e, 0x19f), (0x1a1, 0x1a2), (0x1a3, 0x1a4), (0x1a5, 0x1a6), (0x1a8, 0x1a9), (0x1aa, 0x1ac), (0x1ad, 0x1ae), (0x1b0, 0x1b1), (0x1b4, 0x1b5), (0x1b6, 0x1b7), (0x1b9, 0x1bc), (0x1bd, 0x1c4), (0x1ce, 0x1cf), (0x1d0, 0x1d1), (0x1d2, 0x1d3), (0x1d4, 0x1d5), (0x1d6, 0x1d7), (0x1d8, 0x1d9), (0x1da, 0x1db), (0x1dc, 0x1de), (0x1df, 0x1e0), (0x1e1, 0x1e2), (0x1e3, 0x1e4), (0x1e5, 0x1e6), (0x1e7, 0x1e8), (0x1e9, 0x1ea), (0x1eb, 0x1ec), (0x1ed, 0x1ee), (0x1ef, 0x1f1), (0x1f5, 0x1f6), (0x1f9, 0x1fa), (0x1fb, 0x1fc), (0x1fd, 0x1fe), (0x1ff, 0x200), (0x201, 0x202), (0x203, 0x204), (0x205, 0x206), (0x207, 0x208), (0x209, 0x20a), (0x20b, 0x20c), (0x20d, 0x20e), (0x20f, 0x210), (0x211, 0x212), (0x213, 0x214), (0x215, 0x216), (0x217, 0x218), (0x219, 0x21a), (0x21b, 0x21c), (0x21d, 0x21e), (0x21f, 0x220), (0x221, 0x222), (0x223, 0x224), (0x225, 0x226), (0x227, 0x228), (0x229, 0x22a), (0x22b, 0x22c), (0x22d, 0x22e), (0x22f, 0x230), (0x231, 0x232), (0x233, 0x23a), (0x23c, 0x23d), (0x23f, 0x241), (0x242, 0x243), (0x247, 0x248), (0x249, 0x24a), (0x24b, 0x24c), (0x24d, 0x24e), (0x24f, 0x2b0), (0x2b9, 0x2c2), (0x2c6, 0x2d2), (0x2ec, 0x2ed), (0x2ee, 0x2ef), 
(0x300, 0x340), (0x342, 0x343), (0x346, 0x34f), (0x350, 0x370), (0x371, 0x372), (0x373, 0x374), (0x377, 0x378), (0x37b, 0x37e), (0x390, 0x391), (0x3ac, 0x3cf), (0x3d7, 0x3d8), (0x3d9, 0x3da), (0x3db, 0x3dc), (0x3dd, 0x3de), (0x3df, 0x3e0), (0x3e1, 0x3e2), (0x3e3, 0x3e4), (0x3e5, 0x3e6), (0x3e7, 0x3e8), (0x3e9, 0x3ea), (0x3eb, 0x3ec), (0x3ed, 0x3ee), (0x3ef, 0x3f0), (0x3f3, 0x3f4), (0x3f8, 0x3f9), (0x3fb, 0x3fd), (0x430, 0x460), (0x461, 0x462), (0x463, 0x464), (0x465, 0x466), (0x467, 0x468), (0x469, 0x46a), (0x46b, 0x46c), (0x46d, 0x46e), (0x46f, 0x470), (0x471, 0x472), (0x473, 0x474), (0x475, 0x476), (0x477, 0x478), (0x479, 0x47a), (0x47b, 0x47c), (0x47d, 0x47e), (0x47f, 0x480), (0x481, 0x482), (0x483, 0x488), (0x48b, 0x48c), (0x48d, 0x48e), (0x48f, 0x490), (0x491, 0x492), (0x493, 0x494), (0x495, 0x496), (0x497, 0x498), (0x499, 0x49a), (0x49b, 0x49c), (0x49d, 0x49e), (0x49f, 0x4a0), (0x4a1, 0x4a2), (0x4a3, 0x4a4), (0x4a5, 0x4a6), (0x4a7, 0x4a8), (0x4a9, 0x4aa), (0x4ab, 0x4ac), (0x4ad, 0x4ae), (0x4af, 0x4b0), (0x4b1, 0x4b2), (0x4b3, 0x4b4), (0x4b5, 0x4b6), (0x4b7, 0x4b8), (0x4b9, 0x4ba), (0x4bb, 0x4bc), (0x4bd, 0x4be), (0x4bf, 0x4c0), (0x4c2, 0x4c3), (0x4c4, 0x4c5), (0x4c6, 0x4c7), (0x4c8, 0x4c9), (0x4ca, 0x4cb), (0x4cc, 0x4cd), (0x4ce, 0x4d0), (0x4d1, 0x4d2), (0x4d3, 0x4d4), (0x4d5, 0x4d6), (0x4d7, 0x4d8), (0x4d9, 0x4da), (0x4db, 0x4dc), (0x4dd, 0x4de), (0x4df, 0x4e0), (0x4e1, 0x4e2), (0x4e3, 0x4e4), (0x4e5, 0x4e6), (0x4e7, 0x4e8), (0x4e9, 0x4ea), (0x4eb, 0x4ec), (0x4ed, 0x4ee), (0x4ef, 0x4f0), (0x4f1, 0x4f2), (0x4f3, 0x4f4), (0x4f5, 0x4f6), (0x4f7, 0x4f8), (0x4f9, 0x4fa), (0x4fb, 0x4fc), (0x4fd, 0x4fe), (0x4ff, 0x500), (0x501, 0x502), (0x503, 0x504), (0x505, 0x506), (0x507, 0x508), (0x509, 0x50a), (0x50b, 0x50c), (0x50d, 0x50e), (0x50f, 0x510), (0x511, 0x512), (0x513, 0x514), (0x515, 0x516), (0x517, 0x518), (0x519, 0x51a), (0x51b, 0x51c), (0x51d, 0x51e), (0x51f, 0x520), (0x521, 0x522), (0x523, 0x524), (0x525, 0x526), (0x527, 0x528), (0x559, 0x55a), (0x561, 0x587), 
(0x591, 0x5be), (0x5bf, 0x5c0), (0x5c1, 0x5c3), (0x5c4, 0x5c6), (0x5c7, 0x5c8), (0x5d0, 0x5eb), (0x5f0, 0x5f3), (0x610, 0x61b), (0x620, 0x640), (0x641, 0x660), (0x66e, 0x675), (0x679, 0x6d4), (0x6d5, 0x6dd), (0x6df, 0x6e9), (0x6ea, 0x6f0), (0x6fa, 0x700), (0x710, 0x74b), (0x74d, 0x7b2), (0x7c0, 0x7f6), (0x800, 0x82e), (0x840, 0x85c), (0x8a0, 0x8a1), (0x8a2, 0x8ad), (0x8e4, 0x8ff), (0x900, 0x958), (0x960, 0x964), (0x966, 0x970), (0x971, 0x978), (0x979, 0x980), (0x981, 0x984), (0x985, 0x98d), (0x98f, 0x991), (0x993, 0x9a9), (0x9aa, 0x9b1), (0x9b2, 0x9b3), (0x9b6, 0x9ba), (0x9bc, 0x9c5), (0x9c7, 0x9c9), (0x9cb, 0x9cf), (0x9d7, 0x9d8), (0x9e0, 0x9e4), (0x9e6, 0x9f2), (0xa01, 0xa04), (0xa05, 0xa0b), (0xa0f, 0xa11), (0xa13, 0xa29), (0xa2a, 0xa31), (0xa32, 0xa33), (0xa35, 0xa36), (0xa38, 0xa3a), (0xa3c, 0xa3d), (0xa3e, 0xa43), (0xa47, 0xa49), (0xa4b, 0xa4e), (0xa51, 0xa52), (0xa5c, 0xa5d), (0xa66, 0xa76), (0xa81, 0xa84), (0xa85, 0xa8e), (0xa8f, 0xa92), (0xa93, 0xaa9), (0xaaa, 0xab1), (0xab2, 0xab4), (0xab5, 0xaba), (0xabc, 0xac6), (0xac7, 0xaca), (0xacb, 0xace), (0xad0, 0xad1), (0xae0, 0xae4), (0xae6, 0xaf0), (0xb01, 0xb04), (0xb05, 0xb0d), (0xb0f, 0xb11), (0xb13, 0xb29), (0xb2a, 0xb31), (0xb32, 0xb34), (0xb35, 0xb3a), (0xb3c, 0xb45), (0xb47, 0xb49), (0xb4b, 0xb4e), (0xb56, 0xb58), (0xb5f, 0xb64), (0xb66, 0xb70), (0xb71, 0xb72), (0xb82, 0xb84), (0xb85, 0xb8b), (0xb8e, 0xb91), (0xb92, 0xb96), (0xb99, 0xb9b), (0xb9c, 0xb9d), (0xb9e, 0xba0), (0xba3, 0xba5), (0xba8, 0xbab), (0xbae, 0xbba), (0xbbe, 0xbc3), (0xbc6, 0xbc9), (0xbca, 0xbce), (0xbd0, 0xbd1), (0xbd7, 0xbd8), (0xbe6, 0xbf0), (0xc01, 0xc04), (0xc05, 0xc0d), (0xc0e, 0xc11), (0xc12, 0xc29), (0xc2a, 0xc34), (0xc35, 0xc3a), (0xc3d, 0xc45), (0xc46, 0xc49), (0xc4a, 0xc4e), (0xc55, 0xc57), (0xc58, 0xc5a), (0xc60, 0xc64), (0xc66, 0xc70), (0xc82, 0xc84), (0xc85, 0xc8d), (0xc8e, 0xc91), (0xc92, 0xca9), (0xcaa, 0xcb4), (0xcb5, 0xcba), (0xcbc, 0xcc5), (0xcc6, 0xcc9), (0xcca, 0xcce), (0xcd5, 0xcd7), (0xcde, 0xcdf), (0xce0, 0xce4), 
(0xce6, 0xcf0), (0xcf1, 0xcf3), (0xd02, 0xd04), (0xd05, 0xd0d), (0xd0e, 0xd11), (0xd12, 0xd3b), (0xd3d, 0xd45), (0xd46, 0xd49), (0xd4a, 0xd4f), (0xd57, 0xd58), (0xd60, 0xd64), (0xd66, 0xd70), (0xd7a, 0xd80), (0xd82, 0xd84), (0xd85, 0xd97), (0xd9a, 0xdb2), (0xdb3, 0xdbc), (0xdbd, 0xdbe), (0xdc0, 0xdc7), (0xdca, 0xdcb), (0xdcf, 0xdd5), (0xdd6, 0xdd7), (0xdd8, 0xde0), (0xdf2, 0xdf4), (0xe01, 0xe33), (0xe34, 0xe3b), (0xe40, 0xe4f), (0xe50, 0xe5a), (0xe81, 0xe83), (0xe84, 0xe85), (0xe87, 0xe89), (0xe8a, 0xe8b), (0xe8d, 0xe8e), (0xe94, 0xe98), (0xe99, 0xea0), (0xea1, 0xea4), (0xea5, 0xea6), (0xea7, 0xea8), (0xeaa, 0xeac), (0xead, 0xeb3), (0xeb4, 0xeba), (0xebb, 0xebe), (0xec0, 0xec5), (0xec6, 0xec7), (0xec8, 0xece), (0xed0, 0xeda), (0xede, 0xee0), (0xf00, 0xf01), (0xf0b, 0xf0c), (0xf18, 0xf1a), (0xf20, 0xf2a), (0xf35, 0xf36), (0xf37, 0xf38), (0xf39, 0xf3a), (0xf3e, 0xf43), (0xf44, 0xf48), (0xf49, 0xf4d), (0xf4e, 0xf52), (0xf53, 0xf57), (0xf58, 0xf5c), (0xf5d, 0xf69), (0xf6a, 0xf6d), (0xf71, 0xf73), (0xf74, 0xf75), (0xf7a, 0xf81), (0xf82, 0xf85), (0xf86, 0xf93), (0xf94, 0xf98), (0xf99, 0xf9d), (0xf9e, 0xfa2), (0xfa3, 0xfa7), (0xfa8, 0xfac), (0xfad, 0xfb9), (0xfba, 0xfbd), (0xfc6, 0xfc7), (0x1000, 0x104a), (0x1050, 0x109e), (0x10d0, 0x10fb), (0x10fd, 0x1100), (0x1200, 0x1249), (0x124a, 0x124e), (0x1250, 0x1257), (0x1258, 0x1259), (0x125a, 0x125e), (0x1260, 0x1289), (0x128a, 0x128e), (0x1290, 0x12b1), (0x12b2, 0x12b6), (0x12b8, 0x12bf), (0x12c0, 0x12c1), (0x12c2, 0x12c6), (0x12c8, 0x12d7), (0x12d8, 0x1311), (0x1312, 0x1316), (0x1318, 0x135b), (0x135d, 0x1360), (0x1380, 0x1390), (0x13a0, 0x13f5), (0x1401, 0x166d), (0x166f, 0x1680), (0x1681, 0x169b), (0x16a0, 0x16eb), (0x1700, 0x170d), (0x170e, 0x1715), (0x1720, 0x1735), (0x1740, 0x1754), (0x1760, 0x176d), (0x176e, 0x1771), (0x1772, 0x1774), (0x1780, 0x17b4), (0x17b6, 0x17d4), (0x17d7, 0x17d8), (0x17dc, 0x17de), (0x17e0, 0x17ea), (0x1810, 0x181a), (0x1820, 0x1878), (0x1880, 0x18ab), (0x18b0, 0x18f6), (0x1900, 0x191d), 
(0x1920, 0x192c), (0x1930, 0x193c), (0x1946, 0x196e), (0x1970, 0x1975), (0x1980, 0x19ac), (0x19b0, 0x19ca), (0x19d0, 0x19da), (0x1a00, 0x1a1c), (0x1a20, 0x1a5f), (0x1a60, 0x1a7d), (0x1a7f, 0x1a8a), (0x1a90, 0x1a9a), (0x1aa7, 0x1aa8), (0x1b00, 0x1b4c), (0x1b50, 0x1b5a), (0x1b6b, 0x1b74), (0x1b80, 0x1bf4), (0x1c00, 0x1c38), (0x1c40, 0x1c4a), (0x1c4d, 0x1c7e), (0x1cd0, 0x1cd3), (0x1cd4, 0x1cf7), (0x1d00, 0x1d2c), (0x1d2f, 0x1d30), (0x1d3b, 0x1d3c), (0x1d4e, 0x1d4f), (0x1d6b, 0x1d78), (0x1d79, 0x1d9b), (0x1dc0, 0x1de7), (0x1dfc, 0x1e00), (0x1e01, 0x1e02), (0x1e03, 0x1e04), (0x1e05, 0x1e06), (0x1e07, 0x1e08), (0x1e09, 0x1e0a), (0x1e0b, 0x1e0c), (0x1e0d, 0x1e0e), (0x1e0f, 0x1e10), (0x1e11, 0x1e12), (0x1e13, 0x1e14), (0x1e15, 0x1e16), (0x1e17, 0x1e18), (0x1e19, 0x1e1a), (0x1e1b, 0x1e1c), (0x1e1d, 0x1e1e), (0x1e1f, 0x1e20), (0x1e21, 0x1e22), (0x1e23, 0x1e24), (0x1e25, 0x1e26), (0x1e27, 0x1e28), (0x1e29, 0x1e2a), (0x1e2b, 0x1e2c), (0x1e2d, 0x1e2e), (0x1e2f, 0x1e30), (0x1e31, 0x1e32), (0x1e33, 0x1e34), (0x1e35, 0x1e36), (0x1e37, 0x1e38), (0x1e39, 0x1e3a), (0x1e3b, 0x1e3c), (0x1e3d, 0x1e3e), (0x1e3f, 0x1e40), (0x1e41, 0x1e42), (0x1e43, 0x1e44), (0x1e45, 0x1e46), (0x1e47, 0x1e48), (0x1e49, 0x1e4a), (0x1e4b, 0x1e4c), (0x1e4d, 0x1e4e), (0x1e4f, 0x1e50), (0x1e51, 0x1e52), (0x1e53, 0x1e54), (0x1e55, 0x1e56), (0x1e57, 0x1e58), (0x1e59, 0x1e5a), (0x1e5b, 0x1e5c), (0x1e5d, 0x1e5e), (0x1e5f, 0x1e60), (0x1e61, 0x1e62), (0x1e63, 0x1e64), (0x1e65, 0x1e66), (0x1e67, 0x1e68), (0x1e69, 0x1e6a), (0x1e6b, 0x1e6c), (0x1e6d, 0x1e6e), (0x1e6f, 0x1e70), (0x1e71, 0x1e72), (0x1e73, 0x1e74), (0x1e75, 0x1e76), (0x1e77, 0x1e78), (0x1e79, 0x1e7a), (0x1e7b, 0x1e7c), (0x1e7d, 0x1e7e), (0x1e7f, 0x1e80), (0x1e81, 0x1e82), (0x1e83, 0x1e84), (0x1e85, 0x1e86), (0x1e87, 0x1e88), (0x1e89, 0x1e8a), (0x1e8b, 0x1e8c), (0x1e8d, 0x1e8e), (0x1e8f, 0x1e90), (0x1e91, 0x1e92), (0x1e93, 0x1e94), (0x1e95, 0x1e9a), (0x1e9c, 0x1e9e), (0x1e9f, 0x1ea0), (0x1ea1, 0x1ea2), (0x1ea3, 0x1ea4), (0x1ea5, 0x1ea6), (0x1ea7, 0x1ea8), 
(0x1ea9, 0x1eaa), (0x1eab, 0x1eac), (0x1ead, 0x1eae), (0x1eaf, 0x1eb0), (0x1eb1, 0x1eb2), (0x1eb3, 0x1eb4), (0x1eb5, 0x1eb6), (0x1eb7, 0x1eb8), (0x1eb9, 0x1eba), (0x1ebb, 0x1ebc), (0x1ebd, 0x1ebe), (0x1ebf, 0x1ec0), (0x1ec1, 0x1ec2), (0x1ec3, 0x1ec4), (0x1ec5, 0x1ec6), (0x1ec7, 0x1ec8), (0x1ec9, 0x1eca), (0x1ecb, 0x1ecc), (0x1ecd, 0x1ece), (0x1ecf, 0x1ed0), (0x1ed1, 0x1ed2), (0x1ed3, 0x1ed4), (0x1ed5, 0x1ed6), (0x1ed7, 0x1ed8), (0x1ed9, 0x1eda), (0x1edb, 0x1edc), (0x1edd, 0x1ede), (0x1edf, 0x1ee0), (0x1ee1, 0x1ee2), (0x1ee3, 0x1ee4), (0x1ee5, 0x1ee6), (0x1ee7, 0x1ee8), (0x1ee9, 0x1eea), (0x1eeb, 0x1eec), (0x1eed, 0x1eee), (0x1eef, 0x1ef0), (0x1ef1, 0x1ef2), (0x1ef3, 0x1ef4), (0x1ef5, 0x1ef6), (0x1ef7, 0x1ef8), (0x1ef9, 0x1efa), (0x1efb, 0x1efc), (0x1efd, 0x1efe), (0x1eff, 0x1f08), (0x1f10, 0x1f16), (0x1f20, 0x1f28), (0x1f30, 0x1f38), (0x1f40, 0x1f46), (0x1f50, 0x1f58), (0x1f60, 0x1f68), (0x1f70, 0x1f71), (0x1f72, 0x1f73), (0x1f74, 0x1f75), (0x1f76, 0x1f77), (0x1f78, 0x1f79), (0x1f7a, 0x1f7b), (0x1f7c, 0x1f7d), (0x1fb0, 0x1fb2), (0x1fb6, 0x1fb7), (0x1fc6, 0x1fc7), (0x1fd0, 0x1fd3), (0x1fd6, 0x1fd8), (0x1fe0, 0x1fe3), (0x1fe4, 0x1fe8), (0x1ff6, 0x1ff7), (0x214e, 0x214f), (0x2184, 0x2185), (0x2c30, 0x2c5f), (0x2c61, 0x2c62), (0x2c65, 0x2c67), (0x2c68, 0x2c69), (0x2c6a, 0x2c6b), (0x2c6c, 0x2c6d), (0x2c71, 0x2c72), (0x2c73, 0x2c75), (0x2c76, 0x2c7c), (0x2c81, 0x2c82), (0x2c83, 0x2c84), (0x2c85, 0x2c86), (0x2c87, 0x2c88), (0x2c89, 0x2c8a), (0x2c8b, 0x2c8c), (0x2c8d, 0x2c8e), (0x2c8f, 0x2c90), (0x2c91, 0x2c92), (0x2c93, 0x2c94), (0x2c95, 0x2c96), (0x2c97, 0x2c98), (0x2c99, 0x2c9a), (0x2c9b, 0x2c9c), (0x2c9d, 0x2c9e), (0x2c9f, 0x2ca0), (0x2ca1, 0x2ca2), (0x2ca3, 0x2ca4), (0x2ca5, 0x2ca6), (0x2ca7, 0x2ca8), (0x2ca9, 0x2caa), (0x2cab, 0x2cac), (0x2cad, 0x2cae), (0x2caf, 0x2cb0), (0x2cb1, 0x2cb2), (0x2cb3, 0x2cb4), (0x2cb5, 0x2cb6), (0x2cb7, 0x2cb8), (0x2cb9, 0x2cba), (0x2cbb, 0x2cbc), (0x2cbd, 0x2cbe), (0x2cbf, 0x2cc0), (0x2cc1, 0x2cc2), (0x2cc3, 0x2cc4), (0x2cc5, 0x2cc6), 
(0x2cc7, 0x2cc8), (0x2cc9, 0x2cca), (0x2ccb, 0x2ccc), (0x2ccd, 0x2cce), (0x2ccf, 0x2cd0), (0x2cd1, 0x2cd2), (0x2cd3, 0x2cd4), (0x2cd5, 0x2cd6), (0x2cd7, 0x2cd8), (0x2cd9, 0x2cda), (0x2cdb, 0x2cdc), (0x2cdd, 0x2cde), (0x2cdf, 0x2ce0), (0x2ce1, 0x2ce2), (0x2ce3, 0x2ce5), (0x2cec, 0x2ced), (0x2cee, 0x2cf2), (0x2cf3, 0x2cf4), (0x2d00, 0x2d26), (0x2d27, 0x2d28), (0x2d2d, 0x2d2e), (0x2d30, 0x2d68), (0x2d7f, 0x2d97), (0x2da0, 0x2da7), (0x2da8, 0x2daf), (0x2db0, 0x2db7), (0x2db8, 0x2dbf), (0x2dc0, 0x2dc7), (0x2dc8, 0x2dcf), (0x2dd0, 0x2dd7), (0x2dd8, 0x2ddf), (0x2de0, 0x2e00), (0x2e2f, 0x2e30), (0x3005, 0x3008), (0x302a, 0x302e), (0x303c, 0x303d), (0x3041, 0x3097), (0x3099, 0x309b), (0x309d, 0x309f), (0x30a1, 0x30fb), (0x30fc, 0x30ff), (0x3105, 0x312e), (0x31a0, 0x31bb), (0x31f0, 0x3200), (0x3400, 0x4db6), (0x4e00, 0x9fcd), (0xa000, 0xa48d), (0xa4d0, 0xa4fe), (0xa500, 0xa60d), (0xa610, 0xa62c), (0xa641, 0xa642), (0xa643, 0xa644), (0xa645, 0xa646), (0xa647, 0xa648), (0xa649, 0xa64a), (0xa64b, 0xa64c), (0xa64d, 0xa64e), (0xa64f, 0xa650), (0xa651, 0xa652), (0xa653, 0xa654), (0xa655, 0xa656), (0xa657, 0xa658), (0xa659, 0xa65a), (0xa65b, 0xa65c), (0xa65d, 0xa65e), (0xa65f, 0xa660), (0xa661, 0xa662), (0xa663, 0xa664), (0xa665, 0xa666), (0xa667, 0xa668), (0xa669, 0xa66a), (0xa66b, 0xa66c), (0xa66d, 0xa670), (0xa674, 0xa67e), (0xa67f, 0xa680), (0xa681, 0xa682), (0xa683, 0xa684), (0xa685, 0xa686), (0xa687, 0xa688), (0xa689, 0xa68a), (0xa68b, 0xa68c), (0xa68d, 0xa68e), (0xa68f, 0xa690), (0xa691, 0xa692), (0xa693, 0xa694), (0xa695, 0xa696), (0xa697, 0xa698), (0xa69f, 0xa6e6), (0xa6f0, 0xa6f2), (0xa717, 0xa720), (0xa723, 0xa724), (0xa725, 0xa726), (0xa727, 0xa728), (0xa729, 0xa72a), (0xa72b, 0xa72c), (0xa72d, 0xa72e), (0xa72f, 0xa732), (0xa733, 0xa734), (0xa735, 0xa736), (0xa737, 0xa738), (0xa739, 0xa73a), (0xa73b, 0xa73c), (0xa73d, 0xa73e), (0xa73f, 0xa740), (0xa741, 0xa742), (0xa743, 0xa744), (0xa745, 0xa746), (0xa747, 0xa748), (0xa749, 0xa74a), (0xa74b, 0xa74c), (0xa74d, 0xa74e), 
(0xa74f, 0xa750), (0xa751, 0xa752), (0xa753, 0xa754), (0xa755, 0xa756), (0xa757, 0xa758), (0xa759, 0xa75a), (0xa75b, 0xa75c), (0xa75d, 0xa75e), (0xa75f, 0xa760), (0xa761, 0xa762), (0xa763, 0xa764), (0xa765, 0xa766), (0xa767, 0xa768), (0xa769, 0xa76a), (0xa76b, 0xa76c), (0xa76d, 0xa76e), (0xa76f, 0xa770), (0xa771, 0xa779), (0xa77a, 0xa77b), (0xa77c, 0xa77d), (0xa77f, 0xa780), (0xa781, 0xa782), (0xa783, 0xa784), (0xa785, 0xa786), (0xa787, 0xa789), (0xa78c, 0xa78d), (0xa78e, 0xa78f), (0xa791, 0xa792), (0xa793, 0xa794), (0xa7a1, 0xa7a2), (0xa7a3, 0xa7a4), (0xa7a5, 0xa7a6), (0xa7a7, 0xa7a8), (0xa7a9, 0xa7aa), (0xa7fa, 0xa828), (0xa840, 0xa874), (0xa880, 0xa8c5), (0xa8d0, 0xa8da), (0xa8e0, 0xa8f8), (0xa8fb, 0xa8fc), (0xa900, 0xa92e), (0xa930, 0xa954), (0xa980, 0xa9c1), (0xa9cf, 0xa9da), (0xaa00, 0xaa37), (0xaa40, 0xaa4e), (0xaa50, 0xaa5a), (0xaa60, 0xaa77), (0xaa7a, 0xaa7c), (0xaa80, 0xaac3), (0xaadb, 0xaade), (0xaae0, 0xaaf0), (0xaaf2, 0xaaf7), (0xab01, 0xab07), (0xab09, 0xab0f), (0xab11, 0xab17), (0xab20, 0xab27), (0xab28, 0xab2f), (0xabc0, 0xabeb), (0xabec, 0xabee), (0xabf0, 0xabfa), (0xac00, 0xd7a4), (0xfa0e, 0xfa10), (0xfa11, 0xfa12), (0xfa13, 0xfa15), (0xfa1f, 0xfa20), (0xfa21, 0xfa22), (0xfa23, 0xfa25), (0xfa27, 0xfa2a), (0xfb1e, 0xfb1f), (0xfe20, 0xfe27), (0xfe73, 0xfe74), (0x10000, 0x1000c), (0x1000d, 0x10027), (0x10028, 0x1003b), (0x1003c, 0x1003e), (0x1003f, 0x1004e), (0x10050, 0x1005e), (0x10080, 0x100fb), (0x101fd, 0x101fe), (0x10280, 0x1029d), (0x102a0, 0x102d1), (0x10300, 0x1031f), (0x10330, 0x10341), (0x10342, 0x1034a), (0x10380, 0x1039e), (0x103a0, 0x103c4), (0x103c8, 0x103d0), (0x10428, 0x1049e), (0x104a0, 0x104aa), (0x10800, 0x10806), (0x10808, 0x10809), (0x1080a, 0x10836), (0x10837, 0x10839), (0x1083c, 0x1083d), (0x1083f, 0x10856), (0x10900, 0x10916), (0x10920, 0x1093a), (0x10980, 0x109b8), (0x109be, 0x109c0), (0x10a00, 0x10a04), (0x10a05, 0x10a07), (0x10a0c, 0x10a14), (0x10a15, 0x10a18), (0x10a19, 0x10a34), (0x10a38, 0x10a3b), (0x10a3f, 0x10a40), 
(0x10a60, 0x10a7d), (0x10b00, 0x10b36), (0x10b40, 0x10b56), (0x10b60, 0x10b73), (0x10c00, 0x10c49), (0x11000, 0x11047), (0x11066, 0x11070), (0x11080, 0x110bb), (0x110d0, 0x110e9), (0x110f0, 0x110fa), (0x11100, 0x11135), (0x11136, 0x11140), (0x11180, 0x111c5), (0x111d0, 0x111da), (0x11680, 0x116b8), (0x116c0, 0x116ca), (0x12000, 0x1236f), (0x13000, 0x1342f), (0x16800, 0x16a39), (0x16f00, 0x16f45), (0x16f50, 0x16f7f), (0x16f8f, 0x16fa0), (0x1b000, 0x1b002), (0x20000, 0x2a6d7), (0x2a700, 0x2b735), (0x2b740, 0x2b81e), ), 'CONTEXTJ': ( (0x200c, 0x200e), ), 'CONTEXTO': ( (0xb7, 0xb8), (0x375, 0x376), (0x5f3, 0x5f5), (0x660, 0x66a), (0x6f0, 0x6fa), (0x30fb, 0x30fc), ), }
gpl-2.0
Sweetgrassbuffalo/ReactionSweeGrass-v2
.meteor/local/dev_bundle/python/Lib/encodings/cp1258.py
593
13620
""" Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1258', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE 
u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL 
LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\u20ac' # 0x80 -> EURO SIGN u'\ufffe' # 0x81 -> UNDEFINED u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS u'\u2020' # 0x86 -> DAGGER u'\u2021' # 0x87 -> DOUBLE DAGGER u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT u'\u2030' # 0x89 -> PER MILLE SIGN u'\ufffe' # 0x8A -> UNDEFINED u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE u'\ufffe' # 0x8D -> UNDEFINED u'\ufffe' # 0x8E -> UNDEFINED u'\ufffe' # 0x8F -> UNDEFINED u'\ufffe' # 0x90 -> UNDEFINED u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK u'\u2019' # 0x92 -> 
RIGHT SINGLE QUOTATION MARK u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK u'\u2022' # 0x95 -> BULLET u'\u2013' # 0x96 -> EN DASH u'\u2014' # 0x97 -> EM DASH u'\u02dc' # 0x98 -> SMALL TILDE u'\u2122' # 0x99 -> TRADE MARK SIGN u'\ufffe' # 0x9A -> UNDEFINED u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE u'\ufffe' # 0x9D -> UNDEFINED u'\ufffe' # 0x9E -> UNDEFINED u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND SIGN u'\xa4' # 0xA4 -> CURRENCY SIGN u'\xa5' # 0xA5 -> YEN SIGN u'\xa6' # 0xA6 -> BROKEN BAR u'\xa7' # 0xA7 -> SECTION SIGN u'\xa8' # 0xA8 -> DIAERESIS u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xAC -> NOT SIGN u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\xaf' # 0xAF -> MACRON u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\xb2' # 0xB2 -> SUPERSCRIPT TWO u'\xb3' # 0xB3 -> SUPERSCRIPT THREE u'\xb4' # 0xB4 -> ACUTE ACCENT u'\xb5' # 0xB5 -> MICRO SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\xb8' # 0xB8 -> CEDILLA u'\xb9' # 0xB9 -> SUPERSCRIPT ONE u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS u'\xbf' # 0xBF -> INVERTED QUESTION MARK u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc6' 
# 0xC6 -> LATIN CAPITAL LETTER AE u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN u'\u0303' # 0xDE -> COMBINING TILDE u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT u'\xed' # 0xED -> 
LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE u'\u0323' # 0xF2 -> COMBINING DOT BELOW u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN u'\u20ab' # 0xFE -> DONG SIGN u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
gpl-3.0
googlemaps/google-maps-services-python
googlemaps/directions.py
1
5727
#
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#

"""Performs requests to the Google Maps Directions API."""

from googlemaps import convert


def directions(client, origin, destination,
               mode=None, waypoints=None, alternatives=False, avoid=None,
               language=None, units=None, region=None, departure_time=None,
               arrival_time=None, optimize_waypoints=False, transit_mode=None,
               transit_routing_preference=None, traffic_model=None):
    """Get directions between an origin point and a destination point.

    :param origin: The address or latitude/longitude value from which you wish
        to calculate directions.
    :type origin: string, dict, list, or tuple

    :param destination: The address or latitude/longitude value from which
        you wish to calculate directions. You can use a place_id as destination
        by putting 'place_id:' as a prefix in the passing parameter.
    :type destination: string, dict, list, or tuple

    :param mode: Specifies the mode of transport to use when calculating
        directions. One of "driving", "walking", "bicycling" or "transit"
    :type mode: string

    :param waypoints: Specifies an array of waypoints. Waypoints alter a
        route by routing it through the specified location(s). To influence
        route without adding stop prefix the waypoint with `via`, similar to
        `waypoints = ["via:San Francisco", "via:Mountain View"]`.
    :type waypoints: a single location, or a list of locations, where a
        location is a string, dict, list, or tuple

    :param alternatives: If True, more than one route may be returned in the
        response.
    :type alternatives: bool

    :param avoid: Indicates that the calculated route(s) should avoid the
        indicated features.
    :type avoid: list or string

    :param language: The language in which to return results.
    :type language: string

    :param units: Specifies the unit system to use when displaying results.
        "metric" or "imperial"
    :type units: string

    :param region: The region code, specified as a ccTLD ("top-level domain"
        two-character value.
    :type region: string

    :param departure_time: Specifies the desired time of departure.
    :type departure_time: int or datetime.datetime

    :param arrival_time: Specifies the desired time of arrival for transit
        directions. Note: you can't specify both departure_time and
        arrival_time.
    :type arrival_time: int or datetime.datetime

    :param optimize_waypoints: Optimize the provided route by rearranging the
        waypoints in a more efficient order.
    :type optimize_waypoints: bool

    :param transit_mode: Specifies one or more preferred modes of transit.
        This parameter may only be specified for requests where the mode is
        transit. Valid values are "bus", "subway", "train", "tram", "rail".
        "rail" is equivalent to ["train", "tram", "subway"].
    :type transit_mode: string or list of strings

    :param transit_routing_preference: Specifies preferences for transit
        requests. Valid values are "less_walking" or "fewer_transfers"
    :type transit_routing_preference: string

    :param traffic_model: Specifies the predictive travel time model to use.
        Valid values are "best_guess" or "optimistic" or "pessimistic".
        The traffic_model parameter may only be specified for requests where
        the travel mode is driving, and where the request includes a
        departure_time.
    :type traffic_model: string

    :rtype: list of routes
    """

    params = {
        "origin": convert.latlng(origin),
        "destination": convert.latlng(destination)
    }

    if mode:
        # NOTE(broady): the mode parameter is not validated by the Maps API
        # server. Check here to prevent silent failures.
        if mode not in ["driving", "walking", "bicycling", "transit"]:
            raise ValueError("Invalid travel mode.")
        params["mode"] = mode

    if waypoints:
        waypoints = convert.location_list(waypoints)
        if optimize_waypoints:
            waypoints = "optimize:true|" + waypoints
        params["waypoints"] = waypoints

    if alternatives:
        params["alternatives"] = "true"

    if avoid:
        params["avoid"] = convert.join_list("|", avoid)

    if language:
        params["language"] = language

    if units:
        params["units"] = units

    if region:
        params["region"] = region

    if departure_time:
        params["departure_time"] = convert.time(departure_time)

    if arrival_time:
        params["arrival_time"] = convert.time(arrival_time)

    if departure_time and arrival_time:
        # Fixed: the two literals were concatenated without a space, producing
        # the message "...departure_time andarrival_time."
        raise ValueError("Should not specify both departure_time and "
                         "arrival_time.")

    if transit_mode:
        params["transit_mode"] = convert.join_list("|", transit_mode)

    if transit_routing_preference:
        params["transit_routing_preference"] = transit_routing_preference

    if traffic_model:
        params["traffic_model"] = traffic_model

    return client._request("/maps/api/directions/json", params).get("routes", [])
apache-2.0
msmolens/VTK
ThirdParty/Twisted/twisted/lore/latex.py
29
15440
# -*- test-case-name: twisted.lore.test.test_latex -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ LaTeX output support for Lore. """ from xml.dom import minidom as dom import os.path, re from cStringIO import StringIO import urlparse from twisted.web import domhelpers from twisted.python import procutils import tree escapingRE = re.compile(r'([\[\]#$%&_{}^~\\])') lowerUpperRE = re.compile(r'([a-z])([A-Z])') def _escapeMatch(match): c = match.group() if c == '\\': return '$\\backslash$' elif c == '~': return '\\~{}' elif c == '^': return '\\^{}' elif c in '[]': return '{'+c+'}' else: return '\\' + c def latexEscape(txt): txt = escapingRE.sub(_escapeMatch, txt) return txt.replace('\n', ' ') entities = {'amp': '\&', 'gt': '>', 'lt': '<', 'quot': '"', 'copy': '\\copyright', 'mdash': '---', 'rdquo': '``', 'ldquo': "''"} def realpath(path): # Normalise path cwd = os.getcwd() path = os.path.normpath(os.path.join(cwd, path)) return path.replace('\\', '/') # windows slashes make LaTeX blow up def getLatexText(node, writer, filter=lambda x:x, entities=entities): if hasattr(node, 'eref'): return writer(entities.get(node.eref, '')) if hasattr(node, 'data'): if isinstance(node.data, unicode): data = node.data.encode('utf-8') else: data = node.data return writer(filter(data)) for child in node.childNodes: getLatexText(child, writer, filter, entities) class BaseLatexSpitter: def __init__(self, writer, currDir='.', filename=''): self.writer = writer self.currDir = currDir self.filename = filename def visitNode(self, node): if isinstance(node, dom.Comment): return if not hasattr(node, 'tagName'): self.writeNodeData(node) return getattr(self, 'visitNode_'+node.tagName, self.visitNodeDefault)(node) def visitNodeDefault(self, node): self.writer(getattr(self, 'start_'+node.tagName, '')) for child in node.childNodes: self.visitNode(child) self.writer(getattr(self, 'end_'+node.tagName, '')) def visitNode_a(self, node): if node.hasAttribute('class'): if 
node.getAttribute('class').endswith('listing'): return self.visitNode_a_listing(node) if node.hasAttribute('href'): return self.visitNode_a_href(node) if node.hasAttribute('name'): return self.visitNode_a_name(node) self.visitNodeDefault(node) def visitNode_span(self, node): if not node.hasAttribute('class'): return self.visitNodeDefault(node) node.tagName += '_'+node.getAttribute('class') self.visitNode(node) visitNode_div = visitNode_span def visitNode_h1(self, node): pass def visitNode_style(self, node): pass class LatexSpitter(BaseLatexSpitter): baseLevel = 0 diaHack = bool(procutils.which("dia")) def writeNodeData(self, node): buf = StringIO() getLatexText(node, buf.write, latexEscape) self.writer(buf.getvalue().replace('<', '$<$').replace('>', '$>$')) def visitNode_head(self, node): authorNodes = domhelpers.findElementsWithAttribute(node, 'rel', 'author') authorNodes = [n for n in authorNodes if n.tagName == 'link'] if authorNodes: self.writer('\\author{') authors = [] for aNode in authorNodes: name = aNode.getAttribute('title') href = aNode.getAttribute('href') if href.startswith('mailto:'): href = href[7:] if href: if name: name += ' ' name += '$<$' + href + '$>$' if name: authors.append(name) self.writer(' \\and '.join(authors)) self.writer('}') self.visitNodeDefault(node) def visitNode_pre(self, node): """ Writes a I{verbatim} block when it encounters a I{pre} element. @param node: The element to process. 
@type node: L{xml.dom.minidom.Element} """ self.writer('\\begin{verbatim}\n') buf = StringIO() getLatexText(node, buf.write) self.writer(tree._removeLeadingTrailingBlankLines(buf.getvalue())) self.writer('\\end{verbatim}\n') def visitNode_code(self, node): fout = StringIO() getLatexText(node, fout.write, latexEscape) data = lowerUpperRE.sub(r'\1\\linebreak[1]\2', fout.getvalue()) data = data[:1] + data[1:].replace('.', '.\\linebreak[1]') self.writer('\\texttt{'+data+'}') def visitNode_img(self, node): fileName = os.path.join(self.currDir, node.getAttribute('src')) target, ext = os.path.splitext(fileName) if self.diaHack and os.access(target + '.dia', os.R_OK): ext = '.dia' fileName = target + ext f = getattr(self, 'convert_'+ext[1:], None) if not f: return target = os.path.join(self.currDir, os.path.basename(target)+'.eps') f(fileName, target) target = os.path.basename(target) self._write_img(target) def _write_img(self, target): """Write LaTeX for image.""" self.writer('\\begin{center}\\includegraphics[%%\n' 'width=1.0\n' '\\textwidth,height=1.0\\textheight,\nkeepaspectratio]' '{%s}\\end{center}\n' % target) def convert_png(self, src, target): # XXX there's a *reason* Python comes with the pipes module - # someone fix this to use it. r = os.system('pngtopnm "%s" | pnmtops -noturn > "%s"' % (src, target)) if r != 0: raise OSError(r) def convert_dia(self, src, target): # EVIL DISGUSTING HACK data = os.popen("gunzip -dc %s" % (src)).read() pre = '<dia:attribute name="scaling">\n <dia:real val="1"/>' post = '<dia:attribute name="scaling">\n <dia:real val="0.5"/>' f = open('%s_hacked.dia' % (src), 'wb') f.write(data.replace(pre, post)) f.close() os.system('gzip %s_hacked.dia' % (src,)) os.system('mv %s_hacked.dia.gz %s_hacked.dia' % (src,src)) # Let's pretend we never saw that. # Silly dia needs an X server, even though it doesn't display anything. # If this is a problem for you, try using Xvfb. 
os.system("dia %s_hacked.dia -n -e %s" % (src, target)) def visitNodeHeader(self, node): level = (int(node.tagName[1])-2)+self.baseLevel self.writer('\n\n\\'+level*'sub'+'section{') spitter = HeadingLatexSpitter(self.writer, self.currDir, self.filename) spitter.visitNodeDefault(node) self.writer('}\n') def visitNode_a_listing(self, node): """ Writes a I{verbatim} block when it encounters a code listing (represented by an I{a} element with a I{listing} class). @param node: The element to process. @type node: C{xml.dom.minidom.Element} """ fileName = os.path.join(self.currDir, node.getAttribute('href')) self.writer('\\begin{verbatim}\n') lines = map(str.rstrip, open(fileName).readlines()) skipLines = int(node.getAttribute('skipLines') or 0) lines = lines[skipLines:] self.writer(tree._removeLeadingTrailingBlankLines('\n'.join(lines))) self.writer('\\end{verbatim}') # Write a caption for this source listing fileName = os.path.basename(fileName) caption = domhelpers.getNodeText(node) if caption == fileName: caption = 'Source listing' self.writer('\parbox[b]{\linewidth}{\\begin{center}%s --- ' '\\begin{em}%s\\end{em}\\end{center}}' % (latexEscape(caption), latexEscape(fileName))) def visitNode_a_href(self, node): supported_schemes=['http', 'https', 'ftp', 'mailto'] href = node.getAttribute('href') if urlparse.urlparse(href)[0] in supported_schemes: text = domhelpers.getNodeText(node) self.visitNodeDefault(node) if text != href: self.writer('\\footnote{%s}' % latexEscape(href)) else: path, fragid = (href.split('#', 1) + [None])[:2] if path == '': path = self.filename else: path = os.path.join(os.path.dirname(self.filename), path) #if path == '': #path = os.path.basename(self.filename) #else: # # Hack for linking to man pages from howtos, i.e. 
# # ../doc/foo-man.html -> foo-man.html # path = os.path.basename(path) path = realpath(path) if fragid: ref = path + 'HASH' + fragid else: ref = path self.writer('\\textit{') self.visitNodeDefault(node) self.writer('}') self.writer('\\loreref{%s}' % ref) def visitNode_a_name(self, node): self.writer('\\label{%sHASH%s}' % ( realpath(self.filename), node.getAttribute('name'))) self.visitNodeDefault(node) def visitNode_table(self, node): rows = [[col for col in row.childNodes if getattr(col, 'tagName', None) in ('th', 'td')] for row in node.childNodes if getattr(row, 'tagName', None)=='tr'] numCols = 1+max([len(row) for row in rows]) self.writer('\\begin{table}[ht]\\begin{center}') self.writer('\\begin{tabular}{@{}'+'l'*numCols+'@{}}') for row in rows: th = 0 for col in row: self.visitNode(col) self.writer('&') if col.tagName == 'th': th = 1 self.writer('\\\\\n') #\\ ends lines if th: self.writer('\\hline\n') self.writer('\\end{tabular}\n') if node.hasAttribute('title'): self.writer('\\caption{%s}' % latexEscape(node.getAttribute('title'))) self.writer('\\end{center}\\end{table}\n') def visitNode_span_footnote(self, node): self.writer('\\footnote{') spitter = FootnoteLatexSpitter(self.writer, self.currDir, self.filename) spitter.visitNodeDefault(node) self.writer('}') def visitNode_span_index(self, node): self.writer('\\index{%s}\n' % node.getAttribute('value')) self.visitNodeDefault(node) visitNode_h2 = visitNode_h3 = visitNode_h4 = visitNodeHeader start_title = '\\title{' end_title = '}\n' start_sub = '$_{' end_sub = '}$' start_sup = '$^{' end_sup = '}$' start_html = '''\\documentclass{article} \\newcommand{\\loreref}[1]{% \\ifthenelse{\\value{page}=\\pageref{#1}}% { (this page)}% { (page \\pageref{#1})}% }''' start_body = '\\begin{document}\n\\maketitle\n' end_body = '\\end{document}' start_dl = '\\begin{description}\n' end_dl = '\\end{description}\n' start_ul = '\\begin{itemize}\n' end_ul = '\\end{itemize}\n' start_ol = '\\begin{enumerate}\n' end_ol = 
'\\end{enumerate}\n' start_li = '\\item ' end_li = '\n' start_dt = '\\item[' end_dt = ']' end_dd = '\n' start_p = '\n\n' start_strong = start_em = '\\begin{em}' end_strong = end_em = '\\end{em}' start_q = "``" end_q = "''" start_div_note = '\\begin{quotation}\\textbf{Note:}' end_div_note = '\\end{quotation}' start_th = '\\textbf{' end_th = '}' class SectionLatexSpitter(LatexSpitter): baseLevel = 1 start_title = '\\section{' def visitNode_title(self, node): self.visitNodeDefault(node) #self.writer('\\label{%s}}\n' % os.path.basename(self.filename)) self.writer('\\label{%s}}\n' % realpath(self.filename)) end_title = end_body = start_body = start_html = '' class ChapterLatexSpitter(SectionLatexSpitter): baseLevel = 0 start_title = '\\chapter{' class HeadingLatexSpitter(BaseLatexSpitter): start_q = "``" end_q = "''" writeNodeData = LatexSpitter.writeNodeData.im_func class FootnoteLatexSpitter(LatexSpitter): """For multi-paragraph footnotes, this avoids having an empty leading paragraph.""" start_p = '' def visitNode_span_footnote(self, node): self.visitNodeDefault(node) def visitNode_p(self, node): self.visitNodeDefault(node) self.start_p = LatexSpitter.start_p class BookLatexSpitter(LatexSpitter): def visitNode_body(self, node): tocs=domhelpers.locateNodes([node], 'class', 'toc') domhelpers.clearNode(node) if len(tocs): toc=tocs[0] node.appendChild(toc) self.visitNodeDefault(node) def visitNode_link(self, node): if not node.hasAttribute('rel'): return self.visitNodeDefault(node) node.tagName += '_'+node.getAttribute('rel') self.visitNode(node) def visitNode_link_author(self, node): self.writer('\\author{%s}\n' % node.getAttribute('text')) def visitNode_link_stylesheet(self, node): if node.hasAttribute('type') and node.hasAttribute('href'): if node.getAttribute('type')=='application/x-latex': packagename=node.getAttribute('href') packagebase,ext=os.path.splitext(packagename) self.writer('\\usepackage{%s}\n' % packagebase) start_html = r'''\documentclass[oneside]{book} 
\usepackage{graphicx} \usepackage{times,mathptmx} ''' start_body = r'''\begin{document} \maketitle \tableofcontents ''' start_li='' end_li='' start_ul='' end_ul='' def visitNode_a(self, node): if node.hasAttribute('class'): a_class=node.getAttribute('class') if a_class.endswith('listing'): return self.visitNode_a_listing(node) else: return getattr(self, 'visitNode_a_%s' % a_class)(node) if node.hasAttribute('href'): return self.visitNode_a_href(node) if node.hasAttribute('name'): return self.visitNode_a_name(node) self.visitNodeDefault(node) def visitNode_a_chapter(self, node): self.writer('\\chapter{') self.visitNodeDefault(node) self.writer('}\n') def visitNode_a_sect(self, node): base,ext=os.path.splitext(node.getAttribute('href')) self.writer('\\input{%s}\n' % base) def processFile(spitter, fin): # XXX Use Inversion Of Control Pattern to orthogonalize the parsing API # from the Visitor Pattern application. (EnterPrise) dom = tree.parseFileAndReport(fin.name, lambda x: fin).documentElement spitter.visitNode(dom) def convertFile(filename, spitterClass): fout = open(os.path.splitext(filename)[0]+".tex", 'w') spitter = spitterClass(fout.write, os.path.dirname(filename), filename) fin = open(filename) processFile(spitter, fin) fin.close() fout.close()
bsd-3-clause
Aristocles/CouchPotatoServer
libs/apscheduler/triggers/cron/fields.py
115
3058
""" Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields. """ from calendar import monthrange from apscheduler.triggers.cron.expressions import * __all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField') MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0} MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day:': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59} DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0, 'second': 0} class BaseField(object): REAL = True COMPILERS = [AllExpression, RangeExpression] def __init__(self, name, exprs, is_default=False): self.name = name self.is_default = is_default self.compile_expressions(exprs) def get_min(self, dateval): return MIN_VALUES[self.name] def get_max(self, dateval): return MAX_VALUES[self.name] def get_value(self, dateval): return getattr(dateval, self.name) def get_next_value(self, dateval): smallest = None for expr in self.expressions: value = expr.get_next_value(dateval, self) if smallest is None or (value is not None and value < smallest): smallest = value return smallest def compile_expressions(self, exprs): self.expressions = [] # Split a comma-separated expression list, if any exprs = str(exprs).strip() if ',' in exprs: for expr in exprs.split(','): self.compile_expression(expr) else: self.compile_expression(exprs) def compile_expression(self, expr): for compiler in self.COMPILERS: match = compiler.value_re.match(expr) if match: compiled_expr = compiler(**match.groupdict()) self.expressions.append(compiled_expr) return raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name)) def __str__(self): expr_strings = (str(e) for e in self.expressions) return ','.join(expr_strings) def __repr__(self): return "%s('%s', '%s')" % (self.__class__.__name__, self.name, 
str(self)) class WeekField(BaseField): REAL = False def get_value(self, dateval): return dateval.isocalendar()[1] class DayOfMonthField(BaseField): COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression] def get_max(self, dateval): return monthrange(dateval.year, dateval.month)[1] class DayOfWeekField(BaseField): REAL = False COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression] def get_value(self, dateval): return dateval.weekday()
gpl-3.0
kwailamchan/programming-languages
python/facepp/facepp/faceapi/facepp.py
10
11857
# -*- coding: utf-8 -*- # $File: facepp.py # $Date: Thu May 16 14:59:36 2013 +0800 # $Author: jiakai@megvii.com # # This program is free software. It comes without any warranty, to # the extent permitted by applicable law. You can redistribute it # and/or modify it under the terms of the Do What The Fuck You Want # To Public License, Version 2, as published by Sam Hocevar. See # http://sam.zoy.org/wtfpl/COPYING (copied as below) for more details. # # DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE # Version 2, December 2004 # # Copyright (C) 2004 Sam Hocevar <sam@hocevar.net> # # Everyone is permitted to copy and distribute verbatim or modified # copies of this license document, and changing it is allowed as long # as the name is changed. # # DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE # TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION # # 0. You just DO WHAT THE FUCK YOU WANT TO. """a simple facepp sdk example: api = API(key, secret) api.detection.detect(img = File('/tmp/test.jpg'))""" __all__ = ['File', 'APIError', 'API'] DEBUG_LEVEL = 1 import sys import socket import urllib import urllib2 import json import os import os.path import itertools import mimetools import mimetypes import time import tempfile from collections import Iterable from cStringIO import StringIO class File(object): """an object representing a local file""" path = None content = None def __init__(self, path): self.path = path self._get_content() def _resize_cv2(self, ftmp): try: import cv2 except ImportError: return False img = cv2.imread(self.path) assert img is not None and img.size != 0, 'Invalid image' bigdim = max(img.shape[0], img.shape[1]) downscale = max(1., bigdim / 600.) 
img = cv2.resize(img, (int(img.shape[1] / downscale), int(img.shape[0] / downscale))) cv2.imwrite(ftmp, img) return True def _resize_PIL(self, ftmp): try: import PIL.Image except ImportError: return False img = PIL.Image.open(self.path) bigdim = max(img.size[0], img.size[1]) downscale = max(1., bigdim / 600.) img = img.resize( (int(img.size[0] / downscale), int(img.size[1] / downscale))) img.save(ftmp) return True def _get_content(self): """read image content; resize the image if necessary""" if os.path.getsize(self.path) > 2 * 1024 * 1024: ftmp = tempfile.NamedTemporaryFile( suffix = '.jpg', delete = False).name try: if not (self._resize_cv2(ftmp) or self._resize_PIL(ftmp)): raise APIError(-1, None, 'image file size too large') with open(ftmp, 'rb') as f: self.content = f.read() finally: os.unlink(ftmp) else: with open(self.path, 'rb') as f: self.content = f.read() def get_filename(self): return os.path.basename(self.path) class APIError(Exception): code = None """HTTP status code""" url = None """request URL""" body = None """server response body; or detailed error information""" def __init__(self, code, url, body): self.code = code self.url = url self.body = body def __str__(self): return 'code={s.code}\nurl={s.url}\n{s.body}'.format(s = self) __repr__ = __str__ class API(object): key = None secret = None server = 'http://api.faceplusplus.com/' decode_result = True timeout = None max_retries = None retry_delay = None def __init__(self, key, secret, srv = None, decode_result = True, timeout = 30, max_retries = 10, retry_delay = 5): """:param srv: The API server address :param decode_result: whether to json_decode the result :param timeout: HTTP request timeout in seconds :param max_retries: maximal number of retries after catching URL error or socket error :param retry_delay: time to sleep before retrying""" self.key = key self.secret = secret if srv: self.server = srv self.decode_result = decode_result assert timeout >= 0 or timeout is None assert max_retries >= 
0 self.timeout = timeout self.max_retries = max_retries self.retry_delay = retry_delay _setup_apiobj(self, self, []) def wait_async(self, session_id, referesh_interval = 2): """wait for asynchronous operations to complete""" while True: rst = self.info.get_session(session_id = session_id) if rst['status'] != u'INQUEUE': return rst _print_debug(rst) time.sleep(referesh_interval) def update_request(self, request): """overwrite this function to update the request before sending it to server""" pass def _setup_apiobj(self, api, path): if self is not api: self._api = api self._urlbase = api.server + '/'.join(path) lvl = len(path) done = set() for i in _APIS: if len(i) <= lvl: continue cur = i[lvl] if i[:lvl] == path and cur not in done: done.add(cur) setattr(self, cur, _APIProxy(api, i[:lvl + 1])) class _APIProxy(object): _api = None """underlying :class:`API` object""" _urlbase = None def __init__(self, api, path): _setup_apiobj(self, api, path) def __call__(self, post = False, *args, **kargs): if len(args): raise TypeError('Only keyword arguments are allowed') if type(post) is not bool: raise TypeError('post argument can only be True or False') form = _MultiPartForm() add_form = False for (k, v) in kargs.iteritems(): if isinstance(v, File): add_form = True form.add_file(k, v.get_filename(), v.content) if post: url = self._urlbase for k, v in self._mkarg(kargs).iteritems(): form.add_field(k, v) add_form = True else: url = self.geturl(**kargs) request = urllib2.Request(url) if add_form: body = str(form) request.add_header('Content-type', form.get_content_type()) request.add_header('Content-length', str(len(body))) request.add_data(body) self._api.update_request(request) retry = self._api.max_retries while True: retry -= 1 try: ret = urllib2.urlopen(request, timeout = self._api.timeout).read() break except urllib2.HTTPError as e: raise APIError(e.code, url, e.read()) except (socket.error, urllib2.URLError) as e: if retry < 0: raise e _print_debug('caught error: {}; 
retrying'.format(e)) time.sleep(self._api.retry_delay) if self._api.decode_result: try: ret = json.loads(ret) except: raise APIError(-1, url, 'json decode error, value={0!r}'.format(ret)) return ret def _mkarg(self, kargs): """change the argument list (encode value, add api key/secret) :return: the new argument list""" def enc(x): if isinstance(x, unicode): return x.encode('utf-8') return str(x) kargs = kargs.copy() kargs['api_key'] = self._api.key kargs['api_secret'] = self._api.secret for (k, v) in kargs.items(): if isinstance(v, Iterable) and not isinstance(v, basestring): kargs[k] = ','.join([enc(i) for i in v]) elif isinstance(v, File) or v is None: del kargs[k] else: kargs[k] = enc(v) return kargs def geturl(self, **kargs): """return the request url""" return self._urlbase + '?' + urllib.urlencode(self._mkarg(kargs)) def visit(self, browser = 'chromium', **kargs): """visit the url in browser""" os.system('{0} "{1}"'.format(browser, self.geturl(**kargs))) # ref: http://www.doughellmann.com/PyMOTW/urllib2/ class _MultiPartForm(object): """Accumulate the data to be used when posting a form.""" def __init__(self): self.form_fields = [] self.files = [] self.boundary = mimetools.choose_boundary() return def get_content_type(self): return 'multipart/form-data; boundary=%s' % self.boundary def add_field(self, name, value): """Add a simple field to the form data.""" self.form_fields.append((name, value)) return def add_file(self, fieldname, filename, content, mimetype = None): """Add a file to be uploaded.""" if mimetype is None: mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' self.files.append((fieldname, filename, mimetype, content)) return def __str__(self): """Return a string representing the form data, including attached files.""" # Build a list of lists, each containing "lines" of the # request. Each part is separated by a boundary string. # Once the list is built, return a string where each # line is separated by '\r\n'. 
parts = [] part_boundary = '--' + self.boundary # Add the form fields parts.extend( [ part_boundary, 'Content-Disposition: form-data; name="%s"' % name, '', value, ] for name, value in self.form_fields ) # Add the files to upload parts.extend( [ part_boundary, 'Content-Disposition: file; name="%s"; filename="%s"' % \ (field_name, filename), 'Content-Type: %s' % content_type, '', body, ] for field_name, filename, content_type, body in self.files ) # Flatten the list and add closing boundary marker, # then return CR+LF separated data flattened = list(itertools.chain(*parts)) flattened.append('--' + self.boundary + '--') flattened.append('') return '\r\n'.join(flattened) def _print_debug(msg): if DEBUG_LEVEL: sys.stderr.write(str(msg) + '\n') _APIS = [ '/detection/detect', '/detection/landmark', '/faceset/add_face', '/faceset/create', '/faceset/delete', '/faceset/get_info', '/faceset/remove_face', '/faceset/set_info', '/group/add_person', '/group/create', '/group/delete', '/group/get_info', '/group/remove_person', '/group/set_info', '/grouping/grouping', '/info/get_app', '/info/get_face', '/info/get_faceset_list', '/info/get_group_list', '/info/get_image', '/info/get_person_list', '/info/get_quota', '/info/get_session', '/person/add_face', '/person/create', '/person/delete', '/person/get_info', '/person/remove_face', '/person/set_info', '/recognition/compare', '/recognition/group_search', '/recognition/identify', '/recognition/recognize', '/recognition/search', '/recognition/test_train', '/recognition/train', '/recognition/verify', '/train/group_search', '/train/identify', '/train/recognize', '/train/search', '/train/verify' ] _APIS = [i.split('/')[1:] for i in _APIS]
mit
drkitty/cyder
vendor-local/src/django-extensions/build/lib/django_extensions/management/commands/find_template.py
18
1114
from django.core.management.base import LabelCommand
from django.template import loader
from django.template import TemplateDoesNotExist
import sys


def get_template_path(path):
    """Resolve *path* through Django's template loaders.

    Returns the filesystem origin of the template, or None when no
    configured loader can find it.
    """
    try:
        template = loader.find_template(path)
        if template[1]:
            return template[1].name
        # work arround https://code.djangoproject.com/ticket/17199 issue:
        # some loaders return no origin from find_template, so fall back to
        # asking each source loader directly for the template's origin.
        for template_loader in loader.template_source_loaders:
            try:
                source, origin = template_loader.load_template_source(path)
                return origin
            except TemplateDoesNotExist:
                pass
        # No loader could supply an origin either — treat as not found.
        raise TemplateDoesNotExist(path)
    except TemplateDoesNotExist:
        return None


class Command(LabelCommand):
    # `manage.py find_template <template_path>`: prints where a template
    # resolves to, or exits with status 1 when it cannot be found.
    help = "Finds the location of the given template by resolving its path"
    args = "[template_path]"
    label = 'template path'

    def handle_label(self, template_path, **options):
        """Print the resolved path of *template_path*; exit(1) if missing."""
        path = get_template_path(template_path)
        if path is None:
            sys.stderr.write("No template found\n")
            sys.exit(1)
        else:
            # Python 2 print statement — this file predates Python 3 support.
            print path
bsd-3-clause
AAROC/invenio
invenio/legacy/bibupload/engine.py
2
147250
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, # 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. from __future__ import print_function """ BibUpload: Receive MARC XML file and update the appropriate database tables according to options. """ __revision__ = "$Id$" import os import re import sys import time from datetime import datetime from six import iteritems from zlib import compress import socket import marshal import copy import tempfile import urlparse import urllib2 import urllib from invenio.config import CFG_OAI_ID_FIELD, \ CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \ CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \ CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG, \ CFG_BIBUPLOAD_STRONG_TAGS, \ CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS, \ CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \ CFG_BIBUPLOAD_DELETE_FORMATS, \ CFG_SITE_URL, \ CFG_SITE_SECURE_URL, \ CFG_SITE_RECORD, \ CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \ CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS, \ CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE, \ CFG_CERN_SITE, \ CFG_BIBUPLOAD_MATCH_DELETED_RECORDS from invenio.utils.json import json, CFG_JSON_AVAILABLE from invenio.legacy.bibupload.config import CFG_BIBUPLOAD_CONTROLFIELD_TAGS, \ CFG_BIBUPLOAD_SPECIAL_TAGS, \ CFG_BIBUPLOAD_DELETE_CODE, \ CFG_BIBUPLOAD_DELETE_VALUE, 
\ CFG_BIBUPLOAD_OPT_MODES from invenio.legacy.dbquery import run_sql from invenio.legacy.bibrecord import create_records, \ record_add_field, \ record_delete_field, \ record_xml_output, \ record_get_field_instances, \ record_get_field_value, \ record_get_field_values, \ field_get_subfield_values, \ field_get_subfield_instances, \ record_modify_subfield, \ record_delete_subfield_from, \ record_delete_fields, \ record_add_subfield_into, \ record_find_field, \ record_extract_oai_id, \ record_extract_dois, \ record_has_field, \ records_identical, \ record_drop_duplicate_fields from invenio.legacy.search_engine import get_record, record_exists, search_pattern from invenio.utils.date import convert_datestruct_to_datetext from invenio.ext.logging import register_exception from intbitset import intbitset from invenio.utils.url import make_user_agent_string from invenio.config import CFG_BIBDOCFILE_FILEDIR from invenio.legacy.bibsched.bibtask import task_init, write_message, \ task_set_option, task_get_option, task_get_task_param, \ task_update_progress, task_sleep_now_if_required, fix_argv_paths, \ RecoverableError from invenio.legacy.bibdocfile.api import BibRecDocs, file_strip_ext, normalize_format, \ get_docname_from_url, check_valid_url, download_url, \ KEEP_OLD_VALUE, decompose_bibdocfile_url, InvenioBibDocFileError, \ bibdocfile_url_p, CFG_BIBDOCFILE_AVAILABLE_FLAGS, guess_format_from_url, \ BibRelation, MoreInfo from invenio.legacy.search_engine import search_pattern from invenio.legacy.bibupload.revisionverifier import RevisionVerifier, \ InvenioBibUploadConflictingRevisionsError, \ InvenioBibUploadInvalidRevisionError, \ InvenioBibUploadMissing005Error, \ InvenioBibUploadUnchangedRecordError #Statistic variables stat = {} stat['nb_records_to_upload'] = 0 stat['nb_records_updated'] = 0 stat['nb_records_inserted'] = 0 stat['nb_errors'] = 0 stat['nb_holdingpen'] = 0 stat['exectime'] = time.localtime() _WRITING_RIGHTS = None CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS = 
('oracle', ) CFG_HAS_BIBCATALOG = "UNKNOWN" def check_bibcatalog(): """ Return True if bibcatalog is available. """ global CFG_HAS_BIBCATALOG # pylint: disable=W0603 if CFG_HAS_BIBCATALOG != "UNKNOWN": return CFG_HAS_BIBCATALOG CFG_HAS_BIBCATALOG = True bibcatalog_response = "No ticket system configured" if bibcatalog_response != "": write_message("BibCatalog error: %s\n" % (bibcatalog_response,)) CFG_HAS_BIBCATALOG = False return CFG_HAS_BIBCATALOG # Let's set a reasonable timeout for URL request (e.g. FFT) socket.setdefaulttimeout(40) def parse_identifier(identifier): """Parse the identifier and determine if it is temporary or fixed""" id_str = str(identifier) if not id_str.startswith("TMP:"): return (False, identifier) else: return (True, id_str[4:]) def resolve_identifier(tmps, identifier): """Resolves an identifier. If the identifier is not temporary, this function is an identity on the second argument. Otherwise, a resolved value is returned or an exception raised""" is_tmp, tmp_id = parse_identifier(identifier) if is_tmp: if not tmp_id in tmps: raise StandardError("Temporary identifier %s not present in the dictionary" % (tmp_id, )) if tmps[tmp_id] == -1: # the identifier has been signalised but never assigned a value - probably error during processing raise StandardError("Temporary identifier %s has been declared, but never assigned a value. Probably an error during processign of an appropriate FFT has happened. Please see the log" % (tmp_id, )) return int(tmps[tmp_id]) else: return int(identifier) _re_find_001 = re.compile('<controlfield\\s+tag=("001"|\'001\')\\s*>\\s*(\\d*)\\s*</controlfield>', re.S) def bibupload_pending_recids(): """This function embed a bit of A.I. and is more a hack than an elegant algorithm. It should be updated in case bibupload/bibsched are modified in incompatible ways. This function return the intbitset of all the records that are being (or are scheduled to be) touched by other bibuploads. 
""" options = run_sql("""SELECT arguments FROM "schTASK" WHERE status<>'DONE' AND proc='bibupload' AND (status='RUNNING' OR status='CONTINUING' OR status='WAITING' OR status='SCHEDULED' OR status='ABOUT TO STOP' OR status='ABOUT TO SLEEP')""") ret = intbitset() xmls = [] if options: for arguments in options: arguments = marshal.loads(arguments[0]) for argument in arguments[1:]: if argument.startswith('/'): # XMLs files are recognizable because they're absolute # files... xmls.append(argument) for xmlfile in xmls: # Let's grep for the 001 try: xml = open(xmlfile).read() ret += [int(group[1]) for group in _re_find_001.findall(xml)] except: continue return ret ### bibupload engine functions: def bibupload(record, opt_mode=None, opt_notimechange=0, oai_rec_id="", pretend=False, tmp_ids=None, tmp_vers=None): """Main function: process a record and fit it in the tables bibfmt, bibrec, bibrec_bibxxx, bibxxx with proper record metadata. Return (error_code, recID) of the processed record. """ if tmp_ids is None: tmp_ids = {} if tmp_vers is None: tmp_vers = {} if opt_mode == 'reference': ## NOTE: reference mode has been deprecated in favour of 'correct' opt_mode = 'correct' assert(opt_mode in CFG_BIBUPLOAD_OPT_MODES) try: record_xml_output(record).decode('utf-8') except UnicodeDecodeError: msg = " Failed: Invalid utf-8 characters." 
write_message(msg, verbose=1, stream=sys.stderr) return (1, -1, msg) error = None affected_tags = {} original_record = {} rec_old = {} record_modification_date = datetime.now() # will hold record creation/modification date record_had_altered_bit = False is_opt_mode_delete = False # Extraction of the Record Id from 001, SYSNO or OAIID or DOI tags: rec_id = retrieve_rec_id(record, opt_mode, pretend=pretend) if rec_id == -1: msg = " Failed: either the record already exists and insert was " \ "requested or the record does not exists and " \ "replace/correct/append has been used" write_message(msg, verbose=1, stream=sys.stderr) return (1, -1, msg) elif rec_id > 0: write_message(" -Retrieve record ID (found %s): DONE." % rec_id, verbose=2) (unique_p, msg) = check_record_doi_is_unique(rec_id, record) if not unique_p: write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) if '001' not in record: # Found record ID by means of SYSNO or OAIID or DOI, and the # input MARCXML buffer does not have this 001 tag, so we # should add it now: error = record_add_field(record, '001', controlfield_value=rec_id) if error is None: msg = " Failed: Error during adding the 001 controlfield " \ "to the record" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) else: error = None write_message(" -Added tag 001: DONE.", verbose=2) write_message(" -Check if the xml marc file is already in the database: DONE" , verbose=2) record_deleted_p = False record_creation_date = None if opt_mode == 'insert' or \ (opt_mode == 'replace_or_insert') and rec_id is None: insert_mode_p = True # Insert the record into the bibrec databases to have a recordId rec_id = create_new_record(pretend=pretend) write_message(" -Creation of a new record id (%d): DONE" % rec_id, verbose=2) # we add the record Id control field to the record error = record_add_field(record, '001', controlfield_value=rec_id) if error is None: msg = " Failed: Error during adding the 001 
controlfield " \ "to the record" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) else: error = None if '005' not in record: error = record_add_field(record, '005', controlfield_value=record_modification_date.strftime("%Y%m%d%H%M%S.0")) if error is None: msg = " ERROR: during adding to 005 controlfield to record" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) else: error = None else: write_message(" Note: 005 already existing upon inserting of new record. Keeping it.", verbose=2) record_creation_date = time.strftime("%Y-%m-%d %H:%M:%S", time.strptime(record['005'][0][3].split('.')[0], "%Y%m%d%H%M%S")) elif opt_mode != 'insert': insert_mode_p = False # Update Mode # Retrieve the old record to update rec_old = get_record(rec_id) record_had_altered_bit = record_get_field_values(rec_old, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4], CFG_OAI_PROVENANCE_ALTERED_SUBFIELD) # Also save a copy to restore previous situation in case of errors original_record = get_record(rec_id) if rec_old is None: msg = " Failed during the creation of the old record!" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) else: write_message(" -Retrieve the old record to update: DONE", verbose=2) # flag to check whether the revisions have been verified and patch generated. # If revision verification failed, then we need to manually identify the affected tags # and process them revision_verified = False rev_verifier = RevisionVerifier() #check for revision conflicts before updating record if record_has_field(record, '005') and not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS: write_message(" -Upload Record has 005. 
Verifying Revision", verbose=2) try: rev_res = rev_verifier.verify_revision(record, original_record, opt_mode) if rev_res: opt_mode = rev_res[0] record = rev_res[1] affected_tags = rev_res[2] revision_verified = True write_message(lambda: " -Patch record generated. Changing opt_mode to correct.\nPatch:\n%s " % record_xml_output(record), verbose=2) else: write_message(" -No Patch Record.", verbose=2) except InvenioBibUploadUnchangedRecordError as err: msg = " -ISSUE: %s" % err write_message(msg, verbose=1, stream=sys.stderr) write_message(msg, " Continuing anyway in case there are FFT or other tags") except InvenioBibUploadConflictingRevisionsError as err: msg = " -ERROR: Conflicting Revisions - %s" % err write_message(msg, verbose=1, stream=sys.stderr) submit_ticket_for_holding_pen(rec_id, err, "Conflicting Revisions. Inserting record into holding pen.", pretend=pretend) insert_record_into_holding_pen(record, str(rec_id), pretend=pretend) return (2, int(rec_id), msg) except InvenioBibUploadInvalidRevisionError as err: msg = " -ERROR: Invalid Revision - %s" % err write_message(msg) submit_ticket_for_holding_pen(rec_id, err, "Invalid Revisions. Inserting record into holding pen.", pretend=pretend) insert_record_into_holding_pen(record, str(rec_id), pretend=pretend) return (2, int(rec_id), msg) except InvenioBibUploadMissing005Error as err: msg = " -ERROR: Missing 005 - %s" % err write_message(msg) submit_ticket_for_holding_pen(rec_id, err, "Missing 005. Inserting record into holding pen.", pretend=pretend) insert_record_into_holding_pen(record, str(rec_id), pretend=pretend) return (2, int(rec_id), msg) else: write_message(" - No 005 Tag Present. 
Resuming normal flow.", verbose=2) # dictionaries to temporarily hold original recs tag-fields existing_tags = {} retained_tags = {} # in case of delete operation affected tags should be deleted in delete_bibrec_bibxxx # but should not be updated again in STAGE 4 # utilising the below flag is_opt_mode_delete = False if not revision_verified: # either 005 was not present or opt_mode was not correct/replace # in this case we still need to find out affected tags to process write_message(" - Missing 005 or opt_mode!=Replace/Correct.Revision Verifier not called.", verbose=2) # Identify affected tags if opt_mode == 'correct' or opt_mode == 'replace' or opt_mode == 'replace_or_insert': rec_diff = rev_verifier.compare_records(record, original_record, opt_mode) affected_tags = rev_verifier.retrieve_affected_tags_with_ind(rec_diff) elif opt_mode == 'delete': # populate an intermediate dictionary # used in upcoming step related to 'delete' mode is_opt_mode_delete = True for tag, fields in iteritems(original_record): existing_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields] elif opt_mode == 'append': for tag, fields in iteritems(record): if tag not in CFG_BIBUPLOAD_CONTROLFIELD_TAGS: affected_tags[tag] = [(field[1], field[2]) for field in fields] # In Replace mode, take over old strong tags if applicable: if opt_mode == 'replace' or \ opt_mode == 'replace_or_insert': copy_strong_tags_from_old_record(record, rec_old) # Delete tags to correct in the record if opt_mode == 'correct': delete_tags_to_correct(record, rec_old) write_message(" -Delete the old tags to correct in the old record: DONE", verbose=2) # Delete tags specified if in delete mode if opt_mode == 'delete': record = delete_tags(record, rec_old) for tag, fields in iteritems(record): retained_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields] #identify the tags that have been deleted 
for tag in existing_tags.keys(): if tag not in retained_tags: for item in existing_tags[tag]: tag_to_add = item[0:3] ind1, ind2 = item[3], item[4] if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]: affected_tags[tag_to_add].append((ind1, ind2)) else: affected_tags[tag_to_add] = [(ind1, ind2)] else: deleted = list(set(existing_tags[tag]) - set(retained_tags[tag])) for item in deleted: tag_to_add = item[0:3] ind1, ind2 = item[3], item[4] if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]: affected_tags[tag_to_add].append((ind1, ind2)) else: affected_tags[tag_to_add] = [(ind1, ind2)] write_message(" -Delete specified tags in the old record: DONE", verbose=2) # Append new tag to the old record and update the new record with the old_record modified if opt_mode == 'append' or opt_mode == 'correct': record = append_new_tag_to_old_record(record, rec_old) write_message(" -Append new tags to the old record: DONE", verbose=2) write_message(" -Affected Tags found after comparing upload and original records: %s"%(str(affected_tags)), verbose=2) # 005 tag should be added everytime the record is modified # If an exiting record is modified, its 005 tag should be overwritten with a new revision value if '005' in record: record_delete_field(record, '005') write_message(" Deleted the existing 005 tag.", verbose=2) last_revision = run_sql("""SELECT MAX(job_date) FROM "hstRECORD" WHERE id_bibrec=%s""", (rec_id, ))[0][0] if last_revision and last_revision.strftime("%Y%m%d%H%M%S.0") == record_modification_date.strftime("%Y%m%d%H%M%S.0"): ## We are updating the same record within the same seconds! It's less than ## the minimal granularity. 
Let's pause for 1 more second to take a breath :-) time.sleep(1) record_modification_date = datetime.now() error = record_add_field(record, '005', controlfield_value=record_modification_date.strftime("%Y%m%d%H%M%S.0")) if error is None: write_message(" Failed: Error during adding to 005 controlfield to record", verbose=1, stream=sys.stderr) return (1, int(rec_id)) else: error=None write_message(lambda: " -Added tag 005: DONE. " + str(record_get_field_value(record, '005', '', '')), verbose=2) # adding 005 to affected tags will delete the existing 005 entry # and update with the latest timestamp. if '005' not in affected_tags: affected_tags['005'] = [(' ', ' ')] write_message(" -Stage COMPLETED", verbose=2) record_deleted_p = False try: if not record_is_valid(record): msg = "ERROR: record is not valid" write_message(msg, verbose=1, stream=sys.stderr) return (1, -1, msg) # Have a look if we have FFT tags write_message("Stage 2: Start (Process FFT tags if exist).", verbose=2) record_had_FFT = False bibrecdocs = None if extract_tag_from_record(record, 'FFT') is not None: record_had_FFT = True if not writing_rights_p(): msg = "ERROR: no rights to write fulltext files" write_message(" Stage 2 failed: %s" % msg, verbose=1, stream=sys.stderr) raise StandardError(msg) try: bibrecdocs = BibRecDocs(rec_id) record = elaborate_fft_tags(record, rec_id, opt_mode, pretend=pretend, tmp_ids=tmp_ids, tmp_vers=tmp_vers, bibrecdocs=bibrecdocs) except Exception as e: register_exception() msg = " Stage 2 failed: ERROR: while elaborating FFT tags: %s" % e write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) if record is None: msg = " Stage 2 failed: ERROR: while elaborating FFT tags" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) write_message(" -Stage COMPLETED", verbose=2) else: write_message(" -Stage NOT NEEDED", verbose=2) # Have a look if we have FFT tags write_message("Stage 2B: Start (Synchronize 8564 tags).", verbose=2) if 
record_had_FFT or extract_tag_from_record(record, '856') is not None: try: if bibrecdocs is None: bibrecdocs = BibRecDocs(rec_id) record = synchronize_8564(rec_id, record, record_had_FFT, bibrecdocs, pretend=pretend) # in case if FFT is in affected list make appropriate changes if not insert_mode_p: # because for insert, all tags are affected if ('4', ' ') not in affected_tags.get('856', []): if '856' not in affected_tags: affected_tags['856'] = [('4', ' ')] elif ('4', ' ') not in affected_tags['856']: affected_tags['856'].append(('4', ' ')) write_message(" -Modified field list updated with FFT details: %s" % str(affected_tags), verbose=2) except Exception as e: register_exception(alert_admin=True) msg = " Stage 2B failed: ERROR: while synchronizing 8564 tags: %s" % e write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) if record is None: msg = " Stage 2B failed: ERROR: while synchronizing 8564 tags" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) write_message(" -Stage COMPLETED", verbose=2) else: write_message(" -Stage NOT NEEDED", verbose=2) write_message("Stage 3: Start (Apply fields deletion requests).", verbose=2) write_message(lambda: " Record before deletion:\n%s" % record_xml_output(record), verbose=9) # remove fields with __DELETE_FIELDS__ # NOTE:creating a temporary deep copy of record for iteration to avoid RunTimeError # RuntimeError due to change in dictionary size during iteration tmp_rec = copy.deepcopy(record) for tag in tmp_rec: for data_tuple in record[tag]: if (CFG_BIBUPLOAD_DELETE_CODE, CFG_BIBUPLOAD_DELETE_VALUE) in data_tuple[0]: # delete the tag with particular indicator pairs from original record record_delete_field(record, tag, data_tuple[1], data_tuple[2]) write_message(lambda: " Record after cleaning up fields to be deleted:\n%s" % record_xml_output(record), verbose=9) if opt_mode == 'append': write_message("Stage 3b: Drop duplicate fields in append mode.", verbose=2) record = 
record_drop_duplicate_fields(record) write_message(lambda: " Record after dropping duplicate fields:\n%s" % record_xml_output(record), verbose=9) # Update of the BibFmt write_message("Stage 4: Start (Update bibfmt).", verbose=2) updates_exist = not records_identical(record, original_record) if updates_exist: # if record_had_altered_bit, this must be set to true, since the # record has been altered. if record_had_altered_bit: oai_provenance_fields = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4]) for oai_provenance_field in oai_provenance_fields: for i, (code, dummy_value) in enumerate(oai_provenance_field[0]): if code == CFG_OAI_PROVENANCE_ALTERED_SUBFIELD: oai_provenance_field[0][i] = (code, 'true') tmp_indicators = (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3] == '_' and ' ' or CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4] == '_' and ' ' or CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4]) if tmp_indicators not in affected_tags.get(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], []): if CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3] not in affected_tags: affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]] = [tmp_indicators] else: affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]].append(tmp_indicators) write_message(lambda: " Updates exists:\n%s\n!=\n%s" % (record, original_record), verbose=9) # format the single record as xml rec_xml_new = record_xml_output(record) # Update bibfmt with the format xm of this record modification_date = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(record_get_field_value(record, '005'), '%Y%m%d%H%M%S.0')) error = update_bibfmt_format(rec_id, rec_xml_new, 'xm', modification_date, pretend=pretend) # Pre-generate recjson cache to prevent dog-piling behavior when # concurrent processes call get_record at the same time and the # does not exists (i.e. 
all process will try to generate and save # the recjson) from invenio_records.api import get_record as _get_record _get_record(rec_id) # Fire record signals. from invenio.base import signals if record_had_altered_bit: signals.record_after_update.send( 'bibupload', recid=rec_id) else: signals.record_after_create.send( 'bibupload', recid=rec_id) if error == 1: msg = " Failed: ERROR: during update_bibfmt_format 'xm'" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE: error = update_bibfmt_format(rec_id, bytearray(marshal.dumps(record)), 'recstruct', modification_date, pretend=pretend) if error == 1: msg = " Failed: ERROR: during update_bibfmt_format 'recstruct'" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) if not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS: # archive MARCXML format of this record for version history purposes: if insert_mode_p: error = archive_marcxml_for_history(rec_id, affected_fields={}, pretend=pretend) else: error = archive_marcxml_for_history(rec_id, affected_fields=affected_tags, pretend=pretend) if error == 1: msg = " ERROR: Failed to archive MARCXML for history" write_message(msg, verbose=1, stream=sys.stderr) return (1, int(rec_id), msg) else: write_message(" -Archived MARCXML for history: DONE", verbose=2) # delete some formats like HB upon record change: if updates_exist or record_had_FFT: for format_to_delete in CFG_BIBUPLOAD_DELETE_FORMATS: try: delete_bibfmt_format(rec_id, format_to_delete, pretend=pretend) except: # OK, some formats like HB could not have been deleted, no big deal pass write_message(" -Stage COMPLETED", verbose=2) ## Let's assert that one and only one 005 tag is existing at this stage. 
assert len(record['005']) == 1 # Update the database MetaData write_message("Stage 5: Start (Update the database with the metadata).", verbose=2) if insert_mode_p: update_database_with_metadata(record, rec_id, oai_rec_id, pretend=pretend) write_message(" -Stage COMPLETED", verbose=2) elif opt_mode in ('replace', 'replace_or_insert', 'append', 'correct', 'delete') and updates_exist: # now we clear all the rows from bibrec_bibxxx from the old record_deleted_p = True delete_bibrec_bibxxx(rec_old, rec_id, affected_tags, pretend=pretend) # metadata update will insert tags that are available in affected_tags. # but for delete, once the tags have been deleted from bibrec_bibxxx, they dont have to be inserted # except for 005. if is_opt_mode_delete: tmp_affected_tags = copy.deepcopy(affected_tags) for tag in tmp_affected_tags: if tag != '005': affected_tags.pop(tag) write_message(" -Clean bibrec_bibxxx: DONE", verbose=2) update_database_with_metadata(record, rec_id, oai_rec_id, affected_tags, pretend=pretend) write_message(" -Stage COMPLETED", verbose=2) else: write_message(" -Stage NOT NEEDED in mode %s" % opt_mode, verbose=2) record_deleted_p = False # Finally we update the bibrec table with the current date write_message("Stage 6: Start (Update bibrec table with current date).", verbose=2) if opt_notimechange == 0 and (updates_exist or record_had_FFT): record_modification_date = convert_datestruct_to_datetext(time.localtime()) write_message(" -Retrieved current localtime: DONE", verbose=2) update_bibrec_date(record_modification_date, rec_id, insert_mode_p, record_creation_date, pretend=pretend) write_message(" -Stage COMPLETED", verbose=2) else: write_message(" -Stage NOT NEEDED", verbose=2) # Increase statistics if insert_mode_p: stat['nb_records_inserted'] += 1 else: stat['nb_records_updated'] += 1 # Upload of this record finish write_message("Record "+str(rec_id)+" DONE", verbose=1) return (0, int(rec_id), "") finally: if record_deleted_p: ## BibUpload has failed 
# NOTE(review): this chunk begins mid-function; the enclosing `def` (and the
# start of the comment below) lie above this chunk, so the fragment's exact
# indentation could not be verified.  It is preserved here commented-out
# rather than guessed at -- restore it inside its original function:
#
#           ## ... living the record deleted. We should
#           ## back the original record then.
#           update_database_with_metadata(original_record, rec_id, oai_rec_id,
#                                         pretend=pretend)
#           write_message(" Restored original record", verbose=1,
#                         stream=sys.stderr)


def record_is_valid(record):
    """
    Check if the record is valid. Currently this simply checks if the record
    has exactly one rec_id.

    @param record: the record
    @type record: recstruct
    @return: True if the record is valid
    @rtype: bool
    """
    rec_ids = record_get_field_values(record, tag="001")
    if len(rec_ids) != 1:
        write_message(" The record is not valid: it has not a single rec_id: %s" % (rec_ids),
                      stream=sys.stderr)
        return False
    return True


def find_record_ids_by_oai_id(oaiId):
    """
    A method finding the records identifier provided the oai identifier
    returns a list of identifiers matching a given oai identifier
    """
    # Is this record already in invenio (matching by oaiid)
    if oaiId:
        recids = search_pattern(p=oaiId, f=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, m='e')

        # Is this record already in invenio (matching by reportnumber i.e.
        # particularly 037. Idea: to avoid double insertions)
        repnumber = oaiId.split(":")[-1]
        if repnumber:
            recids |= search_pattern(p = repnumber,
                                     f = "reportnumber",
                                     m = 'e' )

        # Is this record already in invenio (matching by reportnumber i.e.
        # particularly 037. Idea: to avoid double insertions)
        repnumber = "arXiv:" + oaiId.split(":")[-1]
        recids |= search_pattern(p = repnumber,
                                 f = "reportnumber",
                                 m = 'e' )

        if CFG_BIBUPLOAD_MATCH_DELETED_RECORDS:
            return recids
        else:
            if CFG_CERN_SITE:
                return recids - (search_pattern(p='DELETED', f='980__%', m='e') |
                                 search_pattern(p='DUMMY', f='980__%', m='e'))
            else:
                return recids - search_pattern(p='DELETED', f='980__%', m='e')
    else:
        return intbitset()


def bibupload_post_phase(record, mode=None, rec_id="", pretend=False,
                         tmp_ids=None, tmp_vers=None):
    """Run the BDR/BDM post-processing stages on an already-uploaded record."""

    def _elaborate_tag(record, tag, fun):
        # Run one elaboration stage (`fun`) if `tag` is present in the record.
        if extract_tag_from_record(record, tag) is not None:
            try:
                record = fun()
            except Exception as e:
                register_exception()
                write_message(" Stage failed: ERROR: while elaborating %s tags: %s" % (tag, e),
                              verbose=1, stream=sys.stderr)
                return (1, int(rec_id)) # TODO: ?
            if record is None:
                write_message(" Stage failed: ERROR: while elaborating %s tags" % (tag, ),
                              verbose=1, stream=sys.stderr)
                return (1, int(rec_id))
            write_message(" -Stage COMPLETED", verbose=2)
        else:
            write_message(" -Stage NOT NEEDED", verbose=2)

    # Mutable defaults avoided: fresh dicts per call.
    if tmp_ids is None:
        tmp_ids = {}
    if tmp_vers is None:
        tmp_vers = {}
    _elaborate_tag(record, "BDR", lambda: elaborate_brt_tags(record, rec_id = rec_id,
                                                             mode = mode,
                                                             pretend = pretend,
                                                             tmp_ids = tmp_ids,
                                                             tmp_vers = tmp_vers))
    _elaborate_tag(record, "BDM", lambda: elaborate_mit_tags(record, rec_id = rec_id,
                                                             mode = mode,
                                                             pretend = pretend,
                                                             tmp_ids = tmp_ids,
                                                             tmp_vers = tmp_vers))


def submit_ticket_for_holding_pen(rec_id, err, msg, pretend=False):
    """
    Submit a ticket via BibCatalog to report about a record that has been
    put into the Holding Pen.
    @rec_id: the affected record
    @err: the corresponding Exception
    msg: verbose message
    """
    from invenio.legacy.bibsched import bibtask
    from invenio.legacy.webuser import get_email_from_username, get_uid_from_email

    user = task_get_task_param("user")
    uid = None
    if user:
        try:
            uid = get_uid_from_email(get_email_from_username(user))
        except Exception as err:
            write_message("WARNING: can't reliably retrieve uid for user %s: %s" % (user, err),
                          stream=sys.stderr)
    # NOTE(review): `uid` is computed but unused in the visible body; the
    # ticket-submission call may have been lost in the source mangling --
    # confirm against upstream Invenio before relying on this function.


def insert_record_into_holding_pen(record, oai_id, pretend=False):
    """Store `record` in the Holding Pen table for later manual curation."""
    query = """INSERT INTO "bibHOLDINGPEN" (oai_id, changeset_date, changeset_xml, id_bibrec) VALUES (%s, NOW(), %s, %s)"""
    xml_record = record_xml_output(record)
    bibrec_ids = find_record_ids_by_oai_id(oai_id)
    # here determining the identifier of the record
    if len(bibrec_ids) > 0:
        bibrec_id = bibrec_ids.pop()
    else:
        # id not found by using the oai_id, let's use a wider search based
        # on any information we might have.
        bibrec_id = retrieve_rec_id(record, 'holdingpen', pretend=pretend)
        if bibrec_id is None:
            bibrec_id = 0
    if not pretend:
        run_sql(query, (oai_id, compress(xml_record), bibrec_id))
    # record_id is logged as 0! ( We are not inserting into the main database)
    log_record_uploading(oai_id, task_get_task_param('task_id', 0), 0, 'H',
                         pretend=pretend)
    stat['nb_holdingpen'] += 1


def print_out_bibupload_statistics():
    """Print the statistics of the process"""
    out = "Task stats: %(nb_input)d input records, %(nb_updated)d updated, " \
          "%(nb_inserted)d inserted, %(nb_errors)d errors, %(nb_holdingpen)d inserted to holding pen. " \
          "Time %(nb_sec).2f sec." % { \
              'nb_input': stat['nb_records_to_upload'],
              'nb_updated': stat['nb_records_updated'],
              'nb_inserted': stat['nb_records_inserted'],
              'nb_errors': stat['nb_errors'],
              'nb_holdingpen': stat['nb_holdingpen'],
              'nb_sec': time.time() - time.mktime(stat['exectime'])}
    write_message(out)


def open_marc_file(path):
    """Open a file and return the data"""
    try:
        # open the file containing the marc document
        marc_file = open(path, 'r')
        marc = marc_file.read()
        marc_file.close()
    except IOError as erro:
        write_message("ERROR: %s" % erro, verbose=1, stream=sys.stderr)
        if erro.errno == 2:
            # No such file or directory
            # Not scary
            e = RecoverableError('File does not exist: %s' % path)
        else:
            e = StandardError('File not accessible: %s' % path)
        raise e
    return marc


def xml_marc_to_records(xml_marc):
    """create the records"""
    # Creation of the records from the xml Marc in argument
    recs = create_records(xml_marc, 1, 1)
    if recs == []:
        msg = "ERROR: Cannot parse MARCXML file."
        write_message(msg, verbose=1, stream=sys.stderr)
        raise StandardError(msg)
    elif recs[0][0] is None:
        msg = "ERROR: MARCXML file has wrong format: %s" % recs
        write_message(msg, verbose=1, stream=sys.stderr)
        raise RecoverableError(msg)
    else:
        recs = map((lambda x: x[0]), recs)
        return recs


def find_record_format(rec_id, bibformat):
    """Look whether record REC_ID is formatted in FORMAT,
       i.e. whether FORMAT exists in the bibfmt table for this record.

       Return the number of times it is formatted: 0 if not, 1 if yes,
       2 if found more than once (should never occur).
    """
    out = 0
    query = """SELECT COUNT(*) FROM bibfmt WHERE id_bibrec=%s AND format=%s"""
    params = (rec_id, bibformat)
    res = []
    res = run_sql(query, params)
    out = res[0][0]
    return out


def find_record_from_recid(rec_id):
    """
    Try to find record in the database from the REC_ID number.
    Return record ID if found, None otherwise.
    """
    res = run_sql("SELECT id FROM bibrec WHERE id=%s", (rec_id,))
    if res:
        return res[0][0]
    else:
        return None


def find_record_from_sysno(sysno):
    """
    Try to find record in the database from the external SYSNO number.
    Return record ID if found, None otherwise.
    """
    bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:2]+'x'
    bibrec_bibxxx = 'bibrec_' + bibxxx
    res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
        %(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
        AND bb.id_bibxxx=b.id""" % {'bibxxx': bibxxx,
                                    'bibrec_bibxxx': bibrec_bibxxx},
                  (CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, sysno,))
    for recid in res:
        if CFG_BIBUPLOAD_MATCH_DELETED_RECORDS:
            return recid[0]
        else:
            if record_exists(recid[0]) > 0: ## Only non deleted records
                return recid[0]
    return None


def find_records_from_extoaiid(extoaiid, extoaisrc=None):
    """
    Try to find records in the database from the external EXTOAIID number.
    Return list of record ID if found, None otherwise.
    """
    assert(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5] == CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[:5])
    bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:2]+'x'
    bibrec_bibxxx = 'bibrec_' + bibxxx

    write_message(' Looking for extoaiid="%s" with extoaisrc="%s"' % (extoaiid, extoaisrc),
                  verbose=9)
    id_bibrecs = intbitset(run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
        %(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
        AND bb.id_bibxxx=b.id""" % {'bibxxx': bibxxx,
                                    'bibrec_bibxxx': bibrec_bibxxx},
                                   (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, extoaiid,)))
    write_message(' Partially found %s for extoaiid="%s"' % (id_bibrecs, extoaiid),
                  verbose=9)
    ret = intbitset()
    for id_bibrec in id_bibrecs:
        if not CFG_BIBUPLOAD_MATCH_DELETED_RECORDS:
            if record_exists(id_bibrec) < 1:
                ## We don't match not existing records
                continue
        record = get_record(id_bibrec)
        instances = record_get_field_instances(record,
                                               CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3],
                                               CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3],
                                               CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
        write_message(' recid %s -> instances "%s"' % (id_bibrec, instances), verbose=9)
        for instance in instances:
            this_extoaisrc = field_get_subfield_values(instance,
                                                       CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5])
            this_extoaisrc = this_extoaisrc and this_extoaisrc[0] or None
            this_extoaiid = field_get_subfield_values(instance,
                                                      CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5])
            this_extoaiid = this_extoaiid and this_extoaiid[0] or None
            write_message(" this_extoaisrc -> %s, this_extoaiid -> %s" % (this_extoaisrc, this_extoaiid),
                          verbose=9)
            if this_extoaiid == extoaiid:
                write_message(' recid %s -> provenance "%s"' % (id_bibrec, this_extoaisrc),
                              verbose=9)
                if this_extoaisrc == extoaisrc:
                    write_message('Found recid %s for extoaiid="%s" with provenance="%s"' % (id_bibrec, extoaiid, extoaisrc),
                                  verbose=9)
                    ret.add(id_bibrec)
                    break
                if this_extoaisrc is None:
                    write_message('WARNING: Found recid %s for extoaiid="%s" that doesn\'t specify any provenance, while input record does.' % (id_bibrec, extoaiid),
                                  stream=sys.stderr)
                if extoaisrc is None:
                    write_message('WARNING: Found recid %s for extoaiid="%s" that specify a provenance (%s), while input record does not have a provenance.' % (id_bibrec, extoaiid, this_extoaisrc),
                                  stream=sys.stderr)
    return ret


def find_record_from_oaiid(oaiid):
    """
    Try to find record in the database from the OAI ID number and OAI SRC.
    Return record ID if found, None otherwise.
    """
    bibxxx = 'bib'+CFG_OAI_ID_FIELD[0:2]+'x'
    bibrec_bibxxx = 'bibrec_' + bibxxx
    res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
        %(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
        AND bb.id_bibxxx=b.id""" % {'bibxxx': bibxxx,
                                    'bibrec_bibxxx': bibrec_bibxxx},
                  (CFG_OAI_ID_FIELD, oaiid,))
    for recid in res:
        if CFG_BIBUPLOAD_MATCH_DELETED_RECORDS:
            return recid[0]
        else:
            if record_exists(recid[0]) > 0: ## Only non deleted records
                return recid[0]
    return None


def find_record_from_doi(doi):
    """
    Try to find record in the database from the given DOI.
    Return record ID if found, None otherwise.
    """
    bibxxx = 'bib02x'
    bibrec_bibxxx = 'bibrec_' + bibxxx
    res = run_sql("""SELECT bb.id_bibrec, bb.field_number
        FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
        WHERE b.tag=%%s AND b.value=%%s
        AND bb.id_bibxxx=b.id""" % {'bibxxx': bibxxx,
                                    'bibrec_bibxxx': bibrec_bibxxx},
                  ('0247_a', doi,))

    # For each of the result, make sure that it is really tagged as doi
    for (id_bibrec, field_number) in res:
        if not CFG_BIBUPLOAD_MATCH_DELETED_RECORDS:
            if record_exists(id_bibrec) < 1:
                ## We don't match not existing records
                continue
        res = run_sql("""SELECT bb.id_bibrec
            FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
            WHERE b.tag=%%s AND b.value=%%s
            AND bb.id_bibxxx=b.id and bb.field_number=%%s
            and bb.id_bibrec=%%s""" % {'bibxxx': bibxxx,
                                       'bibrec_bibxxx': bibrec_bibxxx},
                      ('0247_2', "doi", field_number, id_bibrec))
        if res and res[0][0] == id_bibrec:
            return res[0][0]
    return None


def extract_tag_from_record(record, tag_number):
    """ Extract the tag_number for record."""
    # first step verify if the record is not already in the database
    if record:
        return record.get(tag_number, None)
    return None


def retrieve_rec_id(record, opt_mode, pretend=False, post_phase = False):
    """Retrieve the record Id from a record by using tag 001 or SYSNO or OAI ID or DOI
    tag. opt_mod is the desired mode.

    @param post_phase Tells if we are calling this method in the postprocessing
                      phase. If true, we accept presence of 001 fields even in
                      the insert mode
    @type post_phase boolean
    """
    rec_id = None

    # 1st step: we look for the tag 001
    tag_001 = extract_tag_from_record(record, '001')
    if tag_001 is not None:
        # We extract the record ID from the tag
        rec_id = tag_001[0][3]
        # if we are in insert mode => error
        if opt_mode == 'insert' and not post_phase:
            write_message(" Failed: tag 001 found in the xml"
                          " submitted, you should use the option replace,"
                          " correct or append to replace an existing"
                          " record. (-h for help)",
                          verbose=1, stream=sys.stderr)
            return -1
        else:
            # we found the rec id and we are not in insert mode => continue
            # we try to match rec_id against the database:
            if find_record_from_recid(rec_id) is not None:
                # okay, 001 corresponds to some known record
                return int(rec_id)
            elif opt_mode in ('replace', 'replace_or_insert'):
                if task_get_option('force'):
                    # we found the rec_id but it's not in the system and we are
                    # requested to replace records. Therefore we create on the fly
                    # a empty record allocating the recid.
                    write_message(" Warning: tag 001 found in the xml with"
                                  " value %(rec_id)s, but rec_id %(rec_id)s does"
                                  " not exist. Since the mode replace was"
                                  " requested the rec_id %(rec_id)s is allocated"
                                  " on-the-fly." % {"rec_id": rec_id},
                                  stream=sys.stderr)
                    return create_new_record(rec_id=rec_id, pretend=pretend)
                else:
                    # Since --force was not used we are going to raise an error
                    write_message(" Failed: tag 001 found in the xml"
                                  " submitted with value %(rec_id)s. The"
                                  " corresponding record however does not"
                                  " exists. If you want to really create"
                                  " such record, please use the --force"
                                  " parameter when calling bibupload." % {
                                      "rec_id": rec_id}, stream=sys.stderr)
                    return -1
            else:
                # The record doesn't exist yet. We shall have try to check
                # the SYSNO or OAI or DOI id later.
                write_message(" -Tag 001 value not found in database.",
                              verbose=9)
                rec_id = None
    else:
        write_message(" -Tag 001 not found in the xml marc file.", verbose=9)

    if rec_id is None:
        # 2nd step we look for the SYSNO
        sysnos = record_get_field_values(record,
                                         CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:3],
                                         CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] != "_" and \
                                         CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] or "",
                                         CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] != "_" and \
                                         CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] or "",
                                         CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[5:6])
        if sysnos:
            sysno = sysnos[0] # there should be only one external SYSNO
            write_message(" -Checking if SYSNO " + sysno + \
                          " exists in the database", verbose=9)
            # try to find the corresponding rec id from the database
            rec_id = find_record_from_sysno(sysno)
            if rec_id is not None:
                # rec_id found
                pass
            else:
                # The record doesn't exist yet. We will try to check
                # external and internal OAI ids later.
                write_message(" -Tag SYSNO value not found in database.",
                              verbose=9)
                rec_id = None
        else:
            write_message(" -Tag SYSNO not found in the xml marc file.",
                          verbose=9)

    if rec_id is None:
        # 2nd step we look for the external OAIID
        extoai_fields = record_get_field_instances(record,
                                                   CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3],
                                                   CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] != "_" and \
                                                   CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] or "",
                                                   CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] != "_" and \
                                                   CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] or "")
        if extoai_fields:
            for field in extoai_fields:
                extoaiid = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5:6])
                extoaisrc = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5:6])
                if extoaiid:
                    extoaiid = extoaiid[0]
                    if extoaisrc:
                        extoaisrc = extoaisrc[0]
                    else:
                        extoaisrc = None
                    write_message(" -Checking if EXTOAIID %s (%s) exists in the database" % (extoaiid, extoaisrc),
                                  verbose=9)
                    # try to find the corresponding rec id from the database
                    rec_ids = find_records_from_extoaiid(extoaiid, extoaisrc)
                    if rec_ids:
                        # rec_id found
                        rec_id = rec_ids.pop()
                        break
                    else:
                        # The record doesn't exist yet. We will try to check
                        # OAI id later.
                        write_message(" -Tag EXTOAIID value not found in database.",
                                      verbose=9)
                        rec_id = None
        else:
            write_message(" -Tag EXTOAIID not found in the xml marc file.",
                          verbose=9)

    if rec_id is None:
        # 4th step we look for the OAI ID
        oaiidvalues = record_get_field_values(record,
                                              CFG_OAI_ID_FIELD[0:3],
                                              CFG_OAI_ID_FIELD[3:4] != "_" and \
                                              CFG_OAI_ID_FIELD[3:4] or "",
                                              CFG_OAI_ID_FIELD[4:5] != "_" and \
                                              CFG_OAI_ID_FIELD[4:5] or "",
                                              CFG_OAI_ID_FIELD[5:6])
        if oaiidvalues:
            oaiid = oaiidvalues[0] # there should be only one OAI ID
            write_message(" -Check if local OAI ID " + oaiid + \
                          " exist in the database", verbose=9)
            # try to find the corresponding rec id from the database
            rec_id = find_record_from_oaiid(oaiid)
            if rec_id is not None:
                # rec_id found
                pass
            else:
                write_message(" -Tag OAI ID value not found in database.",
                              verbose=9)
                rec_id = None
        else:
            write_message(" -Tag SYSNO not found in the xml marc file.",
                          verbose=9)

    if rec_id is None:
        # 5th step we look for the DOI.
        record_dois = record_extract_dois(record)
        matching_recids = set()
        if record_dois:
            # try to find the corresponding rec id from the database
            for record_doi in record_dois:
                possible_recid = find_record_from_doi(record_doi)
                if possible_recid:
                    matching_recids.add(possible_recid)
            if len(matching_recids) > 1:
                # Oops, this record refers to DOI existing in multiple records.
                # Dunno which one to choose.
                write_message(" Failed: Multiple records found in the" \
                              " database %s that match the DOI(s) in the input" \
                              " MARCXML %s" % (repr(matching_recids), repr(record_dois)),
                              verbose=1, stream=sys.stderr)
                return -1
            elif len(matching_recids) == 1:
                rec_id = matching_recids.pop()
                if opt_mode == 'insert':
                    write_message(" Failed: DOI tag matching record #%s found in the xml" \
                                  " submitted, you should use the option replace," \
                                  " correct or append to replace an existing" \
                                  " record. (-h for help)" % rec_id,
                                  verbose=1, stream=sys.stderr)
                    return -1
            else:
                write_message(" - Tag DOI value not found in database.",
                              verbose=9)
                rec_id = None
        else:
            write_message(" -Tag DOI not found in the xml marc file.",
                          verbose=9)

    # Now we should have detected rec_id from SYSNO or OAIID
    # tags.  (None otherwise.)
    if rec_id:
        if opt_mode == 'insert':
            write_message(" Failed: Record found in the database," \
                          " you should use the option replace," \
                          " correct or append to replace an existing" \
                          " record. (-h for help)",
                          verbose=1, stream=sys.stderr)
            return -1
    else:
        if opt_mode != 'insert' and \
           opt_mode != 'replace_or_insert':
            write_message(" Failed: Record not found in the database."\
                          " Please insert the file before updating it."\
                          " (-h for help)", verbose=1, stream=sys.stderr)
            return -1

    return rec_id and int(rec_id) or None


def check_record_doi_is_unique(rec_id, record):
    """
    Check that DOI found in 'record' does not exist in any other
    record than 'recid'. Return (boolean, msg) where 'boolean' would
    be True if the DOI is unique.
    """
    record_dois = record_extract_dois(record)
    if record_dois:
        matching_recids = set()
        for record_doi in record_dois:
            possible_recid = find_record_from_doi(record_doi)
            if possible_recid:
                matching_recids.add(possible_recid)
        if len(matching_recids) > 1:
            # Oops, this record refers to DOI existing in multiple records.
            msg = " Failed: Multiple records found in the" \
                  " database %s that match the DOI(s) in the input" \
                  " MARCXML %s" % (repr(matching_recids), repr(record_dois))
            return (False, msg)
        elif len(matching_recids) == 1:
            matching_recid = matching_recids.pop()
            if str(matching_recid) != str(rec_id):
                # Oops, this record refers to DOI existing in a different record.
                msg = " Failed: DOI(s) %s found in this record (#%s)" \
                      " already exist(s) in another other record (#%s)" % \
                      (repr(record_dois), rec_id, matching_recid)
                return (False, msg)
    return (True, "")


### Insert functions

def create_new_record(rec_id=None, pretend=False):
    """
    Create new record in the database

    @param rec_id: if specified the new record will have this rec_id.
    @type rec_id: int
    @return: the allocated rec_id
    @rtype: int

    @note: in case of errors will be returned None
    """
    if rec_id is not None:
        try:
            rec_id = int(rec_id)
        except (ValueError, TypeError) as error:
            write_message(" ERROR: during the creation_new_record function: %s "
                          % error, verbose=1, stream=sys.stderr)
            return None
        if run_sql("SELECT id FROM bibrec WHERE id=%s", (rec_id, )):
            write_message(" ERROR: during the creation_new_record function: the requested rec_id %s already exists." % rec_id)
            return None
    if pretend:
        if rec_id:
            return rec_id
        else:
            return run_sql("SELECT max(id)+1 FROM bibrec")[0][0]
    if rec_id is not None:
        return run_sql("INSERT INTO bibrec (id, creation_date, modification_date) VALUES (%s, NOW(), NOW())", (rec_id, ))
    else:
        return run_sql("INSERT INTO bibrec (creation_date, modification_date) VALUES (NOW(), NOW())")


def insert_bibfmt(id_bibrec, marc, bibformat,
                  modification_date='1970-01-01 00:00:00', pretend=False):
    """Insert the format in the table bibfmt"""
    # compress the marc value
    pickled_marc = compress(str(marc))
    try:
        time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        modification_date = '1970-01-01 00:00:00'
    query = """INSERT INTO bibfmt (id_bibrec, format, last_updated, value) VALUES (%s, %s, %s, %s)"""
    if not pretend:
        row_id = run_sql(query, (id_bibrec, bibformat, modification_date, pickled_marc))
        return row_id
    else:
        return 1


def insert_record_bibxxx(tag, value, pretend=False):
    """Insert the record into bibxxx"""
    # determine into which table one should insert the record
    table_name = 'bib'+tag[0:2]+'x'

    # check if the tag, value combination exists in the table
    query = """SELECT id,value FROM %s """ % table_name
    query += """ WHERE tag=%s AND value=%s"""
    params = (tag, value)
    res = None
    res = run_sql(query, params)

    # Note: compare now the found values one by one and look for
    # string binary equality (e.g. to respect lowercase/uppercase
    # match), regardless of the charset etc settings.  Ideally we
    # could use a BINARY operator in the above SELECT statement, but
    # we would have to check compatibility on various MySQLdb versions
    # etc; this approach checks all matched values in Python, not in
    # MySQL, which is less cool, but more conservative, so it should
    # work better on most setups.
    if res:
        for row in res:
            row_id = row[0]
            row_value = row[1]
            if row_value == value:
                return (table_name, row_id)

    # We got here only when the tag, value combination was not found,
    # so it is now necessary to insert the tag, value combination into
    # bibxxx table as new.
    query = """INSERT INTO %s """ % table_name
    query += """ (tag, value) values (%s , %s)"""
    params = (tag, value)
    if not pretend:
        row_id = run_sql(query, params)
    else:
        return (table_name, 1)
    return (table_name, row_id)


def insert_record_bibrec_bibxxx(table_name, id_bibxxx,
                                field_number, id_bibrec, pretend=False):
    """Insert the record into bibrec_bibxxx"""
    # determine into which table one should insert the record
    full_table_name = 'bibrec_'+ table_name

    # insert the proper row into the table
    query = """INSERT INTO %s """ % full_table_name
    query += """(id_bibrec,id_bibxxx, field_number) values (%s , %s, %s)"""
    params = (id_bibrec, id_bibxxx, field_number)
    if not pretend:
        res = run_sql(query, params)
    else:
        return 1
    return res


def synchronize_8564(rec_id, record, record_had_FFT, bibrecdocs, pretend=False):
    """
    Synchronize 8564_ tags and BibDocFile tables.

    This function directly manipulate the record parameter.

    @type rec_id: positive integer
    @param rec_id: the record identifier.
    @param record: the record structure as created by bibrecord.create_record
    @type record_had_FFT: boolean
    @param record_had_FFT: True if the incoming bibuploaded-record used FFT
    @return: the manipulated record (which is also modified as a side effect)
    """
    def merge_marc_into_bibdocfile(field, pretend=False):
        """
        Internal function that reads a single field and stores its content
        in BibDocFile tables.
        @param field: the 8564_ field containing a BibDocFile URL.
        """
        write_message('Merging field: %s' % (field, ), verbose=9)
        url = field_get_subfield_values(field, 'u')[:1] or field_get_subfield_values(field, 'q')[:1]
        description = field_get_subfield_values(field, 'y')[:1]
        comment = field_get_subfield_values(field, 'z')[:1]
        if url:
            recid, docname, docformat = decompose_bibdocfile_url(url[0])
            if recid != rec_id:
                write_message("INFO: URL %s is not pointing to a fulltext owned by this record (%s)" % (url, recid),
                              stream=sys.stderr)
            else:
                try:
                    bibdoc = bibrecdocs.get_bibdoc(docname)
                    if description and not pretend:
                        bibdoc.set_description(description[0], docformat)
                    if comment and not pretend:
                        bibdoc.set_comment(comment[0], docformat)
                except InvenioBibDocFileError:
                    ## Apparently the referenced docname doesn't exist anymore.
                    ## Too bad. Let's skip it.
                    write_message("WARNING: docname %s does not seem to exist for record %s. Has it been renamed outside FFT?" % (docname, recid),
                                  stream=sys.stderr)

    def merge_bibdocfile_into_marc(field, subfields):
        """
        Internal function that reads BibDocFile table entries referenced by
        the URL in the given 8564_ field and integrate the given information
        directly with the provided subfields.

        @param field: the 8564_ field containing a BibDocFile URL.
        @param subfields: the subfields corresponding to the BibDocFile URL
                          generated after BibDocFile tables.
        """
        write_message('Merging subfields %s into field %s' % (subfields, field), verbose=9)
        subfields = dict(subfields) ## We make a copy not to have side-effects
        subfield_to_delete = []
        for subfield_position, (code, value) in enumerate(field_get_subfield_instances(field)):
            ## For each subfield instance already existing...
            if code in subfields:
                ## ...We substitute it with what is in BibDocFile tables
                record_modify_subfield(record, '856', code, subfields[code],
                                       subfield_position, field_position_global=field[4])
                del subfields[code]
            else:
                ## ...We delete it otherwise
                subfield_to_delete.append(subfield_position)

        subfield_to_delete.sort()

        for counter, position in enumerate(subfield_to_delete):
            ## FIXME: Very hackish algorithm. Since deleting a subfield
            ## will alterate the position of following subfields, we
            ## are taking note of this and adjusting further position
            ## by using a counter.
            record_delete_subfield_from(record, '856', position - counter,
                                        field_position_global=field[4])

        subfields = subfields.items()
        subfields.sort()
        for code, value in subfields:
            ## Let's add non-previously existing subfields
            record_add_subfield_into(record, '856', code, value,
                                     field_position_global=field[4])

    def get_bibdocfile_managed_info():
        """
        Internal function, returns a dictionary of
        BibDocFile URL -> wanna-be subfields.
        """
        # NOTE(review): the source chunk is truncated at this point; the body
        # of get_bibdocfile_managed_info() and the remainder of
        # synchronize_8564() continue beyond this chunk and could not be
        # reconstructed from it -- restore them from the following chunk.
This information is retrieved from internal BibDoc structures rather than from input MARC XML files @rtype: mapping @return: BibDocFile URL -> wanna-be subfields dictionary """ ret = {} latest_files = bibrecdocs.list_latest_files(list_hidden=False) for afile in latest_files: url = afile.get_url() ret[url] = {'u': url} description = afile.get_description() comment = afile.get_comment() subformat = afile.get_subformat() size = afile.get_size() if description: ret[url]['y'] = description if comment: ret[url]['z'] = comment if subformat: ret[url]['x'] = subformat ret[url]['s'] = str(size) return ret write_message("Synchronizing MARC of recid '%s' with:\n%s" % (rec_id, record), verbose=9) tags856s = record_get_field_instances(record, '856', '%', '%') write_message("Original 856%% instances: %s" % tags856s, verbose=9) tags8564s_to_add = get_bibdocfile_managed_info() write_message("BibDocFile instances: %s" % tags8564s_to_add, verbose=9) positions_tags8564s_to_remove = [] for local_position, field in enumerate(tags856s): if field[1] == '4' and field[2] == ' ': write_message('Analysing %s' % (field, ), verbose=9) for url in field_get_subfield_values(field, 'u') + field_get_subfield_values(field, 'q'): if url in tags8564s_to_add: # there exists a link in the MARC of the record and the connection exists in BibDoc tables if record_had_FFT: merge_bibdocfile_into_marc(field, tags8564s_to_add[url]) else: merge_marc_into_bibdocfile(field, pretend=pretend) del tags8564s_to_add[url] break elif bibdocfile_url_p(url) and decompose_bibdocfile_url(url)[0] == rec_id: # The link exists and is potentially correct-looking link to a document # moreover, it refers to current record id ... but it does not exist in # internal BibDoc structures. This could have happen in the case of renaming a document # or its removal. In both cases we have to remove link... 
a new one will be created positions_tags8564s_to_remove.append(local_position) write_message("%s to be deleted and re-synchronized" % (field, ), verbose=9) break record_delete_fields(record, '856', positions_tags8564s_to_remove) tags8564s_to_add = tags8564s_to_add.values() tags8564s_to_add.sort() ## FIXME: we are not yet able to preserve the sorting ## of 8564 tags WRT FFT in BibUpload. ## See ticket #1606. for subfields in tags8564s_to_add: subfields = subfields.items() subfields.sort() record_add_field(record, '856', '4', ' ', subfields=subfields) write_message('Final record: %s' % record, verbose=9) return record def _get_subfield_value(field, subfield_code, default=None): res = field_get_subfield_values(field, subfield_code) if res != [] and res != None: return res[0] else: return default def elaborate_mit_tags(record, rec_id, mode, pretend = False, tmp_ids = {}, tmp_vers = {}): """ Uploading MoreInfo -> BDM tags """ tuple_list = extract_tag_from_record(record, 'BDM') # Now gathering information from BDR tags - to be processed later write_message("Processing BDM entries of the record ") recordDocs = BibRecDocs(rec_id) if tuple_list: for mit in record_get_field_instances(record, 'BDM', ' ', ' '): relation_id = _get_subfield_value(mit, "r") bibdoc_id = _get_subfield_value(mit, "i") # checking for a possibly temporary ID if not (bibdoc_id is None): bibdoc_id = resolve_identifier(tmp_ids, bibdoc_id) bibdoc_ver = _get_subfield_value(mit, "v") if not (bibdoc_ver is None): bibdoc_ver = resolve_identifier(tmp_vers, bibdoc_ver) bibdoc_name = _get_subfield_value(mit, "n") bibdoc_fmt = _get_subfield_value(mit, "f") moreinfo_str = _get_subfield_value(mit, "m") if bibdoc_id == None: if bibdoc_name == None: raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified") else: # retrieving the ID based on the document name (inside current record) # The document is attached to current record. 
try: bibdoc_id = recordDocs.get_docid(bibdoc_name) except: raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc_name, )) else: if bibdoc_name != None: write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr) if (moreinfo_str is None or mode in ("replace", "correct")) and (not pretend): MoreInfo(docid=bibdoc_id , version = bibdoc_ver, docformat = bibdoc_fmt, relation = relation_id).delete() if (not moreinfo_str is None) and (not pretend): MoreInfo.create_from_serialised(moreinfo_str, docid=bibdoc_id, version = bibdoc_ver, docformat = bibdoc_fmt, relation = relation_id) return record def elaborate_brt_tags(record, rec_id, mode, pretend=False, tmp_ids = {}, tmp_vers = {}): """ Process BDR tags describing relations between existing objects """ tuple_list = extract_tag_from_record(record, 'BDR') # Now gathering information from BDR tags - to be processed later relations_to_create = [] write_message("Processing BDR entries of the record ") recordDocs = BibRecDocs(rec_id) #TODO: check what happens if there is no record yet ! Will the class represent an empty set? if tuple_list: for brt in record_get_field_instances(record, 'BDR', ' ', ' '): relation_id = _get_subfield_value(brt, "r") bibdoc1_id = None bibdoc1_name = None bibdoc1_ver = None bibdoc1_fmt = None bibdoc2_id = None bibdoc2_name = None bibdoc2_ver = None bibdoc2_fmt = None if not relation_id: bibdoc1_id = _get_subfield_value(brt, "i") bibdoc1_name = _get_subfield_value(brt, "n") if bibdoc1_id == None: if bibdoc1_name == None: raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified") else: # retrieving the ID based on the document name (inside current record) # The document is attached to current record. 
try: bibdoc1_id = recordDocs.get_docid(bibdoc1_name) except: raise StandardError("BibDoc of a name %s does not exist within a record" % \ (bibdoc1_name, )) else: # resolving temporary identifier bibdoc1_id = resolve_identifier(tmp_ids, bibdoc1_id) if bibdoc1_name != None: write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr) bibdoc1_ver = _get_subfield_value(brt, "v") if not (bibdoc1_ver is None): bibdoc1_ver = resolve_identifier(tmp_vers, bibdoc1_ver) bibdoc1_fmt = _get_subfield_value(brt, "f") bibdoc2_id = _get_subfield_value(brt, "j") bibdoc2_name = _get_subfield_value(brt, "o") if bibdoc2_id == None: if bibdoc2_name == None: raise StandardError("Incorrect relation. Neither name nor identifier of the second obejct has been specified") else: # retrieving the ID based on the document name (inside current record) # The document is attached to current record. try: bibdoc2_id = recordDocs.get_docid(bibdoc2_name) except: raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc2_name, )) else: bibdoc2_id = resolve_identifier(tmp_ids, bibdoc2_id) if bibdoc2_name != None: write_message("WARNING: both name and id of the first document of a relation have been specified. 
Ignoring the name", stream=sys.stderr) bibdoc2_ver = _get_subfield_value(brt, "w") if not (bibdoc2_ver is None): bibdoc2_ver = resolve_identifier(tmp_vers, bibdoc2_ver) bibdoc2_fmt = _get_subfield_value(brt, "g") control_command = _get_subfield_value(brt, "d") relation_type = _get_subfield_value(brt, "t") if not relation_type and not relation_id: raise StandardError("The relation type must be specified") more_info = _get_subfield_value(brt, "m") # the relation id might be specified in the case of updating # MoreInfo table instead of other fields rel_obj = None if not relation_id: rels = BibRelation.get_relations(rel_type = relation_type, bibdoc1_id = bibdoc1_id, bibdoc2_id = bibdoc2_id, bibdoc1_ver = bibdoc1_ver, bibdoc2_ver = bibdoc2_ver, bibdoc1_fmt = bibdoc1_fmt, bibdoc2_fmt = bibdoc2_fmt) if len(rels) > 0: rel_obj = rels[0] relation_id = rel_obj.id else: rel_obj = BibRelation(rel_id=relation_id) relations_to_create.append((relation_id, bibdoc1_id, bibdoc1_ver, bibdoc1_fmt, bibdoc2_id, bibdoc2_ver, bibdoc2_fmt, relation_type, more_info, rel_obj, control_command)) record_delete_field(record, 'BDR', ' ', ' ') if mode in ("insert", "replace_or_insert", "append", "correct", "replace"): # now creating relations between objects based on the data if not pretend: for (relation_id, bibdoc1_id, bibdoc1_ver, bibdoc1_fmt, bibdoc2_id, bibdoc2_ver, bibdoc2_fmt, rel_type, more_info, rel_obj, control_command) in relations_to_create: if rel_obj == None: rel_obj = BibRelation.create(bibdoc1_id = bibdoc1_id, bibdoc1_ver = bibdoc1_ver, bibdoc1_fmt = bibdoc1_fmt, bibdoc2_id = bibdoc2_id, bibdoc2_ver = bibdoc2_ver, bibdoc2_fmt = bibdoc2_fmt, rel_type = rel_type) relation_id = rel_obj.id if mode in ("replace"): # Clearing existing MoreInfo content rel_obj.get_more_info().delete() if more_info: MoreInfo.create_from_serialised(more_info, relation = relation_id) if control_command == "DELETE": rel_obj.delete() else: write_message("BDR tag is not processed in the %s mode" % (mode, )) 
return record def elaborate_fft_tags(record, rec_id, mode, pretend=False, tmp_ids = {}, tmp_vers = {}, bibrecdocs=None): """ Process FFT tags that should contain $a with file pathes or URLs to get the fulltext from. This function enriches record with proper 8564 URL tags, downloads fulltext files and stores them into var/data structure where appropriate. CFG_BIBUPLOAD_WGET_SLEEP_TIME defines time to sleep in seconds in between URL downloads. Note: if an FFT tag contains multiple $a subfields, we upload them into different 856 URL tags in the metadata. See regression test case test_multiple_fft_insert_via_http(). """ # Let's define some handy sub procedure. def _add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False): """Adds a new format for a given bibdoc. Returns True when everything's fine.""" write_message('Add new format to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s, modification_date: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags, modification_date), verbose=9) try: if not url: # Not requesting a new url. Just updating comment & description return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend) try: if not pretend: bibdoc.add_file_new_format(url, description=description, comment=comment, flags=flags, modification_date=modification_date) except StandardError as e: write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because format already exists (%s)." 
% (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr) raise except Exception as e: write_message("ERROR: in adding '%s' as a new format because of: %s" % (url, e), stream=sys.stderr) raise return True def _add_new_version(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False): """Adds a new version for a given bibdoc. Returns True when everything's fine.""" write_message('Add new version to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags), verbose=9) try: if not url: return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend) try: if not pretend: bibdoc.add_file_new_version(url, description=description, comment=comment, flags=flags, modification_date=modification_date) except StandardError as e: write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because '%s'." 
% (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr) raise except Exception as e: write_message("ERROR: in adding '%s' as a new version because of: %s" % (url, e), stream=sys.stderr) raise return True def _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=False): """Directly update comments and descriptions.""" write_message('Just updating description and comment for %s with format %s with description %s, comment %s and flags %s' % (docname, docformat, description, comment, flags), verbose=9) try: if not pretend: bibdoc.set_description(description, docformat) bibdoc.set_comment(comment, docformat) for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS: if flag in flags: bibdoc.set_flag(flag, docformat) else: bibdoc.unset_flag(flag, docformat) except StandardError as e: write_message("('%s', '%s', '%s', '%s', '%s') description and comment not updated because '%s'." % (docname, docformat, description, comment, flags, e)) raise return True def _process_document_moreinfos(more_infos, docname, version, docformat, mode): if not mode in ('correct', 'append', 'replace_or_insert', 'replace', 'insert'): #print("exited because the mode is incorrect") return docid = None try: docid = bibrecdocs.get_docid(docname) except: raise StandardError("MoreInfo: No document of a given name associated with the record") if not version: # We have to retrieve the most recent version ... version = bibrecdocs.get_bibdoc(docname).get_latest_version() doc_moreinfo_s, version_moreinfo_s, version_format_moreinfo_s, format_moreinfo_s = more_infos if mode in ("replace", "replace_or_insert"): if doc_moreinfo_s: #only if specified, otherwise do not touch MoreInfo(docid = docid).delete() if format_moreinfo_s: #only if specified... 
otherwise do not touch MoreInfo(docid = docid, docformat = docformat).delete() if not doc_moreinfo_s is None: MoreInfo.create_from_serialised(ser_str = doc_moreinfo_s, docid = docid) if not version_moreinfo_s is None: MoreInfo.create_from_serialised(ser_str = version_moreinfo_s, docid = docid, version = version) if not version_format_moreinfo_s is None: MoreInfo.create_from_serialised(ser_str = version_format_moreinfo_s, docid = docid, version = version, docformat = docformat) if not format_moreinfo_s is None: MoreInfo.create_from_serialised(ser_str = format_moreinfo_s, docid = docid, docformat = docformat) if mode == 'delete': raise StandardError('FFT tag specified but bibupload executed in --delete mode') tuple_list = extract_tag_from_record(record, 'FFT') if tuple_list: # FFT Tags analysis write_message("FFTs: "+str(tuple_list), verbose=9) docs = {} # docnames and their data for fft in record_get_field_instances(record, 'FFT', ' ', ' '): # Very first, we retrieve the potentially temporary odentifiers... #even if the rest fails, we should include them in teh dictionary version = _get_subfield_value(fft, 'v', '') # checking if version is temporary... if so, filling a different varaible is_tmp_ver, bibdoc_tmpver = parse_identifier(version) if is_tmp_ver: version = None else: bibdoc_tmpver = None if not version: #treating cases of empty string etc... version = None bibdoc_tmpid = field_get_subfield_values(fft, 'i') if bibdoc_tmpid: bibdoc_tmpid = bibdoc_tmpid[0] else: bibdoc_tmpid is_tmp_id, bibdoc_tmpid = parse_identifier(bibdoc_tmpid) if not is_tmp_id: bibdoc_tmpid = None # In the case of having temporary id's, we dont resolve them yet but signaklise that they have been used # value -1 means that identifier has been declared but not assigned a value yet if bibdoc_tmpid: if bibdoc_tmpid in tmp_ids: write_message("WARNING: the temporary identifier %s has been declared more than once. 
Ignoring the second occurance" % (bibdoc_tmpid, ), stream=sys.stderr) else: tmp_ids[bibdoc_tmpid] = -1 if bibdoc_tmpver: if bibdoc_tmpver in tmp_vers: write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpver, ), stream=sys.stderr) else: tmp_vers[bibdoc_tmpver] = -1 # Let's discover the type of the document # This is a legacy field and will not be enforced any particular # check on it. doctype = _get_subfield_value(fft, 't', 'Main') #Default is Main # Let's discover the url. url = field_get_subfield_values(fft, 'a') if url: url = url[0] try: check_valid_url(url) except StandardError as e: raise StandardError, "fft '%s' specifies in $a a location ('%s') with problems: %s" % (fft, url, e) else: url = '' #TODO: a lot of code can be compactified using similar syntax ... should be more readable on the longer scale # maybe right side expressions look a bit cryptic, but the elaborate_fft function would be much clearer if mode == 'correct' and doctype != 'FIX-MARC': arg2 = "" else: arg2 = KEEP_OLD_VALUE description = _get_subfield_value(fft, 'd', arg2) # Let's discover the description # description = field_get_subfield_values(fft, 'd') # if description != []: # description = description[0] # else: # if mode == 'correct' and doctype != 'FIX-MARC': ## If the user require to correct, and do not specify ## a description this means she really want to ## modify the description. 
# description = '' # else: # description = KEEP_OLD_VALUE # Let's discover the desired docname to be created/altered name = field_get_subfield_values(fft, 'n') if name: ## Let's remove undesired extensions name = file_strip_ext(name[0] + '.pdf') else: if url: name = get_docname_from_url(url) elif mode != 'correct' and doctype != 'FIX-MARC': raise StandardError, "WARNING: fft '%s' doesn't specifies either a location in $a or a docname in $n" % str(fft) else: continue # Let's discover the desired new docname in case we want to change it newname = field_get_subfield_values(fft, 'm') if newname: newname = file_strip_ext(newname[0] + '.pdf') else: newname = name # Let's discover the desired format docformat = field_get_subfield_values(fft, 'f') if docformat: docformat = normalize_format(docformat[0]) else: if url: docformat = guess_format_from_url(url) else: docformat = "" # Let's discover the icon icon = field_get_subfield_values(fft, 'x') if icon != []: icon = icon[0] if icon != KEEP_OLD_VALUE: try: check_valid_url(icon) except StandardError as e: raise StandardError, "fft '%s' specifies in $x an icon ('%s') with problems: %s" % (fft, icon, e) else: icon = '' # Let's discover the comment comment = field_get_subfield_values(fft, 'z') if comment != []: comment = comment[0] else: if mode == 'correct' and doctype != 'FIX-MARC': ## See comment on description comment = '' else: comment = KEEP_OLD_VALUE # Let's discover the restriction restriction = field_get_subfield_values(fft, 'r') if restriction != []: restriction = restriction[0] else: if mode == 'correct' and doctype != 'FIX-MARC': ## See comment on description restriction = '' else: restriction = KEEP_OLD_VALUE document_moreinfo = _get_subfield_value(fft, 'w') version_moreinfo = _get_subfield_value(fft, 'p') version_format_moreinfo = _get_subfield_value(fft, 'b') format_moreinfo = _get_subfield_value(fft, 'u') # Let's discover the timestamp of the file (if any) timestamp = field_get_subfield_values(fft, 's') if 
timestamp: try: timestamp = datetime(*(time.strptime(timestamp[0], "%Y-%m-%d %H:%M:%S")[:6])) except ValueError: write_message('WARNING: The timestamp is not in a good format, thus will be ignored. The format should be YYYY-MM-DD HH:MM:SS', stream=sys.stderr) timestamp = '' else: timestamp = '' flags = field_get_subfield_values(fft, 'o') for flag in flags: if flag not in CFG_BIBDOCFILE_AVAILABLE_FLAGS: raise StandardError, "fft '%s' specifies a non available flag: %s" % (fft, flag) if name in docs: # new format considered (doctype2, newname2, restriction2, version2, urls, dummybibdoc_moreinfos2, dummybibdoc_tmpid2, dummybibdoc_tmpver2 ) = docs[name] if doctype2 != doctype: raise StandardError, "fft '%s' specifies a different doctype from previous fft with docname '%s'" % (str(fft), name) if newname2 != newname: raise StandardError, "fft '%s' specifies a different newname from previous fft with docname '%s'" % (str(fft), name) if restriction2 != restriction: raise StandardError, "fft '%s' specifies a different restriction from previous fft with docname '%s'" % (str(fft), name) if version2 != version: raise StandardError, "fft '%s' specifies a different version than the previous fft with docname '%s'" % (str(fft), name) for (dummyurl2, format2, dummydescription2, dummycomment2, dummyflags2, dummytimestamp2) in urls: if docformat == format2: raise StandardError, "fft '%s' specifies a second file '%s' with the same format '%s' from previous fft with docname '%s'" % (str(fft), url, docformat, name) if url or docformat: urls.append((url, docformat, description, comment, flags, timestamp)) if icon: urls.append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp)) else: if url or docformat: docs[name] = (doctype, newname, restriction, version, [(url, docformat, description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver) if icon: 
docs[name][4].append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp)) elif icon: docs[name] = (doctype, newname, restriction, version, [(icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver) else: docs[name] = (doctype, newname, restriction, version, [], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver) write_message('Result of FFT analysis:\n\tDocs: %s' % (docs,), verbose=9) # Let's remove all FFT tags record_delete_field(record, 'FFT', ' ', ' ') ## Let's pre-download all the URLs to see if, in case of mode 'correct' or 'append' ## we can avoid creating a new revision. for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver ) in docs.items(): downloaded_urls = [] try: bibdoc = bibrecdocs.get_bibdoc(docname) except InvenioBibDocFileError: ## A bibdoc with the given docname does not exists. 
## So there is no chance we are going to revise an existing ## format with an identical file :-) bibdoc = None new_revision_needed = False for url, docformat, description, comment, flags, timestamp in urls: if url: try: downloaded_url = download_url(url, docformat) write_message("%s saved into %s" % (url, downloaded_url), verbose=9) except Exception as err: write_message("ERROR: in downloading '%s' because of: %s" % (url, err), stream=sys.stderr) raise if mode == 'correct' and bibdoc is not None and not new_revision_needed: downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp)) if not bibrecdocs.check_file_exists(downloaded_url, docformat): new_revision_needed = True else: write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr) elif mode == 'append' and bibdoc is not None: if not bibrecdocs.check_file_exists(downloaded_url, docformat): downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp)) else: write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr) else: downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp)) else: downloaded_urls.append(('', docformat, description, comment, flags, timestamp)) if mode == 'correct' and bibdoc is not None and not new_revision_needed: ## Since we don't need a new revision (because all the files ## that are being uploaded are different) ## we can simply remove the urls but keep the other information write_message("No need to add a new revision for docname %s for recid %s" % (docname, rec_id), verbose=2) docs[docname] = (doctype, newname, restriction, version, [('', docformat, description, comment, flags, timestamp) for (dummy, docformat, description, comment, flags, timestamp) in downloaded_urls], more_infos, bibdoc_tmpid, bibdoc_tmpver) for downloaded_url, dummy, dummy, dummy, dummy, dummy in 
downloaded_urls: ## Let's free up some space :-) if downloaded_url and os.path.exists(downloaded_url): os.remove(downloaded_url) else: if downloaded_urls or mode != 'append': docs[docname] = (doctype, newname, restriction, version, downloaded_urls, more_infos, bibdoc_tmpid, bibdoc_tmpver) else: ## In case we are in append mode and there are no urls to append ## we discard the whole FFT del docs[docname] if mode == 'replace': # First we erase previous bibdocs if not pretend: for bibdoc in bibrecdocs.list_bibdocs(): bibdoc.delete() bibrecdocs.dirty = True for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver) in iteritems(docs): write_message("Elaborating olddocname: '%s', newdocname: '%s', doctype: '%s', restriction: '%s', urls: '%s', mode: '%s'" % (docname, newname, doctype, restriction, urls, mode), verbose=9) if mode in ('insert', 'replace'): # new bibdocs, new docnames, new marc if newname in bibrecdocs.get_bibdoc_names(): write_message("('%s', '%s') not inserted because docname already exists." % (newname, urls), stream=sys.stderr) raise StandardError("('%s', '%s') not inserted because docname already exists." % (newname, urls), stream=sys.stderr) try: if not pretend: bibdoc = bibrecdocs.add_bibdoc(doctype, newname) bibdoc.set_status(restriction) else: bibdoc = None except Exception as e: write_message("('%s', '%s', '%s') not inserted because: '%s'." 
% (doctype, newname, urls, e), stream=sys.stderr) raise e for (url, docformat, description, comment, flags, timestamp) in urls: assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend)) elif mode == 'replace_or_insert': # to be thought as correct_or_insert try: bibdoc = bibrecdocs.get_bibdoc(docname) found_bibdoc = True except InvenioBibDocFileError: found_bibdoc = False else: if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'): if newname != docname: try: if not pretend: bibrecdocs.change_name(newname=newname, docid=bibdoc.id) write_message(lambda: "After renaming: %s" % bibrecdocs, verbose=9) except StandardError as e: write_message('ERROR: in renaming %s to %s: %s' % (docname, newname, e), stream=sys.stderr) raise try: bibdoc = bibrecdocs.get_bibdoc(newname) found_bibdoc = True except InvenioBibDocFileError: found_bibdoc = False else: if doctype == 'PURGE': if not pretend: bibdoc.purge() bibrecdocs.dirty = True elif doctype == 'DELETE': if not pretend: bibdoc.delete() bibrecdocs.dirty = True elif doctype == 'EXPUNGE': if not pretend: bibdoc.expunge() bibrecdocs.dirty = True elif doctype == 'FIX-ALL': if not pretend: bibrecdocs.fix(docname) elif doctype == 'FIX-MARC': pass elif doctype == 'DELETE-FILE': if urls: for (url, docformat, description, comment, flags, timestamp) in urls: if not pretend: bibdoc.delete_file(docformat, version) elif doctype == 'REVERT': try: if not pretend: bibdoc.revert(version) except Exception, e: write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr) raise else: if restriction != KEEP_OLD_VALUE: if not pretend: bibdoc.set_status(restriction) # Since the docname already existed we have to first # bump the version by pushing the first new file # then pushing the other files. 
if urls: (first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0] other_urls = urls[1:] assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend)) for (url, docformat, description, comment, flags, timestamp) in other_urls: assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend)) ## Let's refresh the list of bibdocs. if not found_bibdoc: if not pretend: bibdoc = bibrecdocs.add_bibdoc(doctype, newname) bibdoc.set_status(restriction) for (url, docformat, description, comment, flags, timestamp) in urls: assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp)) elif mode == 'correct': try: bibdoc = bibrecdocs.get_bibdoc(docname) found_bibdoc = True except InvenioBibDocFileError: found_bibdoc = False else: if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'): if newname != docname: try: if not pretend: bibrecdocs.change_name(newname=newname, docid=bibdoc.id) write_message(lambda: "After renaming: %s" % bibrecdocs, verbose=9) except StandardError as e: write_message('ERROR: in renaming %s to %s: %s' % (docname, newname, e), stream=sys.stderr) raise try: bibdoc = bibrecdocs.get_bibdoc(newname) found_bibdoc = True except InvenioBibDocFileError: found_bibdoc = False else: if doctype == 'PURGE': if not pretend: bibdoc.purge() bibrecdocs.dirty = True elif doctype == 'DELETE': if not pretend: bibdoc.delete() bibrecdocs.dirty = True elif doctype == 'EXPUNGE': if not pretend: bibdoc.expunge() bibrecdocs.dirty = True elif doctype == 'FIX-ALL': if not pretend: bibrecdocs.fix(newname) elif doctype == 'FIX-MARC': pass elif doctype == 'DELETE-FILE': if urls: for (url, docformat, description, comment, flags, timestamp) in urls: if not pretend: 
bibdoc.delete_file(docformat, version) elif doctype == 'REVERT': try: if not pretend: bibdoc.revert(version) except Exception, e: write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr) raise else: if restriction != KEEP_OLD_VALUE: if not pretend: bibdoc.set_status(restriction) if doctype and doctype != KEEP_OLD_VALUE: if not pretend: bibdoc.change_doctype(doctype) if urls: (first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0] other_urls = urls[1:] assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend)) for (url, docformat, description, comment, flags, timestamp) in other_urls: assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend)) if not found_bibdoc: if doctype in ('PURGE', 'DELETE', 'EXPUNGE', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE', 'REVERT'): write_message("('%s', '%s', '%s') not performed because '%s' docname didn't existed." 
% (doctype, newname, urls, docname), stream=sys.stderr) raise StandardError else: if not pretend: bibdoc = bibrecdocs.add_bibdoc(doctype, newname) bibdoc.set_status(restriction) for (url, docformat, description, comment, flags, timestamp) in urls: assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp)) elif mode == 'append': found_bibdoc = False try: bibdoc = bibrecdocs.get_bibdoc(docname) found_bibdoc = True except InvenioBibDocFileError: found_bibdoc = False else: for (url, docformat, description, comment, flags, timestamp) in urls: assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend)) if not found_bibdoc: try: if not pretend: bibdoc = bibrecdocs.add_bibdoc(doctype, docname) bibdoc.set_status(restriction) for (url, docformat, description, comment, flags, timestamp) in urls: assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp)) except Exception, e: register_exception() write_message("('%s', '%s', '%s') not appended because: '%s'." % (doctype, newname, urls, e), stream=sys.stderr) raise if not pretend and doctype not in ('PURGE', 'DELETE', 'EXPUNGE'): _process_document_moreinfos(more_infos, newname, version, urls and urls[0][1], mode) # resolving temporary version and identifier if bibdoc_tmpid: if bibdoc_tmpid in tmp_ids and tmp_ids[bibdoc_tmpid] != -1: write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpid, ), stream=sys.stderr) else: tmp_ids[bibdoc_tmpid] = bibrecdocs.get_docid(docname) if bibdoc_tmpver: if bibdoc_tmpver in tmp_vers and tmp_vers[bibdoc_tmpver] != -1: write_message("WARNING: the temporary version identifier %s has been declared more than once. 
Ignoring the second occurance" % (bibdoc_tmpver, ), stream=sys.stderr) else: if version == None: if version: tmp_vers[bibdoc_tmpver] = version else: tmp_vers[bibdoc_tmpver] = bibrecdocs.get_bibdoc(docname).get_latest_version() else: tmp_vers[bibdoc_tmpver] = version return record ### Update functions def update_bibrec_date(record_modification_date, bibrec_id, insert_mode_p, record_creation_date=None, pretend=False): """ Update the date of the record in bibrec table. Note: record_creation_date is mandatory if insert_mode_p=True. """ if insert_mode_p: query = """UPDATE bibrec SET creation_date=%s, modification_date=%s WHERE id=%s""" params = (record_creation_date, record_modification_date, bibrec_id) else: query = """UPDATE bibrec SET modification_date=%s WHERE id=%s""" params = (record_modification_date, bibrec_id) if not pretend: run_sql(query, params) write_message(" -Update record creation/modification date: DONE" , verbose=2) def update_bibfmt_format(id_bibrec, format_value, format_name, modification_date=None, pretend=False): """Update the format in the table bibfmt""" if modification_date is None: modification_date = time.strftime('%Y-%m-%d %H:%M:%S') else: try: time.strptime(modification_date, "%Y-%m-%d %H:%M:%S") except ValueError: modification_date = '1970-01-01 00:00:00' # We check if the format is already in bibFmt nb_found = find_record_format(id_bibrec, format_name) if nb_found == 1: # we are going to update the format # compress the format_value value pickled_format_value = compress(str(format_value)) # update the format: query = """UPDATE bibfmt SET last_updated=%s, value=%s WHERE id_bibrec=%s AND format=%s""" params = (modification_date, pickled_format_value, id_bibrec, format_name) if not pretend: row_id = run_sql(query, params) if not pretend and row_id is None: write_message(" ERROR: during update_bibfmt_format function", verbose=1, stream=sys.stderr) return 1 else: write_message(" -Update the format %s in bibfmt: DONE" % format_name , verbose=2) 
return 0 elif nb_found > 1: write_message(" Failed: Same format %s found several time in bibfmt for the same record." % format_name, verbose=1, stream=sys.stderr) return 1 else: # Insert the format information in BibFMT res = insert_bibfmt(id_bibrec, format_value, format_name, modification_date, pretend=pretend) if res is None: write_message(" ERROR: during insert_bibfmt", verbose=1, stream=sys.stderr) return 1 else: write_message(" -Insert the format %s in bibfmt: DONE" % format_name , verbose=2) return 0 def delete_bibfmt_format(id_bibrec, format_name, pretend=False): """ Delete format FORMAT_NAME from bibfmt table fo record ID_BIBREC. """ if not pretend: run_sql("DELETE FROM bibfmt WHERE id_bibrec=%s and format=%s", (id_bibrec, format_name)) return 0 def archive_marcxml_for_history(recID, affected_fields, pretend=False): """ Archive current MARCXML format of record RECID from BIBFMT table into hstRECORD table. Useful to keep MARCXML history of records. Return 0 if everything went fine. Return 1 otherwise. 
""" res = run_sql("SELECT id_bibrec, value, last_updated FROM bibfmt WHERE format='xm' AND id_bibrec=%s", (recID,)) db_affected_fields = "" if affected_fields: tmp_affected_fields = {} for field in affected_fields: if field.isdigit(): #hack for tags from RevisionVerifier for ind in affected_fields[field]: tmp_affected_fields[(field + ind[0] + ind[1] + "%").replace(" ", "_")] = 1 else: pass #future implementation for fields tmp_affected_fields = tmp_affected_fields.keys() tmp_affected_fields.sort() db_affected_fields = ",".join(tmp_affected_fields) if res and not pretend: run_sql("""INSERT INTO "hstRECORD" (id_bibrec, marcxml, job_id, job_name, job_person, job_date, job_details, affected_fields) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)""", (res[0][0], res[0][1], task_get_task_param('task_id', 0), 'bibupload', task_get_task_param('user', 'UNKNOWN'), res[0][2], 'mode: ' + task_get_option('mode', 'UNKNOWN') + '; file: ' + task_get_option('file_path', 'UNKNOWN') + '.', db_affected_fields)) return 0 def update_database_with_metadata(record, rec_id, oai_rec_id="oai", affected_tags=None, pretend=False): """Update the database tables with the record and the record id given in parameter""" # extract only those tags that have been affected. # check happens at subfield level. 
This is to prevent overhead # associated with inserting already existing field with given ind pair write_message("update_database_with_metadata: record=%s, rec_id=%s, oai_rec_id=%s, affected_tags=%s" % (record, rec_id, oai_rec_id, affected_tags), verbose=9) tmp_record = {} if affected_tags: for tag in record.keys(): if tag in affected_tags.keys(): write_message(" -Tag %s found to be modified.Setting up for update" % tag, verbose=9) # initialize new list to hold affected field new_data_tuple_list = [] for data_tuple in record[tag]: ind1 = data_tuple[1] ind2 = data_tuple[2] if (ind1, ind2) in affected_tags[tag]: write_message(" -Indicator pair (%s, %s) added to update list" % (ind1, ind2), verbose=9) new_data_tuple_list.append(data_tuple) tmp_record[tag] = new_data_tuple_list write_message(lambda: " -Modified fields: \n%s" % record_xml_output(tmp_record), verbose=2) else: tmp_record = record for tag in tmp_record.keys(): # check if tag is not a special one: if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS: # for each tag there is a list of tuples representing datafields tuple_list = tmp_record[tag] # this list should contain the elements of a full tag [tag, ind1, ind2, subfield_code] tag_list = [] tag_list.append(tag) for single_tuple in tuple_list: # these are the contents of a single tuple subfield_list = single_tuple[0] ind1 = single_tuple[1] ind2 = single_tuple[2] # append the ind's to the full tag if ind1 == '' or ind1 == ' ': tag_list.append('_') else: tag_list.append(ind1) if ind2 == '' or ind2 == ' ': tag_list.append('_') else: tag_list.append(ind2) datafield_number = single_tuple[4] if tag in CFG_BIBUPLOAD_SPECIAL_TAGS: # nothing to do for special tags (FFT, BDR, BDM) pass elif tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS and tag != "001": value = single_tuple[3] # get the full tag full_tag = ''.join(tag_list) # update the tables write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9) # insert the tag and value into into bibxxx (table_name, 
bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend) #print 'tname, bibrow', table_name, bibxxx_row_id; if table_name is None or bibxxx_row_id is None: write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr) # connect bibxxx and bibrec with the table bibrec_bibxxx res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend) if res is None: write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr) else: # get the tag and value from the content of each subfield for subfield in set(subfield_list): subtag = subfield[0] value = subfield[1] tag_list.append(subtag) # get the full tag full_tag = ''.join(tag_list) # update the tables write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9) # insert the tag and value into into bibxxx (table_name, bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend) if table_name is None or bibxxx_row_id is None: write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr) # connect bibxxx and bibrec with the table bibrec_bibxxx res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend) if res is None: write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr) # remove the subtag from the list tag_list.pop() tag_list.pop() tag_list.pop() tag_list.pop() write_message(" -Update the database with metadata: DONE", verbose=2) log_record_uploading(oai_rec_id, task_get_task_param('task_id', 0), rec_id, 'P', pretend=pretend) def append_new_tag_to_old_record(record, rec_old): """Append new tags to a old record""" def _append_tag(tag): if tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS: if tag == '001': pass else: # if it is a controlfield, just access the value for single_tuple in record[tag]: controlfield_value = single_tuple[3] # add the field to the old record newfield_number = 
record_add_field(rec_old, tag, controlfield_value=controlfield_value) if newfield_number is None: write_message(" ERROR: when adding the field"+tag, verbose=1, stream=sys.stderr) else: # For each tag there is a list of tuples representing datafields for single_tuple in record[tag]: # We retrieve the information of the tag subfield_list = single_tuple[0] ind1 = single_tuple[1] ind2 = single_tuple[2] if '%s%s%s' % (tag, ind1 == ' ' and '_' or ind1, ind2 == ' ' and '_' or ind2) in (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5], CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[:5]): ## We don't want to append the external identifier ## if it is already existing. if record_find_field(rec_old, tag, single_tuple)[0] is not None: write_message(" Not adding tag: %s ind1=%s ind2=%s subfields=%s: it's already there" % (tag, ind1, ind2, subfield_list), verbose=9) continue # We add the datafield to the old record write_message(" Adding tag: %s ind1=%s ind2=%s subfields=%s" % (tag, ind1, ind2, subfield_list), verbose=9) newfield_number = record_add_field(rec_old, tag, ind1, ind2, subfields=subfield_list) if newfield_number is None: write_message(" ERROR: when adding the field"+tag, verbose=1, stream=sys.stderr) # Go through each tag in the appended record for tag in record: _append_tag(tag) return rec_old def copy_strong_tags_from_old_record(record, rec_old): """ Look for strong tags in RECORD and REC_OLD. If no strong tags are found in RECORD, then copy them over from REC_OLD. This function modifies RECORD structure on the spot. 
""" for strong_tag in CFG_BIBUPLOAD_STRONG_TAGS: if not record_get_field_instances(record, strong_tag, strong_tag[3:4] or '%', strong_tag[4:5] or '%'): strong_tag_old_field_instances = record_get_field_instances(rec_old, strong_tag) if strong_tag_old_field_instances: for strong_tag_old_field_instance in strong_tag_old_field_instances: sf_vals, fi_ind1, fi_ind2, controlfield, dummy = strong_tag_old_field_instance record_add_field(record, strong_tag, fi_ind1, fi_ind2, controlfield, sf_vals) return ### Delete functions def delete_tags(record, rec_old): """ Returns a record structure with all the fields in rec_old minus the fields in record. @param record: The record containing tags to delete. @type record: record structure @param rec_old: The original record. @type rec_old: record structure @return: The modified record. @rtype: record structure """ returned_record = copy.deepcopy(rec_old) for tag, fields in iteritems(record): if tag in ('001', ): continue for field in fields: local_position = record_find_field(returned_record, tag, field)[1] if local_position is not None: record_delete_field(returned_record, tag, field_position_local=local_position) return returned_record def delete_tags_to_correct(record, rec_old): """ Delete tags from REC_OLD which are also existing in RECORD. When deleting, pay attention not only to tags, but also to indicators, so that fields with the same tags but different indicators are not deleted. """ ## Some fields are controlled via provenance information. ## We should re-add saved fields at the end. 
fields_to_readd = {} for tag in CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS: if tag[:3] in record: tmp_field_instances = record_get_field_instances(record, tag[:3], tag[3], tag[4]) ## Let's discover the provenance that will be updated provenances_to_update = [] for instance in tmp_field_instances: for code, value in instance[0]: if code == tag[5]: if value not in provenances_to_update: provenances_to_update.append(value) break else: ## The provenance is not specified. ## let's add the special empty provenance. if '' not in provenances_to_update: provenances_to_update.append('') potential_fields_to_readd = record_get_field_instances(rec_old, tag[:3], tag[3], tag[4]) ## Let's take all the field corresponding to tag ## Let's save apart all the fields that should be updated, but ## since they have a different provenance not mentioned in record ## they should be preserved. fields = [] for sf_vals, ind1, ind2, dummy_cf, dummy_line in potential_fields_to_readd: for code, value in sf_vals: if code == tag[5]: if value not in provenances_to_update: fields.append(sf_vals) break else: if '' not in provenances_to_update: ## Empty provenance, let's protect in any case fields.append(sf_vals) fields_to_readd[tag] = fields # browse through all the tags from the MARCXML file: for tag in record: # check if the tag exists in the old record too: if tag in rec_old and tag != '001': # the tag does exist, so delete all record's tag+ind1+ind2 combinations from rec_old for dummy_sf_vals, ind1, ind2, dummy_cf, dummyfield_number in record[tag]: write_message(" Delete tag: " + tag + " ind1=" + ind1 + " ind2=" + ind2, verbose=9) record_delete_field(rec_old, tag, ind1, ind2) ## Ok, we readd necessary fields! 
for tag, fields in iteritems(fields_to_readd): for sf_vals in fields: write_message(" Adding tag: " + tag[:3] + " ind1=" + tag[3] + " ind2=" + tag[4] + " code=" + str(sf_vals), verbose=9) record_add_field(rec_old, tag[:3], tag[3], tag[4], subfields=sf_vals) def delete_bibrec_bibxxx(record, id_bibrec, affected_tags={}, pretend=False): """Delete the database record from the table bibxxx given in parameters""" # we clear all the rows from bibrec_bibxxx from the old record # clearing only those tags that have been modified. write_message(lambda: "delete_bibrec_bibxxx(record=%s, id_bibrec=%s, affected_tags=%s)" % (record, id_bibrec, affected_tags), verbose=9) for tag in affected_tags: # sanity check with record keys just to make sure its fine. if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS: write_message("%s found in record"%tag, verbose=2) # for each name construct the bibrec_bibxxx table name table_name = 'bib'+tag[0:2]+'x' bibrec_table = 'bibrec_'+table_name # delete all the records with proper id_bibrec. Indicators matter for individual affected tags tmp_ind_1 = '' tmp_ind_2 = '' # construct exact tag value using indicators for ind_pair in affected_tags[tag]: if ind_pair[0] == ' ': tmp_ind_1 = '_' else: tmp_ind_1 = ind_pair[0] if ind_pair[1] == ' ': tmp_ind_2 = '_' else: tmp_ind_2 = ind_pair[1] # need to escape incase of underscore so that mysql treats it as a char tag_val = tag+"\\"+tmp_ind_1+"\\"+tmp_ind_2 + '%' query = """DELETE br.* FROM `%s` br,`%s` b where br.id_bibrec=%%s and br.id_bibxxx=b.id and b.tag like %%s""" % (bibrec_table, table_name) params = (id_bibrec, tag_val) write_message(query % params, verbose=9) if not pretend: run_sql(query, params) else: write_message("%s not found"%tag, verbose=2) def main(): """Main that construct all the bibtask.""" task_init(authorization_action='runbibupload', authorization_msg="BibUpload Task Submission", description="""Receive MARC XML file and update appropriate database tables according to options. 
Examples: $ bibupload -i input.xml """, help_specific_usage=""" -a, --append\t\tnew fields are appended to the existing record -c, --correct\t\tfields are replaced by the new ones in the existing record, except \t\t\twhen overridden by CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS -i, --insert\t\tinsert the new record in the database -r, --replace\t\tthe existing record is entirely replaced by the new one, \t\t\texcept for fields in CFG_BIBUPLOAD_STRONG_TAGS -d, --delete\t\tspecified fields are deleted in existing record -n, --notimechange\tdo not change record last modification date when updating -o, --holdingpen\tInsert record into holding pen instead of the normal database --pretend\t\tdo not really insert/append/correct/replace the input file --force\t\twhen --replace, use provided 001 tag values, even if the matching \t\t\trecord does not exist (thus allocating it on-the-fly) --callback-url\tSend via a POST request a JSON-serialized answer (see admin guide), in \t\t\torder to provide a feedback to an external service about the outcome of the operation. --nonce\t\twhen used together with --callback add the nonce value in the JSON message. --special-treatment=MODE\tif "oracle" is specified, when used together with --callback_url, \t\t\tPOST an application/x-www-form-urlencoded request where the JSON message is encoded \t\t\tinside a form field called "results". """, version=__revision__, specific_params=("ircazdnoS:", [ "insert", "replace", "correct", "append", "reference", "delete", "notimechange", "holdingpen", "pretend", "force", "callback-url=", "nonce=", "special-treatment=", "stage=", ]), task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter, task_run_fnc=task_run_core, task_submit_check_options_fnc=task_submit_check_options) def task_submit_elaborate_specific_parameter(key, value, opts, args): # pylint: disable=W0613 """ Given the string key it checks it's meaning, eventually using the value. 
Usually it fills some key in the options dict. It must return True if it has elaborated the key, False, if it doesn't know that key. eg: if key in ['-n', '--number']: task_get_option(\1) = value return True return False """ # No time change option if key in ("-n", "--notimechange"): task_set_option('notimechange', 1) # Insert mode option elif key in ("-i", "--insert"): if task_get_option('mode') == 'replace': # if also replace found, then set to replace_or_insert task_set_option('mode', 'replace_or_insert') else: task_set_option('mode', 'insert') fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) # Replace mode option elif key in ("-r", "--replace"): if task_get_option('mode') == 'insert': # if also insert found, then set to replace_or_insert task_set_option('mode', 'replace_or_insert') else: task_set_option('mode', 'replace') fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) # Holding pen mode option elif key in ("-o", "--holdingpen"): write_message("Holding pen mode", verbose=3) task_set_option('mode', 'holdingpen') fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) # Correct mode option elif key in ("-c", "--correct"): task_set_option('mode', 'correct') fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) # Append mode option elif key in ("-a", "--append"): task_set_option('mode', 'append') fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) # Deprecated reference mode option (now correct) elif key in ("-z", "--reference"): task_set_option('mode', 'correct') fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) elif key in ("-d", "--delete"): task_set_option('mode', 'delete') fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) elif key in ("--pretend",): task_set_option('pretend', True) fix_argv_paths([args[0]]) task_set_option('file_path', 
os.path.abspath(args[0])) elif key in ("--force",): task_set_option('force', True) fix_argv_paths([args[0]]) task_set_option('file_path', os.path.abspath(args[0])) elif key in ("--callback-url", ): task_set_option('callback_url', value) elif key in ("--nonce", ): task_set_option('nonce', value) elif key in ("--special-treatment", ): if value.lower() in CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS: if value.lower() == 'oracle': task_set_option('oracle_friendly', True) else: print("""The specified value is not in the list of allowed special treatments codes: %s""" % CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS, file=sys.stderr) return False elif key in ("-S", "--stage"): print("""WARNING: the --stage parameter is deprecated and ignored.""", file=sys.stderr) else: return False return True def task_submit_check_options(): """ Reimplement this method for having the possibility to check options before submitting the task, in order for example to provide default values. It must return False if there are errors in the options. """ if task_get_option('mode') is None: write_message("Please specify at least one update/insert mode!", stream=sys.stderr) return False file_path = task_get_option('file_path') if file_path is None: write_message("Missing filename! 
-h for help.", stream=sys.stderr) return False try: open(file_path).read().decode('utf-8') except IOError: write_message("""File is not accessible: %s""" % file_path, stream=sys.stderr) return False except UnicodeDecodeError: write_message("""File encoding is not valid utf-8: %s""" % file_path, stream=sys.stderr) return False return True def writing_rights_p(): """Return True in case bibupload has the proper rights to write in the fulltext file folder.""" if _WRITING_RIGHTS is not None: return _WRITING_RIGHTS try: if not os.path.exists(CFG_BIBDOCFILE_FILEDIR): os.makedirs(CFG_BIBDOCFILE_FILEDIR) fd, filename = tempfile.mkstemp(suffix='.txt', prefix='test', dir=CFG_BIBDOCFILE_FILEDIR) test = os.fdopen(fd, 'w') test.write('TEST') test.close() if open(filename).read() != 'TEST': raise IOError("Can not successfully write and readback %s" % filename) os.remove(filename) except: register_exception(alert_admin=True) return False return True def post_results_to_callback_url(results, callback_url): write_message("Sending feedback to %s" % callback_url) if not CFG_JSON_AVAILABLE: from warnings import warn warn("--callback-url used but simplejson/json not available") return json_results = json.dumps(results) write_message("Message to send: %s" % json_results, verbose=9) ## <scheme>://<netloc>/<path>?<query>#<fragment> scheme, dummynetloc, dummypath, dummyquery, dummyfragment = urlparse.urlsplit(callback_url) ## See: http://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python if scheme == 'http': opener = urllib2.build_opener(urllib2.HTTPHandler) elif scheme == 'https': opener = urllib2.build_opener(urllib2.HTTPSHandler) else: raise ValueError("Scheme not handled %s for callback_url %s" % (scheme, callback_url)) if task_get_option('oracle_friendly'): write_message("Oracle friendly mode requested", verbose=9) request = urllib2.Request(callback_url, data=urllib.urlencode({'results': json_results})) request.add_header('Content-Type', 
'application/x-www-form-urlencoded') else: request = urllib2.Request(callback_url, data=json_results) request.add_header('Content-Type', 'application/json') request.add_header('User-Agent', make_user_agent_string('BibUpload')) write_message("Headers about to be sent: %s" % request.headers, verbose=9) write_message("Data about to be sent: %s" % request.data, verbose=9) res = opener.open(request) msg = res.read() write_message("Result of posting the feedback: %s %s" % (res.code, res.msg), verbose=9) write_message("Returned message is: %s" % msg, verbose=9) return res def bibupload_records(records, opt_mode=None, opt_notimechange=0, pretend=False, callback_url=None, results_for_callback=None): """perform the task of uploading a set of records returns list of (error_code, recid) tuples for separate records """ #Dictionaries maintaining temporary identifiers # Structure: identifier -> number tmp_ids = {} tmp_vers = {} results = [] # The first phase -> assigning meaning to temporary identifiers if opt_mode == 'reference': ## NOTE: reference mode has been deprecated in favour of 'correct' opt_mode = 'correct' record = None for record in records: record_id = record_extract_oai_id(record) task_sleep_now_if_required(can_stop_too=True) if opt_mode == "holdingpen": #inserting into the holding pen write_message("Inserting into holding pen", verbose=3) insert_record_into_holding_pen(record, record_id, pretend=pretend) else: write_message("Inserting into main database", verbose=3) error = bibupload( record, opt_mode = opt_mode, opt_notimechange = opt_notimechange, oai_rec_id = record_id, pretend = pretend, tmp_ids = tmp_ids, tmp_vers = tmp_vers) results.append(error) if error[0] == 1: if record: write_message(lambda: record_xml_output(record), stream=sys.stderr) else: write_message("Record could not have been parsed", stream=sys.stderr) stat['nb_errors'] += 1 if callback_url: results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]}) 
elif error[0] == 2: if record: write_message(lambda: record_xml_output(record), stream=sys.stderr) else: write_message("Record could not have been parsed", stream=sys.stderr) if callback_url: results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]}) elif error[0] == 0: if callback_url: from invenio.legacy.search_engine import print_record results_for_callback['results'].append({'recid': error[1], 'success': True, "marcxml": print_record(error[1], 'xm'), 'url': "%s/%s/%s" % (CFG_SITE_URL, CFG_SITE_RECORD, error[1])}) else: if callback_url: results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]}) # stat us a global variable task_update_progress("Done %d out of %d." % \ (stat['nb_records_inserted'] + \ stat['nb_records_updated'], stat['nb_records_to_upload'])) # Second phase -> Now we can process all entries where temporary identifiers might appear (BDR, BDM) write_message("Identifiers table after processing: %s versions: %s" % (str(tmp_ids), str(tmp_vers)), verbose=2) write_message("Uploading BDR and BDM fields") if opt_mode != "holdingpen": for record in records: record_id = retrieve_rec_id(record, opt_mode, pretend=pretend, post_phase = True) bibupload_post_phase(record, rec_id = record_id, mode = opt_mode, pretend = pretend, tmp_ids = tmp_ids, tmp_vers = tmp_vers) return results def task_run_core(): """ Reimplement to add the body of the task.""" write_message("Input file '%s', input mode '%s'." 
% (task_get_option('file_path'), task_get_option('mode'))) write_message("STAGE 0:", verbose=2) if task_get_option('file_path') is not None: write_message("start preocessing", verbose=3) task_update_progress("Reading XML input") recs = xml_marc_to_records(open_marc_file(task_get_option('file_path'))) stat['nb_records_to_upload'] = len(recs) write_message(" -Open XML marc: DONE", verbose=2) task_sleep_now_if_required(can_stop_too=True) write_message("Entering records loop", verbose=3) callback_url = task_get_option('callback_url') results_for_callback = {'results': []} if recs is not None: # We proceed each record by record bibupload_records(records=recs, opt_mode=task_get_option('mode'), opt_notimechange=task_get_option('notimechange'), pretend=task_get_option('pretend'), callback_url=callback_url, results_for_callback=results_for_callback) else: write_message(" ERROR: bibupload failed: No record found", verbose=1, stream=sys.stderr) callback_url = task_get_option("callback_url") if callback_url: nonce = task_get_option("nonce") if nonce: results_for_callback["nonce"] = nonce post_results_to_callback_url(results_for_callback, callback_url) if task_get_task_param('verbose') >= 1: # Print out the statistics print_out_bibupload_statistics() # Check if they were errors return not stat['nb_errors'] >= 1 def log_record_uploading(oai_rec_id, task_id, bibrec_id, insertion_db, pretend=False): if oai_rec_id != "" and oai_rec_id != None: query = """UPDATE "oaiHARVESTLOG" SET date_inserted=NOW(), inserted_to_db=%s, id_bibrec=%s WHERE oai_id = %s AND bibupload_task_id = %s ORDER BY date_harvested LIMIT 1""" if not pretend: run_sql(query, (str(insertion_db), str(bibrec_id), str(oai_rec_id), str(task_id), )) if __name__ == "__main__": main()
gpl-2.0
tuxfux-hlp-notes/python-batches
archieves/batch-61/modules/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/sbcsgroupprober.py
2936
3291
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
                                Latin5CyrillicModel, MacCyrillicModel,
                                Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber


class SBCSGroupProber(CharSetGroupProber):
    """Group prober that bundles all single-byte charset probers.

    Registers one ``SingleByteCharSetProber`` per language model, plus the
    three cooperating Hebrew probers, and relies on ``CharSetGroupProber``
    to select the best-scoring candidate.
    """

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # Plain single-byte models, kept in the original registration order.
        plain_models = (
            Win1251CyrillicModel,
            Koi8rModel,
            Latin5CyrillicModel,
            MacCyrillicModel,
            Ibm866Model,
            Ibm855Model,
            Latin7GreekModel,
            Win1253GreekModel,
            Latin5BulgarianModel,
            Win1251BulgarianModel,
            Latin2HungarianModel,
            Win1250HungarianModel,
            TIS620ThaiModel,
        )
        self._mProbers = [SingleByteCharSetProber(model)
                          for model in plain_models]
        # Hebrew uses three linked probers: a logical-order and a
        # visual-order model prober, both wired into a HebrewProber that
        # arbitrates between them via set_model_probers().
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                 False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self._mProbers.extend([hebrew_prober, logical_hebrew,
                               visual_hebrew])
        self.reset()
gpl-3.0
BackupGGCode/python-for-android
python3-alpha/python3-src/Lib/test/test_dbm.py
55
5221
#! /usr/bin/env python3 """Test script for the dbm.open function based on testdumbdbm.py""" import os import unittest import glob import test.support # Skip tests if dbm module doesn't exist. dbm = test.support.import_module('dbm') _fname = test.support.TESTFN # # Iterates over every database module supported by dbm currently available, # setting dbm to use each in turn, and yielding that module # def dbm_iterator(): for name in dbm._names: try: mod = __import__(name, fromlist=['open']) except ImportError: continue dbm._modules[name] = mod yield mod # # Clean up all scratch databases we might have created during testing # def delete_files(): # we don't know the precise name the underlying database uses # so we use glob to locate all names for f in glob.glob(_fname + "*"): test.support.unlink(f) class AnyDBMTestCase(unittest.TestCase): _dict = {'0': b'', 'a': b'Python:', 'b': b'Programming', 'c': b'the', 'd': b'way', 'f': b'Guido', 'g': b'intended', } def init_db(self): f = dbm.open(_fname, 'n') for k in self._dict: f[k.encode("ascii")] = self._dict[k] f.close() def keys_helper(self, f): keys = sorted(k.decode("ascii") for k in f.keys()) dkeys = sorted(self._dict.keys()) self.assertEqual(keys, dkeys) return keys def test_error(self): self.assertTrue(issubclass(self.module.error, IOError)) def test_anydbm_not_existing(self): self.assertRaises(dbm.error, dbm.open, _fname) def test_anydbm_creation(self): f = dbm.open(_fname, 'c') self.assertEqual(list(f.keys()), []) for key in self._dict: f[key.encode("ascii")] = self._dict[key] self.read_helper(f) f.close() def test_anydbm_creation_n_file_exists_with_invalid_contents(self): with open(_fname, "w") as w: pass # create an empty file f = dbm.open(_fname, 'n') self.addCleanup(f.close) self.assertEqual(len(f), 0) def test_anydbm_modification(self): self.init_db() f = dbm.open(_fname, 'c') self._dict['g'] = f[b'g'] = b"indented" self.read_helper(f) f.close() def test_anydbm_read(self): self.init_db() f = dbm.open(_fname, 
'r') self.read_helper(f) f.close() def test_anydbm_keys(self): self.init_db() f = dbm.open(_fname, 'r') keys = self.keys_helper(f) f.close() def test_anydbm_access(self): self.init_db() f = dbm.open(_fname, 'r') key = "a".encode("ascii") self.assertIn(key, f) assert(f[key] == b"Python:") f.close() def read_helper(self, f): keys = self.keys_helper(f) for key in self._dict: self.assertEqual(self._dict[key], f[key.encode("ascii")]) def tearDown(self): delete_files() def setUp(self): dbm._defaultmod = self.module delete_files() class WhichDBTestCase(unittest.TestCase): # Actual test methods are added to namespace after class definition. def __init__(self, *args): unittest.TestCase.__init__(self, *args) def test_whichdb(self): for module in dbm_iterator(): # Check whether whichdb correctly guesses module name # for databases opened with "module" module. # Try with empty files first name = module.__name__ if name == 'dbm.dumb': continue # whichdb can't support dbm.dumb delete_files() f = module.open(_fname, 'c') f.close() self.assertEqual(name, dbm.whichdb(_fname)) # Now add a key f = module.open(_fname, 'w') f[b"1"] = b"1" # and test that we can find it self.assertIn(b"1", f) # and read it self.assertTrue(f[b"1"] == b"1") f.close() self.assertEqual(name, dbm.whichdb(_fname)) def tearDown(self): delete_files() def setUp(self): delete_files() self.filename = test.support.TESTFN self.d = dbm.open(self.filename, 'c') self.d.close() def test_keys(self): self.d = dbm.open(self.filename, 'c') self.assertEqual(self.d.keys(), []) a = [(b'a', b'b'), (b'12345678910', b'019237410982340912840198242')] for k, v in a: self.d[k] = v self.assertEqual(sorted(self.d.keys()), sorted(k for (k, v) in a)) for k, v in a: self.assertIn(k, self.d) self.assertEqual(self.d[k], v) self.assertNotIn(b'xxx', self.d) self.assertRaises(KeyError, lambda: self.d[b'xxx']) self.d.close() def test_main(): classes = [WhichDBTestCase] for mod in dbm_iterator(): classes.append(type("TestCase-" + mod.__name__, 
(AnyDBMTestCase,), {'module': mod})) test.support.run_unittest(*classes) if __name__ == "__main__": test_main()
apache-2.0
LarsFronius/ansible
lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
77
6757
#!/usr/bin/python # # (c) 2013, Nimbis Services # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['deprecated'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ec2_ami_search short_description: Retrieve AWS AMI information for a given operating system. deprecated: "Use M(ec2_ami_find) instead." version_added: "1.6" description: - Look up the most recent AMI on AWS for a given operating system. - Returns C(ami), C(aki), C(ari), C(serial), C(tag) - If there is no AKI or ARI associated with an image, these will be C(null). - Only supports images from cloud-images.ubuntu.com - 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})' options: distro: description: Linux distribution (e.g., C(ubuntu)) required: true choices: ["ubuntu"] release: description: short name of the release (e.g., C(precise)) required: true stream: description: Type of release. 
required: false default: "server" choices: ["server", "desktop"] store: description: Back-end store for instance required: false default: "ebs" choices: ["ebs", "ebs-io1", "ebs-ssd", "instance-store"] arch: description: CPU architecture required: false default: "amd64" choices: ["i386", "amd64"] region: description: EC2 region required: false default: us-east-1 choices: ["ap-northeast-1", "ap-southeast-1", "ap-northeast-2", "ap-southeast-2", "ca-central-1", "eu-central-1", "eu-west-1", "eu-west-2", "sa-east-1", "us-east-1", "us-east-2", "us-west-1", "us-west-2", "us-gov-west-1"] virt: description: virutalization type required: false default: paravirtual choices: ["paravirtual", "hvm"] author: "Ansible Core Team (deprecated)" ''' EXAMPLES = ''' - name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance hosts: 127.0.0.1 connection: local tasks: - name: Get the Ubuntu precise AMI ec2_ami_search: distro: ubuntu release: precise region: us-west-1 store: instance-store register: ubuntu_image - name: Start the EC2 instance ec2: image: "{{ ubuntu_image.ami }}" instance_type: m1.small key_name: mykey ''' import csv from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url SUPPORTED_DISTROS = ['ubuntu'] AWS_REGIONS = ['ap-northeast-1', 'ap-southeast-1', 'ap-northeast-2', 'ap-southeast-2', 'ap-south-1', 'ca-central-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'sa-east-1', 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', "us-gov-west-1"] def get_url(module, url): """ Get url and return response """ r, info = fetch_url(module, url) if info['status'] != 200: # Backwards compat info['status_code'] = info['status'] module.fail_json(**info) return r def ubuntu(module): """ Get the ami for ubuntu """ release = module.params['release'] stream = module.params['stream'] store = module.params['store'] arch = module.params['arch'] region = module.params['region'] virt = module.params['virt'] url = get_ubuntu_url(release, stream) req = 
get_url(module, url) reader = csv.reader(req, delimiter='\t') try: ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream, store, arch, region, virt) module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag, serial=serial) except KeyError: module.fail_json(msg="No matching AMI found") def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt): """ Look up the Ubuntu AMI that matches query given a table of AMIs table: an iterable that returns a row of (release, stream, tag, serial, region, ami, aki, ari, virt) release: ubuntu release name stream: 'server' or 'desktop' store: 'ebs', 'ebs-io1', 'ebs-ssd' or 'instance-store' arch: 'i386' or 'amd64' region: EC2 region virt: 'paravirtual' or 'hvm' Returns (ami, aki, ari, tag, serial)""" expected = (release, stream, store, arch, region, virt) for row in table: (actual_release, actual_stream, tag, serial, actual_store, actual_arch, actual_region, ami, aki, ari, actual_virt) = row actual = (actual_release, actual_stream, actual_store, actual_arch, actual_region, actual_virt) if actual == expected: # aki and ari are sometimes blank if aki == '': aki = None if ari == '': ari = None return (ami, aki, ari, tag, serial) raise KeyError() def get_ubuntu_url(release, stream): url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt" return url % (release, stream) def main(): arg_spec = dict( distro=dict(required=True, choices=SUPPORTED_DISTROS), release=dict(required=True), stream=dict(required=False, default='server', choices=['desktop', 'server']), store=dict(required=False, default='ebs', choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']), arch=dict(required=False, default='amd64', choices=['i386', 'amd64']), region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), virt=dict(required=False, default='paravirtual', choices=['paravirtual', 'hvm']), ) module = AnsibleModule(argument_spec=arg_spec) distro = module.params['distro'] if distro == 'ubuntu': 
ubuntu(module) else: module.fail_json(msg="Unsupported distro: %s" % distro) if __name__ == '__main__': main()
gpl-3.0
keishi/chromium
tools/find_runtime_symbols/prepare_symbol_info.py
1
3853
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import logging import os import re import shutil import subprocess import sys import tempfile from parse_proc_maps import parse_proc_maps from util import executable_condition def prepare_symbol_info(maps_path, output_dir_path=None, loglevel=logging.WARN): log = logging.getLogger('prepare_symbol_info') log.setLevel(loglevel) handler = logging.StreamHandler() handler.setLevel(loglevel) formatter = logging.Formatter('%(message)s') handler.setFormatter(formatter) log.addHandler(handler) if not output_dir_path: matched = re.match('^(.*)\.maps$', os.path.basename(maps_path)) if matched: output_dir_path = matched.group(1) + '.pre' if not output_dir_path: matched = re.match('^/proc/(.*)/maps$', os.path.realpath(maps_path)) if matched: output_dir_path = matched.group(1) + '.pre' if not output_dir_path: output_dir_prefix = os.path.basename(maps_path) + '.pre' # TODO(dmikurube): Find another candidate for output_dir_path. log.info('Data for profiling will be collected in "%s".' % output_dir_path) output_dir_path_exists = False if os.path.exists(output_dir_path): if os.path.isdir(output_dir_path) and not os.listdir(output_dir_path): log.warn('Using an empty directory existing at "%s".' % output_dir_path) else: log.warn('A file or a directory exists at "%s".' % output_dir_path) output_dir_path_exists = True else: log.info('Creating a new directory at "%s".' 
% output_dir_path) os.mkdir(output_dir_path) if output_dir_path_exists: return 1 shutil.copyfile(maps_path, os.path.join(output_dir_path, 'maps')) with open(maps_path, mode='r') as f: maps = parse_proc_maps(f) log.debug('Listing up symbols.') nm_files = {} for entry in maps.iter(executable_condition): log.debug(' %016x-%016x +%06x %s' % ( entry.begin, entry.end, entry.offset, entry.name)) with tempfile.NamedTemporaryFile( prefix=os.path.basename(entry.name) + '.', suffix='.nm', delete=False, mode='w', dir=output_dir_path) as f: nm_filename = os.path.realpath(f.name) nm_succeeded = False cppfilt_succeeded = False p_nm = subprocess.Popen( 'nm -n --format bsd %s' % entry.name, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p_cppfilt = subprocess.Popen( 'c++filt', shell=True, stdin=p_nm.stdout, stdout=f, stderr=subprocess.PIPE) if p_nm.wait() == 0: nm_succeeded = True for line in p_nm.stderr: log.debug(line.rstrip()) if p_cppfilt.wait() == 0: cppfilt_succeeded = True for line in p_cppfilt.stderr: log.debug(line.rstrip()) if nm_succeeded and cppfilt_succeeded: nm_files[entry.name] = { 'file': os.path.basename(nm_filename), 'format': 'bsd', 'mangled': False} else: os.remove(nm_filename) with open(os.path.join(output_dir_path, 'nm.json'), 'w') as f: json.dump(nm_files, f, indent=2, sort_keys=True) log.info('Collected symbol information at "%s".' % output_dir_path) return 0 def main(): if not sys.platform.startswith('linux'): sys.stderr.write('This script work only on Linux.') return 1 if len(sys.argv) < 2: sys.stderr.write("""Usage: %s /path/to/maps [/path/to/output_data_dir/] """ % sys.argv[0]) return 1 elif len(sys.argv) == 2: sys.exit(prepare_symbol_info(sys.argv[1], loglevel=logging.DEBUG)) else: sys.exit(prepare_symbol_info(sys.argv[1], sys.argv[2], loglevel=logging.INFO)) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
michaelaye/scikit-image
skimage/measure/setup.py
32
1775
#!/usr/bin/env python from skimage._build import cython import os base_path = os.path.abspath(os.path.dirname(__file__)) def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs config = Configuration('measure', parent_package, top_path) config.add_data_dir('tests') cython(['_ccomp.pyx'], working_path=base_path) cython(['_find_contours_cy.pyx'], working_path=base_path) cython(['_moments_cy.pyx'], working_path=base_path) cython(['_marching_cubes_cy.pyx'], working_path=base_path) cython(['_pnpoly.pyx'], working_path=base_path) config.add_extension('_ccomp', sources=['_ccomp.c'], include_dirs=[get_numpy_include_dirs()]) config.add_extension('_find_contours_cy', sources=['_find_contours_cy.c'], include_dirs=[get_numpy_include_dirs()]) config.add_extension('_moments_cy', sources=['_moments_cy.c'], include_dirs=[get_numpy_include_dirs()]) config.add_extension('_marching_cubes_cy', sources=['_marching_cubes_cy.c'], include_dirs=[get_numpy_include_dirs()]) config.add_extension('_pnpoly', sources=['_pnpoly.c'], include_dirs=[get_numpy_include_dirs(), '../_shared']) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(maintainer='scikit-image Developers', maintainer_email='scikit-image@googlegroups.com', description='Graph-based Image-processing Algorithms', url='https://github.com/scikit-image/scikit-image', license='Modified BSD', **(configuration(top_path='').todict()) )
bsd-3-clause
diegocortassa/TACTIC
src/pyasm/search/sql_test.py
1
18702
#!/usr/bin/python ########################################################### # # Copyright (c) 2005, Southpaw Technology # All Rights Reserved # # PROPRIETARY INFORMATION. This software is proprietary to # Southpaw Technology, and is not to be reproduced, transmitted, # or disclosed in any way without written permission. # # # import tacticenv from pyasm.security import * from transaction import * from search import * from sql import * from database_impl import * from pyasm.biz import Project from pyasm.common import Container from pyasm.unittest import UnittestEnvironment from pyasm.unittest import UnittestEnvironment import unittest class SqlTest(unittest.TestCase): def setUp(self): # intialiaze the framework as a batch process batch = Batch() from pyasm.web.web_init import WebInit WebInit().execute() self.test_env = UnittestEnvironment() self.test_env.create() def test_all(self): try: db_res = DbResource.get_default('unittest') sql = DbContainer.get(db_res) impl = sql.get_database_impl() db_type = impl.get_database_type() if db_type == "PostgreSQL": self.prefix = '''"unittest"."public".''' self.sthpw_prefix = '''"sthpw"."public".''' elif db_type == "Sqlite": self.prefix = "" self.sthpw_prefix = "" else: self.prefix = '''"unittest".''' self.sthpw_prefix = '''"sthpw".''' self._test_get_connect() self._test_select_class() self._test_insert_class() self._test_update_class() self._test_insert_and_delete() self._test_create_table() self._test_transaction() self._test_order_by() self._test_rpn_filters() self._test_search_filter() self._test_join() self._test_create_view() # it doesn't allow dropping of a column if db_type != 'Sqlite': self._test_add_drop_column() finally: Project.set_project('unittest') self.test_env.delete() def _test_get_connect(self): database= 'unittest' project = Project.get_by_code(database) db_resource= project.get_project_db_resource() sql1 = DbContainer.get(db_resource) sql2 = DbContainer.get(db_resource) self.assertEquals(sql1, sql2) def 
_test_select_class(self): """ test a select """ select = Select() db_res = DbResource.get_default('unittest') select.set_database(db_res) select.add_table("person") select.add_where("\"name_first\" = 'megumi'") select.add_order_by("name_last") statement = select.get_statement() sql = DbContainer.get(db_res) impl = sql.get_database_impl() db_type = impl.get_database_type() expected = '''SELECT %s"person".* FROM %s"person" WHERE "name_first" = 'megumi' ORDER BY "person"."name_last"''' % (self.prefix, self.prefix) self.assertEquals( expected, statement ) # test for doubling of apostrophe select = Select() select.set_database(db_res) select.add_table("person") select.add_filter('name_last', "john's", op='!=') statement = select.get_statement() expected = """SELECT %s"person".* FROM %s"person" WHERE "person"."name_last" != 'john''s'""" % (self.prefix, self.prefix) self.assertEquals( expected, statement ) def _test_insert_class(self): """test an insert""" insert = Insert() insert.set_table("person"); insert.set_value("name_first", "megumi"); insert.set_value("name_last", "takamori"); statement = insert.get_statement() db_res = DbResource.get_default('unittest') sql = DbContainer.get(db_res) if sql.get_database_type() == "Oracle": expected = "INSERT INTO \"person\" (\"id\", \"name_first\", \"name_last\") VALUES (\"person_id_seq\".nextval, 'megumi', 'takamori')" elif sql.get_database_type() == "SQLServer": expected = "INSERT INTO [person] (\"name_first\", \"name_last\") VALUES ('megumi', 'takamori')" else: expected = "INSERT INTO \"person\" (\"name_first\", \"name_last\") VALUES ('megumi', 'takamori')" self.assertEquals( expected, statement ) def _test_update_class(self): """test an update""" update = Update() update.set_table("person"); update.set_value("name_first", "megumi"); update.add_where("\"person_id\" = '1'"); statement = update.get_statement() expected = "UPDATE \"person\" SET \"name_first\" = 'megumi' WHERE \"person_id\" = '1'" self.assertEqual( expected, 
statement ) def _test_insert_and_delete(self): # ensure that we are *NOT* in a transaction Transaction.clear_stack() transaction = Transaction.get() # comment out for now #self.assertEquals( None, transaction ) db_res = DbResource.get_default('unittest') sql = DbContainer.get(db_res) self.assertEquals( False, sql.is_in_transaction() ) count_sql = """select count(*) from "person" where "name_first" = 'Bugs' and "name_last" = 'Bunny'""" num_records = sql.get_int(count_sql) self.assertEquals(0, num_records) # test with no transaction transaction = Transaction.get(create=True) insert = Insert() insert.set_table("person") insert.set_value("name_first", "Bugs") insert.set_value("name_last", "Bunny") statement = insert.get_statement() expected = '''INSERT INTO "person" ("name_first", "name_last") VALUES ('Bugs', 'Bunny')''' self.assertEquals(expected, statement) # with a db_res added, it should scope the database insert = Insert() insert.set_database(db_res) insert.set_table("person") insert.set_value("name_first", "Bugs") insert.set_value("name_last", "Bunny") statement = insert.get_statement() expected = '''INSERT INTO %s"person" ("name_first", "name_last") VALUES ('Bugs', 'Bunny')''' % self.prefix self.assertEquals(expected, statement) sql.do_update(statement) num_records = sql.get_int(count_sql) self.assertEquals(1, num_records) delete = """delete from "person" where "name_first" = 'Bugs' and "name_last" = 'Bunny'""" sql.do_update(delete) num_records = sql.get_int(count_sql) self.assertEquals(0, num_records) transaction.rollback() def _test_create_table(self): create = CreateTable() create.set_table("coffee") create.add_column("id", "int4") create.add_column("type", "varchar(10)") create.add_column("login", "varchar(30)") create.add_column("discussion", "text") create.set_primary_key("id") statement = create.get_statement() db_res = DbResource.get_default('unittest') sql = DbContainer.get(db_res) if sql.get_database_type() == "Oracle": expected = \ '''CREATE TABLE 
"coffee" ( "id" NUMBER, "type" VARCHAR2(10), "login" VARCHAR2(30), "discussion" CLOB, PRIMARY KEY ("id") );''' else: expected = \ '''CREATE TABLE "coffee" ( "id" int4, "type" varchar(10), "login" varchar(30), "discussion" text, PRIMARY KEY ("id"));''' statement = statement.replace("\n", "") statement = statement.replace(" ", " ") statement = statement.replace("\t", " ") expected = expected.replace("\n", "") statement = expected.replace(" ", " ") statement = expected.replace("\t", " ") self.assertEquals(expected, statement) def _test_transaction(self): """test a transaction""" database_type = Project.get_by_code("unittest").get_database_type() if database_type == "MySQL": print print "WARNING: !!!!!!!" print "_test_tranaction is disabled" print "WARNING: !!!!!!!" print return db_res = DbResource.get_default('unittest') sql = DbContainer.get(db_res) count_sql = 'select count(*) from "person"' num_records = sql.get_int(count_sql) # start the transaction, update and roll back sql.start() insert = Insert() insert.set_table("person") insert.set_value("name_first", "cow") insert.set_value("name_last", "sql") query = insert.get_statement() sql.do_update(query) new_num_records = sql.get_value(count_sql) self.assertEquals( new_num_records, num_records+1 ) sql.rollback() # dump after the rollback new_num_records = sql.get_int(count_sql) self.assertEqual( new_num_records, num_records ) def _test_order_by(self): select = Select() db_res = DbResource.get_default('unittest') select.set_database(db_res) select.add_table("asset") select.add_enum_order_by("code", ['cow', 'dog', 'horse']) expected = '''SELECT %s"asset".* FROM %s"asset" ORDER BY ( CASE "code" WHEN 'cow' THEN 1 WHEN 'dog' THEN 2 WHEN 'horse' THEN 3 ELSE 4 END )''' % (self.prefix, self.prefix) statement = select.get_statement() self.assertEqual(expected, statement) def _test_rpn_filters(self): select = Select() db_res = DbResource.get_default('unittest') select.set_database(db_res) select.add_table("asset") 
select.add_where("begin") select.add_where("\"code\" = 'chr001'") select.add_where("\"code\" = 'chr002'") select.add_where("\"code\" = 'chr003'") select.add_where("or") select.add_where("\"status\" = 'complete'") select.add_where("and") statement = select.get_statement() expected = """SELECT %s"asset".* FROM %s"asset" WHERE ( "code" = 'chr001' OR "code" = 'chr002' OR "code" = 'chr003' ) AND "status" = 'complete'""" % (self.prefix, self.prefix) self.assertEquals(expected, statement) # test some simple fringe cases select = Select() select.add_table("asset") select.add_where("begin") select.add_where("or") select.add_where("\"status\" = 'complete'") select.add_where("and") statement = select.get_statement() expected = """SELECT "asset".* FROM "asset" WHERE "status" = 'complete'""" self.assertEquals(expected, statement) # assumed begin select = Select() select.add_table("asset") select.add_where("\"status\" = 'retired'") select.add_where("\"code\" = 'chr001'") select.add_where("\"code\" = 'chr002'") select.add_where("or") statement = select.get_statement() expected = """SELECT "asset".* FROM "asset" WHERE "status" = 'retired' OR "code" = 'chr001' OR "code" = 'chr002'""" self.assertEquals(expected, statement) # add a more complex case search = Select() search.add_table("person") search.add_where("begin") search.add_where("begin") search.add_filter("login", "joe") search.add_filter("login", "mary") search.add_where("and") search.add_where("begin") search.add_filter("attr", "tom") search.add_filter("attr", "peter") search.add_where("and") search.add_where("or") statement = search.get_statement() expected = '''SELECT "person".* FROM "person" WHERE ( "person"."login" = 'joe' AND "person"."login" = 'mary' ) OR ( "person"."attr" = 'tom' AND "person"."attr" = 'peter' )''' self.assertEquals(expected, statement) # try to throw in an extra begin in the middle project_code = "unittest" filter_search_type = "unittest/city" search_type = 'sthpw/sobject_list' search = 
Search(search_type) search.add_filter("project_code", project_code) search.add_filter("search_type", filter_search_type) search.add_op("begin") values = ["chr001"] columns = ['keywords'] for column in columns: search.add_startswith_keyword_filter(column, values) statement = search.get_statement() expected = '''SELECT %s"sobject_list".* FROM %s"sobject_list" WHERE "sobject_list"."project_code" = 'unittest' AND "sobject_list"."search_type" = 'unittest/city' AND ( lower("sobject_list"."keywords") like lower('%% chr001%%') OR lower("sobject_list"."keywords") like lower('chr001%%') )''' % (self.sthpw_prefix, self.sthpw_prefix) self.assertEquals(expected, statement) def _test_search_filter(self): select = Select() db_res = DbResource.get_default('unittest') select.set_database(db_res) select.add_table("job") select.add_column("request_id") select.add_filter("code", "123MMS") select2 = Select() #db_res = DbResource.get_default('unittest') select2.set_database(db_res) select2.add_table("request") select2.add_select_filter("id", select) statement = select2.get_statement() expected = '''SELECT %s"request".* FROM %s"request" WHERE "request"."id" in ( SELECT %s"job"."request_id" FROM %s"job" WHERE "job"."code" = '123MMS' )''' % (self.prefix, self.prefix, self.prefix, self.prefix) self.assertEquals(expected, statement) select3 = Select() select3.set_database(db_res) select3.add_op("begin") select3.add_table("request") select3.add_select_filter("id", select) statement = select3.get_statement() expected = '''SELECT %s"request".* FROM %s"request" WHERE "request"."id" in ( SELECT %s"job"."request_id" FROM %s"job" WHERE "job"."code" = '123MMS' )''' % (self.prefix, self.prefix, self.prefix, self.prefix) self.assertEquals(expected, statement) def _test_add_drop_column(self): #Project.set_project('unittest') from pyasm.command import ColumnAddCmd, ColumnDropCmd, Command cmd = ColumnAddCmd('unittest/country','special_place','varchar(256)') Command.execute_cmd(cmd) search_type = 
'unittest/country' # clear cache SearchType.clear_column_cache(search_type) DatabaseImpl.clear_table_cache() exists = SearchType.column_exists(search_type, 'special_place') self.assertEquals(exists, True) # now drop the column cmd = ColumnDropCmd(search_type,'special_place') Command.execute_cmd(cmd) # clear cache SearchType.clear_column_cache(search_type) cache_dict = Container.get("DatabaseImpl:column_info") # assume database is the same as sthpw database_type = Project.get_by_code("unittest").get_database_type() db_resource = DbResource.get_default('unittest') table_info = cache_dict.get("%s:%s" % (db_resource, "country")) self.assertEquals(table_info == None, True) key = "%s:%s" % (db_resource, "country") cache_dict[key] = None exists = SearchType.column_exists(search_type, 'special_place') self.assertEquals(exists, False) def _test_join(self): """ test a select """ Project.set_project('unittest') select = Select() db_res = DbResource.get_default('unittest') select.set_database(db_res) select.add_table("person") select.add_join('person','city', 'city_code','code') select.add_join('city','country', 'country_code','code') select.add_order_by("name_last") statement = select.get_statement() self.assertEquals(statement, '''SELECT %s"person".* FROM %s"person" LEFT OUTER JOIN %s"city" ON "person"."city_code" = "city"."code" LEFT OUTER JOIN %s"country" ON "city"."country_code" = "country"."code" ORDER BY "person"."name_last"''' % (self.prefix, self.prefix, self.prefix, self.prefix) ) search = Search('unittest/person') search.add_join('unittest/city', 'unittest/person') statement = search.get_statement() self.assertEquals(statement, '''SELECT %s"person".* FROM %s"person" LEFT OUTER JOIN %s"city" ON "person"."city_code" = "city"."code"''' % (self.prefix,self.prefix, self.prefix)) statement = search.get_statement() # this one has no schema connection, so will be ignored search.add_join('sthpw/login', 'unittest/person') self.assertEquals(statement, '''SELECT %s"person".* 
FROM %s"person" LEFT OUTER JOIN %s"city" ON "person"."city_code" = "city"."code"''' % (self.prefix, self.prefix, self.prefix)) search.add_join('unittest/country', 'unittest/city') statement = search.get_statement() self.assertEquals(statement, '''SELECT %s"person".* FROM %s"person" LEFT OUTER JOIN %s"city" ON "person"."city_code" = "city"."code" LEFT OUTER JOIN %s"country" ON "city"."country_code" = "country"."code"''' % (self.prefix, self.prefix, self.prefix, self.prefix) ) def _test_create_view(self): from sql import CreateView db_res = DbResource.get_default('unittest') sql = DbContainer.get(db_res) car_columns = sql.get_columns("car") sports_columns = sql.get_columns("sports_car_data") search = Search("unittest/car") search.add_join("unittest/sports_car_data") search.add_column("*", table="car") for sports_column in sports_columns: if sports_column not in car_columns: search.add_column(sports_column, table="sports_car_data") create_view = CreateView(search=search) create_view.set_view("sports_car") statement = create_view.get_statement() expected = '''CREATE VIEW "sports_car" AS SELECT "unittest"."public"."car".*, "unittest"."public"."sports_car_data"."acceleration", "unittest"."public"."sports_car_data"."horsepower", "unittest"."public"."sports_car_data"."top_speed" FROM "unittest"."public"."car" LEFT OUTER JOIN "unittest"."public"."sports_car_data" ON "car"."code" = "sports_car_data"."code"''' self.assertEquals(expected, statement) if __name__ == "__main__": Batch() unittest.main()
epl-1.0
kajigga/python_koans
python3/libs/colorama/winterm.py
86
1809
from . import win32 # from wincon.h class WinColor(object): BLACK = 0 BLUE = 1 GREEN = 2 CYAN = 3 RED = 4 MAGENTA = 5 YELLOW = 6 GREY = 7 # from wincon.h class WinStyle(object): NORMAL = 0x00 # dim text, dim background BRIGHT = 0x08 # bright text, dim background class WinTerm(object): def __init__(self): self._default = \ win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes self.set_attrs(self._default) self._default_fore = self._fore self._default_back = self._back self._default_style = self._style def get_attrs(self): return self._fore + self._back * 16 + self._style def set_attrs(self, value): self._fore = value & 7 self._back = (value >> 4) & 7 self._style = value & WinStyle.BRIGHT def reset_all(self, on_stderr=None): self.set_attrs(self._default) self.set_console(attrs=self._default) def fore(self, fore=None, on_stderr=False): if fore is None: fore = self._default_fore self._fore = fore self.set_console(on_stderr=on_stderr) def back(self, back=None, on_stderr=False): if back is None: back = self._default_back self._back = back self.set_console(on_stderr=on_stderr) def style(self, style=None, on_stderr=False): if style is None: style = self._default_style self._style = style self.set_console(on_stderr=on_stderr) def set_console(self, attrs=None, on_stderr=False): if attrs is None: attrs = self.get_attrs() handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleTextAttribute(handle, attrs)
mit
mjirayu/edx-theme
node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/input_test.py
604
3207
#!/usr/bin/env python # Copyright 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the input.py file.""" import gyp.input import unittest import sys class TestFindCycles(unittest.TestCase): def setUp(self): self.nodes = {} for x in ('a', 'b', 'c', 'd', 'e'): self.nodes[x] = gyp.input.DependencyGraphNode(x) def _create_dependency(self, dependent, dependency): dependent.dependencies.append(dependency) dependency.dependents.append(dependent) def test_no_cycle_empty_graph(self): for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_line(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_dag(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['a'], self.nodes['c']) self._create_dependency(self.nodes['b'], self.nodes['c']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_cycle_self_reference(self): self._create_dependency(self.nodes['a'], self.nodes['a']) self.assertEquals([(self.nodes['a'], self.nodes['a'])], self.nodes['a'].FindCycles()) def test_cycle_two_nodes(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['a'])], self.nodes['a'].FindCycles()) self.assertEquals([(self.nodes['b'], self.nodes['a'], self.nodes['b'])], self.nodes['b'].FindCycles()) def test_two_cycles(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self._create_dependency(self.nodes['b'], self.nodes['c']) 
self._create_dependency(self.nodes['c'], self.nodes['b']) cycles = self.nodes['a'].FindCycles() self.assertTrue( (self.nodes['a'], self.nodes['b'], self.nodes['a']) in cycles) self.assertTrue( (self.nodes['b'], self.nodes['c'], self.nodes['b']) in cycles) self.assertEquals(2, len(cycles)) def test_big_cycle(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) self._create_dependency(self.nodes['d'], self.nodes['e']) self._create_dependency(self.nodes['e'], self.nodes['a']) self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['c'], self.nodes['d'], self.nodes['e'], self.nodes['a'])], self.nodes['a'].FindCycles()) if __name__ == '__main__': unittest.main()
apache-2.0
reinaH/osf.io
website/addons/s3/model.py
11
9341
# -*- coding: utf-8 -*- import pymongo from modularodm import fields from framework.auth.core import Auth from website.addons.base import exceptions from website.addons.base import AddonUserSettingsBase, AddonNodeSettingsBase, GuidFile from website.addons.base import StorageAddonBase from website.addons.s3 import utils from website.addons.s3.settings import ENCRYPT_UPLOADS_DEFAULT class S3GuidFile(GuidFile): __indices__ = [ { 'key_or_list': [ ('node', pymongo.ASCENDING), ('path', pymongo.ASCENDING), ], 'unique': True, } ] path = fields.StringField(index=True) @property def waterbutler_path(self): return '/' + self.path @property def provider(self): return 's3' @property def version_identifier(self): return 'version' @property def unique_identifier(self): return self._metadata_cache['extra']['md5'] class AddonS3UserSettings(AddonUserSettingsBase): access_key = fields.StringField() secret_key = fields.StringField() def to_json(self, user): ret = super(AddonS3UserSettings, self).to_json(user) ret['has_auth'] = self.has_auth if self.owner: ret['name'] = self.owner.display_full_name() ret['profile_url'] = self.owner.profile_url return ret @property def has_auth(self): return bool(self.access_key and self.secret_key) @property def is_valid(self): return utils.can_list(self.access_key, self.secret_key) def revoke_auth(self, auth=None, save=False): for node_settings in self.addons3nodesettings__authorized: node_settings.deauthorize(auth=auth, save=True) self.s3_osf_user, self.access_key, self.secret_key = None, None, None if save: self.save() return True class AddonS3NodeSettings(StorageAddonBase, AddonNodeSettingsBase): bucket = fields.StringField() encrypt_uploads = fields.BooleanField(default=ENCRYPT_UPLOADS_DEFAULT) user_settings = fields.ForeignField( 'addons3usersettings', backref='authorized' ) @property def folder_name(self): return self.bucket def find_or_create_file_guid(self, path): path = path.lstrip('/') return S3GuidFile.get_or_create(node=self.owner, 
path=path) @property def display_name(self): return u'{0}: {1}'.format(self.config.full_name, self.bucket) @property def complete(self): return self.has_auth and self.bucket is not None def authorize(self, user_settings, save=False): self.user_settings = user_settings self.owner.add_log( action='s3_node_authorized', params={ 'project': self.owner.parent_id, 'node': self.owner._id, }, auth=Auth(user_settings.owner), ) if save: self.save() def deauthorize(self, auth=None, log=True, save=False): self.bucket, self.user_settings = None, None if log: self.owner.add_log( action='s3_node_deauthorized', params={ 'project': self.owner.parent_id, 'node': self.owner._id, }, auth=auth, ) if save: self.save() def delete(self, save=True): self.deauthorize(log=False, save=False) super(AddonS3NodeSettings, self).delete(save=save) def serialize_waterbutler_credentials(self): if not self.has_auth: raise exceptions.AddonError('Cannot serialize credentials for S3 addon') return { 'access_key': self.user_settings.access_key, 'secret_key': self.user_settings.secret_key, } def serialize_waterbutler_settings(self): if not self.bucket: raise exceptions.AddonError('Cannot serialize settings for S3 addon') return { 'bucket': self.bucket, 'encrypt_uploads': self.encrypt_uploads } def create_waterbutler_log(self, auth, action, metadata): url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider='s3') self.owner.add_log( 's3_{0}'.format(action), auth=auth, params={ 'project': self.owner.parent_id, 'node': self.owner._id, 'path': metadata['materialized'], 'bucket': self.bucket, 'urls': { 'view': url, 'download': url + '?action=download' } }, ) def to_json(self, user): ret = super(AddonS3NodeSettings, self).to_json(user) user_settings = user.get_addon('s3') ret.update({ 'bucket': self.bucket or '', 'encrypt_uploads': self.encrypt_uploads, 'has_bucket': self.bucket is not None, 'user_is_owner': ( self.user_settings and self.user_settings.owner == user ), 
'user_has_auth': bool(user_settings) and user_settings.has_auth, 'node_has_auth': self.has_auth, 'owner': None, 'bucket_list': None, 'valid_credentials': user_settings and user_settings.is_valid, }) if self.has_auth: ret['owner'] = self.user_settings.owner.fullname ret['owner_url'] = self.user_settings.owner.url ret['node_has_auth'] = True return ret @property def has_auth(self): return bool(self.user_settings and self.user_settings.has_auth) #TODO Update callbacks def before_register(self, node, user): """ :param Node node: :param User user: :return str: Alert message """ category = node.project_or_component if self.user_settings and self.user_settings.has_auth: return ( u'The contents of S3 add-ons cannot be registered at this time; ' u'the S3 bucket linked to this {category} will not be included ' u'as part of this registration.' ).format(**locals()) def after_fork(self, node, fork, user, save=True): """ :param Node node: Original node :param Node fork: Forked node :param User user: User creating fork :param bool save: Save settings after callback :return tuple: Tuple of cloned settings and alert message """ clone, _ = super(AddonS3NodeSettings, self).after_fork( node, fork, user, save=False ) # Copy authentication if authenticated by forking user if self.user_settings and self.user_settings.owner == user: clone.user_settings = self.user_settings clone.bucket = self.bucket message = ( 'Amazon Simple Storage authorization copied to forked {cat}.' ).format( cat=fork.project_or_component, ) else: message = ( 'Amazon Simple Storage authorization not copied to forked {cat}. You may ' 'authorize this fork on the <u><a href={url}>Settings</a></u> ' 'page.' 
).format( cat=fork.project_or_component, url=fork.url + 'settings/' ) if save: clone.save() return clone, message def before_remove_contributor(self, node, removed): """ :param Node node: :param User removed: :return str: Alert message """ if self.user_settings and self.user_settings.owner == removed: return ( 'The Amazon Simple Storage add-on for this {category} is authenticated ' 'by {user}. Removing this user will also remove access ' 'to {bucket} unless another contributor re-authenticates.' ).format( category=node.project_or_component, user=removed.fullname, bucket=self.bucket ) def after_remove_contributor(self, node, removed, auth=None): """ :param Node node: :param User removed: :return str: Alert message """ if self.user_settings and self.user_settings.owner == removed: self.user_settings = None self.bucket = None self.save() message = ( u'Because the Amazon Simple Storage add-on for {category} "{title}" was ' u'authenticated by {user}, authentication information has been deleted.' ).format(category=node.category_display, title=node.title, user=removed.fullname) if not auth or auth.user != removed: url = node.web_url_for('node_setting') message += ( u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.' ).format(url=url) # return message def after_delete(self, node, user): self.deauthorize(Auth(user=user), log=True, save=True)
apache-2.0
kamyu104/LeetCode
Python/increasing-order-search-tree.py
1
1425
# Time: O(n) # Space: O(h) # Given a tree, rearrange the tree in in-order so that the leftmost node # in the tree is now the root of the tree, and every node has no left child and only 1 right child. # # Example 1: # Input: [5,3,6,2,4,null,8,1,null,null,null,7,9] # # 5 # / \ # 3 6 # / \ \ # 2 4 8 # / / \ # 1 7 9 # # Output: [1,null,2,null,3,null,4,null,5,null,6,null,7,null,8,null,9] # # 1 # \ # 2 # \ # 3 # \ # 4 # \ # 5 # \ # 6 # \ # 7 # \ # 8 # \ # 9 # Note: # - The number of nodes in the given tree will be between 1 and 100. # - Each node will have a unique integer value from 0 to 1000. # Definition for a binary tree node. class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None class Solution(object): def increasingBST(self, root): """ :type root: TreeNode :rtype: TreeNode """ def increasingBSTHelper(root, tail): if not root: return tail result = increasingBSTHelper(root.left, root) root.left = None root.right = increasingBSTHelper(root.right, tail) return result return increasingBSTHelper(root, None)
mit
nipunbatra/bayespy
bayespy/utils/optimize.py
2
2279
###################################################################### # Copyright (C) 2011-2013 Jaakko Luttinen # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. ###################################################################### ###################################################################### # This file is part of BayesPy. # # BayesPy is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # BayesPy is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with BayesPy. If not, see <http://www.gnu.org/licenses/>. ###################################################################### import numpy as np from scipy import optimize def minimize(f, x0, maxiter=None, verbose=False): """ Simple wrapper for SciPy's optimize. The given function must return a tuple: (value, gradient). """ options = {'disp': verbose} if maxiter is not None: options['maxiter'] = maxiter opt = optimize.minimize(f, x0, jac=True, method='CG', options=options) return opt.x def check_gradient(f, x0, verbose=True): """ Simple wrapper for SciPy's gradient checker. The given function must return a tuple: (value, gradient). 
Returns relative """ df = f(x0)[1] df_num = optimize.approx_fprime(x0, lambda x: f(x)[0], optimize.optimize._epsilon) abserr = np.linalg.norm(df-df_num) norm_num = np.linalg.norm(df_num) if abserr == 0 and norm_num == 0: err = 0 else: err = abserr / norm_num if verbose: print("Norm of numerical gradient: %g" % np.linalg.norm(df_num)) print("Norm of function gradient: %g" % np.linalg.norm(df)) print("Gradient relative error = %g and absolute error = %g" % (err, abserr)) return err
gpl-3.0
Yen-Chung-En/2015cdb_W12
static/Brython3.1.1-20150328-091302/Lib/threading.py
730
45641
"""Thread module emulating a subset of Java's threading model.""" import sys as _sys import _thread from time import sleep as _sleep try: from time import monotonic as _time except ImportError: from time import time as _time from traceback import format_exc as _format_exc from _weakrefset import WeakSet # Note regarding PEP 8 compliant names # This threading model was originally inspired by Java, and inherited # the convention of camelCase function and method names from that # language. Those original names are not in any imminent danger of # being deprecated (even for Py3k),so this module provides them as an # alias for the PEP 8 compliant names # Note that using the new PEP 8 compliant names facilitates substitution # with the multiprocessing module, which doesn't provide the old # Java inspired names. __all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier', 'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size'] # Rename some stuff so "from threading import *" is safe _start_new_thread = _thread.start_new_thread _allocate_lock = _thread.allocate_lock get_ident = _thread.get_ident ThreadError = _thread.error try: _CRLock = _thread.RLock except AttributeError: _CRLock = None TIMEOUT_MAX = _thread.TIMEOUT_MAX del _thread # Support for profile and trace hooks _profile_hook = None _trace_hook = None def setprofile(func): """Set a profile function for all threads started from the threading module. The func will be passed to sys.setprofile() for each thread, before its run() method is called. """ global _profile_hook _profile_hook = func def settrace(func): """Set a trace function for all threads started from the threading module. The func will be passed to sys.settrace() for each thread, before its run() method is called. 
""" global _trace_hook _trace_hook = func # Synchronization classes Lock = _allocate_lock def RLock(*args, **kwargs): """Factory function that returns a new reentrant lock. A reentrant lock must be released by the thread that acquired it. Once a thread has acquired a reentrant lock, the same thread may acquire it again without blocking; the thread must release it once for each time it has acquired it. """ if _CRLock is None: return _PyRLock(*args, **kwargs) return _CRLock(*args, **kwargs) class _RLock: """This class implements reentrant lock objects. A reentrant lock must be released by the thread that acquired it. Once a thread has acquired a reentrant lock, the same thread may acquire it again without blocking; the thread must release it once for each time it has acquired it. """ def __init__(self): self._block = _allocate_lock() self._owner = None self._count = 0 def __repr__(self): owner = self._owner try: owner = _active[owner].name except KeyError: pass return "<%s owner=%r count=%d>" % ( self.__class__.__name__, owner, self._count) def acquire(self, blocking=True, timeout=-1): """Acquire a lock, blocking or non-blocking. When invoked without arguments: if this thread already owns the lock, increment the recursion level by one, and return immediately. Otherwise, if another thread owns the lock, block until the lock is unlocked. Once the lock is unlocked (not owned by any thread), then grab ownership, set the recursion level to one, and return. If more than one thread is blocked waiting until the lock is unlocked, only one at a time will be able to grab ownership of the lock. There is no return value in this case. When invoked with the blocking argument set to true, do the same thing as when called without arguments, and return true. When invoked with the blocking argument set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. 
When invoked with the floating-point timeout argument set to a positive value, block for at most the number of seconds specified by timeout and as long as the lock cannot be acquired. Return true if the lock has been acquired, false if the timeout has elapsed. """ me = get_ident() if self._owner == me: self._count = self._count + 1 return 1 rc = self._block.acquire(blocking, timeout) if rc: self._owner = me self._count = 1 return rc __enter__ = acquire def release(self): """Release a lock, decrementing the recursion level. If after the decrement it is zero, reset the lock to unlocked (not owned by any thread), and if any other threads are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. If after the decrement the recursion level is still nonzero, the lock remains locked and owned by the calling thread. Only call this method when the calling thread owns the lock. A RuntimeError is raised if this method is called when the lock is unlocked. There is no return value. """ if self._owner != get_ident(): raise RuntimeError("cannot release un-acquired lock") self._count = count = self._count - 1 if not count: self._owner = None self._block.release() def __exit__(self, t, v, tb): self.release() # Internal methods used by condition variables def _acquire_restore(self, state): self._block.acquire() self._count, self._owner = state def _release_save(self): if self._count == 0: raise RuntimeError("cannot release un-acquired lock") count = self._count self._count = 0 owner = self._owner self._owner = None self._block.release() return (count, owner) def _is_owned(self): return self._owner == get_ident() _PyRLock = _RLock class Condition: """Class that implements a condition variable. A condition variable allows one or more threads to wait until they are notified by another thread. If the lock argument is given and not None, it must be a Lock or RLock object, and it is used as the underlying lock. 
Otherwise, a new RLock object is created and used as the underlying lock. """ def __init__(self, lock=None): if lock is None: lock = RLock() self._lock = lock # Export the lock's acquire() and release() methods self.acquire = lock.acquire self.release = lock.release # If the lock defines _release_save() and/or _acquire_restore(), # these override the default implementations (which just call # release() and acquire() on the lock). Ditto for _is_owned(). try: self._release_save = lock._release_save except AttributeError: pass try: self._acquire_restore = lock._acquire_restore except AttributeError: pass try: self._is_owned = lock._is_owned except AttributeError: pass self._waiters = [] def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __repr__(self): return "<Condition(%s, %d)>" % (self._lock, len(self._waiters)) def _release_save(self): self._lock.release() # No state to save def _acquire_restore(self, x): self._lock.acquire() # Ignore saved state def _is_owned(self): # Return True if lock is owned by current_thread. # This method is called only if __lock doesn't have _is_owned(). if self._lock.acquire(0): self._lock.release() return False else: return True def wait(self, timeout=None): """Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notify_all() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). 
When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired. """ if not self._is_owned(): raise RuntimeError("cannot wait on un-acquired lock") waiter = _allocate_lock() waiter.acquire() self._waiters.append(waiter) saved_state = self._release_save() try: # restore state no matter what (e.g., KeyboardInterrupt) if timeout is None: waiter.acquire() gotit = True else: if timeout > 0: gotit = waiter.acquire(True, timeout) else: gotit = waiter.acquire(False) if not gotit: try: self._waiters.remove(waiter) except ValueError: pass return gotit finally: self._acquire_restore(saved_state) def wait_for(self, predicate, timeout=None): """Wait until a condition evaluates to True. predicate should be a callable which result will be interpreted as a boolean value. A timeout may be provided giving the maximum time to wait. """ endtime = None waittime = timeout result = predicate() while not result: if waittime is not None: if endtime is None: endtime = _time() + waittime else: waittime = endtime - _time() if waittime <= 0: break self.wait(waittime) result = predicate() return result def notify(self, n=1): """Wake up one or more threads waiting on this condition, if any. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method wakes up at most n of the threads waiting for the condition variable; it is a no-op if no threads are waiting. 
""" if not self._is_owned(): raise RuntimeError("cannot notify on un-acquired lock") __waiters = self._waiters waiters = __waiters[:n] if not waiters: return for waiter in waiters: waiter.release() try: __waiters.remove(waiter) except ValueError: pass def notify_all(self): """Wake up all threads waiting on this condition. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. """ self.notify(len(self._waiters)) notifyAll = notify_all class Semaphore: """This class implements semaphore objects. Semaphores manage a counter representing the number of release() calls minus the number of acquire() calls, plus an initial value. The acquire() method blocks if necessary until it can return without making the counter negative. If not given, value defaults to 1. """ # After Tim Peters' semaphore class, but not quite the same (no maximum) def __init__(self, value=1): if value < 0: raise ValueError("semaphore initial value must be >= 0") self._cond = Condition(Lock()) self._value = value def acquire(self, blocking=True, timeout=None): """Acquire a semaphore, decrementing the internal counter by one. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. 
When invoked with a timeout other than None, it will block for at most timeout seconds. If acquire does not complete successfully in that interval, return false. Return true otherwise. """ if not blocking and timeout is not None: raise ValueError("can't specify timeout for non-blocking acquire") rc = False endtime = None with self._cond: while self._value == 0: if not blocking: break if timeout is not None: if endtime is None: endtime = _time() + timeout else: timeout = endtime - _time() if timeout <= 0: break self._cond.wait(timeout) else: self._value = self._value - 1 rc = True return rc __enter__ = acquire def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. """ with self._cond: self._value = self._value + 1 self._cond.notify() def __exit__(self, t, v, tb): self.release() class BoundedSemaphore(Semaphore): """Implements a bounded semaphore. A bounded semaphore checks to make sure its current value doesn't exceed its initial value. If it does, ValueError is raised. In most situations semaphores are used to guard resources with limited capacity. If the semaphore is released too many times it's a sign of a bug. If not given, value defaults to 1. Like regular semaphores, bounded semaphores manage a counter representing the number of release() calls minus the number of acquire() calls, plus an initial value. The acquire() method blocks if necessary until it can return without making the counter negative. If not given, value defaults to 1. """ def __init__(self, value=1): Semaphore.__init__(self, value) self._initial_value = value def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. 
If the number of releases exceeds the number of acquires, raise a ValueError. """ with self._cond: if self._value >= self._initial_value: raise ValueError("Semaphore released too many times") self._value += 1 self._cond.notify() class Event: """Class implementing event objects. Events manage a flag that can be set to true with the set() method and reset to false with the clear() method. The wait() method blocks until the flag is true. The flag is initially false. """ # After Tim Peters' event class (without is_posted()) def __init__(self): self._cond = Condition(Lock()) self._flag = False def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() self._cond.__init__() def is_set(self): """Return true if and only if the internal flag is true.""" return self._flag isSet = is_set def set(self): """Set the internal flag to true. All threads waiting for it to become true are awakened. Threads that call wait() once the flag is true will not block at all. """ self._cond.acquire() try: self._flag = True self._cond.notify_all() finally: self._cond.release() def clear(self): """Reset the internal flag to false. Subsequently, threads calling wait() will block until set() is called to set the internal flag to true again. """ self._cond.acquire() try: self._flag = False finally: self._cond.release() def wait(self, timeout=None): """Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out. 
""" self._cond.acquire() try: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled finally: self._cond.release() # A barrier class. Inspired in part by the pthread_barrier_* api and # the CyclicBarrier class from Java. See # http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and # http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ # CyclicBarrier.html # for information. # We maintain two main states, 'filling' and 'draining' enabling the barrier # to be cyclic. Threads are not allowed into it until it has fully drained # since the previous cycle. In addition, a 'resetting' state exists which is # similar to 'draining' except that threads leave with a BrokenBarrierError, # and a 'broken' state in which all threads get the exception. class Barrier: """Implements a Barrier. Useful for synchronizing a fixed number of threads at known synchronization points. Threads block on 'wait()' and are simultaneously once they have all made that call. """ def __init__(self, parties, action=None, timeout=None): """Create a barrier, initialised to 'parties' threads. 'action' is a callable which, when supplied, will be called by one of the threads after they have all entered the barrier and just prior to releasing them all. If a 'timeout' is provided, it is uses as the default for all subsequent 'wait()' calls. """ self._cond = Condition(Lock()) self._action = action self._timeout = timeout self._parties = parties self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken self._count = 0 def wait(self, timeout=None): """Wait for the barrier. When the specified number of threads have started waiting, they are all simultaneously awoken. If an 'action' was provided for the barrier, one of the threads will have executed that callback prior to returning. Returns an individual index number from 0 to 'parties-1'. 
""" if timeout is None: timeout = self._timeout with self._cond: self._enter() # Block while the barrier drains. index = self._count self._count += 1 try: if index + 1 == self._parties: # We release the barrier self._release() else: # We wait until someone releases us self._wait(timeout) return index finally: self._count -= 1 # Wake up any threads waiting for barrier to drain. self._exit() # Block until the barrier is ready for us, or raise an exception # if it is broken. def _enter(self): while self._state in (-1, 1): # It is draining or resetting, wait until done self._cond.wait() #see if the barrier is in a broken state if self._state < 0: raise BrokenBarrierError assert self._state == 0 # Optionally run the 'action' and release the threads waiting # in the barrier. def _release(self): try: if self._action: self._action() # enter draining state self._state = 1 self._cond.notify_all() except: #an exception during the _action handler. Break and reraise self._break() raise # Wait in the barrier until we are relased. Raise an exception # if the barrier is reset or broken. def _wait(self, timeout): if not self._cond.wait_for(lambda : self._state != 0, timeout): #timed out. Break the barrier self._break() raise BrokenBarrierError if self._state < 0: raise BrokenBarrierError assert self._state == 1 # If we are the last thread to exit the barrier, signal any threads # waiting for the barrier to drain. def _exit(self): if self._count == 0: if self._state in (-1, 1): #resetting or draining self._state = 0 self._cond.notify_all() def reset(self): """Reset the barrier to the initial state. Any threads currently waiting will get the BrokenBarrier exception raised. 
""" with self._cond: if self._count > 0: if self._state == 0: #reset the barrier, waking up threads self._state = -1 elif self._state == -2: #was broken, set it to reset state #which clears when the last thread exits self._state = -1 else: self._state = 0 self._cond.notify_all() def abort(self): """Place the barrier into a 'broken' state. Useful in case of error. Any currently waiting threads and threads attempting to 'wait()' will have BrokenBarrierError raised. """ with self._cond: self._break() def _break(self): # An internal error was detected. The barrier is set to # a broken state all parties awakened. self._state = -2 self._cond.notify_all() @property def parties(self): """Return the number of threads required to trip the barrier.""" return self._parties @property def n_waiting(self): """Return the number of threads currently waiting at the barrier.""" # We don't need synchronization here since this is an ephemeral result # anyway. It returns the correct value in the steady state. if self._state == 0: return self._count return 0 @property def broken(self): """Return True if the barrier is in a broken state.""" return self._state == -2 # exception raised by the Barrier class class BrokenBarrierError(RuntimeError): pass # Helper to generate new thread names _counter = 0 def _newname(template="Thread-%d"): global _counter _counter = _counter + 1 return template % _counter # Active thread administration _active_limbo_lock = _allocate_lock() _active = {} # maps thread id to Thread object _limbo = {} # For debug and leak testing _dangling = WeakSet() # Main class for threads class Thread: """A class that represents a thread of control. This class can be safely subclassed in a limited fashion. There are two ways to specify the activity: by passing a callable object to the constructor, or by overriding the run() method in a subclass. 
""" __initialized = False # Need to store a reference to sys.exc_info for printing # out exceptions when a thread tries to use a global var. during interp. # shutdown and thus raises an exception about trying to perform some # operation on/with a NoneType __exc_info = _sys.exc_info # Keep sys.exc_clear too to clear the exception just before # allowing .join() to return. #XXX __exc_clear = _sys.exc_clear def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None): """This constructor should always be called with keyword arguments. Arguments are: *group* should be None; reserved for future extension when a ThreadGroup class is implemented. *target* is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called. *name* is the thread name. By default, a unique name is constructed of the form "Thread-N" where N is a small decimal number. *args* is the argument tuple for the target invocation. Defaults to (). *kwargs* is a dictionary of keyword arguments for the target invocation. Defaults to {}. If a subclass overrides the constructor, it must make sure to invoke the base class constructor (Thread.__init__()) before doing anything else to the thread. """ assert group is None, "group argument must be None for now" if kwargs is None: kwargs = {} self._target = target self._name = str(name or _newname()) self._args = args self._kwargs = kwargs if daemon is not None: self._daemonic = daemon else: self._daemonic = current_thread().daemon self._ident = None self._started = Event() self._stopped = False self._block = Condition(Lock()) self._initialized = True # sys.stderr is not stored in the class like # sys.exc_info since it can be changed between instances self._stderr = _sys.stderr _dangling.add(self) def _reset_internal_locks(self): # private! Called by _after_fork() to reset our internal locks as # they may be in an invalid state leading to a deadlock or crash. 
if hasattr(self, '_block'): # DummyThread deletes _block self._block.__init__() self._started._reset_internal_locks() def __repr__(self): assert self._initialized, "Thread.__init__() was not called" status = "initial" if self._started.is_set(): status = "started" if self._stopped: status = "stopped" if self._daemonic: status += " daemon" if self._ident is not None: status += " %s" % self._ident return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status) def start(self): """Start the thread's activity. It must be called at most once per thread object. It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object. """ if not self._initialized: raise RuntimeError("thread.__init__() not called") if self._started.is_set(): raise RuntimeError("threads can only be started once") with _active_limbo_lock: _limbo[self] = self try: _start_new_thread(self._bootstrap, ()) except Exception: with _active_limbo_lock: del _limbo[self] raise self._started.wait() def run(self): """Method representing the thread's activity. You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively. """ try: if self._target: self._target(*self._args, **self._kwargs) finally: # Avoid a refcycle if the thread is running a function with # an argument that has a member that points to the thread. del self._target, self._args, self._kwargs def _bootstrap(self): # Wrapper around the real bootstrap code that ignores # exceptions during interpreter cleanup. Those typically # happen when a daemon thread wakes up at an unfortunate # moment, finds the world around it destroyed, and raises some # random exception *** while trying to report the exception in # _bootstrap_inner() below ***. 
Those random exceptions # don't help anybody, and they confuse users, so we suppress # them. We suppress them only when it appears that the world # indeed has already been destroyed, so that exceptions in # _bootstrap_inner() during normal business hours are properly # reported. Also, we only suppress them for daemonic threads; # if a non-daemonic encounters this, something else is wrong. try: self._bootstrap_inner() except: if self._daemonic and _sys is None: return raise def _set_ident(self): self._ident = get_ident() def _bootstrap_inner(self): try: self._set_ident() self._started.set() with _active_limbo_lock: _active[self._ident] = self del _limbo[self] if _trace_hook: _sys.settrace(_trace_hook) if _profile_hook: _sys.setprofile(_profile_hook) try: self.run() except SystemExit: pass except: # If sys.stderr is no more (most likely from interpreter # shutdown) use self._stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined since the creation of # self. if _sys: _sys.stderr.write("Exception in thread %s:\n%s\n" % (self.name, _format_exc())) else: # Do the best job possible w/o a huge amt. 
of code to # approximate a traceback (code ideas from # Lib/traceback.py) exc_type, exc_value, exc_tb = self._exc_info() try: print(( "Exception in thread " + self.name + " (most likely raised during interpreter shutdown):"), file=self._stderr) print(( "Traceback (most recent call last):"), file=self._stderr) while exc_tb: print(( ' File "%s", line %s, in %s' % (exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno, exc_tb.tb_frame.f_code.co_name)), file=self._stderr) exc_tb = exc_tb.tb_next print(("%s: %s" % (exc_type, exc_value)), file=self._stderr) # Make sure that exc_tb gets deleted since it is a memory # hog; deleting everything else is just for thoroughness finally: del exc_type, exc_value, exc_tb finally: # Prevent a race in # test_threading.test_no_refcycle_through_target when # the exception keeps the target alive past when we # assert that it's dead. #XXX self.__exc_clear() pass finally: with _active_limbo_lock: self._stop() try: # We don't call self._delete() because it also # grabs _active_limbo_lock. del _active[get_ident()] except: pass def _stop(self): self._block.acquire() self._stopped = True self._block.notify_all() self._block.release() def _delete(self): "Remove current thread from the dict of currently running threads." # Notes about running with _dummy_thread: # # Must take care to not raise an exception if _dummy_thread is being # used (and thus this module is being used as an instance of # dummy_threading). _dummy_thread.get_ident() always returns -1 since # there is only one thread if _dummy_thread is being used. Thus # len(_active) is always <= 1 here, and any Thread instance created # overwrites the (if any) thread currently registered in _active. # # An instance of _MainThread is always created by 'threading'. This # gets overwritten the instant an instance of Thread is created; both # threads return -1 from _dummy_thread.get_ident() and thus have the # same key in the dict. 
So when the _MainThread instance created by # 'threading' tries to clean itself up when atexit calls this method # it gets a KeyError if another Thread instance was created. # # This all means that KeyError from trying to delete something from # _active if dummy_threading is being used is a red herring. But # since it isn't if dummy_threading is *not* being used then don't # hide the exception. try: with _active_limbo_lock: del _active[get_ident()] # There must not be any python code between the previous line # and after the lock is released. Otherwise a tracing function # could try to acquire the lock again in the same thread, (in # current_thread()), and would block. except KeyError: if 'dummy_threading' not in _sys.modules: raise def join(self, timeout=None): """Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates -- either normally or through an unhandled exception or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened -- if the thread is still alive, the join() call timed out. When the timeout argument is not present or None, the operation will block until the thread terminates. A thread can be join()ed many times. join() raises a RuntimeError if an attempt is made to join the current thread as that would cause a deadlock. It is also an error to join() a thread before it has been started and attempts to do so raises the same exception. 
""" if not self._initialized: raise RuntimeError("Thread.__init__() not called") if not self._started.is_set(): raise RuntimeError("cannot join thread before it is started") if self is current_thread(): raise RuntimeError("cannot join current thread") self._block.acquire() try: if timeout is None: while not self._stopped: self._block.wait() else: deadline = _time() + timeout while not self._stopped: delay = deadline - _time() if delay <= 0: break self._block.wait(delay) finally: self._block.release() @property def name(self): """A string used for identification purposes only. It has no semantics. Multiple threads may be given the same name. The initial name is set by the constructor. """ assert self._initialized, "Thread.__init__() not called" return self._name @name.setter def name(self, name): assert self._initialized, "Thread.__init__() not called" self._name = str(name) @property def ident(self): """Thread identifier of this thread or None if it has not been started. This is a nonzero integer. See the thread.get_ident() function. Thread identifiers may be recycled when a thread exits and another thread is created. The identifier is available even after the thread has exited. """ assert self._initialized, "Thread.__init__() not called" return self._ident def is_alive(self): """Return whether the thread is alive. This method returns True just before the run() method starts until just after the run() method terminates. The module function enumerate() returns a list of all alive threads. """ assert self._initialized, "Thread.__init__() not called" return self._started.is_set() and not self._stopped isAlive = is_alive @property def daemon(self): """A boolean value indicating whether this thread is a daemon thread. This must be set before start() is called, otherwise RuntimeError is raised. Its initial value is inherited from the creating thread; the main thread is not a daemon thread and therefore all threads created in the main thread default to daemon = False. 
The entire Python program exits when no alive non-daemon threads are left. """ assert self._initialized, "Thread.__init__() not called" return self._daemonic @daemon.setter def daemon(self, daemonic): if not self._initialized: raise RuntimeError("Thread.__init__() not called") if self._started.is_set(): raise RuntimeError("cannot set daemon status of active thread"); self._daemonic = daemonic def isDaemon(self): return self.daemon def setDaemon(self, daemonic): self.daemon = daemonic def getName(self): return self.name def setName(self, name): self.name = name # The timer class was contributed by Itamar Shtull-Trauring class Timer(Thread): """Call a function after a specified number of seconds: t = Timer(30.0, f, args=None, kwargs=None) t.start() t.cancel() # stop the timer's action if it's still waiting """ def __init__(self, interval, function, args=None, kwargs=None): Thread.__init__(self) self.interval = interval self.function = function self.args = args if args is not None else [] self.kwargs = kwargs if kwargs is not None else {} self.finished = Event() def cancel(self): """Stop the timer if it hasn't finished yet.""" self.finished.set() def run(self): self.finished.wait(self.interval) if not self.finished.is_set(): self.function(*self.args, **self.kwargs) self.finished.set() # Special thread class to represent the main thread # This is garbage collected through an exit handler class _MainThread(Thread): def __init__(self): Thread.__init__(self, name="MainThread", daemon=False) self._started.set() self._set_ident() with _active_limbo_lock: _active[self._ident] = self def _exitfunc(self): self._stop() t = _pickSomeNonDaemonThread() while t: t.join() t = _pickSomeNonDaemonThread() self._delete() def _pickSomeNonDaemonThread(): for t in enumerate(): if not t.daemon and t.is_alive(): return t return None # Dummy thread class to represent threads not started here. # These aren't garbage collected when they die, nor can they be waited for. 
# If they invoke anything in threading.py that calls current_thread(), they # leave an entry in the _active dict forever after. # Their purpose is to return *something* from current_thread(). # They are marked as daemon threads so we won't wait for them # when we exit (conform previous semantics). class _DummyThread(Thread): def __init__(self): Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True) # Thread._block consumes an OS-level locking primitive, which # can never be used by a _DummyThread. Since a _DummyThread # instance is immortal, that's bad, so release this resource. del self._block self._started.set() self._set_ident() with _active_limbo_lock: _active[self._ident] = self def _stop(self): pass def join(self, timeout=None): assert False, "cannot join a dummy thread" # Global API functions def current_thread(): """Return the current Thread object, corresponding to the caller's thread of control. If the caller's thread of control was not created through the threading module, a dummy thread object with limited functionality is returned. """ try: return _active[get_ident()] except KeyError: return _DummyThread() currentThread = current_thread def active_count(): """Return the number of Thread objects currently alive. The returned count is equal to the length of the list returned by enumerate(). """ with _active_limbo_lock: return len(_active) + len(_limbo) activeCount = active_count def _enumerate(): # Same as enumerate(), but without the lock. Internal use only. return list(_active.values()) + list(_limbo.values()) def enumerate(): """Return a list of all Thread objects currently alive. The list includes daemonic threads, dummy thread objects created by current_thread(), and the main thread. It excludes terminated threads and threads that have not yet been started. 
""" with _active_limbo_lock: return list(_active.values()) + list(_limbo.values()) from _thread import stack_size # Create the main thread object, # and make it available for the interpreter # (Py_Main) as threading._shutdown. _shutdown = _MainThread()._exitfunc # get thread-local implementation, either from the thread # module, or from the python fallback try: from _thread import _local as local except ImportError: from _threading_local import local def _after_fork(): # This function is called by Python/ceval.c:PyEval_ReInitThreads which # is called from PyOS_AfterFork. Here we cleanup threading module state # that should not exist after a fork. # Reset _active_limbo_lock, in case we forked while the lock was held # by another (non-forked) thread. http://bugs.python.org/issue874900 global _active_limbo_lock _active_limbo_lock = _allocate_lock() # fork() only copied the current thread; clear references to others. new_active = {} current = current_thread() with _active_limbo_lock: for thread in _enumerate(): # Any lock/condition variable may be currently locked or in an # invalid state, so we reinitialize them. thread._reset_internal_locks() if thread is current: # There is only one active thread. We reset the ident to # its new value since it can have changed. ident = get_ident() thread._ident = ident new_active[ident] = thread else: # All the others are already stopped. thread._stop() _limbo.clear() _active.clear() _active.update(new_active) assert len(_active) == 1
agpl-3.0
mapr/hue
desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_html.py
38
8672
# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime import os from django.utils import html from django.utils._os import upath from django.utils.encoding import force_text from django.utils.unittest import TestCase class TestUtilsHtml(TestCase): def check_output(self, function, value, output=None): """ Check that function(value) equals output. If output is None, check that function(value) equals value. """ if output is None: output = value self.assertEqual(function(value), output) def test_escape(self): f = html.escape items = ( ('&','&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;'), ("'", '&#39;'), ) # Substitution patterns for testing the above items. patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb") for value, output in items: for pattern in patterns: self.check_output(f, pattern % value, pattern % output) # Check repeated values. self.check_output(f, value * 2, output * 2) # Verify it doesn't double replace &. self.check_output(f, '<&', '&lt;&amp;') def test_format_html(self): self.assertEqual( html.format_html("{0} {1} {third} {fourth}", "< Dangerous >", html.mark_safe("<b>safe</b>"), third="< dangerous again", fourth=html.mark_safe("<i>safe again</i>") ), "&lt; Dangerous &gt; <b>safe</b> &lt; dangerous again <i>safe again</i>" ) def test_linebreaks(self): f = html.linebreaks items = ( ("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"), ("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"), ("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"), ("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"), ) for value, output in items: self.check_output(f, value, output) def test_strip_tags(self): f = html.strip_tags items = ( ('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>', 'See: &#39;&eacute; is an apostrophe followed by e acute'), ('<adf>a', 'a'), ('</adf>a', 'a'), ('<asdf><asdf>e', 'e'), ('hi, <f 
x', 'hi, <f x'), ('234<235, right?', '234<235, right?'), ('a4<a5 right?', 'a4<a5 right?'), ('b7>b2!', 'b7>b2!'), ('</fe', '</fe'), ('<x>b<y>', 'b'), ('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'), ('a<p a >b</p>c', 'abc'), ('d<a:b c:d>e</p>f', 'def'), ('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'), ) for value, output in items: self.check_output(f, value, output) # Some convoluted syntax for which parsing may differ between python versions output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>') self.assertNotIn('<script>', output) self.assertIn('test', output) output = html.strip_tags('<script>alert()</script>&h') self.assertNotIn('<script>', output) self.assertIn('alert()', output) # Test with more lengthy content (also catching performance regressions) for filename in ('strip_tags1.html', 'strip_tags2.txt'): path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename) with open(path, 'r') as fp: content = force_text(fp.read()) start = datetime.now() stripped = html.strip_tags(content) elapsed = datetime.now() - start self.assertEqual(elapsed.seconds, 0) self.assertIn("Please try again.", stripped) self.assertNotIn('<', stripped) def test_strip_spaces_between_tags(self): f = html.strip_spaces_between_tags # Strings that should come out untouched. items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>') for value in items: self.check_output(f, value) # Strings that have spaces to strip. items = ( ('<d> </d>', '<d></d>'), ('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'), ('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'), ) for value, output in items: self.check_output(f, value, output) def test_strip_entities(self): f = html.strip_entities # Strings that should come out untouched. values = ("&", "&a", "&a", "a&#a") for value in values: self.check_output(f, value) # Valid entities that should be stripped from the patterns. 
entities = ("&#1;", "&#12;", "&a;", "&fdasdfasdfasdf;") patterns = ( ("asdf %(entity)s ", "asdf "), ("%(entity)s%(entity)s", ""), ("&%(entity)s%(entity)s", "&"), ("%(entity)s3", "3"), ) for entity in entities: for in_pattern, output in patterns: self.check_output(f, in_pattern % {'entity': entity}, output) def test_fix_ampersands(self): f = html.fix_ampersands # Strings without ampersands or with ampersands already encoded. values = ("a&#1;", "b", "&a;", "&amp; &x; ", "asdf") patterns = ( ("%s", "%s"), ("&%s", "&amp;%s"), ("&%s&", "&amp;%s&amp;"), ) for value in values: for in_pattern, out_pattern in patterns: self.check_output(f, in_pattern % value, out_pattern % value) # Strings with ampersands that need encoding. items = ( ("&#;", "&amp;#;"), ("&#875 ;", "&amp;#875 ;"), ("&#4abc;", "&amp;#4abc;"), ) for value, output in items: self.check_output(f, value, output) def test_escapejs(self): f = html.escapejs items = ( ('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'), (r'\ : backslashes, too', '\\u005C : backslashes, too'), ('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'), (r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'), ('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'), ) for value, output in items: self.check_output(f, value, output) def test_clean_html(self): f = html.clean_html items = ( ('<p>I <i>believe</i> in <b>semantic markup</b>!</p>', '<p>I <em>believe</em> in <strong>semantic markup</strong>!</p>'), ('I escape & I don\'t <a href="#" target="_blank">target</a>', 'I escape &amp; I don\'t <a href="#" >target</a>'), ('<p>I kill whitespace</p><br clear="all"><p>&nbsp;</p>', '<p>I kill whitespace</p>'), # also a regression test for #7267: this used to raise an UnicodeDecodeError ('<p>* foo</p><p>* bar</p>', '<ul>\n<li> foo</li><li> 
bar</li>\n</ul>'), ) for value, output in items: self.check_output(f, value, output) def test_remove_tags(self): f = html.remove_tags items = ( ("<b><i>Yes</i></b>", "b i", "Yes"), ("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"), ) for value, tags, output in items: self.assertEqual(f(value, tags), output) def test_smart_urlquote(self): quote = html.smart_urlquote # Ensure that IDNs are properly quoted self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/') self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/') # Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/') self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/') self.assertEqual(quote('http://example.com/?x=1&y=2'), 'http://example.com/?x=1&y=2')
apache-2.0
boundarydevices/android_external_chromium_org
build/win/importlibs/filter_export_list.py
185
2373
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # """Help maintaining DLL import lists.""" import ast import optparse import re import sys _EXPORT_RE = re.compile(r""" ^\s*(?P<ordinal>[0-9]+) # The ordinal field. \s+(?P<hint>[0-9A-F]+) # The hint field. \s(?P<rva>........) # The RVA field. \s+(?P<name>[^ ]+) # And finally the name we're really after. """, re.VERBOSE) _USAGE = r"""\ Usage: %prog [options] [master-file] This script filters a list of exports from a DLL, generated from something like the following command line: C:\> dumpbin /exports user32.dll against a master list of imports built from e.g. C:\> dumpbin /exports user32.lib The point of this is to trim non-public exports from the list, and to normalize the names to their stdcall-mangled form for the generation of import libraries. Note that the export names from the latter incanatation are stdcall-mangled, e.g. they are suffixed with "@" and the number of argument bytes to the function. """ def _ReadMasterFile(master_file): # Slurp the master file. with open(master_file) as f: master_exports = ast.literal_eval(f.read()) master_mapping = {} for export in master_exports: name = export.split('@')[0] master_mapping[name] = export return master_mapping def main(): parser = optparse.OptionParser(usage=_USAGE) parser.add_option('-r', '--reverse', action='store_true', help='Reverse the matching, e.g. return the functions ' 'in the master list that aren\'t in the input.') options, args = parser.parse_args() if len(args) != 1: parser.error('Must provide a master file.') master_mapping = _ReadMasterFile(args[0]) found_exports = [] for line in sys.stdin: match = _EXPORT_RE.match(line) if match: export_name = master_mapping.get(match.group('name'), None) if export_name: found_exports.append(export_name) if options.reverse: # Invert the found_exports list. 
found_exports = set(master_mapping.values()) - set(found_exports) # Sort the found exports for tidy output. print '\n'.join(sorted(found_exports)) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
mastbaum/rat-pac
python/SCons/Platform/sunos.py
19
1927
"""engine.SCons.Platform.sunos Platform-specific initialization for Sun systems. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Platform.Platform() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Platform/sunos.py 4043 2009/02/23 09:06:45 scons" import posix def generate(env): posix.generate(env) # Based on sunSparc 8:32bit # ARG_MAX=1048320 - 3000 for environment expansion env['MAXLINELENGTH'] = 1045320 env['PKGINFO'] = 'pkginfo' env['PKGCHK'] = '/usr/sbin/pkgchk' env['ENV']['PATH'] = env['ENV']['PATH'] + ':/opt/SUNWspro/bin:/usr/ccs/bin' # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
bsd-3-clause
tukutela/heroku-buildpack-python
vendor/pip-1.5.4/pip/_vendor/requests/structures.py
398
3575
# -*- coding: utf-8 -*- """ requests.structures ~~~~~~~~~~~~~~~~~~~ Data structures that power Requests. """ import os import collections from itertools import islice class IteratorProxy(object): """docstring for IteratorProxy""" def __init__(self, i): self.i = i # self.i = chain.from_iterable(i) def __iter__(self): return self.i def __len__(self): if hasattr(self.i, '__len__'): return len(self.i) if hasattr(self.i, 'len'): return self.i.len if hasattr(self.i, 'fileno'): return os.fstat(self.i.fileno()).st_size def read(self, n): return "".join(islice(self.i, None, n)) class CaseInsensitiveDict(collections.MutableMapping): """ A case-insensitive ``dict``-like object. Implements all methods and operations of ``collections.MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. """ def __init__(self, data=None, **kwargs): self._store = dict() if data is None: data = {} self.update(data, **kwargs) def __setitem__(self, key, value): # Use the lowercased key for lookups, but store the actual # key alongside the value. 
self._store[key.lower()] = (key, value) def __getitem__(self, key): return self._store[key.lower()][1] def __delitem__(self, key): del self._store[key.lower()] def __iter__(self): return (casedkey for casedkey, mappedvalue in self._store.values()) def __len__(self): return len(self._store) def lower_items(self): """Like iteritems(), but with all lowercase keys.""" return ( (lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items() ) def __eq__(self, other): if isinstance(other, collections.Mapping): other = CaseInsensitiveDict(other) else: return NotImplemented # Compare insensitively return dict(self.lower_items()) == dict(other.lower_items()) # Copy is required def copy(self): return CaseInsensitiveDict(self._store.values()) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, dict(self.items())) class LookupDict(dict): """Dictionary lookup object.""" def __init__(self, name=None): self.name = name super(LookupDict, self).__init__() def __repr__(self): return '<lookup \'%s\'>' % (self.name) def __getitem__(self, key): # We allow fall-through here, so values default to None return self.__dict__.get(key, None) def get(self, key, default=None): return self.__dict__.get(key, default)
mit
js0701/chromium-crosswalk
chrome/common/extensions/docs/server2/api_categorizer_test.py
87
2474
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from api_categorizer import APICategorizer
from compiled_file_system import CompiledFileSystem
from extensions_paths import CHROME_EXTENSIONS
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem


def _ToTestData(obj):
  '''Turns a list of file names into test data: a mapping from each file name
  to its contents (derived from the name itself).
  '''
  return {name: name for name in obj}


_TEST_DATA = {
  'api': {
    '_api_features.json': '{}',
    '_manifest_features.json': '{}',
    '_permission_features.json': '{}',
  },
  'docs': {
    'templates': {
      'json': {
        'api_availabilities.json': '{}',
        'manifest.json': '{}',
        'permissions.json': '{}',
      },
      'public': {
        'apps': _ToTestData([
          'alarms.html',
          'app_window.html',
          'experimental_bluetooth.html',
          'experimental_power.html',
          'storage.html',
          'sockets_udp.html'
        ]),
        'extensions': _ToTestData([
          'alarms.html',
          'browserAction.html',
          'experimental_history.html',
          'experimental_power.html',
          'infobars.html',
          'storage.html',
          'sockets_udp.html'
        ]),
      },
    },
  }
}


class APICategorizerTest(unittest.TestCase):
  def setUp(self):
    self._test_file_system = TestFileSystem(
        _TEST_DATA, relative_to=CHROME_EXTENSIONS)
    self._compiled_file_system = CompiledFileSystem.Factory(
        ObjectStoreCreator.ForTest())

  def testGetAPICategory(self):
    # Verifies the category on both platforms, unless |only_on| pins the
    # check to a single platform (still exercised once per loop pass).
    def assertGetEqual(expected, category, only_on=None):
      for platform in ('apps', 'extensions'):
        categorizer = APICategorizer(self._test_file_system,
                                     self._compiled_file_system,
                                     only_on or platform)
        self.assertEqual(expected, categorizer.GetCategory(category))

    assertGetEqual('chrome', 'alarms')
    assertGetEqual('private', 'musicManagerPrivate')
    assertGetEqual('private', 'notDocumentedApi')
    assertGetEqual('experimental', 'experimental.bluetooth', only_on='apps')
    assertGetEqual('experimental', 'experimental.history',
                   only_on='extensions')


if __name__ == '__main__':
  unittest.main()
bsd-3-clause
pearsonlab/nipype
nipype/interfaces/mrtrix/tracking.py
10
15487
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
    Change directory to provide relative paths for doctests
    >>> import os
    >>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
    >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
    >>> os.chdir(datadir)
"""
import os
import os.path as op

from ..base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File
from ..traits_extension import isdefined
from ...utils.filemanip import split_filename


class FilterTracksInputSpec(CommandLineInputSpec):
    in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
                   desc='input tracks to be filtered')
    include_xor = ['include_file', 'include_spec']
    include_file = File(exists=True, argstr='-include %s',
                        desc='inclusion file', xor=include_xor)
    include_spec = traits.List(traits.Float,
                               desc='inclusion specification in mm and radius (x y z r)',
                               position=2, argstr='-include %s', minlen=4, maxlen=4,
                               sep=',', units='mm', xor=include_xor)
    exclude_xor = ['exclude_file', 'exclude_spec']
    exclude_file = File(exists=True, argstr='-exclude %s',
                        desc='exclusion file', xor=exclude_xor)
    exclude_spec = traits.List(traits.Float,
                               desc='exclusion specification in mm and radius (x y z r)',
                               position=2, argstr='-exclude %s', minlen=4, maxlen=4,
                               sep=',', units='mm', xor=exclude_xor)
    minimum_tract_length = traits.Float(argstr='-minlength %s', units='mm',
                                        desc="Sets the minimum length of any track in millimeters (default is 10 mm).")
    out_file = File(argstr='%s', position=-1,
                    desc='Output filtered track filename',
                    name_source=['in_file'], hash_files=False,
                    name_template='%s_filt')
    no_mask_interpolation = traits.Bool(argstr='-nomaskinterp',
                                        desc="Turns off trilinear interpolation of mask images.")
    # NOTE: a space was added between the concatenated string literals below;
    # the original rendered as "wouldotherwise".
    invert = traits.Bool(argstr='-invert',
                         desc="invert the matching process, so that tracks that would "
                              "otherwise have been included are now excluded and vice-versa.")
    quiet = traits.Bool(argstr='-quiet', position=1,
                        desc="Do not display information messages or progress status.")
    debug = traits.Bool(argstr='-debug', position=1,
                        desc="Display debugging messages.")


class FilterTracksOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc='the output filtered tracks')


class FilterTracks(CommandLine):
    """
    Use regions-of-interest to select a subset of tracks
    from a given MRtrix track file.

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> filt = mrt.FilterTracks()
    >>> filt.inputs.in_file = 'tracks.tck'
    >>> filt.run()                                 # doctest: +SKIP
    """

    _cmd = 'filter_tracks'
    input_spec = FilterTracksInputSpec
    output_spec = FilterTracksOutputSpec


class Tracks2ProbInputSpec(CommandLineInputSpec):
    in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
                   desc='tract file')
    template_file = File(exists=True, argstr='-template %s', position=1,
                         desc='an image file to be used as a template for the output (the output image will have the same transform and field of view)')
    voxel_dims = traits.List(traits.Float, argstr='-vox %s', sep=',',
                             position=2, minlen=3, maxlen=3,
                             desc='Three comma-separated numbers giving the size of each voxel in mm.')
    colour = traits.Bool(argstr='-colour', position=3,
                         desc="add colour to the output image according to the direction of the tracks.")
    fraction = traits.Bool(argstr='-fraction', position=3,
                           desc="produce an image of the fraction of fibres through each voxel (as a proportion of the total number in the file), rather than the count.")
    output_datatype = traits.Enum("Bit", "Int8", "UInt8", "Int16", "UInt16",
                                  "Int32", "UInt32", "float32", "float64",
                                  argstr='-datatype %s', position=2,
                                  desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"')  # , usedefault=True)
    resample = traits.Float(argstr='-resample %d', position=3, units='mm',
                            desc='resample the tracks at regular intervals using Hermite interpolation. If omitted, the program will select an appropriate interpolation factor automatically.')
    out_filename = File(genfile=True, argstr='%s', position=-1,
                        desc='output data file')


class Tracks2ProbOutputSpec(TraitedSpec):
    tract_image = File(exists=True,
                       desc='Output tract count or track density image')


class Tracks2Prob(CommandLine):
    """
    Convert a tract file into a map of the fraction of tracks to enter
    each voxel - also known as a tract density image (TDI) - in MRtrix's
    image format (.mif). This can be viewed using MRview or converted to
    Nifti using MRconvert.

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> tdi = mrt.Tracks2Prob()
    >>> tdi.inputs.in_file = 'dwi_CSD_tracked.tck'
    >>> tdi.inputs.colour = True
    >>> tdi.run()                                   # doctest: +SKIP
    """

    _cmd = 'tracks2prob'
    input_spec = Tracks2ProbInputSpec
    output_spec = Tracks2ProbOutputSpec

    def _list_outputs(self):
        """Resolve the output image path, generating a default name when
        ``out_filename`` was left undefined."""
        outputs = self.output_spec().get()
        outputs['tract_image'] = self.inputs.out_filename
        if not isdefined(outputs['tract_image']):
            outputs['tract_image'] = op.abspath(self._gen_outfilename())
        else:
            outputs['tract_image'] = os.path.abspath(outputs['tract_image'])
        return outputs

    def _gen_filename(self, name):
        # BUG FIX: the original compared with ``is``, i.e. string identity,
        # which is implementation-dependent; use equality instead.
        if name == 'out_filename':
            return self._gen_outfilename()
        else:
            return None

    def _gen_outfilename(self):
        """Default output name: <input stem>_TDI.mif."""
        _, name, _ = split_filename(self.inputs.in_file)
        return name + '_TDI.mif'


class StreamlineTrackInputSpec(CommandLineInputSpec):
    # NOTE: spaces were added at the string-literal joins in several desc
    # texts below; the originals rendered with words fused together.
    in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
                   desc='the image containing the source data. '
                        'The type of data required depends on the type of tracking as set in the preceding argument. For DT methods, '
                        'the base DWI are needed. For SD methods, the SH harmonic coefficients of the FOD are needed.')
    seed_xor = ['seed_file', 'seed_spec']
    seed_file = File(exists=True, argstr='-seed %s', desc='seed file',
                     xor=seed_xor)
    seed_spec = traits.List(traits.Float,
                            desc='seed specification in mm and radius (x y z r)',
                            position=2, argstr='-seed %s', minlen=4, maxlen=4,
                            sep=',', units='mm', xor=seed_xor)
    include_xor = ['include_file', 'include_spec']
    include_file = File(exists=True, argstr='-include %s',
                        desc='inclusion file', xor=include_xor)
    include_spec = traits.List(traits.Float,
                               desc='inclusion specification in mm and radius (x y z r)',
                               position=2, argstr='-include %s', minlen=4, maxlen=4,
                               sep=',', units='mm', xor=include_xor)
    exclude_xor = ['exclude_file', 'exclude_spec']
    exclude_file = File(exists=True, argstr='-exclude %s',
                        desc='exclusion file', xor=exclude_xor)
    exclude_spec = traits.List(traits.Float,
                               desc='exclusion specification in mm and radius (x y z r)',
                               position=2, argstr='-exclude %s', minlen=4, maxlen=4,
                               sep=',', units='mm', xor=exclude_xor)
    mask_xor = ['mask_file', 'mask_spec']
    mask_file = File(exists=True, argstr='-mask %s',
                     desc='mask file. Only tracks within mask.', xor=mask_xor)
    mask_spec = traits.List(traits.Float,
                            desc='Mask specification in mm and radius (x y z r). Tracks will be terminated when they leave the ROI.',
                            position=2, argstr='-mask %s', minlen=4, maxlen=4,
                            sep=',', units='mm', xor=mask_xor)
    inputmodel = traits.Enum('DT_STREAM', 'SD_PROB', 'SD_STREAM',
                             argstr='%s', desc='input model type',
                             usedefault=True, position=-3)
    stop = traits.Bool(argstr='-stop',
                       desc="stop track as soon as it enters any of the include regions.")
    do_not_precompute = traits.Bool(argstr='-noprecomputed',
                                    desc="Turns off precomputation of the legendre polynomial values. Warning: this will slow down the algorithm by a factor of approximately 4.")
    unidirectional = traits.Bool(argstr='-unidirectional',
                                 desc="Track from the seed point in one direction only (default is to track in both directions).")
    no_mask_interpolation = traits.Bool(argstr='-nomaskinterp',
                                        desc="Turns off trilinear interpolation of mask images.")
    step_size = traits.Float(argstr='-step %s', units='mm',
                             desc="Set the step size of the algorithm in mm (default is 0.2).")
    minimum_radius_of_curvature = traits.Float(argstr='-curvature %s', units='mm',
                                               desc="Set the minimum radius of curvature (default is 2 mm for DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)")
    desired_number_of_tracks = traits.Int(argstr='-number %d',
                                          desc='Sets the desired number of tracks. '
                                               'The program will continue to generate tracks until this number of tracks have been selected and written to the output file '
                                               '(default is 100 for *_STREAM methods, 1000 for *_PROB methods).')
    maximum_number_of_tracks = traits.Int(argstr='-maxnum %d',
                                          desc='Sets the maximum number of tracks to generate. '
                                               "The program will not generate more tracks than this number, even if the desired number of tracks hasn't yet been reached "
                                               '(default is 100 x number).')
    minimum_tract_length = traits.Float(argstr='-minlength %s', units='mm',
                                        desc="Sets the minimum length of any track in millimeters (default is 10 mm).")
    maximum_tract_length = traits.Float(argstr='-length %s', units='mm',
                                        desc="Sets the maximum length of any track in millimeters (default is 200 mm).")
    cutoff_value = traits.Float(argstr='-cutoff %s', units='NA',
                                desc="Set the FA or FOD amplitude cutoff for terminating tracks (default is 0.1).")
    initial_cutoff_value = traits.Float(argstr='-initcutoff %s', units='NA',
                                        desc="Sets the minimum FA or FOD amplitude for initiating tracks (default is twice the normal cutoff).")
    initial_direction = traits.List(traits.Int,
                                    desc='Specify the initial tracking direction as a vector',
                                    argstr='-initdirection %s', minlen=2, maxlen=2,
                                    units='voxels')
    out_file = File(argstr='%s', position=-1,
                    name_source=['in_file'], name_template='%s_tracked.tck',
                    output_name='tracked', desc='output data file')


class StreamlineTrackOutputSpec(TraitedSpec):
    tracked = File(exists=True,
                   desc='output file containing reconstructed tracts')


class StreamlineTrack(CommandLine):
    """
    Performs tractography using one of the following models:
    'dt_prob', 'dt_stream', 'sd_prob', 'sd_stream',
    Where 'dt' stands for diffusion tensor, 'sd' stands for spherical
    deconvolution, and 'prob' stands for probabilistic.

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> strack = mrt.StreamlineTrack()
    >>> strack.inputs.inputmodel = 'SD_PROB'
    >>> strack.inputs.in_file = 'data.Bfloat'
    >>> strack.inputs.seed_file = 'seed_mask.nii'
    >>> strack.inputs.mask_file = 'mask.nii'
    >>> strack.cmdline
    'streamtrack -mask mask.nii -seed seed_mask.nii SD_PROB data.Bfloat data_tracked.tck'
    >>> strack.run()                                    # doctest: +SKIP
    """

    _cmd = 'streamtrack'
    input_spec = StreamlineTrackInputSpec
    output_spec = StreamlineTrackOutputSpec


class DiffusionTensorStreamlineTrackInputSpec(StreamlineTrackInputSpec):
    gradient_encoding_file = File(exists=True, argstr='-grad %s',
                                  mandatory=True, position=-2,
                                  desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix')


class DiffusionTensorStreamlineTrack(StreamlineTrack):
    """
    Specialized interface to StreamlineTrack. This interface is used for
    streamline tracking from diffusion tensor data, and calls the MRtrix
    function 'streamtrack' with the option 'DT_STREAM'

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> dtstrack = mrt.DiffusionTensorStreamlineTrack()
    >>> dtstrack.inputs.in_file = 'data.Bfloat'
    >>> dtstrack.inputs.seed_file = 'seed_mask.nii'
    >>> dtstrack.run()                                  # doctest: +SKIP
    """

    input_spec = DiffusionTensorStreamlineTrackInputSpec

    def __init__(self, command=None, **inputs):
        # Pin the model before delegating; __init__ should not return a value.
        inputs["inputmodel"] = "DT_STREAM"
        super(DiffusionTensorStreamlineTrack, self).__init__(command, **inputs)


class ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec(StreamlineTrackInputSpec):
    maximum_number_of_trials = traits.Int(argstr='-trials %s',
                                          desc="Set the maximum number of sampling trials at each point (only used for probabilistic tracking).")


class ProbabilisticSphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
    """
    Performs probabilistic tracking using spherically deconvolved data

    Specialized interface to StreamlineTrack. This interface is used for
    probabilistic tracking from spherically deconvolved data, and calls
    the MRtrix function 'streamtrack' with the option 'SD_PROB'

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> sdprobtrack = mrt.ProbabilisticSphericallyDeconvolutedStreamlineTrack()
    >>> sdprobtrack.inputs.in_file = 'data.Bfloat'
    >>> sdprobtrack.inputs.seed_file = 'seed_mask.nii'
    >>> sdprobtrack.run()                               # doctest: +SKIP
    """

    input_spec = ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec

    def __init__(self, command=None, **inputs):
        inputs["inputmodel"] = "SD_PROB"
        super(ProbabilisticSphericallyDeconvolutedStreamlineTrack,
              self).__init__(command, **inputs)


class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
    """
    Performs streamline tracking using spherically deconvolved data

    Specialized interface to StreamlineTrack. This interface is used for
    streamline tracking from spherically deconvolved data, and calls
    the MRtrix function 'streamtrack' with the option 'SD_STREAM'

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> sdtrack = mrt.SphericallyDeconvolutedStreamlineTrack()
    >>> sdtrack.inputs.in_file = 'data.Bfloat'
    >>> sdtrack.inputs.seed_file = 'seed_mask.nii'
    >>> sdtrack.run()                                   # doctest: +SKIP
    """

    input_spec = StreamlineTrackInputSpec

    def __init__(self, command=None, **inputs):
        inputs["inputmodel"] = "SD_STREAM"
        super(SphericallyDeconvolutedStreamlineTrack,
              self).__init__(command, **inputs)
bsd-3-clause
Ayub-Khan/edx-platform
common/djangoapps/django_comment_common/tests.py
148
2266
from django.test import TestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey

from django_comment_common.models import Role
from student.models import CourseEnrollment, User


class RoleAssignmentTest(TestCase):
    """
    Basic checks that forum Roles get assigned and unassigned as students
    are enrolled and unenrolled from a course.
    """
    def setUp(self):
        super(RoleAssignmentTest, self).setUp()
        # A staff account is checked because staff used to receive the
        # Moderator role.
        self.staff_user = User.objects.create_user(
            "patty",
            "patty@fake.edx.org",
        )
        self.staff_user.is_staff = True

        self.student_user = User.objects.create_user(
            "hacky",
            "hacky@fake.edx.org"
        )
        self.course_key = SlashSeparatedCourseKey("edX", "Fake101", "2012")
        for user in (self.staff_user, self.student_user):
            CourseEnrollment.enroll(user, self.course_key)

    def test_enrollment_auto_role_creation(self):
        # Both the staff member and the student should hold exactly the
        # auto-created "Student" role for this course.
        expected_roles = [
            Role.objects.get(course_id=self.course_key, name="Student")
        ]
        self.assertEqual(expected_roles, list(self.staff_user.roles.all()))
        self.assertEqual(expected_roles, list(self.student_user.roles.all()))

    # The test below was written assuming that unenrolling from a course
    # should remove all forum Roles for that student for that course. That is
    # not necessarily the case -- please see the comments at the top of
    # django_comment_client.models.assign_default_role(). Leaving it for the
    # forums team to sort out.
    #
    # def test_unenrollment_auto_role_removal(self):
    #     another_student = User.objects.create_user("sol", "sol@fake.edx.org")
    #     CourseEnrollment.enroll(another_student, self.course_id)
    #
    #     CourseEnrollment.unenroll(self.student_user, self.course_id)
    #     # Make sure we didn't delete the actual Role
    #     student_role = Role.objects.get(
    #         course_id=self.course_id,
    #         name="Student"
    #     )
    #     self.assertNotIn(student_role, self.student_user.roles.all())
    #     self.assertIn(student_role, another_student.roles.all())
agpl-3.0
neumerance/deploy
.venv/lib/python2.7/site-packages/django/contrib/admin/views/main.py
85
16606
import operator

from functools import reduce

from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode

from django.contrib.admin import FieldListFilter
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import (quote, get_fields_from_path,
    lookup_needs_distinct, prepare_lookup_value)

# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
IS_POPUP_VAR = 'pop'
ERROR_FLAG = 'e'

IGNORED_PARAMS = (
    ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)

# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')


class ChangeList(object):
    """Represents the state of an admin change list page: filtering,
    ordering, searching, pagination and the resulting object list."""

    def __init__(self, request, model, list_display, list_display_links,
            list_filter, date_hierarchy, search_fields, list_select_related,
            list_per_page, list_max_show_all, list_editable, model_admin):
        self.model = model
        self.opts = model._meta
        self.lookup_opts = self.opts
        self.root_query_set = model_admin.queryset(request)
        self.list_display = list_display
        self.list_display_links = list_display_links
        self.list_filter = list_filter
        self.date_hierarchy = date_hierarchy
        self.search_fields = search_fields
        self.list_select_related = list_select_related
        self.list_per_page = list_per_page
        self.list_max_show_all = list_max_show_all
        self.model_admin = model_admin

        # Get search parameters from the query string.
        try:
            self.page_num = int(request.GET.get(PAGE_VAR, 0))
        except ValueError:
            self.page_num = 0
        self.show_all = ALL_VAR in request.GET
        self.is_popup = IS_POPUP_VAR in request.GET
        self.to_field = request.GET.get(TO_FIELD_VAR)
        self.params = dict(request.GET.items())
        if PAGE_VAR in self.params:
            del self.params[PAGE_VAR]
        if ERROR_FLAG in self.params:
            del self.params[ERROR_FLAG]

        if self.is_popup:
            self.list_editable = ()
        else:
            self.list_editable = list_editable
        self.query = request.GET.get(SEARCH_VAR, '')
        self.query_set = self.get_query_set(request)
        self.get_results(request)
        if self.is_popup:
            title = ugettext('Select %s')
        else:
            title = ugettext('Select %s to change')
        self.title = title % force_text(self.opts.verbose_name)
        self.pk_attname = self.lookup_opts.pk.attname

    def get_filters(self, request):
        """Build the list filter specs for this request.

        Returns a 4-tuple ``(filter_specs, has_filters, remaining_lookup_params,
        use_distinct)``.

        Raises IncorrectLookupParameters for invalid lookups and
        SuspiciousOperation for disallowed ones.
        """
        lookup_params = self.params.copy()  # a dictionary of the query string
        use_distinct = False

        # Remove all the parameters that are globally and systematically
        # ignored.
        for ignored in IGNORED_PARAMS:
            if ignored in lookup_params:
                del lookup_params[ignored]

        # Normalize the types of keys.
        # BUG FIX: iterate over a snapshot — the loop deletes and inserts
        # keys, and mutating a dict while iterating its items() raises
        # RuntimeError on Python 3.
        for key, value in list(lookup_params.items()):
            if not isinstance(key, str):
                # 'key' will be used as a keyword argument later, so Python
                # requires it to be a string.
                del lookup_params[key]
                lookup_params[force_str(key)] = value

            if not self.model_admin.lookup_allowed(key, value):
                raise SuspiciousOperation("Filtering by %s not allowed" % key)

        filter_specs = []
        if self.list_filter:
            for list_filter in self.list_filter:
                if callable(list_filter):
                    # This is simply a custom list filter class.
                    spec = list_filter(request, lookup_params,
                        self.model, self.model_admin)
                else:
                    field_path = None
                    if isinstance(list_filter, (tuple, list)):
                        # This is a custom FieldListFilter class for a given field.
                        field, field_list_filter_class = list_filter
                    else:
                        # This is simply a field name, so use the default
                        # FieldListFilter class that has been registered for
                        # the type of the given field.
                        field, field_list_filter_class = list_filter, FieldListFilter.create
                    if not isinstance(field, models.Field):
                        field_path = field
                        field = get_fields_from_path(self.model, field_path)[-1]
                    spec = field_list_filter_class(field, request, lookup_params,
                        self.model, self.model_admin, field_path=field_path)
                    # Check if we need to use distinct()
                    use_distinct = (use_distinct or
                                    lookup_needs_distinct(self.lookup_opts,
                                                          field_path))
                if spec and spec.has_output():
                    filter_specs.append(spec)

        # At this point, all the parameters used by the various ListFilters
        # have been removed from lookup_params, which now only contains other
        # parameters passed via the query string. We now loop through the
        # remaining parameters both to ensure that all the parameters are valid
        # fields and to determine if at least one of them needs distinct(). If
        # the lookup parameters aren't real fields, then bail out.
        try:
            for key, value in lookup_params.items():
                lookup_params[key] = prepare_lookup_value(key, value)
                use_distinct = (use_distinct or
                                lookup_needs_distinct(self.lookup_opts, key))

            return filter_specs, bool(filter_specs), lookup_params, use_distinct
        except FieldDoesNotExist as e:
            raise IncorrectLookupParameters(e)

    def get_query_string(self, new_params=None, remove=None):
        """Return the current query string updated with *new_params* and with
        any parameter whose name starts with an entry of *remove* dropped.
        A value of None in *new_params* deletes that parameter."""
        if new_params is None:
            new_params = {}
        if remove is None:
            remove = []
        p = self.params.copy()
        for r in remove:
            for k in list(p):
                if k.startswith(r):
                    del p[k]
        for k, v in new_params.items():
            if v is None:
                if k in p:
                    del p[k]
            else:
                p[k] = v
        return '?%s' % urlencode(sorted(p.items()))

    def get_results(self, request):
        """Paginate the filtered queryset and populate result bookkeeping
        attributes (result_list, result_count, full_result_count, ...)."""
        paginator = self.model_admin.get_paginator(request, self.query_set, self.list_per_page)
        # Get the number of objects, with admin filters applied.
        result_count = paginator.count

        # Get the total number of objects, with no admin filters applied.
        # Perform a slight optimization: Check to see whether any filters were
        # given. If not, use paginator.hits to calculate the number of objects,
        # because we've already done paginator.hits and the value is cached.
        if not self.query_set.query.where:
            full_result_count = result_count
        else:
            full_result_count = self.root_query_set.count()

        can_show_all = result_count <= self.list_max_show_all
        multi_page = result_count > self.list_per_page

        # Get the list of objects to display on this page.
        if (self.show_all and can_show_all) or not multi_page:
            result_list = self.query_set._clone()
        else:
            try:
                result_list = paginator.page(self.page_num + 1).object_list
            except InvalidPage:
                raise IncorrectLookupParameters

        self.result_count = result_count
        self.full_result_count = full_result_count
        self.result_list = result_list
        self.can_show_all = can_show_all
        self.multi_page = multi_page
        self.paginator = paginator

    def _get_default_ordering(self):
        # ModelAdmin.ordering wins over the model Meta ordering.
        ordering = []
        if self.model_admin.ordering:
            ordering = self.model_admin.ordering
        elif self.lookup_opts.ordering:
            ordering = self.lookup_opts.ordering
        return ordering

    def get_ordering_field(self, field_name):
        """
        Returns the proper model field name corresponding to the given
        field_name to use for ordering. field_name may either be the name of a
        proper model field or the name of a method (on the admin or model) or a
        callable with the 'admin_order_field' attribute. Returns None if no
        proper model field name can be matched.
        """
        try:
            field = self.lookup_opts.get_field(field_name)
            return field.name
        except models.FieldDoesNotExist:
            # See whether field_name is a name of a non-field
            # that allows sorting.
            if callable(field_name):
                attr = field_name
            elif hasattr(self.model_admin, field_name):
                attr = getattr(self.model_admin, field_name)
            else:
                attr = getattr(self.model, field_name)
            return getattr(attr, 'admin_order_field', None)

    def get_ordering(self, request, queryset):
        """
        Returns the list of ordering fields for the change list.
        First we check the get_ordering() method in model admin, then we check
        the object's default ordering. Then, any manually-specified ordering
        from the query string overrides anything. Finally, a deterministic
        order is guaranteed by ensuring the primary key is used as the last
        ordering field.
        """
        params = self.params
        ordering = list(self.model_admin.get_ordering(request)
                        or self._get_default_ordering())
        if ORDER_VAR in params:
            # Clear ordering and used params
            ordering = []
            order_params = params[ORDER_VAR].split('.')
            for p in order_params:
                try:
                    none, pfx, idx = p.rpartition('-')
                    field_name = self.list_display[int(idx)]
                    order_field = self.get_ordering_field(field_name)
                    if not order_field:
                        continue  # No 'admin_order_field', skip it
                    ordering.append(pfx + order_field)
                except (IndexError, ValueError):
                    continue  # Invalid ordering specified, skip it.

        # Add the given query's ordering fields, if any.
        ordering.extend(queryset.query.order_by)

        # Ensure that the primary key is systematically present in the list of
        # ordering fields so we can guarantee a deterministic order across all
        # database backends.
        pk_name = self.lookup_opts.pk.name
        if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
            # The two sets do not intersect, meaning the pk isn't present. So
            # we add it.
            ordering.append('-pk')

        return ordering

    def get_ordering_field_columns(self):
        """
        Returns a SortedDict of ordering field column numbers and asc/desc
        """

        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = SortedDict()
        if ORDER_VAR not in self.params:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                for index, attr in enumerate(self.list_display):
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[index] = order_type
                        break
        else:
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, idx = p.rpartition('-')
                try:
                    idx = int(idx)
                except ValueError:
                    continue  # skip it
                ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields

    def get_query_set(self, request):
        """Assemble the final queryset: filters, remaining query-string
        lookups, select_related, ordering and keyword search."""
        # First, we collect all the declared list filters.
        (self.filter_specs, self.has_filters, remaining_lookup_params,
         use_distinct) = self.get_filters(request)

        # Then, we let every list filter modify the queryset to its liking.
        qs = self.root_query_set
        for filter_spec in self.filter_specs:
            new_qs = filter_spec.queryset(request, qs)
            if new_qs is not None:
                qs = new_qs

        try:
            # Finally, we apply the remaining lookup parameters from the query
            # string (i.e. those that haven't already been processed by the
            # filters).
            qs = qs.filter(**remaining_lookup_params)
        except (SuspiciousOperation, ImproperlyConfigured):
            # Allow certain types of errors to be re-raised as-is so that the
            # caller can treat them in a special way.
            raise
        except Exception as e:
            # Every other error is caught with a naked except, because we don't
            # have any other way of validating lookup parameters. They might be
            # invalid if the keyword arguments are incorrect, or if the values
            # are not in the correct type, so we might get FieldError,
            # ValueError, ValidationError, or ?.
            raise IncorrectLookupParameters(e)

        # Use select_related() if one of the list_display options is a field
        # with a relationship and the provided queryset doesn't already have
        # select_related defined.
        if not qs.query.select_related:
            if self.list_select_related:
                qs = qs.select_related()
            else:
                for field_name in self.list_display:
                    try:
                        field = self.lookup_opts.get_field(field_name)
                    except models.FieldDoesNotExist:
                        pass
                    else:
                        if isinstance(field.rel, models.ManyToOneRel):
                            qs = qs.select_related()
                            break

        # Set ordering.
        ordering = self.get_ordering(request, qs)
        qs = qs.order_by(*ordering)

        # Apply keyword searches.
        def construct_search(field_name):
            # Leading ^ / = / @ select startswith / exact / full-text search.
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name

        if self.search_fields and self.query:
            orm_lookups = [construct_search(str(search_field))
                           for search_field in self.search_fields]
            for bit in self.query.split():
                or_queries = [models.Q(**{orm_lookup: bit})
                              for orm_lookup in orm_lookups]
                qs = qs.filter(reduce(operator.or_, or_queries))
            if not use_distinct:
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.lookup_opts, search_spec):
                        use_distinct = True
                        break

        if use_distinct:
            return qs.distinct()
        else:
            return qs

    def url_for_result(self, result):
        """Return the admin change URL for a single result row."""
        pk = getattr(result, self.pk_attname)
        return reverse('admin:%s_%s_change' % (self.opts.app_label,
                                               self.opts.module_name),
                       args=(quote(pk),),
                       current_app=self.model_admin.admin_site.name)
apache-2.0
yongshengwang/hue
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/update_permissions.py
35
1239
from django.core.management.base import BaseCommand
from django.contrib.auth.management import create_permissions as _create_permissions
from django_extensions.management.utils import signalcommand

try:
    # Django >= 1.7: app lookups go through the apps registry.
    from django.apps import apps as django_apps

    def get_models():
        # Placeholder: the create_permissions() adapter below ignores the
        # models argument, so this only has to be callable.
        return None

    get_app = django_apps.get_app_config
    get_all_apps = django_apps.get_app_configs

    def create_permissions(app, models, verbosity):
        """Adapter keeping the legacy (app, models, verbosity) call shape."""
        _create_permissions(app, verbosity)
except ImportError:
    # Django < 1.7: fall back to the old model/app helper functions.
    from django.db.models import get_models, get_app
    django_apps = None

    def get_all_apps():
        """Collect the app module of every installed model."""
        return {get_app(model._meta.app_label) for model in get_models()}

    create_permissions = _create_permissions


class Command(BaseCommand):
    args = '<app app ...>'
    help = 'reloads permissions for specified apps, or all apps if no args are specified'

    @signalcommand
    def handle(self, *args, **options):
        """Re-create auth permissions for the requested apps (or all apps)."""
        if args:
            target_apps = {get_app(app_label) for app_label in args}
        else:
            target_apps = get_all_apps()
        verbosity = int(options.get('verbosity', 3))
        for app in target_apps:
            create_permissions(app, get_models(), verbosity)
apache-2.0
megawidget/ebb
vendor/gtest-1.7.0/scripts/fuse_gtest_files.py
2577
8813
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """fuse_gtest_files.py v0.2.0 Fuses Google Test source code into a .h file and a .cc file. SYNOPSIS fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR Scans GTEST_ROOT_DIR for Google Test source code, and generates two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc. Then you can build your tests by adding OUTPUT_DIR to the include search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. 
These two files contain everything you need to use Google Test. Hence you can "install" Google Test by copying them to wherever you want. GTEST_ROOT_DIR can be omitted and defaults to the parent directory of the directory holding this script. EXAMPLES ./fuse_gtest_files.py fused_gtest ./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest This tool is experimental. In particular, it assumes that there is no conditional inclusion of Google Test headers. Please report any problems to googletestframework@googlegroups.com. You can read http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for more information. """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sets import sys # We assume that this file is in the scripts/ directory in the Google # Test root directory. DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') # Regex for matching '#include "gtest/..."'. INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"') # Regex for matching '#include "src/..."'. INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"') # Where to find the source seed files. GTEST_H_SEED = 'include/gtest/gtest.h' GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h' GTEST_ALL_CC_SEED = 'src/gtest-all.cc' # Where to put the generated files. GTEST_H_OUTPUT = 'gtest/gtest.h' GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc' def VerifyFileExists(directory, relative_path): """Verifies that the given file exists; aborts on failure. relative_path is the file path relative to the given directory. """ if not os.path.isfile(os.path.join(directory, relative_path)): print 'ERROR: Cannot find %s in directory %s.' % (relative_path, directory) print ('Please either specify a valid project root directory ' 'or omit it on the command line.') sys.exit(1) def ValidateGTestRootDir(gtest_root): """Makes sure gtest_root points to a valid gtest root directory. The function aborts the program on failure. 
""" VerifyFileExists(gtest_root, GTEST_H_SEED) VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED) def VerifyOutputFile(output_dir, relative_path): """Verifies that the given output file path is valid. relative_path is relative to the output_dir directory. """ # Makes sure the output file either doesn't exist or can be overwritten. output_file = os.path.join(output_dir, relative_path) if os.path.exists(output_file): # TODO(wan@google.com): The following user-interaction doesn't # work with automated processes. We should provide a way for the # Makefile to force overwriting the files. print ('%s already exists in directory %s - overwrite it? (y/N) ' % (relative_path, output_dir)) answer = sys.stdin.readline().strip() if answer not in ['y', 'Y']: print 'ABORTED.' sys.exit(1) # Makes sure the directory holding the output file exists; creates # it and all its ancestors if necessary. parent_directory = os.path.dirname(output_file) if not os.path.isdir(parent_directory): os.makedirs(parent_directory) def ValidateOutputDir(output_dir): """Makes sure output_dir points to a valid output directory. The function aborts the program on failure. """ VerifyOutputFile(output_dir, GTEST_H_OUTPUT) VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT) def FuseGTestH(gtest_root, output_dir): """Scans folder gtest_root to generate gtest/gtest.h in output_dir.""" output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') processed_files = sets.Set() # Holds all gtest headers we've processed. def ProcessFile(gtest_header_path): """Processes the given gtest header file.""" # We don't process the same header twice. if gtest_header_path in processed_files: return processed_files.add(gtest_header_path) # Reads each line in the given gtest header. for line in file(os.path.join(gtest_root, gtest_header_path), 'r'): m = INCLUDE_GTEST_FILE_REGEX.match(line) if m: # It's '#include "gtest/..."' - let's process it recursively. 
ProcessFile('include/' + m.group(1)) else: # Otherwise we copy the line unchanged to the output file. output_file.write(line) ProcessFile(GTEST_H_SEED) output_file.close() def FuseGTestAllCcToFile(gtest_root, output_file): """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.""" processed_files = sets.Set() def ProcessFile(gtest_source_file): """Processes the given gtest source file.""" # We don't process the same #included file twice. if gtest_source_file in processed_files: return processed_files.add(gtest_source_file) # Reads each line in the given gtest source file. for line in file(os.path.join(gtest_root, gtest_source_file), 'r'): m = INCLUDE_GTEST_FILE_REGEX.match(line) if m: if 'include/' + m.group(1) == GTEST_SPI_H_SEED: # It's '#include "gtest/gtest-spi.h"'. This file is not # #included by "gtest/gtest.h", so we need to process it. ProcessFile(GTEST_SPI_H_SEED) else: # It's '#include "gtest/foo.h"' where foo is not gtest-spi. # We treat it as '#include "gtest/gtest.h"', as all other # gtest headers are being fused into gtest.h and cannot be # #included directly. # There is no need to #include "gtest/gtest.h" more than once. if not GTEST_H_SEED in processed_files: processed_files.add(GTEST_H_SEED) output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,)) else: m = INCLUDE_SRC_FILE_REGEX.match(line) if m: # It's '#include "src/foo"' - let's process it recursively. 
ProcessFile(m.group(1)) else: output_file.write(line) ProcessFile(GTEST_ALL_CC_SEED) def FuseGTestAllCc(gtest_root, output_dir): """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir.""" output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') FuseGTestAllCcToFile(gtest_root, output_file) output_file.close() def FuseGTest(gtest_root, output_dir): """Fuses gtest.h and gtest-all.cc.""" ValidateGTestRootDir(gtest_root) ValidateOutputDir(output_dir) FuseGTestH(gtest_root, output_dir) FuseGTestAllCc(gtest_root, output_dir) def main(): argc = len(sys.argv) if argc == 2: # fuse_gtest_files.py OUTPUT_DIR FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1]) elif argc == 3: # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR FuseGTest(sys.argv[1], sys.argv[2]) else: print __doc__ sys.exit(1) if __name__ == '__main__': main()
mit
gangadharkadam/johnerp
erpnext/stock/report/warehouse_wise_stock_balance/warehouse_wise_stock_balance.py
10
2593
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.utils import flt def execute(filters=None): if not filters: filters = {} columns = get_columns(filters) item_map = get_item_details(filters) iwb_map = get_item_warehouse_map(filters) data = [] for company in sorted(iwb_map): for item in sorted(iwb_map[company]): for wh in sorted(iwb_map[company][item]): qty_dict = iwb_map[company][item][wh] data.append([item, item_map[item]["item_name"], item_map[item]["description"], wh, qty_dict.opening_qty, qty_dict.in_qty, qty_dict.out_qty, qty_dict.bal_qty, company ]) return columns, data def get_columns(filters): """return columns based on filters""" columns = ["Item:Link/Item:100", "Item Name::150", "Description::150", \ "Warehouse:Link/Warehouse:100", "Opening Qty:Float:90", \ "In Qty:Float:80", "Out Qty:Float:80", "Balance Qty:Float:90", "Company:Link/Company:100"] return columns def get_conditions(filters): conditions = "" if not filters.get("from_date"): frappe.throw(_("'From Date' is required")) if filters.get("to_date"): conditions += " and posting_date <= '%s'" % filters["to_date"] else: frappe.throw(_("'To Date' is required")) return conditions #get all details def get_stock_ledger_entries(filters): conditions = get_conditions(filters) return frappe.db.sql("""select item_code, warehouse, posting_date, actual_qty, company from `tabStock Ledger Entry` where docstatus < 2 %s order by item_code, warehouse""" % conditions, as_dict=1) def get_item_warehouse_map(filters): sle = get_stock_ledger_entries(filters) iwb_map = {} for d in sle: iwb_map.setdefault(d.company, {}).setdefault(d.item_code, {}).\ setdefault(d.warehouse, frappe._dict({\ "opening_qty": 0.0, "in_qty": 0.0, "out_qty": 0.0, "bal_qty": 0.0 })) qty_dict = iwb_map[d.company][d.item_code][d.warehouse] if d.posting_date < filters["from_date"]: 
qty_dict.opening_qty += flt(d.actual_qty) elif d.posting_date >= filters["from_date"] and d.posting_date <= filters["to_date"]: if flt(d.actual_qty) > 0: qty_dict.in_qty += flt(d.actual_qty) else: qty_dict.out_qty += abs(flt(d.actual_qty)) qty_dict.bal_qty += flt(d.actual_qty) return iwb_map def get_item_details(filters): item_map = {} for d in frappe.db.sql("select name, item_name, description from tabItem", as_dict=1): item_map.setdefault(d.name, d) return item_map
agpl-3.0
filippog/pysnmp
examples/hlapi/asyncio/manager/cmdgen/getbulk-to-eom.py
1
1692
""" Bulk walk MIB +++++++++++++ Send a series of SNMP GETBULK requests using the following options: * with SNMPv3, user 'usr-none-none', no authentication, no privacy * over IPv4/UDP * to an Agent at demo.snmplabs.com:161 * for all OIDs past SNMPv2-MIB::system * run till end-of-mib condition is reported by Agent * based on asyncio I/O framework Functionally similar to: | $ snmpbulkwalk -v3 -lnoAuthNoPriv -u usr-none-none -Cn0 -Cr50 \ | demo.snmplabs.com SNMPv2-MIB::system """# import asyncio from pysnmp.hlapi.asyncio import * @asyncio.coroutine def run(varBinds): snmpEngine = SnmpEngine() while True: errorIndication, errorStatus, errorIndex, \ varBindTable = yield from bulkCmd( snmpEngine, UsmUserData('usr-none-none'), UdpTransportTarget(('demo.snmplabs.com', 161)), ContextData(), 0, 50, *varBinds) if errorIndication: print(errorIndication) break elif errorStatus: print('%s at %s' % ( errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex)-1][0] or '?' ) ) else: for varBindRow in varBindTable: for varBind in varBindRow: print(' = '.join([ x.prettyPrint() for x in varBind ])) varBinds = varBindTable[-1] if isEndOfMib(varBinds): break snmpEngine.transportDispatcher.closeDispatcher() loop = asyncio.get_event_loop() loop.run_until_complete( run([ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'))]) )
bsd-3-clause
JeffRoy/mi-dataset
mi/dataset/driver/adcps_jln/stc/test/test_adcps_jln_stc_telemetered_driver.py
1
1065
__author__ = 'Mark Worden'

import os
import unittest

from mi.core.log import get_logger
from mi.idk.config import Config
from mi.dataset.driver.adcps_jln.stc.adcps_jln_stc_telemetered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler

log = get_logger()


class SampleTest(unittest.TestCase):
    """Smoke test for the adcps_jln_stc telemetered dataset driver."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_one(self):
        """Parse a known-good resource file and expect no parser failure."""
        source_file_path = os.path.join('mi', 'dataset', 'driver', 'adcps_jln',
                                        'stc', 'resource',
                                        'adcpt_20130929_091817.DAT')

        particle_data_hdlr_obj = ParticleDataHandler()

        particle_data_hdlr_obj = parse(Config().base_dir(), source_file_path,
                                       particle_data_hdlr_obj)

        log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
        log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)

        # assertEqual, not the deprecated assertEquals alias (removed in
        # Python 3.12).
        self.assertEqual(particle_data_hdlr_obj._failure, False)


if __name__ == '__main__':
    test = SampleTest('test_one')
    test.test_one()
bsd-2-clause
Oslandia/qgis-versioning
test/plugin_test.py
1
7283
#!/usr/bin/env python3 from PyQt5.QtWidgets import QMessageBox from qgis.core import (QgsApplication, QgsVectorLayer, QgsProject) import sys import os import plugin import psycopg2 dbname = "epanet_test_db" wc_dbname = "epanet_test_wc_db" schema = "epanet" wcs = "epanet_wc" test_data_dir = os.path.dirname(os.path.realpath(__file__)) sql_file = os.path.join(test_data_dir, "epanet_test_db.sql") # Monkey path GUI stuff # This not ideal to monkey patch too much things. It will be better to put # most of the GUI things in methods and to monkey patch these methods like # it's done with selectDatabase class EmptyObject(object): def __getattr__(self, name): return EmptyObject() def __call__(self, *args): return EmptyObject() def generate_tempfile(*args): return ("/tmp/plugin_test_file.sqlite", None) def warning(*args): print(args[2]) return QMessageBox.Ok class QLineEdit: def __init__(*args): pass def text(self): return wcs def return_wc_database(): return wc_dbname iface = EmptyObject() iface.mainWindow = EmptyObject() iface.layerTreeView = EmptyObject() plugin.QDialog = EmptyObject() plugin.uic.loadUi = EmptyObject() plugin.QFileDialog.getSaveFileName = generate_tempfile plugin.QMessageBox.warning = warning plugin.QVBoxLayout = EmptyObject() plugin.QDialogButtonBox = EmptyObject() plugin.QDialogButtonBox.Cancel = 0 plugin.QDialogButtonBox.Ok = 0 plugin.QLineEdit = QLineEdit class PluginTest: def __init__(self, host, pguser): self.host = host self.pguser = pguser # create the test database os.system(f"psql -h {host} -U {pguser} {dbname} -f {sql_file}") pg_conn_info = f"dbname={dbname} host={host} user={pguser}" pcon = psycopg2.connect(pg_conn_info) pcur = pcon.cursor() pcur.execute(""" INSERT INTO epanet.junctions (id, elevation, geom) VALUES (33, 30, ST_GeometryFromText('POINT(3 3)',2154)); """) pcur.execute(""" INSERT INTO epanet.junctions (id, elevation, geom) VALUES (44, 40, ST_GeometryFromText('POINT(4 4)',2154)); """) pcon.commit() pcon.close() # Initialize project 
layer_source = f"""host='{host}' dbname='{dbname}' user='{pguser}' srid=2154 table="epanet"."junctions" (geom) sql=""" j_layer = QgsVectorLayer(layer_source, "junctions", "postgres") assert(j_layer and j_layer.isValid() and j_layer.featureCount() == 4) assert(QgsProject.instance().addMapLayer(j_layer, False)) root = QgsProject.instance().layerTreeRoot() group = root.addGroup("epanet_group") group.addLayer(j_layer) self.versioning_plugin = plugin.Plugin(iface) self.versioning_plugin.current_layers = [j_layer] self.versioning_plugin.current_group = group self.historize() def historize(self): root = QgsProject.instance().layerTreeRoot() # historize self.versioning_plugin.historize() assert(len(root.children()) == 1) group = root.children()[0] assert(group.name() == "trunk revision head") j_layer = group.children()[0].layer() assert(j_layer.name() == "junctions") self.versioning_plugin.current_layers = [j_layer] self.versioning_plugin.current_group = group def test_checkout(self): root = QgsProject.instance().layerTreeRoot() # checkout self.checkout() assert(len(root.children()) == 2) group = root.children()[1] assert(group.name() == self.get_working_name()) j_layer = group.children()[0].layer() assert(j_layer.name() == "junctions") assert(j_layer.featureCount() == 4) root.takeChild(group) def test_checkout_w_selected_features(self): root = QgsProject.instance().layerTreeRoot() # select the 2 last features group = root.children()[0] j_layer = group.children()[0].layer() assert(j_layer.name() == "junctions") for feat in j_layer.getFeatures("id > 30"): j_layer.select(feat.id()) # checkout self.checkout() assert(len(root.children()) == 2) group = root.children()[1] assert(group.name() == self.get_working_name()) j_layer = group.children()[0].layer() assert(j_layer.name() == "junctions") fids = [feature['id'] for feature in j_layer.getFeatures()] print(f"fids={fids}") assert(fids == [33, 44]) root.takeChild(group) def __del__(self): QgsProject.instance().clear() for schema 
in ['epanet', 'epanet_trunk_rev_head']: os.system("psql -h {} -U {} {} -c 'DROP SCHEMA {} CASCADE'".format( self.host, self.pguser, dbname, schema)) def checkout(self): raise Exception("Must be overrided") def get_working_name(self): raise Exception("Must be overrided") class SpatialitePluginTest(PluginTest): def __init__(self, host, pguser): super().__init__(host, pguser) def checkout(self): self.versioning_plugin.checkout() def get_working_name(self): return "working copy" class PgServerPluginTest(PluginTest): def __init__(self, host, pguser): super().__init__(host, pguser) def checkout(self): self.versioning_plugin.checkout_pg() def get_working_name(self): return wcs def __del__(self): super().__del__() os.system("psql -h {} -U {} {} " "-c 'DROP SCHEMA {} CASCADE'".format( self.host, self.pguser, dbname, self.get_working_name())) class PgLocalPluginTest(PluginTest): def __init__(self, host, pguser): super().__init__(host, pguser) # Monkey patch the GUI to return database name self.versioning_plugin.selectDatabase = return_wc_database def checkout(self): self.versioning_plugin.checkout_pg_distant() def get_working_name(self): return "epanet_trunk_rev_head" def __del__(self): super().__del__() os.system("psql -h {} -U {} {} " "-c 'DROP SCHEMA {} CASCADE'".format( self.host, self.pguser, wc_dbname, self.get_working_name())) def test(host, pguser): # create the test database os.system(f"dropdb --if-exists -h {host} -U {pguser} {wc_dbname}") os.system(f"createdb -h {host} -U {pguser} {wc_dbname}") os.system(f"dropdb --if-exists -h {host} -U {pguser} {dbname}") os.system(f"createdb -h {host} -U {pguser} {dbname}") qgs = QgsApplication([], False) qgs.initQgis() for test_class in [SpatialitePluginTest, PgLocalPluginTest, PgServerPluginTest]: test = test_class(host, pguser) test.test_checkout() del test test = test_class(host, pguser) test.test_checkout_w_selected_features() del test qgs.exitQgis() if __name__ == "__main__": if len(sys.argv) != 3: print("Usage: python3 
versioning_base_test.py host pguser") else: test(*sys.argv[1:])
gpl-2.0
mrda/junkcode
sleep_sort.py
1
1602
#!/usr/bin/env python3
#
# sleep_sort.py - A unique approach to sorting numbers :-)
# Usage: sleep_sort.py 6 1 9 7
# 1
# 6
# 7
# 9
#
# Copyright (C) 2019 Michael Davies <michael@the-davies.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#

import os
import sys
import time


def my_child(sec):
    """Child-process body: sleep ``sec`` seconds, then print the value.

    Because each child sleeps proportionally to its number, children print
    in ascending numeric order.
    """
    time.sleep(sec)
    print(sec)


def sleep_sort(arr):
    """'Sort' ``arr`` by forking one child per value.

    Each child sleeps for its value (in seconds) and prints it; the parent
    reaps all children afterwards. Output order is the sorted order.
    Values are expected to be non-negative numbers.
    """
    for value in arr:
        try:
            pid = os.fork()
        except OSError:
            # sys.exit() rather than the site-provided exit() builtin,
            # which is not guaranteed to exist (e.g. under python -S).
            sys.exit("Could not create a child process")
        if pid == 0:
            my_child(value)
            # Flush, then leave via os._exit(): a plain exit()/sys.exit()
            # in a forked child would run atexit handlers and re-flush
            # stdio buffers inherited from the parent, which can duplicate
            # output.
            sys.stdout.flush()
            os._exit(0)
    # Reap every child so none are left as zombies.
    for _ in arr:
        os.waitpid(0, 0)


if __name__ == '__main__':
    if len(sys.argv) == 1:
        progname = os.path.basename(__file__)
        sys.exit('Usage: %s <array of numbers to sort>' % progname)
    arr = [int(x) for x in sys.argv[1:]]
    sleep_sort(arr)
gpl-2.0
naslanidis/ansible
lib/ansible/modules/cloud/openstack/os_auth.py
6
2213
#!/usr/bin/python # Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: os_auth short_description: Retrieve an auth token version_added: "2.0" author: "Monty Taylor (@emonty)" description: - Retrieve an auth token from an OpenStack Cloud requirements: - "python >= 2.6" - "shade" extends_documentation_fragment: openstack ''' EXAMPLES = ''' - name: Authenticate to the cloud and retrieve the service catalog os_auth: cloud: rax-dfw - name: Show service catalog debug: var: service_catalog ''' try: import shade from shade import meta HAS_SHADE = True except ImportError: HAS_SHADE = False def main(): argument_spec = openstack_full_argument_spec() module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') try: cloud = shade.openstack_cloud(**module.params) module.exit_json( changed=False, ansible_facts=dict( auth_token=cloud.auth_token, service_catalog=cloud.service_catalog)) except shade.OpenStackCloudException as e: module.fail_json(msg=str(e)) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': 
main()
gpl-3.0
levilucio/SyVOLT
GM2AUTOSAR_MM/collapse_rules/Himesis/HMoveOneInputDirectApplyDiffRulesLHS.py
6
17092
from core.himesis import Himesis, HimesisPreConditionPatternLHS import cPickle as pickle from uuid import UUID class HMoveOneInputDirectApplyDiffRulesLHS(HimesisPreConditionPatternLHS): def __init__(self): """ Creates the himesis graph representing the AToM3 model HMoveOneInputDirectApplyDiffRulesLHS. """ # Flag this instance as compiled now self.is_compiled = True super(HMoveOneInputDirectApplyDiffRulesLHS, self).__init__(name='HMoveOneInputDirectApplyDiffRulesLHS', num_nodes=4, edges=[]) # Add the edges self.add_edges([(0, 2), (3, 0)]) # Set the graph attributes self["mm__"] = pickle.loads("""(lp1 S'MT_pre__GM2AUTOSAR_MM' p2 aS'MoTifRule' p3 a.""") self["MT_constraint__"] = pickle.loads("""Vreturn True\u000a p1 .""") self["name"] = """""" self["GUID__"] = UUID('0ea9f228-d190-4625-b2e6-39bb648597be') # Set the node attributes self.vs[0]["MT_subtypeMatching__"] = False self.vs[0]["MT_pre__associationType"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True """ self.vs[0]["MT_label__"] = """9""" self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1 .""") self.vs[0]["MT_dirty__"] = False self.vs[0]["mm__"] = """MT_pre__directLink_T""" self.vs[0]["GUID__"] = UUID('ee55b788-96b4-422e-b8c6-26a9c53feda1') self.vs[1]["MT_pivotOut__"] = """element1""" self.vs[1]["MT_subtypeMatching__"] = True self.vs[1]["MT_pre__classtype"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[1]["MT_pivotIn__"] = """element1""" self.vs[1]["MT_label__"] = """3""" self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1 S'MT_pre__EcuInstance' p2 aS'MT_pre__System' p3 aS'MT_pre__SystemMapping' p4 aS'MT_pre__ComponentPrototype' p5 aS'MT_pre__SwCompToEcuMapping_component' p6 aS'MT_pre__CompositionType' p7 aS'MT_pre__PPortPrototype' p8 aS'MT_pre__SwcToEcuMapping' p9 aS'MT_pre__SoftwareComposition' p10 aS'MT_pre__RPortPrototype' p11 aS'MT_pre__PortPrototype' p12 aS'MT_pre__ComponentType' p13 a.""") self.vs[1]["MT_dirty__"] = False self.vs[1]["mm__"] = """MT_pre__MetaModelElement_T""" self.vs[1]["MT_pre__cardinality"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. 
# If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[1]["MT_pre__name"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[1]["GUID__"] = UUID('f7acf165-6f06-49bd-8c63-e3eb11f4ef7d') self.vs[2]["MT_pivotOut__"] = """element2""" self.vs[2]["MT_subtypeMatching__"] = True self.vs[2]["MT_pre__classtype"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True """ self.vs[2]["MT_pivotIn__"] = """element2""" self.vs[2]["MT_label__"] = """4""" self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1 S'MT_pre__EcuInstance' p2 aS'MT_pre__System' p3 aS'MT_pre__SystemMapping' p4 aS'MT_pre__ComponentPrototype' p5 aS'MT_pre__SwCompToEcuMapping_component' p6 aS'MT_pre__CompositionType' p7 aS'MT_pre__PPortPrototype' p8 aS'MT_pre__SwcToEcuMapping' p9 aS'MT_pre__SoftwareComposition' p10 aS'MT_pre__RPortPrototype' p11 aS'MT_pre__PortPrototype' p12 aS'MT_pre__ComponentType' p13 a.""") self.vs[2]["MT_dirty__"] = False self.vs[2]["mm__"] = """MT_pre__MetaModelElement_T""" self.vs[2]["MT_pre__cardinality"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[2]["MT_pre__name"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True """ self.vs[2]["GUID__"] = UUID('19825800-c90d-430d-b488-f6ecbb443867') self.vs[3]["MT_subtypeMatching__"] = True self.vs[3]["MT_pre__classtype"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[3]["MT_label__"] = """5""" self.vs[3]["MT_subtypes__"] = pickle.loads("""(lp1 S'MT_pre__EcuInstance' p2 aS'MT_pre__System' p3 aS'MT_pre__SystemMapping' p4 aS'MT_pre__ComponentPrototype' p5 aS'MT_pre__SwCompToEcuMapping_component' p6 aS'MT_pre__CompositionType' p7 aS'MT_pre__PPortPrototype' p8 aS'MT_pre__SwcToEcuMapping' p9 aS'MT_pre__SoftwareComposition' p10 aS'MT_pre__RPortPrototype' p11 aS'MT_pre__PortPrototype' p12 aS'MT_pre__ComponentType' p13 a.""") self.vs[3]["MT_dirty__"] = False self.vs[3]["mm__"] = """MT_pre__MetaModelElement_T""" self.vs[3]["MT_pre__cardinality"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True """ self.vs[3]["MT_pre__name"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[3]["GUID__"] = UUID('fa352ce0-c2e7-4ee2-af10-bb90957138c6') def eval_classtype3(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_cardinality3(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True def eval_name3(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_classtype4(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_cardinality4(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True def eval_name4(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_classtype5(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_cardinality5(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True def eval_name5(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_associationType9(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, # use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def constraint(self, PreNode, graph): """ Executable constraint code. @param PreNode: Function taking an integer as parameter and returns the node corresponding to that label. """ return True
mit
BeATz-UnKNoWN/python-for-android
python-modules/pybluez/bluetooth/widcomm.py
68
28737
from btcommon import * import socket import struct import threading import os import _widcomm DEFAULT_MTU = 672 def dbg (*args): return sys.stdout.write (*args) sys.stdout.write ("\n") def BD_ADDR_to_str (bda): return "%02X:%02X:%02X:%02X:%02X:%02X" % \ (ord(bda[0]), ord(bda[1]), ord(bda[2]), ord(bda[3]), ord(bda[4]), ord(bda[5])) def str_to_BD_ADDR (s): digits = [ int (c, 16) for c in s.split(":") ] return struct.pack ("6B", *digits) class WCInquirer: DEVST_DOWN = 0 DEVST_UP = 1 DEVST_ERROR = 2 DEVST_UNLOADED = 3 DEVST_RELOADED = 4 def __init__ (self): self._wcinq = _widcomm._WCInquirer () port = self._wcinq.get_sockport () self.readsock = socket.socket (socket.AF_INET, socket.SOCK_STREAM) self.readsock.connect (("127.0.0.1", port)) self._wcinq.accept_client () self.recently_discovered = [] self.inquiry_in_progress = False self.sdp_query_in_progress = False def fileno (): return self.readsock.fileno () def start_inquiry (self): self.recently_discovered = [] self.inquiry_in_progress = self._wcinq.start_inquiry () def read_msg (self): intsize = struct.calcsize ("=i") msg_type = struct.unpack ("=i", self.readsock.recv (intsize))[0] if msg_type == _widcomm.INQ_DEVICE_RESPONDED: fmt = "=6s3s248si" data = self.readsock.recv (struct.calcsize (fmt)) bda, devclass, bdname, connected = struct.unpack (fmt, data) bdaddr = BD_ADDR_to_str (bda) bdname = bdname.strip ("\0") self.recently_discovered.append ((bdaddr, devclass, bdname, connected)) elif msg_type == _widcomm.INQ_INQUIRY_COMPLETE: fmt = "=ih" data = self.readsock.recv (struct.calcsize (fmt)) success, num_responses = struct.unpack (fmt, data) self.inquiry_in_progress = False elif msg_type == _widcomm.INQ_DISCOVERY_COMPLETE: self.sdp_query_in_progress = False elif msg_type == _widcomm.INQ_STACK_STATUS_CHANGE: fmt = "=i" data = self.readsock.recv (struct.calcsize (fmt)) new_status = struct.unpack (fmt, data)[0] def start_discovery (self, addr, uuid = None): bd_addr = str_to_BD_ADDR (addr) if uuid is not None: 
self.sdp_query_in_progress = \ self._wcinq.start_discovery (bd_addr, to_full_uuid (uuid)) else: self.sdp_query_in_progress = \ self._wcinq.start_discovery (bd_addr) self.sdp_query_in_progress = True def read_discovery_records (self, addr, uuid = None): if not is_valid_address (addr): raise ValueError ("invalid Bluetooth address") bd_addr = str_to_BD_ADDR (addr) if uuid is not None: dbg ("read_discovery_records (%s, %s)" % (addr, uuid)) return self._wcinq.read_discovery_records (bd_addr, to_full_uuid (uuid)) else: return self._wcinq.read_discovery_records (bd_addr) def is_device_ready (self): return self._wcinq.is_device_ready () def get_local_device_address (self): return self._wcinq.get_local_device_address () inquirer = WCInquirer () def discover_devices (duration=8, flush_cache=True, lookup_names=False): inquirer.start_inquiry () while inquirer.inquiry_in_progress: inquirer.read_msg () discovered = inquirer.recently_discovered[:] if not lookup_names: return [ tup[0] for tup in discovered ] if lookup_names: result = [] for bdaddr, devClass, bdName, bConnected in discovered: if bdName: result.append ((bdaddr, bdName)) else: result.append ((bdAddr, None)) return result def lookup_name (address, timeout=10): discover_devices () for bdaddr, devClass, bdName, bConnected in inquirer.recently_discovered: if bdaddr == address: return bdName def advertise_service (sock, name, service_id = "", service_classes = [], \ profiles = [], provider = "", description = "", protocols = []): sock._advertise_service (name, service_id, service_classes, profiles, provider, description, protocols) def stop_advertising (sock): sock._stop_advertising () def find_service (name = None, uuid = None, address = None): if address: if address == "localhost": raise NotImplementedError if not is_valid_address (address): raise ValueError ("invalid Bluetooth address") addresses = [ address ] else: addresses = discover_devices () if uuid and not is_valid_uuid (uuid): raise ValueError ("invalid uuid ", 
uuid) results = [] for addr in addresses: inquirer.start_discovery (addr, uuid) while inquirer.sdp_query_in_progress: inquirer.read_msg () results.extend (inquirer.read_discovery_records (addr, uuid)) return results def _port_return_code_to_str (code): k = { _widcomm.RFCOMM_SUCCESS : "Success", _widcomm.RFCOMM_ALREADY_OPENED : "Port already opened", _widcomm.RFCOMM_NOT_OPENED : "Connection not open", _widcomm.RFCOMM_HANDLE_ERROR: "This error should never occur " \ "(HANDLE_ERROR) and is a stack bug", _widcomm.RFCOMM_LINE_ERR: "Line error", _widcomm.RFCOMM_START_FAILED: "Connection attempt failed", _widcomm.RFCOMM_PAR_NEG_FAILED: "Parameter negotion (MTU) failed", _widcomm.RFCOMM_PORT_NEG_FAILED: "Port negotiation failed", _widcomm.RFCOMM_PEER_CONNECTION_FAILED: "Connection ended by remote "\ "side", _widcomm.RFCOMM_PEER_TIMEOUT: "Timeout by remote side", _widcomm.RFCOMM_INVALID_PARAMETER: "Invalid parameter", _widcomm.RFCOMM_UNKNOWN_ERROR: "Unknown error" } if code in k: return k[code] else: return "Invalid RFCOMM error code %s" % str (code) def _port_ev_code_to_str (code): d = { _widcomm.PORT_EV_RXFLAG : "Received certain character", _widcomm.PORT_EV_TXEMPTY : "Transmit Queue Empty", _widcomm.PORT_EV_CTS : "CTS changed state", _widcomm.PORT_EV_DSR : "DSR changed state", _widcomm.PORT_EV_RLSD : "RLSD changed state", _widcomm.PORT_EV_BREAK : "BREAK received", _widcomm.PORT_EV_ERR : "Line status error occurred", _widcomm.PORT_EV_RING : "Ring signal detected", _widcomm.PORT_EV_CTSS : "CTS state", _widcomm.PORT_EV_DSRS : "DSR state", _widcomm.PORT_EV_RLSDS : "RLSD state", _widcomm.PORT_EV_OVERRUN : "Receiver buffer overrun", _widcomm.PORT_EV_TXCHAR : "Any character transmitted", _widcomm.PORT_EV_CONNECTED : "RFCOMM connection established", _widcomm.PORT_EV_CONNECT_ERR : "Was not able to establish " \ "connection or disconnected", _widcomm.PORT_EV_FC : "Flow control enabled flag changed by remote", _widcomm.PORT_EV_FCS : "Flow control status true = enabled" } result = 
[] for k, v in d.items (): if code & k: result.append (v) if len (result) == 0: return "Invalid event code %d" % code else: return "\n".join (result) def _sdp_checkraise (code): if code == _widcomm.SDP_OK: return elif code == _widcomm.SDP_COULD_NOT_ADD_RECORD: raise BluetoothError ("Could not add SDP record") elif code == _widcomm.SDP_INVALID_RECORD: raise BluetoothError ("Invalid SDP record") elif code == _widcomm.SDP_INVALID_PARAMETERS: raise BluetoothError ("SDP: invalid parameters") raise RuntimeError ("unknown SDP status code %s" % code) class BluetoothSocket: def __init__ (self, proto = RFCOMM, _sockdata = None): if not proto in [ RFCOMM, L2CAP ]: raise ValueError ("invalid protocol") self.proto = proto if proto == RFCOMM: self.bind = self.rfcomm_bind self.listen = self.rfcomm_listen self.accept = self.rfcomm_accept self.connect = self.rfcomm_connect self.send = self.rfcomm_send self.recv = self.rfcomm_recv self.close = self.rfcomm_close self.getsockname = self.rfcomm_getsockname self.setblocking = self.rfcomm_setblocking self.settimeout = self.rfcomm_settimeout self.gettimeout = self.rfcomm_gettimeout self.dup = self.rfcomm_dup self.makefile = self.rfcomm_makefile self.fileno = self.rfcomm_fileno self.__make_cobjects = self.__rfcomm_make_cobjects self._advertise_service = self.__rfcomm_advertise_service if _sockdata: self._wc, self._if, self.readsock = _sockdata else: self.__make_cobjects () self.connected = self._wc.is_connected () elif proto == L2CAP: dbg ("creating l2cap socket") self.bind = self.l2cap_bind self.listen = self.l2cap_listen self.accept = self.l2cap_accept self.connect = self.l2cap_connect self.send = self.l2cap_send self.recv = self.l2cap_recv self.close = self.l2cap_close self.getsockname = self.l2cap_getsockname self.setblocking = self.l2cap_setblocking self.settimeout = self.l2cap_settimeout self.gettimeout = self.l2cap_gettimeout self.dup = self.l2cap_dup self.makefile = self.l2cap_makefile self.fileno = self.l2cap_fileno 
self.__make_cobjects = self.__l2cap_make_cobjects self._advertise_service = self.__l2cap_advertise_service if _sockdata: self._wc, self._if, self.readsock = _sockdata self.connected = True else: self.__make_cobjects () self.connected = False else: raise NotImplementedError () self.nonblocking = False self.connecting = False self.listening = False self.bound = False self.received_data = [] self.last_event_code = None self.port = 0 self._sdpservice = None def _stop_advertising (self): if not self._sdpservice: raise BluetoothError ("not advertising any services") self._sdpservice = None def __rfcomm_make_cobjects (self): self._wc = _widcomm._WCRfCommPort () self._if = _widcomm._WCRfCommIf () self.readsock = socket.socket (socket.AF_INET, socket.SOCK_STREAM) self.readsock.connect (("127.0.0.1", self._wc.get_sockport ())) self._wc.accept_client () def rfcomm_read_msg (self): intsize = struct.calcsize ("=i") msg_type_data = self.readsock.recv (intsize) msg_type = struct.unpack ("=i", msg_type_data)[0] if msg_type == _widcomm.RFCOMM_DATA_RECEIVED: datalen_fmt = "=i" datalen_data = self.readsock.recv (struct.calcsize (datalen_fmt)) datalen = struct.unpack (datalen_fmt, datalen_data)[0] self.received_data.append (self.readsock.recv (datalen)) elif msg_type == _widcomm.RFCOMM_EVENT_RECEIVED: fmt = "=I" data = self.readsock.recv (struct.calcsize (fmt)) code = struct.unpack (fmt, data)[0] dbg ("event %X received" % code) if code & _widcomm.PORT_EV_CONNECTED: self.connecting = False self.listening = False self.connected = True if code & _widcomm.PORT_EV_CONNECT_ERR: self.connecting = False self.listening = False self.connected = False raise BluetoothError ("Connection failed") if code & _widcomm.PORT_EV_RXFLAG: dbg ("Rx flag") if code & _widcomm.PORT_EV_TXEMPTY: dbg ("Tx queue empty") if code & _widcomm.PORT_EV_CTS: dbg ("CTS changed state") if code & _widcomm.PORT_EV_DSR: dbg ("DSR changed state") if code & _widcomm.PORT_EV_RLSD: dbg ("RLSD changed state") if code & 
_widcomm.PORT_EV_BREAK: dbg ("BREAK received") if code & _widcomm.PORT_EV_ERR: dbg ("Line status error") if code & _widcomm.PORT_EV_RING: dbg ("Ring") if code & _widcomm.PORT_EV_CTSS: dbg ("CTS state") if code & _widcomm.PORT_EV_DSRS: dbg ("DSR state") if code & _widcomm.PORT_EV_RLSDS: dbg ("RLSD state") if code & _widcomm.PORT_EV_OVERRUN: dbg ("Receive buffer overrun") if code & _widcomm.PORT_EV_TXCHAR: dbg ("Data transmitted") if code & _widcomm.PORT_EV_FC: dbg ("Flow control changed by remote") if code & _widcomm.PORT_EV_FCS: dbg ("Flow control status true = enabled") self.last_event_code = code def rfcomm_bind (self, addrport): addr, port = addrport if len (addr): raise ValueError ("Widcomm stack can't bind to " \ "user-specified adapter") result = self._if.assign_scn_value (RFCOMM_UUID, port) if not result: raise BluetoothError ("unable to bind to port") self.bound = True self.port = self._if.get_scn () def rfcomm_listen (self, backlog): if self.connected: raise BluetoothError ("already connected") if self.listening: raise BluetoothError ("already listening/connecting") if backlog != 1: raise ValueError ("Widcomm stack requires backlog == 1") port = self._if.get_scn () self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, True) if not port: raise BluetoothError ("not bound to a port") result = self._wc.open_server (port, DEFAULT_MTU) if result != _widcomm.RFCOMM_SUCCESS: raise BluetoothError (_port_return_code_to_str (result)) self.listening = True def rfcomm_accept (self): if self.connected: raise BluetoothError ("already connected") while self.listening and not self.connected: dbg ("waiting for connection") self.rfcomm_read_msg () if self.connected: port = self._if.get_scn () client_bdaddr = BD_ADDR_to_str (self._wc.is_connected ()) # XXX widcomm API doesn't provide a way to determine the RFCOMM # channel number of the client client_port = 0 # create a new socket object and give it ownership of the # wrapped C++ objects, since those are the ones actually 
connected _sockdata = self._wc, self._if, self.readsock clientsock = BluetoothSocket (RFCOMM, _sockdata) # now create new C++ objects self.__rfcomm_make_cobjects () # self.bind (("", port)) # self.listen (1) return clientsock, (client_bdaddr, client_port) def rfcomm_connect (self, addrport): addr, port = addrport dbg ("connecting to %s port %d" % (addr, port)) if not is_valid_address (addr): raise ValueError ("invalid address %s" % addr) self._if.assign_scn_value (RFCOMM_UUID, port) self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, False) result = self._wc.open_client (port, str_to_BD_ADDR (addr), DEFAULT_MTU) if result != _widcomm.RFCOMM_SUCCESS: raise BluetoothError (_port_return_code_to_str (result)) self.connecting = True while self.connecting: self.rfcomm_read_msg () if not self._wc.is_connected (): raise BluetoothError ("connection failed") def rfcomm_send (self, data): dbg ("sending: [%s]" % data) status, written = self._wc.write (data) if status == _widcomm.RFCOMM_SUCCESS: dbg ("sent okay") return written else: raise BluetoothError (_port_return_code_to_str (status)) def rfcomm_recv (self, numbytes): if self.nonblocking and not self.received_data: # XXX are we supposed to raise an exception, or just return None? 
return None while not self.received_data and self._wc.is_connected (): self.rfcomm_read_msg () if self.received_data: data = self.received_data.pop (0) if len(data) > numbytes: self.received_data.insert (0, data[numbytes:]) return data[:numbytes] else: return data def rfcomm_close (self): self._wc.close () self._wc = None self.bound = False self.connecting = False self.listening = False self.connected = False # return bt.close (self._sockfd) def rfcomm_getsockname (self): if not self.bound: raise BluetoothError ("Socket not bound") addr = inquirer.get_local_device_address () port = self._if.get_scn () return addr, port def rfcomm_setblocking (self, blocking): self.nonblocking = not blocking self.readsock.setblocking (blocking) def rfcomm_settimeout (self, timeout): raise NotImplementedError pass # if timeout < 0: raise ValueError ("invalid timeout") # # if timeout == 0: # self.setblocking (False) # else: # self.setblocking (True) # # XXX this doesn't look correct # timeout = 0 # winsock timeout still needs to be set 0 # # s = bt.settimeout (self._sockfd, timeout) # self._timeout = timeout def rfcomm_gettimeout (self): raise NotImplementedError # if self._blocking and not self._timeout: return None # return bt.gettimeout (self._sockfd) def rfcomm_fileno (self): return self.readsock.fileno () def rfcomm_dup (self): raise NotImplementedError def rfcomm_makefile (self): raise NotImplementedError def __rfcomm_advertise_service (self, name, service_id, service_classes, profiles, provider, description, protocols): if self._sdpservice is not None: raise BluetoothError ("Service already advertised") if not self.listening: raise BluetoothError ("Socket must be listening before advertised") if protocols: raise NotImplementedError ("extra protocols not yet supported in Widcomm stack") self._sdpservice = _widcomm._WCSdpService () if service_classes: service_classes = [ to_full_uuid (s) for s in service_classes ] _sdp_checkraise (self._sdpservice.add_service_class_id_list ( \ 
service_classes)) # self._if.set_security_level (name, _widcomm.BTM_SEC_NONE, True) _sdp_checkraise (self._sdpservice.add_rfcomm_protocol_descriptor ( \ self.port)) if profiles: for uuid, version in profiles: uuid = to_full_uuid (uuid) _sdp_checkraise (self._sdpservice.add_profile_descriptor_list (\ uuid, version)) _sdp_checkraise (self._sdpservice.add_service_name (name)) _sdp_checkraise (self._sdpservice.make_public_browseable ()) def __l2cap_make_cobjects (self): dbg ("__l2cap_make_cobjects") self._wc = _widcomm._WCL2CapConn () self._if = _widcomm._WCL2CapIf () self.readsock = socket.socket (socket.AF_INET, socket.SOCK_STREAM) self.readsock.connect (("127.0.0.1", self._wc.get_sockport ())) self._wc.accept_client () def l2cap_read_msg (self): intsize = struct.calcsize ("=i") msg_type_data = self.readsock.recv (intsize) msg_type = struct.unpack ("=i", msg_type_data)[0] if msg_type == _widcomm.L2CAP_DATA_RECEIVED: datalen_fmt = "=i" datalen_data = self.readsock.recv (struct.calcsize (datalen_fmt)) datalen = struct.unpack (datalen_fmt, datalen_data)[0] self.received_data.append (self.readsock.recv (datalen)) elif msg_type == _widcomm.L2CAP_INCOMING_CONNECTION: result = self._wc.accept () if not result: raise BluetoothError ("accept() failed") elif msg_type == _widcomm.L2CAP_REMOTE_DISCONNECTED: dbg ("L2CAP_REMOTE_DISCONNECTED") self.connecting = False self.listening = False self.connected = False elif msg_type == _widcomm.L2CAP_CONNECTED: self.connecting = False self.listening = False self.connected = True # elif msg_type == _widcomm.PORT_EV_CONNECT_ERR: # self.connecting = False # self.listening = False # raise BluetoothError ("Connection failed") def l2cap_bind (self, addrport): dbg ("l2cap_bind %s" % str(addrport)) addr, port = addrport if len (addr): raise ValueError ("Widcomm stack can't bind to " \ "user-specified adapter") result = self._if.assign_psm_value (L2CAP_UUID, port) if not result: raise BluetoothError ("unable to bind to port") self.bound = True 
self.port = self._if.get_psm () result = self._if.register () if not result: raise BluetoothError ("register() failed") def l2cap_listen (self, backlog): dbg ("l2cap_listen %s" % backlog) if self.connected: raise BluetoothError ("already connected") if self.listening: raise BluetoothError ("already listening/connecting") if backlog != 1: raise ValueError ("Widcomm stack requires backlog == 1") port = self._if.get_psm () self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, True) if not port: raise BluetoothError ("not bound to a port") result = self._wc.listen (self._if) if not result: raise BluetoothError ("listen() failed. don't know why") self.listening = True def l2cap_accept (self): dbg ("l2cap_accept") if self.connected: raise BluetoothError ("already connected") while self.listening and not self.connected: dbg ("waiting for connection") self.l2cap_read_msg () if self.connected: port = self._if.get_psm () client_bdaddr = BD_ADDR_to_str (self._wc.remote_bd_addr ()) # XXX widcomm API doesn't provide a way to determine the L2CAP # PSM of the client client_port = 0 # create a new socket object and give it ownership of the # wrapped C++ objects, since those are the ones actually connected _sockdata = self._wc, self._if, self.readsock clientsock = BluetoothSocket (L2CAP, _sockdata) # now create new C++ objects self.__l2cap_make_cobjects () # self.bind (("", port)) # self.listen (1) return clientsock, (client_bdaddr, client_port) def l2cap_connect (self, addrport): addr, port = addrport dbg ("connecting to %s port %d" % (addr, port)) if not is_valid_address (addr): raise ValueError ("invalid address %s" % addr) if not self._if.assign_psm_value (L2CAP_UUID, port): raise BluetoothError ("Failed to assign PSM %d" % port) if not self._if.set_security_level ("", _widcomm.BTM_SEC_NONE, False): raise BluetoothError ("Failed to set security level") if not self._if.register (): raise BluetoothError ("Failed to register PSM") self.connecting = True if not self._wc.connect 
(self._if, str_to_BD_ADDR (addr)): raise BluetoothError ("Connect failed") while self.connecting: self.l2cap_read_msg () if not self.connected: raise BluetoothError ("connection failed") def l2cap_send (self, data): dbg ("sending: [%s]" % data) status, written = self._wc.write (data) if status: dbg ("sent okay") return written else: raise BluetoothError (_port_return_code_to_str (status)) def l2cap_recv (self, numbytes): if self.nonblocking and not self.received_data: # XXX are we supposed to raise an exception, or just return None? return None while not self.received_data and self.connected: self.l2cap_read_msg () if self.received_data: data = self.received_data.pop (0) if len(data) > numbytes: self.received_data.insert (0, data[numbytes:]) return data[:numbytes] else: return data def l2cap_close (self): self._wc.disconnect () self._if.deregister () self._wc = None self.bound = False self.connecting = False self.listening = False self.connected = False # return bt.close (self._sockfd) def l2cap_getsockname (self): if not self.bound: raise BluetoothError ("Socket not bound") addr = inquirer.get_local_device_address () port = self._if.get_psm () return addr, port def l2cap_setblocking (self, blocking): self.nonblocking = not blocking self.readsock.setblocking (blocking) def l2cap_settimeout (self, timeout): raise NotImplementedError # if timeout < 0: raise ValueError ("invalid timeout") # # if timeout == 0: # self.setblocking (False) # else: # self.setblocking (True) # # XXX this doesn't look correct # timeout = 0 # winsock timeout still needs to be set 0 # # s = bt.settimeout (self._sockfd, timeout) # self._timeout = timeout def l2cap_gettimeout (self): raise NotImplementedError # if self._blocking and not self._timeout: return None # return bt.gettimeout (self._sockfd) def l2cap_fileno (self): return self.readsock.fileno () def l2cap_dup (self): raise NotImplementedError # return BluetoothSocket (self._proto, sockfd=bt.dup (self._sockfd)) def l2cap_makefile 
(self): raise NotImplementedError def __l2cap_advertise_service (self, name, service_id, service_classes, profiles, provider, description, protocols): if self._sdpservice is not None: raise BluetoothError ("Service already advertised") if not self.listening: raise BluetoothError ("Socket must be listening before advertised") if protocols: raise NotImplementedError ("extra protocols not yet supported in Widcomm stack") self._sdpservice = _widcomm._WCSdpService () if service_classes: service_classes = [ to_full_uuid (s) for s in service_classes ] _sdp_checkraise (self._sdpservice.add_service_class_id_list ( \ service_classes)) _sdp_checkraise (self._sdpservice.add_l2cap_protocol_descriptor ( \ self.port)) if profiles: for uuid, version in profiles: uuid = to_full_uuid (uuid) _sdp_checkraise (self._sdpservice.add_profile_descriptor_list (\ uuid, version)) _sdp_checkraise (self._sdpservice.add_service_name (name)) _sdp_checkraise (self._sdpservice.make_public_browseable ()) class DeviceDiscoverer: def __init__ (self): raise NotImplementedError
apache-2.0
flamingspaz/remo
remo/featuredrep/tests/test_views.py
5
3542
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.test.client import Client

import mock
from mock import patch
from nose.tools import eq_, ok_

from remo.base.tests import RemoTestCase, requires_permission
from remo.featuredrep.models import FeaturedRep
from remo.featuredrep.tests import FeaturedRepFactory
from remo.profiles.tests import UserFactory


class ViewsTest(RemoTestCase):
    """View tests for the featuredrep app (list, add, edit, delete)."""

    def test_get_as_admin(self):
        """Admins can open the edit-featured page."""
        admin = UserFactory.create(groups=['Admin'])
        article = FeaturedRepFactory.create()
        with self.login(admin) as client:
            response = client.get(reverse('featuredrep_edit_featured',
                                          args=[article.id]))
            self.assertJinja2TemplateUsed(response, 'featuredrep_alter.jinja')

    def test_get_as_council(self):
        """Council members can open the edit-featured page."""
        council_member = UserFactory.create(groups=['Council'])
        article = FeaturedRepFactory.create()
        with self.login(council_member) as client:
            response = client.get(reverse('featuredrep_edit_featured',
                                          args=[article.id]))
            self.assertJinja2TemplateUsed(response, 'featuredrep_alter.jinja')

    @requires_permission()
    def test_get_as_other_user(self):
        """Users without the permission are rejected."""
        plain_user = UserFactory.create()
        article = FeaturedRepFactory.create()
        with self.login(plain_user) as client:
            client.get(reverse('featuredrep_edit_featured',
                               args=[article.id]),
                       follow=True)

    def test_get_list_featured_page(self):
        """Get list featuredrep page."""
        UserFactory.create(groups=['Admin'])
        FeaturedRepFactory.create_batch(3)
        response = Client().get(reverse('featuredrep_list_featured'))
        self.assertJinja2TemplateUsed(response, 'featuredrep_list.jinja')

    @patch('remo.featuredrep.views.messages.success')
    @patch('remo.featuredrep.views.redirect', wraps=redirect)
    @patch('remo.featuredrep.views.forms.FeaturedRepForm')
    def test_add_new_featured(self, form_mock, redirect_mock, messages_mock):
        """Posting a valid form creates a new featured article."""
        form_mock.is_valid.return_value = True
        admin = UserFactory.create(groups=['Admin'])
        with self.login(admin) as client:
            response = client.post(reverse('featuredrep_add_featured'),
                                   follow=True)
        eq_(response.status_code, 200)
        messages_mock.assert_called_with(
            mock.ANY, 'New featured rep article created.')
        ok_(form_mock().save.called)

    @patch('remo.featuredrep.views.messages.success')
    @patch('remo.featuredrep.views.redirect', wraps=redirect)
    @patch('remo.featuredrep.views.forms.FeaturedRepForm')
    def test_edit_featured(self, form_mock, redirect_mock, messages_mock):
        """Posting a valid form edits an existing featured article."""
        form_mock.is_valid.return_value = True
        article = FeaturedRepFactory.create()
        admin = UserFactory.create(groups=['Admin'])
        with self.login(admin) as client:
            response = client.post(reverse('featuredrep_edit_featured',
                                           args=[article.id]),
                                   user=admin, follow=True)
        eq_(response.status_code, 200)
        # The view's message string (typo included) must match exactly.
        messages_mock.assert_called_with(
            mock.ANY, 'Featured rep article successfuly edited.')
        ok_(form_mock().save.called)

    @patch('remo.featuredrep.views.redirect', wraps=redirect)
    def test_delete_featured(self, redirect_mock):
        """Posting to the delete view removes the featured article."""
        admin = UserFactory.create(groups=['Admin'])
        article = FeaturedRepFactory.create()
        with self.login(admin) as client:
            client.post(reverse('featuredrep_delete_featured',
                                args=[article.id]))
        ok_(not FeaturedRep.objects.filter(pk=article.id).exists())
bsd-3-clause
lmingcsce/p4factory
mininet/1sw_demo.py
9
2869
#!/usr/bin/python # Copyright 2013-present Barefoot Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mininet.net import Mininet from mininet.topo import Topo from mininet.log import setLogLevel, info from mininet.cli import CLI from p4_mininet import P4Switch, P4Host import argparse from time import sleep parser = argparse.ArgumentParser(description='Mininet demo') parser.add_argument('--behavioral-exe', help='Path to behavioral executable', type=str, action="store", required=True) parser.add_argument('--thrift-port', help='Thrift server port for table updates', type=int, action="store", default=22222) parser.add_argument('--num-hosts', help='Number of hosts to connect to switch', type=int, action="store", default=2) args = parser.parse_args() class SingleSwitchTopo(Topo): "Single switch connected to n (< 256) hosts." 
def __init__(self, sw_path, thrift_port, n, **opts): # Initialize topology and default options Topo.__init__(self, **opts) switch = self.addSwitch('s1', sw_path = sw_path, thrift_port = thrift_port, pcap_dump = True) for h in xrange(n): host = self.addHost('h%d' % (h + 1), ip = "10.0.%d.10/24" % h, mac = '00:04:00:00:00:%02x' %h) self.addLink(host, switch) def main(): num_hosts = args.num_hosts topo = SingleSwitchTopo(args.behavioral_exe, args.thrift_port, num_hosts ) net = Mininet(topo = topo, host = P4Host, switch = P4Switch, controller = None ) net.start() sw_mac = ["00:aa:bb:00:00:%02x" % n for n in xrange(num_hosts)] sw_addr = ["10.0.%d.1" % n for n in xrange(num_hosts)] for n in xrange(num_hosts): h = net.get('h%d' % (n + 1)) h.setARP(sw_addr[n], sw_mac[n]) h.setDefaultRoute("dev eth0 via %s" % sw_addr[n]) for n in xrange(num_hosts): h = net.get('h%d' % (n + 1)) h.describe() sleep(1) print "Ready !" CLI( net ) net.stop() if __name__ == '__main__': setLogLevel( 'info' ) main()
apache-2.0
arbrandes/edx-platform
common/lib/xmodule/xmodule/partitions/partitions.py
4
10851
"""Defines ``Group`` and ``UserPartition`` models for partitioning"""

from collections import namedtuple

from stevedore.extension import ExtensionManager

# We use ``id`` in this file as the IDs of our Groups and UserPartitions,
# which Pylint disapproves of.
# pylint: disable=redefined-builtin


# UserPartition IDs must be unique. The Cohort and Random UserPartitions (when they are
# created via Studio) choose an unused ID in the range of 100 (historical) to MAX_INT. Therefore the
# dynamic UserPartitionIDs must be under 100, and they have to be hard-coded to ensure
# they are always the same whenever the dynamic partition is added (since the UserPartition
# ID is stored in the xblock group_access dict).
ENROLLMENT_TRACK_PARTITION_ID = 50

MINIMUM_STATIC_PARTITION_ID = 100


class UserPartitionError(Exception):
    """
    Base Exception for when an error was found regarding user partitions.
    """
    pass  # lint-amnesty, pylint: disable=unnecessary-pass


class NoSuchUserPartitionError(UserPartitionError):
    """
    Exception to be raised when looking up a UserPartition by its ID fails.
    """
    pass  # lint-amnesty, pylint: disable=unnecessary-pass


class NoSuchUserPartitionGroupError(UserPartitionError):
    """
    Exception to be raised when looking up a UserPartition Group by its ID fails.
    """
    pass  # lint-amnesty, pylint: disable=unnecessary-pass


class ReadOnlyUserPartitionError(UserPartitionError):
    """
    Exception to be raised when attempting to modify a read only partition.
    """
    pass  # lint-amnesty, pylint: disable=unnecessary-pass


class Group(namedtuple("Group", "id name")):
    """
    An id and name for a group of students.  The id should be unique
    within the UserPartition this group appears in.
    """
    # in case we want to add to this class, a version will be handy
    # for deserializing old versions.  (This will be serialized in courses)
    VERSION = 1

    def __new__(cls, id, name):
        # Normalize the id to int so that "1" and 1 compare equal after a
        # JSON round-trip.
        return super().__new__(cls, int(id), name)

    def to_json(self):
        """
        'Serialize' to a json-serializable representation.

        Returns:
            a dictionary with keys for the properties of the group.
        """
        return {
            "id": self.id,
            "name": self.name,
            "version": Group.VERSION
        }

    @staticmethod
    def from_json(value):
        """
        Deserialize a Group from a json-like representation.

        Args:
            value: a dictionary with keys for the properties of the group.

        Raises TypeError if the value doesn't have the right keys.
        """
        # Passing an already-constructed Group through is a no-op.
        if isinstance(value, Group):
            return value

        for key in ("id", "name", "version"):
            if key not in value:
                raise TypeError("Group dict {} missing value key '{}'".format(
                    value, key))

        if value["version"] != Group.VERSION:
            raise TypeError("Group dict {} has unexpected version".format(
                value))

        return Group(value["id"], value["name"])


# The Stevedore extension point namespace for user partition scheme plugins.
USER_PARTITION_SCHEME_NAMESPACE = 'openedx.user_partition_scheme'


class UserPartition(namedtuple("UserPartition", "id name description groups scheme parameters active")):
    """A named way to partition users into groups, primarily intended for
    running experiments. It is expected that each user will be in at most one
    group in a partition.

    A Partition has an id, name, scheme, description, parameters, and a list
    of groups. The id is intended to be unique within the context where these
    are used. (e.g., for partitions of users within a course, the ids should
    be unique per-course). The scheme is used to assign users into groups.
    The parameters field is used to save extra parameters e.g., location of
    the block in case of VerificationPartitionScheme.

    Partitions can be marked as inactive by setting the "active" flag to False.
    Any group access rule referencing inactive partitions will be ignored
    when performing access checks.
    """
    VERSION = 3

    # The collection of user partition scheme extensions.
    scheme_extensions = None

    # The default scheme to be used when upgrading version 1 partitions.
    VERSION_1_SCHEME = "random"

    def __new__(cls, id, name, description, groups, scheme=None, parameters=None, active=True,
                scheme_id=VERSION_1_SCHEME):
        # ``scheme_id`` is only consulted when no scheme object is supplied;
        # it defaults to the version-1 ("random") scheme for legacy data.
        if not scheme:
            scheme = UserPartition.get_scheme(scheme_id)
        if parameters is None:
            parameters = {}

        return super().__new__(cls, int(id), name, description, groups, scheme, parameters, active)

    @staticmethod
    def get_scheme(name):
        """
        Returns the user partition scheme with the given name.
        """
        # Note: we're creating the extension manager lazily to ensure that the Python path
        # has been correctly set up. Trying to create this statically will fail, unfortunately.
        if not UserPartition.scheme_extensions:
            UserPartition.scheme_extensions = ExtensionManager(namespace=USER_PARTITION_SCHEME_NAMESPACE)
        try:
            scheme = UserPartition.scheme_extensions[name].plugin  # lint-amnesty, pylint: disable=unsubscriptable-object
        except KeyError:
            raise UserPartitionError(f"Unrecognized scheme '{name}'")  # lint-amnesty, pylint: disable=raise-missing-from
        scheme.name = name
        return scheme

    def to_json(self):
        """
        'Serialize' to a json-serializable representation.

        Returns:
            a dictionary with keys for the properties of the partition.
        """
        return {
            "id": self.id,
            "name": self.name,
            "scheme": self.scheme.name,
            "description": self.description,
            "parameters": self.parameters,
            "groups": [g.to_json() for g in self.groups],
            "active": bool(self.active),
            "version": UserPartition.VERSION
        }

    @staticmethod
    def from_json(value):
        """
        Deserialize a Group from a json-like representation.

        Args:
            value: a dictionary with keys for the properties of the group.

        Raises TypeError if the value doesn't have the right keys.
        """
        if isinstance(value, UserPartition):
            return value

        for key in ("id", "name", "description", "version", "groups"):
            if key not in value:
                raise TypeError(f"UserPartition dict {value} missing value key '{key}'")

        if value["version"] == 1:
            # If no scheme was provided, set it to the default ('random')
            scheme_id = UserPartition.VERSION_1_SCHEME

        # Version changes should be backwards compatible in case the code
        # gets rolled back.  If we see a version number greater than the current
        # version, we should try to read it rather than raising an exception.
        elif value["version"] >= 2:
            if "scheme" not in value:
                raise TypeError(f"UserPartition dict {value} missing value key 'scheme'")

            scheme_id = value["scheme"]
        else:
            raise TypeError(f"UserPartition dict {value} has unexpected version")

        # "parameters" and "active" were added after version 1, so fall back
        # to safe defaults when absent.
        parameters = value.get("parameters", {})
        active = value.get("active", True)
        groups = [Group.from_json(g) for g in value["groups"]]
        scheme = UserPartition.get_scheme(scheme_id)
        if not scheme:
            raise TypeError(f"UserPartition dict {value} has unrecognized scheme {scheme_id}")

        if getattr(scheme, 'read_only', False):
            raise ReadOnlyUserPartitionError(f"UserPartition dict {value} uses scheme {scheme_id} which is read only")  # lint-amnesty, pylint: disable=line-too-long

        # A scheme may supply its own factory (e.g. for custom subclasses);
        # otherwise build a plain UserPartition.
        if hasattr(scheme, "create_user_partition"):
            return scheme.create_user_partition(
                value["id"],
                value["name"],
                value["description"],
                groups,
                parameters,
                active,
            )
        else:
            return UserPartition(
                value["id"],
                value["name"],
                value["description"],
                groups,
                scheme,
                parameters,
                active,
            )

    def get_group(self, group_id):
        """
        Returns the group with the specified id.

        Arguments:
            group_id (int): ID of the partition group.

        Raises:
            NoSuchUserPartitionGroupError: The specified group could not be found.
        """
        for group in self.groups:
            if group.id == group_id:
                return group

        raise NoSuchUserPartitionGroupError(
            "Could not find a Group with ID [{group_id}] in UserPartition [{partition_id}].".format(
                group_id=group_id, partition_id=self.id
            )
        )

    def access_denied_message(self, block_key, user, user_group, allowed_groups):  # lint-amnesty, pylint: disable=unused-argument
        """
        Return a message that should be displayed to the user when they are not allowed to access
        content managed by this partition, or None if there is no applicable message.

        Arguments:
            block_key (:class:`.BlockUsageLocator`): The content being managed
            user (:class:`.User`): The user who was denied access
            user_group (:class:`.Group`): The current Group the user is in
            allowed_groups (list of :class:`.Group`): The groups who are allowed to see the content

        Returns: str
        """
        # Base implementation has no message; schemes may override.
        return None

    def access_denied_fragment(self, block, user, user_group, allowed_groups):  # lint-amnesty, pylint: disable=unused-argument
        """
        Return an html fragment that should be displayed to the user when they are not allowed to access
        content managed by this partition, or None if there is no applicable message.

        Arguments:
            block (:class:`.XBlock`): The content being managed
            user (:class:`.User`): The user who was denied access
            user_group (:class:`.Group`): The current Group the user is in
            allowed_groups (list of :class:`.Group`): The groups who are allowed to see the content

        Returns: :class:`.Fragment`
        """
        # Base implementation has no fragment; schemes may override.
        return None


def get_partition_from_id(partitions, user_partition_id):
    """
    Look for a user partition with a matching id in the provided list of partitions.

    Returns:
        A UserPartition, or None if not found.
    """
    for partition in partitions:
        if partition.id == user_partition_id:
            return partition

    return None
agpl-3.0
yanheven/horizon
openstack_dashboard/dashboards/project/data_processing/wizard/forms.py
23
4806
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django import template
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import forms
from horizon import messages

from openstack_dashboard.api import sahara as saharaclient
from openstack_dashboard.dashboards.project.data_processing.utils \
    import helpers


class ChoosePluginForm(forms.SelfHandlingForm):
    """Wizard step that lets the user pick a Sahara plugin and version."""

    def __init__(self, request, *args, **kwargs):
        super(ChoosePluginForm, self).__init__(request, *args, **kwargs)
        self._generate_plugin_version_fields(request)
        self.help_text_template = ("project/data_processing.wizard/"
                                   "_plugin_select_help.html")

    def handle(self, request, context):
        """Store the selected plugin name and version in the session."""
        try:
            wizard_helpers = helpers.Helpers(request)
            wizard_helpers.reset_guide()
            plugin = context["plugin_name"]
            request.session["plugin_name"] = plugin
            request.session["plugin_version"] = (
                context[plugin + "_version"])
            messages.success(request, _("Cluster type chosen"))
            return True
        except Exception:
            exceptions.handle(request, _("Unable to set cluster type"))
            return False

    def _generate_plugin_version_fields(self, request):
        """Build the plugin selector and one version selector per plugin."""
        sahara = saharaclient.client(request)
        plugins = sahara.plugins.list()

        self.fields["plugin_name"] = forms.ChoiceField(
            label=_("Plugin Name"),
            choices=[(plugin.name, plugin.title) for plugin in plugins],
            widget=forms.Select(attrs={"class": "switchable",
                                       "data-slug": "plugin"}))

        for plugin in plugins:
            # Version dropdowns are "switched" on the plugin selector, so
            # only the one matching the chosen plugin is displayed.
            self.fields[plugin.name + "_version"] = forms.ChoiceField(
                label=_("Version"),
                required=False,
                choices=[(version, version) for version in plugin.versions],
                widget=forms.Select(
                    attrs={"class": "switched",
                           "data-switch-on": "plugin",
                           "data-plugin-" + plugin.name: plugin.title}))

    def get_help_text(self, extra_context=None):
        """Render the step's help template, or fall back to plain help_text."""
        extra_context = extra_context or {}
        if not self.help_text_template:
            return defaultfilters.safe(
                defaultfilters.linebreaks(force_text(self.help_text)))
        tmpl = template.loader.get_template(self.help_text_template)
        context = template.RequestContext(self.request, extra_context)
        return defaultfilters.safe(tmpl.render(context))

    class Meta(object):
        name = _("Choose plugin type and version")


class ChooseJobTypeForm(forms.SelfHandlingForm):
    """Wizard step that lets the user pick the type of job to be guided on."""

    guide_job_type = forms.ChoiceField(
        label=_("Job Type"),
        widget=forms.Select())

    def __init__(self, request, *args, **kwargs):
        super(ChooseJobTypeForm, self).__init__(request, *args, **kwargs)
        self.help_text_template = ("project/data_processing.wizard/"
                                   "_job_type_select_help.html")
        self.fields["guide_job_type"].choices = \
            self.populate_guide_job_type_choices()

    def populate_guide_job_type_choices(self):
        """Return (key, label) pairs for every supported job type."""
        return [(job_type, helpers.JOB_TYPE_MAP[job_type][0])
                for job_type in helpers.JOB_TYPE_MAP]

    def handle(self, request, context):
        """Remember the chosen job type, resetting the guide on change."""
        try:
            wizard_helpers = helpers.Helpers(request)
            job_type = context["guide_job_type"]
            chosen_label = helpers.JOB_TYPE_MAP[job_type][0]
            if force_text(request.session.get("guide_job_type")) != (
                    force_text(chosen_label)):
                wizard_helpers.reset_job_guide()
                request.session["guide_job_type"] = chosen_label
            messages.success(request, _("Job type chosen"))
            return True
        except Exception:
            exceptions.handle(request, _("Unable to set job type"))
            return False
apache-2.0
xoseperez/rentalito
server/rentalito.py
1
8159
#! /usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

#   Rentalito
#   Copyright (C) 2013 by Xose Pérez
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.

__app__ = "Rentalito"
__version__ = "0.2"
__author__ = "Xose Pérez"
__contact__ = "xose.perez@gmail.com"
__copyright__ = "Copyright (C) 2013 Xose Pérez"
__license__ = 'GPL v3'

import sys
import time
from datetime import datetime
import ctypes

from libs.Daemon import Daemon
from libs.Config import Config
from libs.Mosquitto import Mosquitto
from libs.Processor import Processor

class Rentalito(Daemon):
    """
    Rentalito republish daemon.
    Glues the different components together: it subscribes to the configured
    MQTT topics, queues incoming messages into display "slots" and republishes
    them one at a time to ``publish_to``.
    """

    # NOTE(review): these are mutable CLASS attributes, shared by all
    # instances. That is harmless here because the daemon is a singleton,
    # but worth knowing before reusing the class.
    debug = True            # when True, log() writes to stdout
    mqtt = None             # Mosquitto wrapper, injected by the launcher below
    processor = None        # Processor that filters/formats outgoing values
    publish_to = None       # MQTT topic messages are republished to
    minimum_time = 5        # minimum seconds a message stays on display
    time_length_ratio = 0.5 # extra display seconds per character of payload
    topics = {}             # subscribed topic -> per-topic options dict
    slots = []              # queue of pending messages to display
    slot_index = {}         # topic -> last slot index used (for rotation)
    next_event_time = time.time()  # when the next message should be pushed

    def log(self, message):
        """
        Log method.
        TODO: replace with standard python logging facility
        """
        if self.debug:
            timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
            sys.stdout.write("[%s] %s\n" % (timestamp, message))
            sys.stdout.flush()

    def cleanup(self):
        """
        Clean up connections and unbind ports
        """
        self.mqtt.disconnect()
        self.log("[INFO] Exiting")
        sys.exit()

    def mqtt_connect(self):
        """
        Initiate connection to MQTT broker and bind callback methods
        """
        self.mqtt.on_connect = self.mqtt_on_connect
        self.mqtt.on_disconnect = self.mqtt_on_disconnect
        self.mqtt.on_message = self.mqtt_on_message
        self.mqtt.on_subscribe = self.mqtt_on_subscribe
        self.mqtt.connect()

    def mqtt_on_connect(self, obj, result_code):
        """
        Callback when connection to the MQTT broker has succeeded or failed.
        On success subscribe to every configured topic; on failure stop the
        daemon.
        """
        if result_code == 0:
            self.log("[INFO] Connected to MQTT broker")
            self.mqtt.send_connected()
            # Only the topic keys matter for subscribing; the mapped values
            # (per-topic options) are used later in mqtt_on_message.
            for topic, new_topic in self.topics.iteritems():
                self.log("[DEBUG] Subscribing to %s" % topic)
                self.mqtt.subscribe(topic, 0)
        else:
            self.stop()

    def mqtt_on_disconnect(self, obj, result_code):
        """
        Callback when disconnecting from the MQTT broker.
        A non-zero code means the disconnect was unexpected: wait and retry.
        """
        if result_code != 0:
            time.sleep(3)
            self.mqtt_connect()

    def mqtt_on_subscribe(self, obj, mid, qos_list):
        """
        Callback when a subscription succeeded.
        """
        self.log("[INFO] Subscription with mid %s received." % mid)

    def mqtt_on_message(self, obj, msg):
        """
        Incoming message: find or create the display slot for this topic
        and enqueue it (at the front if the topic is marked prioritary).
        """
        topic = self.topics.get(msg.topic, False)
        if topic:

            # msg.payload may be a raw ctypes buffer depending on the
            # mosquitto binding version; fall back to the object itself.
            try:
                value = ctypes.string_at(msg.payload, msg.payloadlen)
            except:
                value = msg.payload

            # Rotate through the configured number of slots for this topic.
            index = (self.slot_index.get(msg.topic, 0) + 1) % topic.get('slots', 1)
            self.slot_index[msg.topic] = index

            # Look for the slot
            found = False
            position = 0
            for slot in self.slots:
                if slot['topic'] == msg.topic and slot['index'] == index:
                    found = True
                    break
                position = position + 1

            if not found:
                slot = dict()
                slot['topic'] = msg.topic
                slot['index'] = index

            # Refresh payload and scheduling metadata whether the slot is
            # new or reused.
            slot['value'] = value
            slot['repetitions']= topic.get('repetitions', None)
            expires = topic.get('expires', None)
            slot['expires'] = time.time() + expires if expires else None

            # Prioritary topics jump to the front of the queue.
            prioritary = topic.get('prioritary', 0) == 1
            if found:
                if prioritary:
                    slot = self.slots.pop(position)
                    self.slots.insert(0, slot)
            else:
                if prioritary:
                    self.slots.insert(0, slot)
                else:
                    self.slots.append(slot)

    def push_message(self):
        """
        Pop the next slot, publish its (processed) value and schedule when
        the following message should be displayed.
        """
        if self.slots:

            # get next slot
            slot = self.slots.pop(0)

            # if slot has expired return,
            # we are not updating next_event_time so the loop will call
            # this method again very shortly
            if slot['expires'] and slot['expires'] < time.time():
                return

            # preprocess message and send
            value = self.processor.process(slot['topic'], slot['value'])
            self.mqtt.publish(self.publish_to, value)

            # update repetitions and check if we have to reenqueue it
            # (repetitions == None means repeat forever, 0 means show once)
            if not slot['repetitions'] == 0:
                if slot['repetitions']:
                    slot['repetitions'] = slot['repetitions'] - 1
                self.slots.append(slot)

            # calculate next update time: longer payloads (after the last
            # '|' separator) stay on screen longer
            length = len(value.split('|')[-1:][0])
            time_gap = max(self.minimum_time, length * self.time_length_ratio)
            self.next_event_time = time.time() + time_gap

    def prepare(self):
        """
        Loads predefined messages (a built-in date/time slot that repeats
        forever and never expires).
        """
        self.slots.append({
            'topic': '/builtin/datetime',
            'index': 1,
            'value': None,
            'repetitions': None,
            'expires': None
        })
        self.processor.add_filter('/builtin/datetime', {
            'type': 'format',
            'parameters': {'format': '{date}|{time}'}
        })

    def run(self):
        """
        Entry point, initiates components and loops forever...
        """
        self.log("[INFO] Starting " + __app__ + " v" + __version__)
        self.mqtt_connect()
        self.prepare()
        while True:
            self.mqtt.loop()
            if time.time() > self.next_event_time:
                self.push_message()

if __name__ == "__main__":

    # Load configuration and wire the daemon, MQTT client and processor
    # together before handing control to start/stop/restart.
    config = Config('rentalito.yaml')

    manager = Rentalito(config.get('general', 'pidfile', '/tmp/rentalito.pid'))
    manager.stdout = config.get('general', 'stdout', '/dev/null')
    manager.stderr = config.get('general', 'stderr', '/dev/null')
    manager.debug = config.get('general', 'debug', False)
    manager.publish_to = config.get('general', 'publish_to', '/client/rentalito')
    manager.minimum_time = config.get('general', 'minimum_time', 5)
    manager.time_length_ratio = config.get('general', 'time_length_ratio', 0.5)
    manager.topics = config.get('topics', None, {});

    mqtt = Mosquitto(config.get('mqtt', 'client_id'))
    mqtt.host = config.get('mqtt', 'host')
    mqtt.port = config.get('mqtt', 'port')
    mqtt.keepalive = config.get('mqtt', 'keepalive')
    mqtt.clean_session = config.get('mqtt', 'clean_session')
    mqtt.qos = config.get('mqtt', 'qos')
    mqtt.retain = config.get('mqtt', 'retain')
    mqtt.status_topic = config.get('mqtt', 'status_topic')
    mqtt.set_will = config.get('mqtt', 'set_will')
    manager.mqtt = mqtt

    processor = Processor(config.get('filters', None, {}))
    manager.processor = processor

    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            manager.start()
        elif 'stop' == sys.argv[1]:
            manager.stop()
        elif 'restart' == sys.argv[1]:
            manager.restart()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart" % sys.argv[0]
        sys.exit(2)
gpl-3.0
kaynfiretvguru/Eldritch
plugin.video.ex0dus/resources/lib/sources/de/ddl.py
4
3754
# -*- coding: utf-8 -*- """ Exodus Add-on Copyright (C) 2016 Exodus This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import json import re import urllib import urlparse from resources.lib.modules import client from resources.lib.modules import source_utils class source: def __init__(self): self.priority = 1 self.language = ['de'] self.domains = ['de.ddl.me'] self.base_link = 'http://de.ddl.me' self.search_link = '/search_99/?q=%s' def movie(self, imdb, title, localtitle, aliases, year): try: url = self.__get_direct_url(imdb) if not url: return return urllib.urlencode({'url': url}) except: return def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year): try: return self.__get_direct_url(imdb) except: return def episode(self, url, imdb, tvdb, title, premiered, season, episode): try: if url == None: return j = self.__get_json(url) if not j: return j = [v['info'] for k, v in j.items()] j = [(i['nr'], i['staffel'], i['sid']) for i in j] j = [(i[2]) for i in j if int(i[0]) == int(episode) and int(i[1]) == int(season)][0] return urllib.urlencode({'url': url, 'sid': j}) except: return def sources(self, url, hostDict, hostprDict): sources = [] try: if url == None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) j = self.__get_json(data['url']) if not j: return sid = data['sid'] if 'sid' in data else j.keys()[0] pcnt = int(j[sid]['1']) if 
'1' in j[sid] else 1 for jHoster in j[sid]['links']: jLinks = [i[3] for i in j[sid]['links'][jHoster] if i[5] == 'stream'] if len(jLinks) < pcnt: continue h_url = jLinks[0] valid, hoster = source_utils.is_host_valid(h_url, hostDict) if not valid: continue h_url = h_url if pcnt == 1 else 'stack://' + ' , '.join(jLinks) try: sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'info' : '' if pcnt == 1 else 'multi-part', 'url': h_url, 'direct': False, 'debridonly': False}) except: pass return sources except: return sources def resolve(self, url): return url def __get_direct_url(self, imdb): try: query = urlparse.urljoin(self.base_link, self.search_link % imdb) r = client.request(query, output='geturl') if self.search_link in r: return return r except: return def __get_json(self, url): try: result = client.request(url) result = re.compile('var\s+subcats\s+=\s*(.*?);').findall(result)[0] return json.loads(result) except: return
gpl-2.0
ddrmanxbxfr/servo
tests/wpt/web-platform-tests/tools/py/py/_path/svnurl.py
192
14714
""" module defining a subversion path object based on the external command 'svn'. This modules aims to work with svn 1.3 and higher but might also interact well with earlier versions. """ import os, sys, time, re import py from py import path, process from py._path import common from py._path import svnwc as svncommon from py._path.cacheutil import BuildcostAccessCache, AgingCache DEBUG=False class SvnCommandPath(svncommon.SvnPathBase): """ path implementation that offers access to (possibly remote) subversion repositories. """ _lsrevcache = BuildcostAccessCache(maxentries=128) _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) def __new__(cls, path, rev=None, auth=None): self = object.__new__(cls) if isinstance(path, cls): rev = path.rev auth = path.auth path = path.strpath svncommon.checkbadchars(path) path = path.rstrip('/') self.strpath = path self.rev = rev self.auth = auth return self def __repr__(self): if self.rev == -1: return 'svnurl(%r)' % self.strpath else: return 'svnurl(%r, %r)' % (self.strpath, self.rev) def _svnwithrev(self, cmd, *args): """ execute an svn command, append our own url and revision """ if self.rev is None: return self._svnwrite(cmd, *args) else: args = ['-r', self.rev] + list(args) return self._svnwrite(cmd, *args) def _svnwrite(self, cmd, *args): """ execute an svn command, append our own url """ l = ['svn %s' % cmd] args = ['"%s"' % self._escape(item) for item in args] l.extend(args) l.append('"%s"' % self._encodedurl()) # fixing the locale because we can't otherwise parse string = " ".join(l) if DEBUG: print("execing %s" % string) out = self._svncmdexecauth(string) return out def _svncmdexecauth(self, cmd): """ execute an svn command 'as is' """ cmd = svncommon.fixlocale() + cmd if self.auth is not None: cmd += ' ' + self.auth.makecmdoptions() return self._cmdexec(cmd) def _cmdexec(self, cmd): try: out = process.cmdexec(cmd) except py.process.cmdexec.Error: e = sys.exc_info()[1] if (e.err.find('File Exists') != -1 or 
e.err.find('File already exists') != -1): raise py.error.EEXIST(self) raise return out def _svnpopenauth(self, cmd): """ execute an svn command, return a pipe for reading stdin """ cmd = svncommon.fixlocale() + cmd if self.auth is not None: cmd += ' ' + self.auth.makecmdoptions() return self._popen(cmd) def _popen(self, cmd): return os.popen(cmd) def _encodedurl(self): return self._escape(self.strpath) def _norev_delentry(self, path): auth = self.auth and self.auth.makecmdoptions() or None self._lsnorevcache.delentry((str(path), auth)) def open(self, mode='r'): """ return an opened file with the given mode. """ if mode not in ("r", "rU",): raise ValueError("mode %r not supported" % (mode,)) assert self.check(file=1) # svn cat returns an empty file otherwise if self.rev is None: return self._svnpopenauth('svn cat "%s"' % ( self._escape(self.strpath), )) else: return self._svnpopenauth('svn cat -r %s "%s"' % ( self.rev, self._escape(self.strpath))) def dirpath(self, *args, **kwargs): """ return the directory path of the current path joined with any given path arguments. """ l = self.strpath.split(self.sep) if len(l) < 4: raise py.error.EINVAL(self, "base is not valid") elif len(l) == 4: return self.join(*args, **kwargs) else: return self.new(basename='').join(*args, **kwargs) # modifying methods (cache must be invalidated) def mkdir(self, *args, **kwargs): """ create & return the directory joined with args. pass a 'msg' keyword argument to set the commit message. 
""" commit_msg = kwargs.get('msg', "mkdir by py lib invocation") createpath = self.join(*args) createpath._svnwrite('mkdir', '-m', commit_msg) self._norev_delentry(createpath.dirpath()) return createpath def copy(self, target, msg='copied by py lib invocation'): """ copy path to target with checkin message msg.""" if getattr(target, 'rev', None) is not None: raise py.error.EINVAL(target, "revisions are immutable") self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg, self._escape(self), self._escape(target))) self._norev_delentry(target.dirpath()) def rename(self, target, msg="renamed by py lib invocation"): """ rename this path to target with checkin message msg. """ if getattr(self, 'rev', None) is not None: raise py.error.EINVAL(self, "revisions are immutable") self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %( msg, self._escape(self), self._escape(target))) self._norev_delentry(self.dirpath()) self._norev_delentry(self) def remove(self, rec=1, msg='removed by py lib invocation'): """ remove a file or directory (or a directory tree if rec=1) with checkin message msg.""" if self.rev is not None: raise py.error.EINVAL(self, "revisions are immutable") self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self))) self._norev_delentry(self.dirpath()) def export(self, topath): """ export to a local path topath should not exist prior to calling this, returns a py.path.local instance """ topath = py.path.local(topath) args = ['"%s"' % (self._escape(self),), '"%s"' % (self._escape(topath),)] if self.rev is not None: args = ['-r', str(self.rev)] + args self._svncmdexecauth('svn export %s' % (' '.join(args),)) return topath def ensure(self, *args, **kwargs): """ ensure that an args-joined path exists (by default as a file). If you specify a keyword argument 'dir=True' then the path is forced to be a directory path. 
""" if getattr(self, 'rev', None) is not None: raise py.error.EINVAL(self, "revisions are immutable") target = self.join(*args) dir = kwargs.get('dir', 0) for x in target.parts(reverse=True): if x.check(): break else: raise py.error.ENOENT(target, "has not any valid base!") if x == target: if not x.check(dir=dir): raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) return x tocreate = target.relto(x) basename = tocreate.split(self.sep, 1)[0] tempdir = py.path.local.mkdtemp() try: tempdir.ensure(tocreate, dir=dir) cmd = 'svn import -m "%s" "%s" "%s"' % ( "ensure %s" % self._escape(tocreate), self._escape(tempdir.join(basename)), x.join(basename)._encodedurl()) self._svncmdexecauth(cmd) self._norev_delentry(x) finally: tempdir.remove() return target # end of modifying methods def _propget(self, name): res = self._svnwithrev('propget', name) return res[:-1] # strip trailing newline def _proplist(self): res = self._svnwithrev('proplist') lines = res.split('\n') lines = [x.strip() for x in lines[1:]] return svncommon.PropListDict(self, lines) def info(self): """ return an Info structure with svn-provided information. 
""" parent = self.dirpath() nameinfo_seq = parent._listdir_nameinfo() bn = self.basename for name, info in nameinfo_seq: if name == bn: return info raise py.error.ENOENT(self) def _listdir_nameinfo(self): """ return sequence of name-info directory entries of self """ def builder(): try: res = self._svnwithrev('ls', '-v') except process.cmdexec.Error: e = sys.exc_info()[1] if e.err.find('non-existent in that revision') != -1: raise py.error.ENOENT(self, e.err) elif e.err.find("E200009:") != -1: raise py.error.ENOENT(self, e.err) elif e.err.find('File not found') != -1: raise py.error.ENOENT(self, e.err) elif e.err.find('not part of a repository')!=-1: raise py.error.ENOENT(self, e.err) elif e.err.find('Unable to open')!=-1: raise py.error.ENOENT(self, e.err) elif e.err.lower().find('method not allowed')!=-1: raise py.error.EACCES(self, e.err) raise py.error.Error(e.err) lines = res.split('\n') nameinfo_seq = [] for lsline in lines: if lsline: info = InfoSvnCommand(lsline) if info._name != '.': # svn 1.5 produces '.' dirs, nameinfo_seq.append((info._name, info)) nameinfo_seq.sort() return nameinfo_seq auth = self.auth and self.auth.makecmdoptions() or None if self.rev is not None: return self._lsrevcache.getorbuild((self.strpath, self.rev, auth), builder) else: return self._lsnorevcache.getorbuild((self.strpath, auth), builder) def listdir(self, fil=None, sort=None): """ list directory contents, possibly filter by the given fil func and possibly sorted. """ if isinstance(fil, str): fil = common.FNMatcher(fil) nameinfo_seq = self._listdir_nameinfo() if len(nameinfo_seq) == 1: name, info = nameinfo_seq[0] if name == self.basename and info.kind == 'file': #if not self.check(dir=1): raise py.error.ENOTDIR(self) paths = [self.join(name) for (name, info) in nameinfo_seq] if fil: paths = [x for x in paths if fil(x)] self._sortlist(paths, sort) return paths def log(self, rev_start=None, rev_end=1, verbose=False): """ return a list of LogEntry instances for this path. 
rev_start is the starting revision (defaulting to the first one). rev_end is the last revision (defaulting to HEAD). if verbose is True, then the LogEntry instances also know which files changed. """ assert self.check() #make it simpler for the pipe rev_start = rev_start is None and "HEAD" or rev_start rev_end = rev_end is None and "HEAD" or rev_end if rev_start == "HEAD" and rev_end == 1: rev_opt = "" else: rev_opt = "-r %s:%s" % (rev_start, rev_end) verbose_opt = verbose and "-v" or "" xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' % (rev_opt, verbose_opt, self.strpath)) from xml.dom import minidom tree = minidom.parse(xmlpipe) result = [] for logentry in filter(None, tree.firstChild.childNodes): if logentry.nodeType == logentry.ELEMENT_NODE: result.append(svncommon.LogEntry(logentry)) return result #01234567890123456789012345678901234567890123467 # 2256 hpk 165 Nov 24 17:55 __init__.py # XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! # 1312 johnny 1627 May 05 14:32 test_decorators.py # class InfoSvnCommand: # the '0?' part in the middle is an indication of whether the resource is # locked, see 'svn help ls' lspattern = re.compile( r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? ' '*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$') def __init__(self, line): # this is a typical line from 'svn ls http://...' 
#_ 1127 jum 0 Jul 13 15:28 branch/ match = self.lspattern.match(line) data = match.groupdict() self._name = data['file'] if self._name[-1] == '/': self._name = self._name[:-1] self.kind = 'dir' else: self.kind = 'file' #self.has_props = l.pop(0) == 'P' self.created_rev = int(data['rev']) self.last_author = data['author'] self.size = data['size'] and int(data['size']) or 0 self.mtime = parse_time_with_missing_year(data['date']) self.time = self.mtime * 1000000 def __eq__(self, other): return self.__dict__ == other.__dict__ #____________________________________________________ # # helper functions #____________________________________________________ def parse_time_with_missing_year(timestr): """ analyze the time part from a single line of "svn ls -v" the svn output doesn't show the year makes the 'timestr' ambigous. """ import calendar t_now = time.gmtime() tparts = timestr.split() month = time.strptime(tparts.pop(0), '%b')[1] day = time.strptime(tparts.pop(0), '%d')[2] last = tparts.pop(0) # year or hour:minute try: if ":" in last: raise ValueError() year = time.strptime(last, '%Y')[0] hour = minute = 0 except ValueError: hour, minute = time.strptime(last, '%H:%M')[3:5] year = t_now[0] t_result = (year, month, day, hour, minute, 0,0,0,0) if t_result > t_now: year -= 1 t_result = (year, month, day, hour, minute, 0,0,0,0) return calendar.timegm(t_result) class PathEntry: def __init__(self, ppart): self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') self.action = ppart.getAttribute('action').encode('UTF-8') if self.action == 'A': self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') if self.copyfrom_path: self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))
mpl-2.0
bitcoinsSG/bitcoin
qa/rpc-tests/importmulti.py
11
17167
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class ImportMultiTest (BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self, split=False): self.nodes = start_nodes(2, self.options.tmpdir) self.is_network_split=False def run_test (self): print ("Mining blocks...") self.nodes[0].generate(1) self.nodes[1].generate(1) # keyword definition PRIV_KEY = 'privkey' PUB_KEY = 'pubkey' ADDRESS_KEY = 'address' SCRIPT_KEY = 'script' node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) #Check only one address assert_equal(node0_address1['ismine'], True) #Node 1 sync test assert_equal(self.nodes[1].getblockcount(),1) #Address Test - before import address_info = self.nodes[1].validateaddress(node0_address1['address']) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) # RPC importmulti ----------------------------------------------- # Bitcoin Address print("Should import an address") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] } }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) # ScriptPubKey + internal print("Should import a scriptPubKey with internal flag") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = 
self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "internal": True }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) # ScriptPubKey + !internal print("Should not import a scriptPubKey without internal flag") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) # Address + Public key + !Internal print("Should import an address with public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "pubkeys": [ address['pubkey'] ] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) # ScriptPubKey + Public key + internal print("Should import a scriptPubKey with internal and with public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "pubkeys": [ address['pubkey'] ], "internal": True }]; result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) # ScriptPubKey + Public key + !internal print("Should not import a scriptPubKey without 
internal and with public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "pubkeys": [ address['pubkey'] ] }]; result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) # Address + Private key + !watchonly print("Should import an address with private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "keys": [ self.nodes[0].dumpprivkey(address['address']) ] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], True) # Address + Private key + watchonly print("Should not import an address with private key and with watchonly") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "keys": [ self.nodes[0].dumpprivkey(address['address']) ], "watchonly": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) # ScriptPubKey + Private key + internal print("Should import a scriptPubKey with internal and with private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = 
self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "keys": [ self.nodes[0].dumpprivkey(address['address']) ], "internal": True }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], True) # ScriptPubKey + Private key + !internal print("Should not import a scriptPubKey without internal and with private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "keys": [ self.nodes[0].dumpprivkey(address['address']) ] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) # P2SH address sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) transaction = self.nodes[1].gettransaction(transactionid); print("Should import a p2sh") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] } }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) assert_equal(address_assert['isscript'], True) assert_equal(address_assert['iswatchonly'], True) p2shunspent = self.nodes[1].listunspent(0,999999, 
[multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], False) # P2SH + Redeem script sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) transaction = self.nodes[1].gettransaction(transactionid); print("Should import a p2sh with respective redeem script") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "redeemscript": multi_sig_script['redeemScript'] }]) assert_equal(result[0]['success'], True) p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], True) # P2SH + Redeem script + Private Keys + !Watchonly sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) transaction = self.nodes[1].gettransaction(transactionid); print("Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "redeemscript": multi_sig_script['redeemScript'], "keys": [ 
self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])] }]) assert_equal(result[0]['success'], True) p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], True) # P2SH + Redeem script + Private Keys + Watchonly sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) transaction = self.nodes[1].gettransaction(transactionid); print("Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "redeemscript": multi_sig_script['redeemScript'], "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])], "watchonly": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') # Address + Public key + !Internal + Wrong pubkey print("Should not import an address with a wrong public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "pubkeys": [ address2['pubkey'] ] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 
'Consistency check failed') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) # ScriptPubKey + Public key + internal + Wrong pubkey print("Should not import a scriptPubKey with internal and with a wrong public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "pubkeys": [ address2['pubkey'] ], "internal": True }]; result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) # Address + Private key + !watchonly + Wrong private key print("Should not import an address with a wrong private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "keys": [ self.nodes[0].dumpprivkey(address2['address']) ] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) # ScriptPubKey + Private key + internal + Wrong private key print("Should not import a scriptPubKey with internal and with a wrong private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = 
self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "keys": [ self.nodes[0].dumpprivkey(address2['address']) ], "internal": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].validateaddress(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) if __name__ == '__main__': ImportMultiTest ().main ()
mit
tempbottle/v8.rs
build/gyp/pylib/gyp/generator/android.py
5
45018
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This generates makefiles suitable for inclusion into the Android build system # via an Android.mk file. It is based on make.py, the standard makefile # generator. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level GypAndroid.mk. This means that all # variables in .mk-files clobber one another, and furthermore that any # variables set potentially clash with other Android build system variables. # Try to avoid setting global variables where possible. import gyp import gyp.common import gyp.generator.make as make # Reuse global functions from make backend. import os import re import subprocess generator_default_variables = { 'OS': 'android', 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_SUFFIX': '.so', 'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)', 'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)', 'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)', 'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)', 'LIB_DIR': '$(obj).$(TOOLSET)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(RULE_SOURCES)', 'RULE_INPUT_EXT': '$(suffix $<)', 'RULE_INPUT_NAME': '$(notdir $<)', 'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True # Generator-specific gyp specs. generator_additional_non_configuration_keys = [ # Boolean to declare that this target does not want its name mangled. 'android_unmangled_name', # Map of android build system variables to set. 
'aosp_build_settings', ] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] ALL_MODULES_FOOTER = """\ # "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from # all the included sub-makefiles. This is just here to clarify. gyp_all_modules: """ header = """\ # This file is generated by gyp; do not edit. """ # Map gyp target types to Android module classes. MODULE_CLASSES = { 'static_library': 'STATIC_LIBRARIES', 'shared_library': 'SHARED_LIBRARIES', 'executable': 'EXECUTABLES', } def IsCPPExtension(ext): return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx' def Sourceify(path): """Convert a path to its source directory form. The Android backend does not support options.generator_output, so this function is a noop.""" return path # Map from qualified target to path to output. # For Android, the target of these maps is a tuple ('static', 'modulename'), # ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string, # since we link by module. target_outputs = {} # Map from qualified target to any linkable output. A subset # of target_outputs. E.g. when mybinary depends on liba, we want to # include liba in the linker line; when otherbinary depends on # mybinary, we just want to build mybinary first. target_link_deps = {} class AndroidMkWriter(object): """AndroidMkWriter packages up the writing of one target-specific Android.mk. Its only real entry point is Write(), and is mostly used for namespacing. """ def __init__(self, android_top_dir): self.android_top_dir = android_top_dir def Write(self, qualified_target, relative_target, base_path, output_filename, spec, configs, part_of_all, write_alias_target): """The main entry point: writes a .mk file for a single target. 
Arguments: qualified_target: target we're generating relative_target: qualified target name relative to the root base_path: path relative to source root we're building in, used to resolve target-relative paths output_filename: output .mk file name to write spec, configs: gyp info part_of_all: flag indicating this target is part of 'all' write_alias_target: flag indicating whether to create short aliases for this target """ gyp.common.EnsureDirExists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) self.qualified_target = qualified_target self.relative_target = relative_target self.path = base_path self.target = spec['target_name'] self.type = spec['type'] self.toolset = spec['toolset'] deps, link_deps = self.ComputeDeps(spec) # Some of the generation below can add extra output, sources, or # link dependencies. All of the out params of the functions that # follow use names like extra_foo. extra_outputs = [] extra_sources = [] self.android_class = MODULE_CLASSES.get(self.type, 'GYP') self.android_module = self.ComputeAndroidModule(spec) (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec) self.output = self.output_binary = self.ComputeOutput(spec) # Standard header. self.WriteLn('include $(CLEAR_VARS)\n') # Module class and name. self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class) self.WriteLn('LOCAL_MODULE := ' + self.android_module) # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE. # The library module classes fail if the stem is set. ComputeOutputParts # makes sure that stem == modulename in these cases. 
if self.android_stem != self.android_module: self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem) self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix) if self.toolset == 'host': self.WriteLn('LOCAL_IS_HOST_MODULE := true') self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)') else: self.WriteLn('LOCAL_MODULE_TARGET_ARCH := ' '$(TARGET_$(GYP_VAR_PREFIX)ARCH)') # Grab output directories; needed for Actions and Rules. if self.toolset == 'host': self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))') else: self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))') self.WriteLn('gyp_shared_intermediate_dir := ' '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))') self.WriteLn() # List files this target depends on so that actions/rules/copies/sources # can depend on the list. # TODO: doesn't pull in things through transitive link deps; needed? target_dependencies = [x[1] for x in deps if x[0] == 'path'] self.WriteLn('# Make sure our deps are built first.') self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES', local_pathify=True) # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: self.WriteActions(spec['actions'], extra_sources, extra_outputs) # Rules must be early like actions. if 'rules' in spec: self.WriteRules(spec['rules'], extra_sources, extra_outputs) if 'copies' in spec: self.WriteCopies(spec['copies'], extra_outputs) # GYP generated outputs. self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True) # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend # on both our dependency targets and our generated files. self.WriteLn('# Make sure our deps and generated files are built first.') self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) ' '$(GYP_GENERATED_OUTPUTS)') self.WriteLn() # Sources. 
if spec.get('sources', []) or extra_sources: self.WriteSources(spec, configs, extra_sources) self.WriteTarget(spec, configs, deps, link_deps, part_of_all, write_alias_target) # Update global list of target outputs, used in dependency tracking. target_outputs[qualified_target] = ('path', self.output_binary) # Update global list of link dependencies. if self.type == 'static_library': target_link_deps[qualified_target] = ('static', self.android_module) elif self.type == 'shared_library': target_link_deps[qualified_target] = ('shared', self.android_module) self.fp.close() return self.android_module def WriteActions(self, actions, extra_sources, extra_outputs): """Write Makefile code for any 'actions' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these actions (used to make other pieces dependent on these actions) """ for action in actions: name = make.StringToMakefileVariable('%s_%s' % (self.relative_target, action['action_name'])) self.WriteLn('### Rules for action "%s":' % action['action_name']) inputs = action['inputs'] outputs = action['outputs'] # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set() for out in outputs: if not out.startswith('$'): print ('WARNING: Action for target "%s" writes output to local path ' '"%s".' % (self.target, out)) dir = os.path.split(out)[0] if dir: dirs.add(dir) if int(action.get('process_outputs_as_sources', False)): extra_sources += outputs # Prepare the actual command. 
command = gyp.common.EncodePOSIXShellList(action['action']) if 'message' in action: quiet_cmd = 'Gyp action: %s ($@)' % action['message'] else: quiet_cmd = 'Gyp action: %s ($@)' % name if len(dirs) > 0: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command cd_action = 'cd $(gyp_local_path)/%s; ' % self.path command = cd_action + command # The makefile rules are all relative to the top dir, but the gyp actions # are defined relative to their containing dir. This replaces the gyp_* # variables for the action rule with an absolute version so that the # output goes in the right place. # Only write the gyp_* rules for the "primary" output (:1); # it's superfluous for the "extra outputs", and this avoids accidentally # writing duplicate dummy rules for those outputs. main_output = make.QuoteSpaces(self.LocalPathify(outputs[0])) self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output) self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output) self.WriteLn('%s: gyp_intermediate_dir := ' '$(abspath $(gyp_intermediate_dir))' % main_output) self.WriteLn('%s: gyp_shared_intermediate_dir := ' '$(abspath $(gyp_shared_intermediate_dir))' % main_output) # Android's envsetup.sh adds a number of directories to the path including # the built host binary directory. This causes actions/rules invoked by # gyp to sometimes use these instead of system versions, e.g. bison. # The built host binaries may not be suitable, and can cause errors. # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable # set by envsetup. self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output) # Don't allow spaces in input/output filenames, but make an exception for # filenames which start with '$(' since it's okay for there to be spaces # inside of make function/macro invocations. 
for input in inputs: if not input.startswith('$(') and ' ' in input: raise gyp.common.GypError( 'Action input filename "%s" in target %s contains a space' % (input, self.target)) for output in outputs: if not output.startswith('$(') and ' ' in output: raise gyp.common.GypError( 'Action output filename "%s" in target %s contains a space' % (output, self.target)) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' % (main_output, ' '.join(map(self.LocalPathify, inputs)))) self.WriteLn('\t@echo "%s"' % quiet_cmd) self.WriteLn('\t$(hide)%s\n' % command) for output in outputs[1:]: # Make each output depend on the main output, with an empty command # to force make to notice that the mtime has changed. self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output)) extra_outputs += outputs self.WriteLn() self.WriteLn() def WriteRules(self, rules, extra_sources, extra_outputs): """Write Makefile code for any 'rules' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these rules (used to make other pieces dependent on these rules) """ if len(rules) == 0: return for rule in rules: if len(rule.get('rule_sources', [])) == 0: continue name = make.StringToMakefileVariable('%s_%s' % (self.relative_target, rule['rule_name'])) self.WriteLn('\n### Generated for rule "%s":' % name) self.WriteLn('# "%s":' % rule) inputs = rule.get('inputs') for rule_source in rule.get('rule_sources', []): (rule_source_dirname, rule_source_basename) = os.path.split(rule_source) (rule_source_root, rule_source_ext) = \ os.path.splitext(rule_source_basename) outputs = [self.ExpandInputRoot(out, rule_source_root, rule_source_dirname) for out in rule['outputs']] dirs = set() for out in outputs: if not out.startswith('$'): print ('WARNING: Rule for target %s writes output to local path %s' % (self.target, out)) dir = os.path.dirname(out) if dir: dirs.add(dir) extra_outputs += outputs if 
int(rule.get('process_outputs_as_sources', False)): extra_sources.extend(outputs) components = [] for component in rule['action']: component = self.ExpandInputRoot(component, rule_source_root, rule_source_dirname) if '$(RULE_SOURCES)' in component: component = component.replace('$(RULE_SOURCES)', rule_source) components.append(component) command = gyp.common.EncodePOSIXShellList(components) cd_action = 'cd $(gyp_local_path)/%s; ' % self.path command = cd_action + command if dirs: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command # We set up a rule to build the first output, and then set up # a rule for each additional output to depend on the first. outputs = map(self.LocalPathify, outputs) main_output = outputs[0] self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output) self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output) self.WriteLn('%s: gyp_intermediate_dir := ' '$(abspath $(gyp_intermediate_dir))' % main_output) self.WriteLn('%s: gyp_shared_intermediate_dir := ' '$(abspath $(gyp_shared_intermediate_dir))' % main_output) # See explanation in WriteActions. self.WriteLn('%s: export PATH := ' '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output) main_output_deps = self.LocalPathify(rule_source) if inputs: main_output_deps += ' ' main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs]) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' % (main_output, main_output_deps)) self.WriteLn('\t%s\n' % command) for output in outputs[1:]: # Make each output depend on the main output, with an empty command # to force make to notice that the mtime has changed. self.WriteLn('%s: %s ;' % (output, main_output)) self.WriteLn() self.WriteLn() def WriteCopies(self, copies, extra_outputs): """Write Makefile code for any 'copies' from the gyp input. 
extra_outputs: a list that will be filled in with any outputs of this action (used to make other pieces dependent on this action) """ self.WriteLn('### Generated for copy rule.') variable = make.StringToMakefileVariable(self.relative_target + '_copies') outputs = [] for copy in copies: for path in copy['files']: # The Android build system does not allow generation of files into the # source tree. The destination should start with a variable, which will # typically be $(gyp_intermediate_dir) or # $(gyp_shared_intermediate_dir). Note that we can't use an assertion # because some of the gyp tests depend on this. if not copy['destination'].startswith('$'): print ('WARNING: Copy rule for target %s writes output to ' 'local path %s' % (self.target, copy['destination'])) # LocalPathify() calls normpath, stripping trailing slashes. path = Sourceify(self.LocalPathify(path)) filename = os.path.split(path)[1] output = Sourceify(self.LocalPathify(os.path.join(copy['destination'], filename))) self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' % (output, path)) self.WriteLn('\t@echo Copying: $@') self.WriteLn('\t$(hide) mkdir -p $(dir $@)') self.WriteLn('\t$(hide) $(ACP) -rpf $< $@') self.WriteLn() outputs.append(output) self.WriteLn('%s = %s' % (variable, ' '.join(map(make.QuoteSpaces, outputs)))) extra_outputs.append('$(%s)' % variable) self.WriteLn() def WriteSourceFlags(self, spec, configs): """Write out the flags and include paths used to compile source files for the current target. Args: spec, configs: input from gyp. 
""" for configname, config in sorted(configs.iteritems()): extracted_includes = [] self.WriteLn('\n# Flags passed to both C and C++ files.') cflags, includes_from_cflags = self.ExtractIncludesFromCFlags( config.get('cflags', []) + config.get('cflags_c', [])) extracted_includes.extend(includes_from_cflags) self.WriteList(cflags, 'MY_CFLAGS_%s' % configname) self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname, prefix='-D', quoter=make.EscapeCppDefine) self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS') includes = list(config.get('include_dirs', [])) includes.extend(extracted_includes) includes = map(Sourceify, map(self.LocalPathify, includes)) includes = self.NormalizeIncludePaths(includes) self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname) self.WriteLn('\n# Flags passed to only C++ (and not C) files.') self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname) self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) ' '$(MY_DEFS_$(GYP_CONFIGURATION))') # Undefine ANDROID for host modules # TODO: the source code should not use macro ANDROID to tell if it's host # or target module. if self.toolset == 'host': self.WriteLn('# Undefine ANDROID for host modules') self.WriteLn('LOCAL_CFLAGS += -UANDROID') self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) ' '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))') self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))') # Android uses separate flags for assembly file invocations, but gyp expects # the same CFLAGS to be applied: self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)') def WriteSources(self, spec, configs, extra_sources): """Write Makefile code for any 'sources' from the gyp input. These are source files necessary to build the current target. We need to handle shared_intermediate directory source files as a special case by copying them to the intermediate directory and treating them as a genereated sources. 
Otherwise the Android build rules won't pick them up. Args: spec, configs: input from gyp. extra_sources: Sources generated from Actions or Rules. """ sources = filter(make.Compilable, spec.get('sources', [])) generated_not_sources = [x for x in extra_sources if not make.Compilable(x)] extra_sources = filter(make.Compilable, extra_sources) # Determine and output the C++ extension used by these sources. # We simply find the first C++ file and use that extension. all_sources = sources + extra_sources local_cpp_extension = '.cpp' for source in all_sources: (root, ext) = os.path.splitext(source) if IsCPPExtension(ext): local_cpp_extension = ext break if local_cpp_extension != '.cpp': self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension) # We need to move any non-generated sources that are coming from the # shared intermediate directory out of LOCAL_SRC_FILES and put them # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files # that don't match our local_cpp_extension, since Android will only # generate Makefile rules for a single LOCAL_CPP_EXTENSION. local_files = [] for source in sources: (root, ext) = os.path.splitext(source) if '$(gyp_shared_intermediate_dir)' in source: extra_sources.append(source) elif '$(gyp_intermediate_dir)' in source: extra_sources.append(source) elif IsCPPExtension(ext) and ext != local_cpp_extension: extra_sources.append(source) else: local_files.append(os.path.normpath(os.path.join(self.path, source))) # For any generated source, if it is coming from the shared intermediate # directory then we add a Make rule to copy them to the local intermediate # directory first. This is because the Android LOCAL_GENERATED_SOURCES # must be in the local module intermediate directory for the compile rules # to work properly. If the file has the wrong C++ extension, then we add # a rule to copy that to intermediates and use the new version. 
final_generated_sources = [] # If a source file gets copied, we still need to add the orginal source # directory as header search path, for GCC searches headers in the # directory that contains the source file by default. origin_src_dirs = [] for source in extra_sources: local_file = source if not '$(gyp_intermediate_dir)/' in local_file: basename = os.path.basename(local_file) local_file = '$(gyp_intermediate_dir)/' + basename (root, ext) = os.path.splitext(local_file) if IsCPPExtension(ext) and ext != local_cpp_extension: local_file = root + local_cpp_extension if local_file != source: self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source))) self.WriteLn('\tmkdir -p $(@D); cp $< $@') origin_src_dirs.append(os.path.dirname(source)) final_generated_sources.append(local_file) # We add back in all of the non-compilable stuff to make sure that the # make rules have dependencies on them. final_generated_sources.extend(generated_not_sources) self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES') origin_src_dirs = gyp.common.uniquer(origin_src_dirs) origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs)) self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS') self.WriteList(local_files, 'LOCAL_SRC_FILES') # Write out the flags used to compile the source; this must be done last # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path. self.WriteSourceFlags(spec, configs) def ComputeAndroidModule(self, spec): """Return the Android module name used for a gyp spec. We use the complete qualified target name to avoid collisions between duplicate targets in different directories. We also add a suffix to distinguish gyp-generated module names. 
""" if int(spec.get('android_unmangled_name', 0)): assert self.type != 'shared_library' or self.target.startswith('lib') return self.target if self.type == 'shared_library': # For reasons of convention, the Android build system requires that all # shared library modules are named 'libfoo' when generating -l flags. prefix = 'lib_' else: prefix = '' if spec['toolset'] == 'host': suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp' else: suffix = '_gyp' if self.path: middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target)) else: middle = make.StringToMakefileVariable(self.target) return ''.join([prefix, middle, suffix]) def ComputeOutputParts(self, spec): """Return the 'output basename' of a gyp spec, split into filename + ext. Android libraries must be named the same thing as their module name, otherwise the linker can't find them, so product_name and so on must be ignored if we are building a library, and the "lib" prepending is not done for Android. """ assert self.type != 'loadable_module' # TODO: not supported? target = spec['target_name'] target_prefix = '' target_ext = '' if self.type == 'static_library': target = self.ComputeAndroidModule(spec) target_ext = '.a' elif self.type == 'shared_library': target = self.ComputeAndroidModule(spec) target_ext = '.so' elif self.type == 'none': target_ext = '.stamp' elif self.type != 'executable': print ("ERROR: What output file should be generated?", "type", self.type, "target", target) if self.type != 'static_library' and self.type != 'shared_library': target_prefix = spec.get('product_prefix', target_prefix) target = spec.get('product_name', target) product_ext = spec.get('product_extension') if product_ext: target_ext = '.' + product_ext target_stem = target_prefix + target return (target_stem, target_ext) def ComputeOutputBasename(self, spec): """Return the 'output basename' of a gyp spec. 
E.g., the loadable module 'foobar' in directory 'baz' will produce 'libfoobar.so' """ return ''.join(self.ComputeOutputParts(spec)) def ComputeOutput(self, spec): """Return the 'output' (full output path) of a gyp spec. E.g., the loadable module 'foobar' in directory 'baz' will produce '$(obj)/baz/libfoobar.so' """ if self.type == 'executable': # We install host executables into shared_intermediate_dir so they can be # run by gyp rules that refer to PRODUCT_DIR. path = '$(gyp_shared_intermediate_dir)' elif self.type == 'shared_library': if self.toolset == 'host': path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)' else: path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)' else: # Other targets just get built into their intermediate dir. if self.toolset == 'host': path = ('$(call intermediates-dir-for,%s,%s,true,,' '$(GYP_HOST_VAR_PREFIX))' % (self.android_class, self.android_module)) else: path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))' % (self.android_class, self.android_module)) assert spec.get('product_dir') is None # TODO: not supported? return os.path.join(path, self.ComputeOutputBasename(spec)) def NormalizeIncludePaths(self, include_paths): """ Normalize include_paths. Convert absolute paths to relative to the Android top directory. Args: include_paths: A list of unprocessed include paths. Returns: A list of normalized include paths. """ normalized = [] for path in include_paths: if path[0] == '/': path = gyp.common.RelativePath(path, self.android_top_dir) normalized.append(path) return normalized def ExtractIncludesFromCFlags(self, cflags): """Extract includes "-I..." out from cflags Args: cflags: A list of compiler flags, which may be mixed with "-I.." Returns: A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed. 
""" clean_cflags = [] include_paths = [] for flag in cflags: if flag.startswith('-I'): include_paths.append(flag[2:]) else: clean_cflags.append(flag) return (clean_cflags, include_paths) def ComputeAndroidLibraryModuleNames(self, libraries): """Compute the Android module names from libraries, ie spec.get('libraries') Args: libraries: the value of spec.get('libraries') Returns: A tuple (static_lib_modules, dynamic_lib_modules) """ static_lib_modules = [] dynamic_lib_modules = [] for libs in libraries: # Libs can have multiple words. for lib in libs.split(): # Filter the system libraries, which are added by default by the Android # build system. if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or lib.endswith('libgcc.a')): continue match = re.search(r'([^/]+)\.a$', lib) if match: static_lib_modules.append(match.group(1)) continue match = re.search(r'([^/]+)\.so$', lib) if match: dynamic_lib_modules.append(match.group(1)) continue # "-lstlport" -> libstlport if lib.startswith('-l'): if lib.endswith('_static'): static_lib_modules.append('lib' + lib[2:]) else: dynamic_lib_modules.append('lib' + lib[2:]) return (static_lib_modules, dynamic_lib_modules) def ComputeDeps(self, spec): """Compute the dependencies of a gyp spec. Returns a tuple (deps, link_deps), where each is a list of filenames that will need to be put in front of make for either building (deps) or linking (link_deps). """ deps = [] link_deps = [] if 'dependencies' in spec: deps.extend([target_outputs[dep] for dep in spec['dependencies'] if target_outputs[dep]]) for dep in spec['dependencies']: if dep in target_link_deps: link_deps.append(target_link_deps[dep]) deps.extend(link_deps) return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) def WriteTargetFlags(self, spec, configs, link_deps): """Write Makefile code to specify the link flags and library dependencies. spec, configs: input from gyp. 
link_deps: link dependency list; see ComputeDeps() """ if self.type != 'static_library': for configname, config in sorted(configs.iteritems()): ldflags = list(config.get('ldflags', [])) self.WriteLn('') self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname) self.WriteLn('\nLOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))') # Libraries (i.e. -lfoo) # These must be included even for static libraries as some of them provide # implicit include paths through the build system. libraries = gyp.common.uniquer(spec.get('libraries', [])) static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames( libraries) # Link dependencies (i.e. other gyp targets this target depends on) # These need not be included for static libraries as within the gyp build # we do not use the implicit include path mechanism. if self.type != 'static_library': static_link_deps = [x[1] for x in link_deps if x[0] == 'static'] shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared'] else: static_link_deps = [] shared_link_deps = [] # Only write the lists if they are non-empty. if static_libs or static_link_deps: self.WriteLn('') self.WriteList(static_libs + static_link_deps, 'LOCAL_STATIC_LIBRARIES') self.WriteLn('# Enable grouping to fix circular references') self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true') if dynamic_libs or shared_link_deps: self.WriteLn('') self.WriteList(dynamic_libs + shared_link_deps, 'LOCAL_SHARED_LIBRARIES') def WriteTarget(self, spec, configs, deps, link_deps, part_of_all, write_alias_target): """Write Makefile code to produce the final target of the gyp spec. spec, configs: input from gyp. 
deps, link_deps: dependency lists; see ComputeDeps() part_of_all: flag indicating this target is part of 'all' write_alias_target: flag indicating whether to create short aliases for this target """ self.WriteLn('### Rules for final target.') if self.type != 'none': self.WriteTargetFlags(spec, configs, link_deps) settings = spec.get('aosp_build_settings', {}) if settings: self.WriteLn('### Set directly by aosp_build_settings.') for k, v in settings.iteritems(): if isinstance(v, list): self.WriteList(v, k) else: self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v))) self.WriteLn('') # Add to the set of targets which represent the gyp 'all' target. We use the # name 'gyp_all_modules' as the Android build system doesn't allow the use # of the Make target 'all' and because 'all_modules' is the equivalent of # the Make target 'all' on Android. if part_of_all and write_alias_target: self.WriteLn('# Add target alias to "gyp_all_modules" target.') self.WriteLn('.PHONY: gyp_all_modules') self.WriteLn('gyp_all_modules: %s' % self.android_module) self.WriteLn('') # Add an alias from the gyp target name to the Android module name. This # simplifies manual builds of the target, and is required by the test # framework. if self.target != self.android_module and write_alias_target: self.WriteLn('# Alias gyp target name.') self.WriteLn('.PHONY: %s' % self.target) self.WriteLn('%s: %s' % (self.target, self.android_module)) self.WriteLn('') # Add the command to trigger build of the target type depending # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY # NOTE: This has to come last! 
modifier = '' if self.toolset == 'host': modifier = 'HOST_' if self.type == 'static_library': self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier) elif self.type == 'shared_library': self.WriteLn('LOCAL_PRELINK_MODULE := false') self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier) elif self.type == 'executable': # Executables are for build and test purposes only, so they're installed # to a directory that doesn't get included in the system image. self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)') self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier) else: self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp') self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true') if self.toolset == 'target': self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)') else: self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)') self.WriteLn() self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk') self.WriteLn() self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)') self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"') self.WriteLn('\t$(hide) mkdir -p $(dir $@)') self.WriteLn('\t$(hide) touch $@') self.WriteLn() self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=') def WriteList(self, value_list, variable=None, prefix='', quoter=make.QuoteIfNecessary, local_pathify=False): """Write a variable definition that is a list of values. E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out foo = blaha blahb but in a pretty-printed style. """ values = '' if value_list: value_list = [quoter(prefix + l) for l in value_list] if local_pathify: value_list = [self.LocalPathify(l) for l in value_list] values = ' \\\n\t' + ' \\\n\t'.join(value_list) self.fp.write('%s :=%s\n\n' % (variable, values)) def WriteLn(self, text=''): self.fp.write(text + '\n') def LocalPathify(self, path): """Convert a subdirectory-relative path into a normalized path which starts with the make variable $(LOCAL_PATH) (i.e. the top of the project tree). 
Absolute paths, or paths that contain variables, are just normalized.""" if '$(' in path or os.path.isabs(path): # path is not a file in the project tree in this case, but calling # normpath is still important for trimming trailing slashes. return os.path.normpath(path) local_path = os.path.join('$(LOCAL_PATH)', self.path, path) local_path = os.path.normpath(local_path) # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH) # - i.e. that the resulting path is still inside the project tree. The # path may legitimately have ended up containing just $(LOCAL_PATH), though, # so we don't look for a slash. assert local_path.startswith('$(LOCAL_PATH)'), ( 'Path %s attempts to escape from gyp path %s !)' % (path, self.path)) return local_path def ExpandInputRoot(self, template, expansion, dirname): if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template: return template path = template % { 'INPUT_ROOT': expansion, 'INPUT_DIRNAME': dirname, } return os.path.normpath(path) def PerformBuild(data, configurations, params): # The android backend only supports the default configuration. options = params['options'] makefile = os.path.abspath(os.path.join(options.toplevel_dir, 'GypAndroid.mk')) env = dict(os.environ) env['ONE_SHOT_MAKEFILE'] = makefile arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules'] print 'Building: %s' % arguments subprocess.check_call(arguments, env=env) def GenerateOutput(target_list, target_dicts, data, params): options = params['options'] generator_flags = params.get('generator_flags', {}) builddir_name = generator_flags.get('output_dir', 'out') limit_to_target_all = generator_flags.get('limit_to_target_all', False) write_alias_targets = generator_flags.get('write_alias_targets', True) android_top_dir = os.environ.get('ANDROID_BUILD_TOP') assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.' 
def CalculateMakefilePath(build_file, base_name): """Determine where to write a Makefile for a given gyp file.""" # Paths in gyp files are relative to the .gyp file, but we want # paths relative to the source root for the master makefile. Grab # the path of the .gyp file as the base to relativize against. # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp". base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.depth) # We write the file in the base_path directory. output_file = os.path.join(options.depth, base_path, base_name) assert not options.generator_output, ( 'The Android backend does not support options.generator_output.') base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.toplevel_dir) return base_path, output_file # TODO: search for the first non-'Default' target. This can go # away when we add verification that all targets have the # necessary configurations. default_configuration = None toolsets = set([target_dicts[target]['toolset'] for target in target_list]) for target in target_list: spec = target_dicts[target] if spec['default_configuration'] != 'Default': default_configuration = spec['default_configuration'] break if not default_configuration: default_configuration = 'Default' srcdir = '.' makefile_name = 'GypAndroid' + options.suffix + '.mk' makefile_path = os.path.join(options.toplevel_dir, makefile_name) assert not options.generator_output, ( 'The Android backend does not support options.generator_output.') gyp.common.EnsureDirExists(makefile_path) root_makefile = open(makefile_path, 'w') root_makefile.write(header) # We set LOCAL_PATH just once, here, to the top of the project tree. This # allows all the other paths we use to be relative to the Android.mk file, # as the Android build system expects. root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n') # Find the list of targets that derive from the gyp file(s) being built. 
needed_targets = set() for build_file in params['build_files']: for target in gyp.common.AllTargets(target_list, target_dicts, build_file): needed_targets.add(target) build_files = set() include_list = set() android_modules = {} for qualified_target in target_list: build_file, target, toolset = gyp.common.ParseQualifiedTarget( qualified_target) relative_build_file = gyp.common.RelativePath(build_file, options.toplevel_dir) build_files.add(relative_build_file) included_files = data[build_file]['included_files'] for included_file in included_files: # The included_files entries are relative to the dir of the build file # that included them, so we have to undo that and then make them relative # to the root dir. relative_include_file = gyp.common.RelativePath( gyp.common.UnrelativePath(included_file, build_file), options.toplevel_dir) abs_include_file = os.path.abspath(relative_include_file) # If the include file is from the ~/.gyp dir, we should use absolute path # so that relocating the src dir doesn't break the path. if (params['home_dot_gyp'] and abs_include_file.startswith(params['home_dot_gyp'])): build_files.add(abs_include_file) else: build_files.add(relative_include_file) base_path, output_file = CalculateMakefilePath(build_file, target + '.' + toolset + options.suffix + '.mk') spec = target_dicts[qualified_target] configs = spec['configurations'] part_of_all = (qualified_target in needed_targets and not int(spec.get('suppress_wildcard', False))) if limit_to_target_all and not part_of_all: continue relative_target = gyp.common.QualifiedTarget(relative_build_file, target, toolset) writer = AndroidMkWriter(android_top_dir) android_module = writer.Write(qualified_target, relative_target, base_path, output_file, spec, configs, part_of_all=part_of_all, write_alias_target=write_alias_targets) if android_module in android_modules: print ('ERROR: Android module names must be unique. 
The following ' 'targets both generate Android module name %s.\n %s\n %s' % (android_module, android_modules[android_module], qualified_target)) return android_modules[android_module] = qualified_target # Our root_makefile lives at the source root. Compute the relative path # from there to the output_file for including. mkfile_rel_path = gyp.common.RelativePath(output_file, os.path.dirname(makefile_path)) include_list.add(mkfile_rel_path) root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration) root_makefile.write('GYP_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_MULTILIB ?=\n') # Write out the sorted list of includes. root_makefile.write('\n') for include_file in sorted(include_list): root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n') root_makefile.write('\n') if write_alias_targets: root_makefile.write(ALL_MODULES_FOOTER) root_makefile.close()
isc
ntuecon/server
pyenv/Lib/site-packages/win32comext/bits/test/test_bits.py
4
4089
from win32com.server.util import wrap import pythoncom, sys, os, time, win32api, win32event, tempfile from win32com.bits import bits TIMEOUT = 200 # ms StopEvent = win32event.CreateEvent(None, 0, 0, None) job_name = 'bits-pywin32-test' states = dict([(val, (name[13:])) for name, val in vars(bits).iteritems() if name.startswith('BG_JOB_STATE_')]) bcm = pythoncom.CoCreateInstance(bits.CLSID_BackgroundCopyManager, None, pythoncom.CLSCTX_LOCAL_SERVER, bits.IID_IBackgroundCopyManager) class BackgroundJobCallback: _com_interfaces_ = [bits.IID_IBackgroundCopyCallback] _public_methods_ = ["JobTransferred", "JobError", "JobModification"] def JobTransferred(self, job): print 'Job Transferred', job job.Complete() win32event.SetEvent(StopEvent) # exit msg pump def JobError(self, job, error): print 'Job Error', job, error f = error.GetFile() print 'While downloading', f.GetRemoteName() print 'To', f.GetLocalName() print 'The following error happened:' self._print_error(error) if f.GetRemoteName().endswith('missing-favicon.ico'): print 'Changing to point to correct file' f2 = f.QueryInterface(bits.IID_IBackgroundCopyFile2) favicon = 'http://www.python.org/favicon.ico' print 'Changing RemoteName from', f2.GetRemoteName(), 'to', favicon f2.SetRemoteName(favicon) job.Resume() else: job.Cancel() def _print_error(self, err): ctx, hresult = err.GetError() try: hresult_msg = win32api.FormatMessage(hresult) except win32api.error: hresult_msg = "" print "Context=0x%x, hresult=0x%x (%s)" % (ctx, hresult, hresult_msg) print err.GetErrorDescription() def JobModification(self, job, reserved): state = job.GetState() print 'Job Modification', job.GetDisplayName(), states.get(state) # Need to catch TRANSIENT_ERROR here, as JobError doesn't get # called (apparently) when the error is transient. 
if state == bits.BG_JOB_STATE_TRANSIENT_ERROR: print "Error details:" err = job.GetError() self._print_error(err) job = bcm.CreateJob(job_name, bits.BG_JOB_TYPE_DOWNLOAD) job.SetNotifyInterface(wrap(BackgroundJobCallback())) job.SetNotifyFlags(bits.BG_NOTIFY_JOB_TRANSFERRED | bits.BG_NOTIFY_JOB_ERROR | bits.BG_NOTIFY_JOB_MODIFICATION) # The idea here is to intentionally make one of the files fail to be # downloaded. Then the JobError notification will be triggered, where # we do fix the failing file by calling SetRemoteName to a valid URL # and call Resume() on the job, making the job finish successfully. # # Note to self: A domain that cannot be resolved will cause # TRANSIENT_ERROR instead of ERROR, and the JobError notification will # not be triggered! This can bite you during testing depending on how # your DNS is configured. For example, if you use OpenDNS.org's DNS # servers, an invalid hostname will *always* be resolved (they # redirect you to a search page), so be careful when testing. job.AddFile('http://www.python.org/favicon.ico', os.path.join(tempfile.gettempdir(), 'bits-favicon.ico')) job.AddFile('http://www.python.org/missing-favicon.ico', os.path.join(tempfile.gettempdir(), 'bits-missing-favicon.ico')) for f in job.EnumFiles(): print 'Downloading', f.GetRemoteName() print 'To', f.GetLocalName() job.Resume() while True: rc = win32event.MsgWaitForMultipleObjects( (StopEvent,), 0, TIMEOUT, win32event.QS_ALLEVENTS) if rc == win32event.WAIT_OBJECT_0: break elif rc == win32event.WAIT_OBJECT_0+1: if pythoncom.PumpWaitingMessages(): break # wm_quit
bsd-3-clause
khkaminska/scikit-learn
sklearn/utils/tests/test_shortest_path.py
303
2841
from collections import defaultdict

import numpy as np
from numpy.testing import assert_array_almost_equal

from sklearn.utils.graph import (graph_shortest_path,
                                 single_source_shortest_path_length)


def floyd_warshall_slow(graph, directed=False):
    """Reference O(N^3) Floyd-Warshall used to validate the fast paths.

    Zero entries encode "no edge"; they are treated as infinite distance
    during relaxation and converted back to 0 in the result.
    """
    N = graph.shape[0]

    # set nonzero entries to infinity
    graph[np.where(graph == 0)] = np.inf

    # set diagonal to zero
    graph.flat[::N + 1] = 0

    if not directed:
        graph = np.minimum(graph, graph.T)

    for k in range(N):
        for i in range(N):
            for j in range(N):
                graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])

    # Unreachable pairs go back to the 0 sentinel.
    graph[np.where(np.isinf(graph))] = 0

    return graph


def generate_graph(N=20):
    """Build a sparse, symmetric random distance matrix of shape (N, N)."""
    rng = np.random.RandomState(0)
    dist_matrix = rng.random_sample((N, N))

    # make symmetric: distances are not direction-dependent
    dist_matrix = dist_matrix + dist_matrix.T

    # make graph sparse
    i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
    dist_matrix[i] = 0

    # set diagonal to zero
    dist_matrix.flat[::N + 1] = 0

    return dist_matrix


def test_floyd_warshall():
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(graph_FW, graph_py)


def test_dijkstra():
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_D = graph_shortest_path(dist_matrix, directed, 'D')
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(graph_D, graph_py)


def test_shortest_path():
    dist_matrix = generate_graph(20)
    # We compare path length and not costs (-> set distances to 0 or 1)
    dist_matrix[dist_matrix != 0] = 1

    for directed in (True, False):
        if not directed:
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)

        for i in range(dist_matrix.shape[0]):
            # Non-reachable nodes have distance 0 in graph_py
            dist_dict = defaultdict(int)
            dist_dict.update(
                single_source_shortest_path_length(dist_matrix, i))

            for j in range(graph_py[i].shape[0]):
                assert_array_almost_equal(dist_dict[j], graph_py[i, j])


def test_dijkstra_bug_fix():
    X = np.array([[0., 0., 4.],
                  [1., 0., 2.],
                  [0., 5., 0.]])
    dist_FW = graph_shortest_path(X, directed=False, method='FW')
    dist_D = graph_shortest_path(X, directed=False, method='D')
    assert_array_almost_equal(dist_D, dist_FW)
bsd-3-clause
ibc/MediaSoup
worker/deps/gyp/test/linux/gyptest-implicit-rpath.py
12
1188
#!/usr/bin/env python

# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that the implicit rpath is added only when needed.
"""

import TestGyp

import re
import subprocess
import sys

if sys.platform.startswith('linux'):
  test = TestGyp.TestGyp(formats=['ninja', 'make'])

  CHDIR = 'implicit-rpath'
  test.run_gyp('test.gyp', chdir=CHDIR)
  test.build('test.gyp', test.ALL, chdir=CHDIR)

  def GetRpaths(p):
    """Return the 'Library rpath' entries readelf reports for binary p."""
    p = test.built_file_path(p, chdir=CHDIR)
    r = re.compile(r'Library rpath: \[([^\]]+)\]')
    proc = subprocess.Popen(['readelf', '-d', p], stdout=subprocess.PIPE)
    o = proc.communicate()[0].decode('utf-8')
    assert not proc.returncode
    return r.findall(o)

  # Each generator places shared libraries in a different output
  # subdirectory, so the expected $ORIGIN-relative rpath differs per format.
  if test.format == 'ninja':
    expect = '$ORIGIN/lib/'
  elif test.format == 'make':
    expect = '$ORIGIN/lib.target/'
  else:
    test.fail_test()

  if GetRpaths('shared_executable') != [expect]:
    test.fail_test()

  if GetRpaths('shared_executable_no_so_suffix') != [expect]:
    test.fail_test()

  # An executable with no shared-library dependencies must not get an
  # implicit rpath at all.
  if GetRpaths('static_executable'):
    test.fail_test()

  test.pass_test()
isc
cherry-hyx/hjb-test
pluginsAgent/RichAPM/packages/daemon.py
1
4260
# -*- coding: utf-8 -*-
"""
Daemonize the current process on Unix (double-fork, pidfile management).
"""
import atexit
import os
import re
import subprocess
import sys
import warnings
from os import path

import grp
import pwd

# Ignore the DeprecationWarning caused by os.popen3 in Python 2.6
warnings.filterwarnings("ignore", category=DeprecationWarning)


def find_process_by_name(name):
    """Return the pids of processes whose ``ps`` line matches *name*.

    The current process is always excluded from the result. Returns an
    empty list when nothing matches (grep exits non-zero).
    """
    # Check the os for a process that is not this one that looks the same
    pattern = '[%s]%s' % (name[0], name[1:])  # re remove grep itself
    try:
        output = subprocess.check_output('ps a | grep "%s"' % pattern,
                                         shell=True)
    except AttributeError:  # Python 2.6 has no check_output
        stdin, stdout, stderr = os.popen3('ps a | grep "%s"' % pattern)
        output = stdout.read()
    except subprocess.CalledProcessError:
        return []
    # Bug fix: the original pattern lacked re.MULTILINE, so '^' only matched
    # the start of the whole output and at most one pid was ever found.
    pids = [int(pid) for pid in
            re.findall(r'^\s*([0-9]+)\s', output.decode('latin-1'),
                       re.MULTILINE)]
    if os.getpid() in pids:
        pids.remove(os.getpid())
    return pids


def is_running(pidfile):
    """Return a pid if the daemon appears to be running, else None.

    Consults *pidfile* first; a stale pidfile is removed and a ``ps`` scan
    for an identical command line is used as fallback.
    """
    if os.path.exists(pidfile):
        pid = open(pidfile).read().strip()
        try:
            os.kill(int(pid), 0)  # signal 0: existence check only
            return pid
        except OSError:
            os.unlink(pidfile)  # Found pidfile, no process
    pids = find_process_by_name(' '.join(sys.argv))
    if pids:
        return pids[0]
    return None


def get_pidfile_path(pidfile):
    """Return the normalized path for the pidfile, raising an exception
    if it can not written to.

    :return: str
    :raises: ValueError
    :raises: OSError

    """
    if pidfile:
        pidfile = path.abspath(pidfile)
    else:
        app = sys.argv[0].split('/')[-1]
        pidfile = path.abspath("%s.pid" % app)
    if not os.access(path.dirname(pidfile), os.W_OK):
        raise ValueError('Cannot write to specified pid file path'
                         ' %s' % pidfile)
    return pidfile


def wrapper_remove_pidfile(pidfile_path):
    """Return a closure that deletes *pidfile_path*, ignoring a missing file.

    Suitable for atexit.register.
    """
    def _remove_pidfile():
        try:
            os.unlink(pidfile_path)
        except OSError:
            pass
    return _remove_pidfile


def get_default_pid_file_path():
    """Default pidfile: same directory and basename as the running script."""
    selfpath = os.path.realpath(sys.argv[0])
    dirname = os.path.dirname(selfpath)
    filename = os.path.splitext(os.path.basename(selfpath))[0]
    return "%s%s%s.pid" % (dirname, os.sep, filename)


def daemonize(user=None, group=None, pid_file_path=None, nochdir=True):
    """Fork into a background process and setup the process, copied in part
    from http://www.jejik.com/files/examples/daemon3x.py

    :param user: optional username to setuid to after the first fork
    :param group: optional group name to setgid to
    :param pid_file_path: pidfile location (defaults next to the script)
    :param nochdir: when False, chdir to '/' before detaching
    """
    pidfile_path = get_pidfile_path(pid_file_path or
                                    get_default_pid_file_path())
    uid = pwd.getpwnam(user).pw_uid if user else os.getuid()
    gid = grp.getgrnam(group).gr_gid if group else os.getgid()

    # Pre-create the pidfile with the right owner/mode while we still have
    # the privileges to chown it; the pid itself is written after the forks.
    if os.getuid() != uid:
        fd = open(pidfile_path, 'w')
        os.fchmod(fd.fileno(), 0o644)
        os.fchown(fd.fileno(), uid, gid)
        fd.close()

    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # parent exits; child continues
    except OSError as error:
        # Bug fix: the original passed the format string and the error as two
        # separate OSError arguments, so the message was never interpolated.
        raise OSError('Could not fork off parent: %s' % error)

    # Set the user id
    if uid != os.getuid():
        os.setuid(uid)

    # Set the group id
    if gid != os.getgid():
        try:
            os.setgid(gid)
        except OSError as error:
            sys.stderr.write('Could not set group: %s' % error)

    # Decouple from parent environment
    if not nochdir:
        os.chdir('/')
    os.setsid()
    os.umask(0o022)

    # Fork again so the daemon can never reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as error:
        # Same interpolation bug fix as the first fork above.
        raise OSError('Could not fork child: %s' % error)

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull, 'r')
    so = open(os.devnull, 'a+')
    se = open(os.devnull, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # Automatically call _remove_pidfile when the app exits
    atexit.register(wrapper_remove_pidfile(pidfile_path))

    with open(pidfile_path, "w") as handle:
        handle.write(str(os.getpid()))
artistic-2.0
tvibliani/odoo
addons/sale_mrp/sale_mrp.py
225
4891
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv


class mrp_production(osv.osv):
    _inherit = 'mrp.production'

    def _ref_calc(self, cr, uid, ids, field_names=None, arg=False,
                  context=None):
        """ Finds reference of sales order for production order.
        @param field_names: Names of fields.
        @param arg: User defined arguments
        @return: Dictionary of values.
        """
        res = {}
        if not field_names:
            field_names = []
        for id in ids:
            res[id] = {}.fromkeys(field_names, False)
        for f in field_names:
            # Translate the function-field name to the sale.order field name.
            field_name = False
            if f == 'sale_name':
                field_name = 'name'
            if f == 'sale_ref':
                field_name = 'client_order_ref'
            for key, value in self._get_sale_ref(cr, uid, ids,
                                                 field_name).items():
                res[key][f] = value
        return res

    def _get_sale_ref(self, cr, uid, ids, field_name=False):
        """Resolve the requested sale.order field for each production order
        by walking the destination-move chain to the originating procurement."""
        move_obj = self.pool.get('stock.move')

        def get_parent_move(move_id):
            # Follow move_dest_id links up to the final destination move.
            move = move_obj.browse(cr, uid, move_id)
            if move.move_dest_id:
                return get_parent_move(move.move_dest_id.id)
            return move_id

        res = {}
        for production in self.browse(cr, uid, ids):
            res[production.id] = False
            if production.move_prod_id:
                parent_move_line = get_parent_move(production.move_prod_id.id)
                if parent_move_line:
                    move = move_obj.browse(cr, uid, parent_move_line)
                    if field_name == 'name':
                        res[production.id] = (
                            move.procurement_id and
                            move.procurement_id.sale_line_id and
                            move.procurement_id.sale_line_id.order_id.name or
                            False)
                    if field_name == 'client_order_ref':
                        res[production.id] = (
                            move.procurement_id and
                            move.procurement_id.sale_line_id and
                            move.procurement_id.sale_line_id.order_id.client_order_ref or
                            False)
        return res

    _columns = {
        'sale_name': fields.function(
            _ref_calc, multi='sale_name', type='char', string='Sale Name',
            help='Indicate the name of sales order.'),
        'sale_ref': fields.function(
            _ref_calc, multi='sale_name', type='char', string='Sale Reference',
            help='Indicate the Customer Reference from sales order.'),
    }


class sale_order(osv.Model):
    _inherit = 'sale.order'

    def _prepare_order_line_procurement(self, cr, uid, order, line,
                                        group_id=False, context=None):
        # Carry the sale line's MRP properties onto the generated procurement.
        result = super(sale_order, self)._prepare_order_line_procurement(
            cr, uid, order, line, group_id=group_id, context=context)
        result['property_ids'] = [(6, 0, [x.id for x in line.property_ids])]
        return result


class sale_order_line(osv.osv):
    _inherit = 'sale.order.line'
    _columns = {
        'property_ids': fields.many2many(
            'mrp.property', 'sale_order_line_property_rel',
            'order_id', 'property_id', 'Properties', readonly=True,
            states={'draft': [('readonly', False)]}),
    }


class stock_move(osv.osv):
    _inherit = 'stock.move'

    def _prepare_procurement_from_move(self, cr, uid, move, context=None):
        # Propagate MRP properties from the source procurement, when present.
        res = super(stock_move, self)._prepare_procurement_from_move(
            cr, uid, move, context=context)
        if res and move.procurement_id and move.procurement_id.property_ids:
            res['property_ids'] = [
                (6, 0, [x.id for x in move.procurement_id.property_ids])]
        return res

    def _action_explode(self, cr, uid, move, context=None):
        """ Explodes pickings.
        @param move: Stock moves
        @return: True
        """
        if context is None:
            context = {}
        property_ids = map(
            int, move.procurement_id.sale_line_id.property_ids or [])
        return super(stock_move, self)._action_explode(
            cr, uid, move, context=dict(context, property_ids=property_ids))
agpl-3.0
INNUENDOCON/INNUca
src/SPAdes-3.9.0-Linux/share/spades/pyyaml2/resolver.py
474
8972
__all__ = ['BaseResolver', 'Resolver'] from error import * from nodes import * import re class ResolverError(YAMLError): pass class BaseResolver(object): DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' yaml_implicit_resolvers = {} yaml_path_resolvers = {} def __init__(self): self.resolver_exact_paths = [] self.resolver_prefix_paths = [] def add_implicit_resolver(cls, tag, regexp, first): if not 'yaml_implicit_resolvers' in cls.__dict__: cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() if first is None: first = [None] for ch in first: cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) add_implicit_resolver = classmethod(add_implicit_resolver) def add_path_resolver(cls, tag, path, kind=None): # Note: `add_path_resolver` is experimental. The API could be changed. # `new_path` is a pattern that is matched against the path from the # root to the node that is being considered. `node_path` elements are # tuples `(node_check, index_check)`. `node_check` is a node class: # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` # matches any kind of a node. `index_check` could be `None`, a boolean # value, a string value, or a number. `None` and `False` match against # any _value_ of sequence and mapping nodes. `True` matches against # any _key_ of a mapping node. A string `index_check` matches against # a mapping value that corresponds to a scalar key which content is # equal to the `index_check` value. An integer `index_check` matches # against a sequence value with the index equal to `index_check`. 
if not 'yaml_path_resolvers' in cls.__dict__: cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() new_path = [] for element in path: if isinstance(element, (list, tuple)): if len(element) == 2: node_check, index_check = element elif len(element) == 1: node_check = element[0] index_check = True else: raise ResolverError("Invalid path element: %s" % element) else: node_check = None index_check = element if node_check is str: node_check = ScalarNode elif node_check is list: node_check = SequenceNode elif node_check is dict: node_check = MappingNode elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ and not isinstance(node_check, basestring) \ and node_check is not None: raise ResolverError("Invalid node checker: %s" % node_check) if not isinstance(index_check, (basestring, int)) \ and index_check is not None: raise ResolverError("Invalid index checker: %s" % index_check) new_path.append((node_check, index_check)) if kind is str: kind = ScalarNode elif kind is list: kind = SequenceNode elif kind is dict: kind = MappingNode elif kind not in [ScalarNode, SequenceNode, MappingNode] \ and kind is not None: raise ResolverError("Invalid node kind: %s" % kind) cls.yaml_path_resolvers[tuple(new_path), kind] = tag add_path_resolver = classmethod(add_path_resolver) def descend_resolver(self, current_node, current_index): if not self.yaml_path_resolvers: return exact_paths = {} prefix_paths = [] if current_node: depth = len(self.resolver_prefix_paths) for path, kind in self.resolver_prefix_paths[-1]: if self.check_resolver_prefix(depth, path, kind, current_node, current_index): if len(path) > depth: prefix_paths.append((path, kind)) else: exact_paths[kind] = self.yaml_path_resolvers[path, kind] else: for path, kind in self.yaml_path_resolvers: if not path: exact_paths[kind] = self.yaml_path_resolvers[path, kind] else: prefix_paths.append((path, kind)) self.resolver_exact_paths.append(exact_paths) self.resolver_prefix_paths.append(prefix_paths) def 
ascend_resolver(self): if not self.yaml_path_resolvers: return self.resolver_exact_paths.pop() self.resolver_prefix_paths.pop() def check_resolver_prefix(self, depth, path, kind, current_node, current_index): node_check, index_check = path[depth-1] if isinstance(node_check, basestring): if current_node.tag != node_check: return elif node_check is not None: if not isinstance(current_node, node_check): return if index_check is True and current_index is not None: return if (index_check is False or index_check is None) \ and current_index is None: return if isinstance(index_check, basestring): if not (isinstance(current_index, ScalarNode) and index_check == current_index.value): return elif isinstance(index_check, int) and not isinstance(index_check, bool): if index_check != current_index: return return True def resolve(self, kind, value, implicit): if kind is ScalarNode and implicit[0]: if value == u'': resolvers = self.yaml_implicit_resolvers.get(u'', []) else: resolvers = self.yaml_implicit_resolvers.get(value[0], []) resolvers += self.yaml_implicit_resolvers.get(None, []) for tag, regexp in resolvers: if regexp.match(value): return tag implicit = implicit[1] if self.yaml_path_resolvers: exact_paths = self.resolver_exact_paths[-1] if kind in exact_paths: return exact_paths[kind] if None in exact_paths: return exact_paths[None] if kind is ScalarNode: return self.DEFAULT_SCALAR_TAG elif kind is SequenceNode: return self.DEFAULT_SEQUENCE_TAG elif kind is MappingNode: return self.DEFAULT_MAPPING_TAG class Resolver(BaseResolver): pass Resolver.add_implicit_resolver( u'tag:yaml.org,2002:bool', re.compile(ur'''^(?:yes|Yes|YES|no|No|NO |true|True|TRUE|false|False|FALSE |on|On|ON|off|Off|OFF)$''', re.X), list(u'yYnNtTfFoO')) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:float', re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? |\.[0-9_]+(?:[eE][-+][0-9]+)? 
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* |[-+]?\.(?:inf|Inf|INF) |\.(?:nan|NaN|NAN))$''', re.X), list(u'-+0123456789.')) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:int', re.compile(ur'''^(?:[-+]?0b[0-1_]+ |[-+]?0[0-7_]+ |[-+]?(?:0|[1-9][0-9_]*) |[-+]?0x[0-9a-fA-F_]+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), list(u'-+0123456789')) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:merge', re.compile(ur'^(?:<<)$'), [u'<']) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:null', re.compile(ur'''^(?: ~ |null|Null|NULL | )$''', re.X), [u'~', u'n', u'N', u'']) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:timestamp', re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? (?:[Tt]|[ \t]+)[0-9][0-9]? :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), list(u'0123456789')) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:value', re.compile(ur'^(?:=)$'), [u'=']) # The following resolver is only for documentation purposes. It cannot work # because plain scalars cannot start with '!', '&', or '*'. Resolver.add_implicit_resolver( u'tag:yaml.org,2002:yaml', re.compile(ur'^(?:!|&|\*)$'), list(u'!&*'))
gpl-3.0
sashgorokhov/python-telegram-handler
telegram_handler/handlers.py
1
3436
import logging
from io import BytesIO

import requests

from telegram_handler.formatters import HtmlFormatter

logger = logging.getLogger(__name__)
logger.setLevel(logging.NOTSET)
logger.propagate = False

__all__ = ['TelegramHandler']

MAX_MESSAGE_LEN = 4096


class TelegramHandler(logging.Handler):
    """Logging handler that ships records to a Telegram chat via the Bot API.

    Records longer than MAX_MESSAGE_LEN are sent as a document attachment
    (truncated caption + full text as a file) instead of a plain message.
    """
    API_ENDPOINT = 'https://api.telegram.org'
    # Last requests.Response seen by request(); kept for debugging.
    last_response = None

    def __init__(self, token, chat_id=None, level=logging.NOTSET, timeout=2,
                 disable_notification=False, disable_web_page_preview=False,
                 proxies=None):
        self.token = token
        self.disable_web_page_preview = disable_web_page_preview
        self.disable_notification = disable_notification
        self.timeout = timeout
        self.proxies = proxies
        # Without an explicit chat_id, probe getUpdates for the latest chat.
        self.chat_id = chat_id or self.get_chat_id()
        if not self.chat_id:
            level = logging.NOTSET
            logger.error('Did not get chat id. Setting handler logging level to NOTSET.')
        logger.info('Chat id: %s', self.chat_id)

        super(TelegramHandler, self).__init__(level=level)

        self.setFormatter(HtmlFormatter())

    @classmethod
    def format_url(cls, token, method):
        """Build the Bot API URL for *method* using *token*."""
        return '%s/bot%s/%s' % (cls.API_ENDPOINT, token, method)

    def get_chat_id(self):
        """Return the chat id of the most recent update, or None."""
        response = self.request('getUpdates')
        if not response or not response.get('ok', False):
            logger.error('Telegram response is not ok: %s', str(response))
            return
        try:
            return response['result'][-1]['message']['chat']['id']
        # Bug fix: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt raised while parsing the response.
        except Exception:
            logger.exception('Something went terribly wrong while obtaining chat id')
            logger.debug(response)

    def request(self, method, **kwargs):
        """POST *method* to the Bot API.

        Returns the parsed JSON dict on success; on failure logs the error
        and returns the raw response (or None if the POST itself failed).
        """
        url = self.format_url(self.token, method)

        kwargs.setdefault('timeout', self.timeout)
        kwargs.setdefault('proxies', self.proxies)
        response = None
        try:
            response = requests.post(url, **kwargs)
            self.last_response = response
            response.raise_for_status()
            return response.json()
        # Bug fix: was a bare "except:" — narrow to Exception so Ctrl-C and
        # SystemExit are not silently converted into a logged warning.
        except Exception:
            logger.exception('Error while making POST to %s', url)
            logger.debug(str(kwargs))
            if response is not None:
                logger.debug(response.content)

        return response

    def send_message(self, text, **kwargs):
        data = {'text': text}
        data.update(kwargs)
        return self.request('sendMessage', json=data)

    def send_document(self, text, document, **kwargs):
        data = {'caption': text}
        data.update(kwargs)
        return self.request('sendDocument', data=data,
                            files={'document': ('traceback.txt', document, 'text/plain')})

    def emit(self, record):
        text = self.format(record)
        data = {
            'chat_id': self.chat_id,
            'disable_web_page_preview': self.disable_web_page_preview,
            'disable_notification': self.disable_notification,
        }

        if getattr(self.formatter, 'parse_mode', None):
            data['parse_mode'] = self.formatter.parse_mode

        if len(text) < MAX_MESSAGE_LEN:
            response = self.send_message(text, **data)
        else:
            response = self.send_document(text[:1000],
                                          document=BytesIO(text.encode()),
                                          **data)

        # NOTE(review): on HTTP failure request() returns a requests.Response;
        # presumably a failed Response is falsy so .get() only runs on parsed
        # JSON dicts — confirm against requests' Response.__bool__ semantics.
        if response and not response.get('ok', False):
            logger.warning('Telegram responded with ok=false status! {}'.format(response))
mit
benjaminrigaud/django
django/db/models/sql/expressions.py
22
4490
import copy from django.core.exceptions import FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.fields import FieldDoesNotExist class SQLEvaluator(object): def __init__(self, expression, query, allow_joins=True, reuse=None): self.expression = expression self.opts = query.get_meta() self.reuse = reuse self.cols = [] self.expression.prepare(self, query, allow_joins) def relabeled_clone(self, change_map): clone = copy.copy(self) clone.cols = [] for node, col in self.cols: if hasattr(col, 'relabeled_clone'): clone.cols.append((node, col.relabeled_clone(change_map))) else: clone.cols.append((node, (change_map.get(col[0], col[0]), col[1]))) return clone def get_group_by_cols(self): cols = [] for node, col in self.cols: if hasattr(node, 'get_group_by_cols'): cols.extend(node.get_group_by_cols()) elif isinstance(col, tuple): cols.append(col) return cols def prepare(self): return self def as_sql(self, qn, connection): return self.expression.evaluate(self, qn, connection) ##################################################### # Visitor methods for initial expression preparation # ##################################################### def prepare_node(self, node, query, allow_joins): for child in node.children: if hasattr(child, 'prepare'): child.prepare(self, query, allow_joins) def prepare_leaf(self, node, query, allow_joins): if not allow_joins and LOOKUP_SEP in node.name: raise FieldError("Joined field references are not permitted in this query") field_list = node.name.split(LOOKUP_SEP) if node.name in query.aggregates: self.cols.append((node, query.aggregate_select[node.name])) else: try: _, sources, _, join_list, path = query.setup_joins( field_list, query.get_meta(), query.get_initial_alias(), can_reuse=self.reuse) self._used_joins = join_list targets, _, join_list = query.trim_joins(sources, join_list, path) if self.reuse is not None: self.reuse.update(join_list) for t in targets: self.cols.append((node, (join_list[-1], t.column))) 
except FieldDoesNotExist: raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (self.name, [f.name for f in self.opts.fields])) ################################################## # Visitor methods for final expression evaluation # ################################################## def evaluate_node(self, node, qn, connection): expressions = [] expression_params = [] for child in node.children: if hasattr(child, 'evaluate'): sql, params = child.evaluate(self, qn, connection) else: sql, params = '%s', (child,) if len(getattr(child, 'children', [])) > 1: format = '(%s)' else: format = '%s' if sql: expressions.append(format % sql) expression_params.extend(params) return connection.ops.combine_expression(node.connector, expressions), expression_params def evaluate_leaf(self, node, qn, connection): col = None for n, c in self.cols: if n is node: col = c break if col is None: raise ValueError("Given node not found") if hasattr(col, 'as_sql'): return col.as_sql(qn, connection) else: return '%s.%s' % (qn(col[0]), qn(col[1])), [] def evaluate_date_modifier_node(self, node, qn, connection): timedelta = node.children.pop() sql, params = self.evaluate_node(node, qn, connection) node.children.append(timedelta) if (timedelta.days == timedelta.seconds == timedelta.microseconds == 0): return sql, params return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
bsd-3-clause
awkspace/ansible
lib/ansible/modules/network/aci/aci_tenant_span_dst_group.py
12
6472
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: aci_tenant_span_dst_group short_description: Manage SPAN destination groups (span:DestGrp) description: - Manage SPAN destination groups on Cisco ACI fabrics. notes: - The C(tenant) used must exist before using this module in your playbook. The M(aci_tenant) module can be used for this. seealso: - module: aci_tenant - name: APIC Management Information Model reference description: More information about the internal APIC class B(span:DestGrp). link: https://developer.cisco.com/docs/apic-mim-ref/ author: - Dag Wieers (@dagwieers) version_added: '2.4' options: dst_group: description: - The name of the SPAN destination group. type: str required: yes aliases: [ name ] description: description: - The description of the SPAN destination group. type: str aliases: [ descr ] tenant: description: - The name of the tenant. type: str required: yes aliases: [ tenant_name ] state: description: - Use C(present) or C(absent) for adding or removing. - Use C(query) for listing an object or multiple objects. 
type: str choices: [ absent, present, query ] default: present extends_documentation_fragment: aci ''' # FIXME: Add more, better examples EXAMPLES = r''' - aci_tenant_span_dst_group: host: apic username: admin password: SomeSecretPassword dst_group: '{{ dst_group }}' description: '{{ descr }}' tenant: '{{ tenant }}' delegate_to: localhost ''' RETURN = r''' current: description: The existing configuration from the APIC after the module has finished returned: success type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production environment", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] error: description: The error information as returned from the APIC returned: failure type: dict sample: { "code": "122", "text": "unknown managed object class foo" } raw: description: The raw output returned by the APIC REST API (xml or json) returned: parse error type: str sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>' sent: description: The actual/minimal configuration pushed to the APIC returned: info type: list sample: { "fvTenant": { "attributes": { "descr": "Production environment" } } } previous: description: The original configuration from the APIC before the module has started returned: info type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] proposed: description: The assembled configuration from the user-provided parameters returned: info type: dict sample: { "fvTenant": { "attributes": { "descr": "Production environment", "name": "production" } } } filter_string: description: The filter string used for the request returned: failure or debug type: str sample: ?rsp-prop-include=config-only method: description: The HTTP method used for the request to the APIC returned: failure or debug 
type: str sample: POST response: description: The HTTP response from the APIC returned: failure or debug type: str sample: OK (30 bytes) status: description: The HTTP status from the APIC returned: failure or debug type: int sample: 200 url: description: The HTTP url used for the request to the APIC returned: failure or debug type: str sample: https://10.11.12.13/api/mo/uni/tn-production.json ''' from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec from ansible.module_utils.basic import AnsibleModule def main(): argument_spec = aci_argument_spec() argument_spec.update( dst_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects description=dict(type='str', aliases=['descr']), state=dict(type='str', default='present', choices=['absent', 'present', 'query']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['dst_group', 'tenant']], ['state', 'present', ['dst_group', 'tenant']], ], ) dst_group = module.params['dst_group'] description = module.params['description'] state = module.params['state'] tenant = module.params['tenant'] aci = ACIModule(module) aci.construct_url( root_class=dict( aci_class='fvTenant', aci_rn='tn-{0}'.format(tenant), module_object=tenant, target_filter={'name': tenant}, ), subclass_1=dict( aci_class='spanDestGrp', aci_rn='destgrp-{0}'.format(dst_group), module_object=dst_group, target_filter={'name': dst_group}, ), ) aci.get_existing() if state == 'present': aci.payload( aci_class='spanDestGrp', class_config=dict( name=dst_group, descr=description, ), ) aci.get_diff(aci_class='spanDestGrp') aci.post_config() elif state == 'absent': aci.delete_config() aci.exit_json() if __name__ == "__main__": main()
gpl-3.0
1013553207/django
django/contrib/gis/geos/prototypes/threadsafe.py
529
2859
import threading from django.contrib.gis.geos.libgeos import ( CONTEXT_PTR, error_h, lgeos, notice_h, ) class GEOSContextHandle(object): """ Python object representing a GEOS context handle. """ def __init__(self): # Initializing the context handler for this thread with # the notice and error handler. self.ptr = lgeos.initGEOS_r(notice_h, error_h) def __del__(self): if self.ptr and lgeos: lgeos.finishGEOS_r(self.ptr) # Defining a thread-local object and creating an instance # to hold a reference to GEOSContextHandle for this thread. class GEOSContext(threading.local): handle = None thread_context = GEOSContext() class GEOSFunc(object): """ Class that serves as a wrapper for GEOS C Functions, and will use thread-safe function variants when available. """ def __init__(self, func_name): try: # GEOS thread-safe function signatures end with '_r', and # take an additional context handle parameter. self.cfunc = getattr(lgeos, func_name + '_r') self.threaded = True # Create a reference here to thread_context so it's not # garbage-collected before an attempt to call this object. self.thread_context = thread_context except AttributeError: # Otherwise, use usual function. self.cfunc = getattr(lgeos, func_name) self.threaded = False def __call__(self, *args): if self.threaded: # If a context handle does not exist for this thread, initialize one. if not self.thread_context.handle: self.thread_context.handle = GEOSContextHandle() # Call the threaded GEOS routine with pointer of the context handle # as the first argument. 
return self.cfunc(self.thread_context.handle.ptr, *args) else: return self.cfunc(*args) def __str__(self): return self.cfunc.__name__ # argtypes property def _get_argtypes(self): return self.cfunc.argtypes def _set_argtypes(self, argtypes): if self.threaded: new_argtypes = [CONTEXT_PTR] new_argtypes.extend(argtypes) self.cfunc.argtypes = new_argtypes else: self.cfunc.argtypes = argtypes argtypes = property(_get_argtypes, _set_argtypes) # restype property def _get_restype(self): return self.cfunc.restype def _set_restype(self, restype): self.cfunc.restype = restype restype = property(_get_restype, _set_restype) # errcheck property def _get_errcheck(self): return self.cfunc.errcheck def _set_errcheck(self, errcheck): self.cfunc.errcheck = errcheck errcheck = property(_get_errcheck, _set_errcheck)
bsd-3-clause
titom1986/CouchPotatoServer
libs/bs4/diagnose.py
431
6315
"""Diagnostic functions, mainly for use when doing tech support.""" import cProfile from StringIO import StringIO from HTMLParser import HTMLParser import bs4 from bs4 import BeautifulSoup, __version__ from bs4.builder import builder_registry import os import pstats import random import tempfile import time import traceback import sys import cProfile def diagnose(data): """Diagnostic suite for isolating common problems.""" print "Diagnostic running on Beautiful Soup %s" % __version__ print "Python version %s" % sys.version basic_parsers = ["html.parser", "html5lib", "lxml"] for name in basic_parsers: for builder in builder_registry.builders: if name in builder.features: break else: basic_parsers.remove(name) print ( "I noticed that %s is not installed. Installing it may help." % name) if 'lxml' in basic_parsers: basic_parsers.append(["lxml", "xml"]) from lxml import etree print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) if 'html5lib' in basic_parsers: import html5lib print "Found html5lib version %s" % html5lib.__version__ if hasattr(data, 'read'): data = data.read() elif os.path.exists(data): print '"%s" looks like a filename. Reading data from the file.' % data data = open(data).read() elif data.startswith("http:") or data.startswith("https:"): print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." return print for parser in basic_parsers: print "Trying to parse your markup with %s" % parser success = False try: soup = BeautifulSoup(data, parser) success = True except Exception, e: print "%s could not parse the markup." % parser traceback.print_exc() if success: print "Here's what %s did with the markup:" % parser print soup.prettify() print "-" * 80 def lxml_trace(data, html=True, **kwargs): """Print out the lxml events that occur during parsing. 
This lets you see how lxml parses a document when no Beautiful Soup code is running. """ from lxml import etree for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): print("%s, %4s, %s" % (event, element.tag, element.text)) class AnnouncingParser(HTMLParser): """Announces HTMLParser parse events, without doing anything else.""" def _p(self, s): print(s) def handle_starttag(self, name, attrs): self._p("%s START" % name) def handle_endtag(self, name): self._p("%s END" % name) def handle_data(self, data): self._p("%s DATA" % data) def handle_charref(self, name): self._p("%s CHARREF" % name) def handle_entityref(self, name): self._p("%s ENTITYREF" % name) def handle_comment(self, data): self._p("%s COMMENT" % data) def handle_decl(self, data): self._p("%s DECL" % data) def unknown_decl(self, data): self._p("%s UNKNOWN-DECL" % data) def handle_pi(self, data): self._p("%s PI" % data) def htmlparser_trace(data): """Print out the HTMLParser events that occur during parsing. This lets you see how HTMLParser parses a document when no Beautiful Soup code is running. """ parser = AnnouncingParser() parser.feed(data) _vowels = "aeiou" _consonants = "bcdfghjklmnpqrstvwxyz" def rword(length=5): "Generate a random word-like string." s = '' for i in range(length): if i % 2 == 0: t = _consonants else: t = _vowels s += random.choice(t) return s def rsentence(length=4): "Generate a random sentence-like string." return " ".join(rword(random.randint(4,9)) for i in range(length)) def rdoc(num_elements=1000): """Randomly generate an invalid HTML document.""" tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] elements = [] for i in range(num_elements): choice = random.randint(0,3) if choice == 0: # New tag. tag_name = random.choice(tag_names) elements.append("<%s>" % tag_name) elif choice == 1: elements.append(rsentence(random.randint(1,4))) elif choice == 2: # Close a tag. 
tag_name = random.choice(tag_names) elements.append("</%s>" % tag_name) return "<html>" + "\n".join(elements) + "</html>" def benchmark_parsers(num_elements=100000): """Very basic head-to-head performance benchmark.""" print "Comparative parser benchmark on Beautiful Soup %s" % __version__ data = rdoc(num_elements) print "Generated a large invalid HTML document (%d bytes)." % len(data) for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: success = False try: a = time.time() soup = BeautifulSoup(data, parser) b = time.time() success = True except Exception, e: print "%s could not parse the markup." % parser traceback.print_exc() if success: print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) from lxml import etree a = time.time() etree.HTML(data) b = time.time() print "Raw lxml parsed the markup in %.2fs." % (b-a) import html5lib parser = html5lib.HTMLParser() a = time.time() parser.parse(data) b = time.time() print "Raw html5lib parsed the markup in %.2fs." % (b-a) def profile(num_elements=100000, parser="lxml"): filehandle = tempfile.NamedTemporaryFile() filename = filehandle.name data = rdoc(num_elements) vars = dict(bs4=bs4, data=data, parser=parser) cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename) stats = pstats.Stats(filename) # stats.strip_dirs() stats.sort_stats("cumulative") stats.print_stats('_html5lib|bs4', 50) if __name__ == '__main__': diagnose(sys.stdin.read())
gpl-3.0
TimYi/django
django/contrib/admindocs/urls.py
574
1183
from django.conf.urls import url from django.contrib.admindocs import views urlpatterns = [ url('^$', views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'), name='django-admindocs-docroot'), url('^bookmarklets/$', views.BookmarkletsView.as_view(), name='django-admindocs-bookmarklets'), url('^tags/$', views.TemplateTagIndexView.as_view(), name='django-admindocs-tags'), url('^filters/$', views.TemplateFilterIndexView.as_view(), name='django-admindocs-filters'), url('^views/$', views.ViewIndexView.as_view(), name='django-admindocs-views-index'), url('^views/(?P<view>[^/]+)/$', views.ViewDetailView.as_view(), name='django-admindocs-views-detail'), url('^models/$', views.ModelIndexView.as_view(), name='django-admindocs-models-index'), url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$', views.ModelDetailView.as_view(), name='django-admindocs-models-detail'), url('^templates/(?P<template>.*)/$', views.TemplateDetailView.as_view(), name='django-admindocs-templates'), ]
bsd-3-clause
idiap/zentas
python/experiments/datapaths.py
1
3777
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ # Written by James Newling <jnewling@idiap.ch> import socket datapath_filenames = {} datapath_filenames["yearpredictionmsd_fn"] = "YearPredictionMSD.txt" datapath_filenames["htru2_fn"] = "HTRU_2.csv" datapath_filenames["epileptic_fn"] = "data.csv" datapath_filenames["mopac_fn"] = "allUsers.lcl.csv" #I set the paths to data here, but you need to download data separately if socket.gethostname() == "goudurix12": import goudurix12paths reload(goudurix12paths) datapaths = goudurix12paths.datapaths elif socket.gethostname() == "goudurix11": import goudurix11paths reload(goudurix11paths) datapaths = goudurix11paths.datapaths elif socket.gethostname() == "idbean": import idbeanpaths reload(idbeanpaths) datapaths = idbeanpaths.datapaths else: print "unknown host in datapaths.py, certain data paths may need to be set" tobeset = "/this/path/needs/to/be/set/in/datapaths.py" datapaths = {} # path to bin from http://leon.bottou.org/projects/infimnist # at http://leon.bottou.org/_media/projects/infimnist.tar.gz datapaths["infiexec"] = tobeset # path to where mnist data can be written datapaths["infidpath"] = tobeset #path to where to save a figure datapaths["nipsflow_poster"] = tobeset datapaths["nipsflow_slide1"] = tobeset datapaths["nipsflow_slide2"] = tobeset datapaths["nips_plot1"] = tobeset datapaths["nips_plot2"] = tobeset datapaths["nips_plot3"] = tobeset datapaths["nips_plot3_greedy"] = tobeset #path to eakmeans install dir datapaths["eaklibdir"] = tobeset #path to where to save a figure datapaths["kscalingfigpath"] = tobeset # path to where the files cod-rna cod-rna.r cod-rna.t # from website https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html#cod-rna datapaths["rnaraw"] = tobeset datapaths["rnawrite"] = tobeset #path to where files a1.txt, europediff.txt etc are # from website https://cs.joensuu.fi/sipu/datasets/ datapaths["joensuuwrite"] = tobeset datapaths["joensuuraw"] = tobeset 
datapaths["yearpredictionmsd_write"] = tobeset #path to where YearPredictionMSD.txt is #from https://archive.ics.uci.edu/ml/datasets/YearPredictionMSD datapaths["yearpredictionmsd_raw"] = tobeset datapaths["htru2_write"] = tobeset #path to where HTRU_2.csv is #from https://archive.ics.uci.edu/ml/datasets/HTRU2 datapaths["htru2_raw"] = tobeset datapaths["epileptic_write"] = tobeset #path to where data.csv is #https://archive.ics.uci.edu/ml/datasets/Epileptic+Seizure+Recognition# datapaths["epileptic_raw"] = tobeset datapaths["mopac_write"] = tobeset #path to where allUsers.lcl.csv is #https://archive.ics.uci.edu/ml/machine-learning-databases/00391/ datapaths["mopac_raw"] = tobeset #name of cPickle file where to write results from experiment datapaths["pkl_results_dir"] = tobeset #name of directory where the sizes of datasets can be stored (so that they don't have to #be loaded to check) datapaths["pkl_results_dir"] = tobeset # base direcory of font ttf files. # The font used in the poster is downloadable at # https://fontlibrary.org/en/font/cmu-bright datapaths["font_dirs"] = tobesest #directory where figures for a small demo of how clarans runs are saved datapaths["smld_clarans_demo_dir"] = tobesest #full path name of figure comparing #implementations and #evaluations datapaths["smld_impl_vs_eval_fn_0"] = tobesest #as above, but with stopping criteria datapaths["smld_impl_vs_eval_fn_1"] = tobesest #the results file from levels.py datapaths["comparing_levels_fn"] = tobesest #the figure file from levels.py datapaths["comparing_levels_fig_fn"] = tobesest
gpl-3.0
dpassante/ansible
lib/ansible/utils/plugin_docs.py
6
11072
# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible import constants as C from ansible.release import __version__ as ansible_version from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.module_utils.six import string_types from ansible.module_utils._text import to_native from ansible.module_utils.common._collections_compat import MutableMapping, MutableSet, MutableSequence from ansible.parsing.plugin_docs import read_docstring from ansible.parsing.yaml.loader import AnsibleLoader from ansible.utils.display import Display display = Display() # modules that are ok that they do not have documentation strings BLACKLIST = { 'MODULE': frozenset(('async_wrapper',)), 'CACHE': frozenset(('base',)), } def merge_fragment(target, source): for key, value in source.items(): if key in target: # assumes both structures have same type if isinstance(target[key], MutableMapping): value.update(target[key]) elif isinstance(target[key], MutableSet): value.add(target[key]) elif isinstance(target[key], MutableSequence): value = sorted(frozenset(value + target[key])) else: raise Exception("Attempt to extend a documentation fragement, invalid type for %s" % key) target[key] = value def _process_versions_and_dates(fragment, is_module, return_docs, callback): def process_deprecation(deprecation, top_level=False): if not isinstance(deprecation, MutableMapping): return if (is_module or top_level) and 'removed_in' in deprecation: # used in module deprecations callback(deprecation, 'removed_in', 'removed_from_collection') if 'removed_at_date' in deprecation: callback(deprecation, 'removed_at_date', 'removed_from_collection') if not (is_module or top_level) and 'version' in deprecation: # used in plugin option deprecations callback(deprecation, 'version', 
'removed_from_collection') def process_option_specifiers(specifiers): for specifier in specifiers: if not isinstance(specifier, MutableMapping): continue if 'version_added' in specifier: callback(specifier, 'version_added', 'version_added_collection') if isinstance(specifier.get('deprecated'), MutableMapping): process_deprecation(specifier['deprecated']) def process_options(options): for option in options.values(): if not isinstance(option, MutableMapping): continue if 'version_added' in option: callback(option, 'version_added', 'version_added_collection') if not is_module: if isinstance(option.get('env'), list): process_option_specifiers(option['env']) if isinstance(option.get('ini'), list): process_option_specifiers(option['ini']) if isinstance(option.get('vars'), list): process_option_specifiers(option['vars']) if isinstance(option.get('suboptions'), MutableMapping): process_options(option['suboptions']) def process_return_values(return_values): for return_value in return_values.values(): if not isinstance(return_value, MutableMapping): continue if 'version_added' in return_value: callback(return_value, 'version_added', 'version_added_collection') if isinstance(return_value.get('contains'), MutableMapping): process_return_values(return_value['contains']) if not fragment: return if return_docs: process_return_values(fragment) return if 'version_added' in fragment: callback(fragment, 'version_added', 'version_added_collection') if isinstance(fragment.get('deprecated'), MutableMapping): process_deprecation(fragment['deprecated'], top_level=True) if isinstance(fragment.get('options'), MutableMapping): process_options(fragment['options']) def add_collection_to_versions_and_dates(fragment, collection_name, is_module, return_docs=False): def add(options, option, collection_name_field): if collection_name_field not in options: options[collection_name_field] = collection_name _process_versions_and_dates(fragment, is_module, return_docs, add) def 
remove_current_collection_from_versions_and_dates(fragment, collection_name, is_module, return_docs=False): def remove(options, option, collection_name_field): if options.get(collection_name_field) == collection_name: del options[collection_name_field] _process_versions_and_dates(fragment, is_module, return_docs, remove) def add_fragments(doc, filename, fragment_loader, is_module=False): fragments = doc.pop('extends_documentation_fragment', []) if isinstance(fragments, string_types): fragments = [fragments] unknown_fragments = [] # doc_fragments are allowed to specify a fragment var other than DOCUMENTATION # with a . separator; this is complicated by collections-hosted doc_fragments that # use the same separator. Assume it's collection-hosted normally first, try to load # as-specified. If failure, assume the right-most component is a var, split it off, # and retry the load. for fragment_slug in fragments: fragment_name = fragment_slug fragment_var = 'DOCUMENTATION' fragment_class = fragment_loader.get(fragment_name) if fragment_class is None and '.' in fragment_slug: splitname = fragment_slug.rsplit('.', 1) fragment_name = splitname[0] fragment_var = splitname[1].upper() fragment_class = fragment_loader.get(fragment_name) if fragment_class is None: unknown_fragments.append(fragment_slug) continue fragment_yaml = getattr(fragment_class, fragment_var, None) if fragment_yaml is None: if fragment_var != 'DOCUMENTATION': # if it's asking for something specific that's missing, that's an error unknown_fragments.append(fragment_slug) continue else: fragment_yaml = '{}' # TODO: this is still an error later since we require 'options' below... 
fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data() real_collection_name = 'ansible.builtin' real_fragment_name = getattr(fragment_class, '_load_name') if real_fragment_name.startswith('ansible_collections.'): real_collection_name = '.'.join(real_fragment_name.split('.')[1:3]) add_collection_to_versions_and_dates(fragment, real_collection_name, is_module=is_module) if 'notes' in fragment: notes = fragment.pop('notes') if notes: if 'notes' not in doc: doc['notes'] = [] doc['notes'].extend(notes) if 'seealso' in fragment: seealso = fragment.pop('seealso') if seealso: if 'seealso' not in doc: doc['seealso'] = [] doc['seealso'].extend(seealso) if 'options' not in fragment: raise Exception("missing options in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename)) # ensure options themselves are directly merged if 'options' in doc: try: merge_fragment(doc['options'], fragment.pop('options')) except Exception as e: raise AnsibleError("%s options (%s) of unknown type: %s" % (to_native(e), fragment_name, filename)) else: doc['options'] = fragment.pop('options') # merge rest of the sections try: merge_fragment(doc, fragment) except Exception as e: raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename)) if unknown_fragments: raise AnsibleError('unknown doc_fragment(s) in file {0}: {1}'.format(filename, to_native(', '.join(unknown_fragments)))) def get_docstring(filename, fragment_loader, verbose=False, ignore_errors=False, collection_name=None, is_module=False): """ DOCUMENTATION can be extended using documentation fragments loaded by the PluginLoader from the doc_fragments plugins. 
""" data = read_docstring(filename, verbose=verbose, ignore_errors=ignore_errors) if data.get('doc', False): # add collection name to versions and dates if collection_name is not None: add_collection_to_versions_and_dates(data['doc'], collection_name, is_module=is_module) # add fragments to documentation add_fragments(data['doc'], filename, fragment_loader=fragment_loader, is_module=is_module) if data.get('returndocs', False): # add collection name to versions and dates if collection_name is not None: add_collection_to_versions_and_dates(data['returndocs'], collection_name, is_module=is_module, return_docs=True) return data['doc'], data['plainexamples'], data['returndocs'], data['metadata'] def get_versioned_doclink(path): """ returns a versioned documentation link for the current Ansible major.minor version; used to generate in-product warning/error links to the configured DOCSITE_ROOT_URL (eg, https://docs.ansible.com/ansible/2.8/somepath/doc.html) :param path: relative path to a document under docs/docsite/rst; :return: absolute URL to the specified doc for the current version of Ansible """ path = to_native(path) try: base_url = C.config.get_config_value('DOCSITE_ROOT_URL') if not base_url.endswith('/'): base_url += '/' if path.startswith('/'): path = path[1:] split_ver = ansible_version.split('.') if len(split_ver) < 3: raise RuntimeError('invalid version ({0})'.format(ansible_version)) doc_version = '{0}.{1}'.format(split_ver[0], split_ver[1]) # check to see if it's a X.Y.0 non-rc prerelease or dev release, if so, assume devel (since the X.Y doctree # isn't published until beta-ish) if split_ver[2].startswith('0'): # exclude rc; we should have the X.Y doctree live by rc1 if any((pre in split_ver[2]) for pre in ['a', 'b']) or len(split_ver) > 3 and 'dev' in split_ver[3]: doc_version = 'devel' return '{0}{1}/{2}'.format(base_url, doc_version, path) except Exception as ex: return '(unable to create versioned doc link for path {0}: {1})'.format(path, 
to_native(ex))
gpl-3.0
smathot/mantra
mantra/v4l2_cid.py
1
1941
V4L2_CTRL_CLASS_USER = 0x00980000 V4L2_CTRL_CLASS_MPEG = 0x00990000 V4L2_CTRL_CLASS_CAMERA = 0x009a0000 V4L2_CID_BASE = V4L2_CTRL_CLASS_USER | 0x900 V4L2_CID_USER_BASE = V4L2_CID_BASE V4L2_CID_PRIVATE_BASE = 0x08000000 V4L2_CID_USER_CLASS = V4L2_CTRL_CLASS_USER | 1 V4L2_CID_BRIGHTNESS = V4L2_CID_BASE + 0 V4L2_CID_CONTRAST = V4L2_CID_BASE + 1 V4L2_CID_SATURATION = V4L2_CID_BASE + 2 V4L2_CID_HUE = V4L2_CID_BASE + 3 V4L2_CID_AUDIO_VOLUME = V4L2_CID_BASE + 5 V4L2_CID_AUDIO_BALANCE = V4L2_CID_BASE + 6 V4L2_CID_AUDIO_BASS = V4L2_CID_BASE + 7 V4L2_CID_AUDIO_TREBLE = V4L2_CID_BASE + 8 V4L2_CID_AUDIO_MUTE = V4L2_CID_BASE + 9 V4L2_CID_AUDIO_LOUDNESS = V4L2_CID_BASE + 10 V4L2_CID_BLACK_LEVEL = V4L2_CID_BASE + 11 # Deprecated V4L2_CID_AUTO_WHITE_BALANCE = V4L2_CID_BASE + 12 V4L2_CID_DO_WHITE_BALANCE = V4L2_CID_BASE + 13 V4L2_CID_RED_BALANCE = V4L2_CID_BASE + 14 V4L2_CID_BLUE_BALANCE = V4L2_CID_BASE + 15 V4L2_CID_GAMMA = V4L2_CID_BASE + 16 V4L2_CID_WHITENESS = V4L2_CID_GAMMA # Deprecated V4L2_CID_EXPOSURE = V4L2_CID_BASE + 17 V4L2_CID_AUTOGAIN = V4L2_CID_BASE + 18 V4L2_CID_GAIN = V4L2_CID_BASE + 19 V4L2_CID_HFLIP = V4L2_CID_BASE + 20 V4L2_CID_VFLIP = V4L2_CID_BASE + 21 # Deprecated; use V4L2_CID_PAN_RESET and V4L2_CID_TILT_RESET V4L2_CID_HCENTER = V4L2_CID_BASE + 22 V4L2_CID_VCENTER = V4L2_CID_BASE + 23 V4L2_CID_POWER_LINE_FREQUENCY = V4L2_CID_BASE + 24 v4l2_power_line_frequency = ( V4L2_CID_POWER_LINE_FREQUENCY_DISABLED, V4L2_CID_POWER_LINE_FREQUENCY_50HZ, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, ) = range(3) V4L2_CID_HUE_AUTO = V4L2_CID_BASE + 25 V4L2_CID_WHITE_BALANCE_TEMPERATURE = V4L2_CID_BASE + 26 V4L2_CID_SHARPNESS = V4L2_CID_BASE + 27 V4L2_CID_BACKLIGHT_COMPENSATION = V4L2_CID_BASE + 28 V4L2_CID_CHROMA_AGC = V4L2_CID_BASE + 29 V4L2_CID_COLOR_KILLER = V4L2_CID_BASE + 30 V4L2_CID_LASTP1 = V4L2_CID_BASE + 31 V4L2_CID_MPEG_BASE = V4L2_CTRL_CLASS_MPEG | 0x900 V4L2_CID_MPEG_CLASS = V4L2_CTRL_CLASS_MPEG | 1
gpl-2.0
ns950/calibre
src/calibre/library/test.py
14
4822
#!/usr/bin/env python2
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

'''
Unit tests for database layer.
'''

import sys, unittest, os, cStringIO
from itertools import repeat

from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.library.database2 import LibraryDatabase2
from calibre.ebooks.metadata import MetaInformation


class DBTest(unittest.TestCase):
    '''Exercise add/format/metadata/sort operations of LibraryDatabase2.'''

    # Minimal 1x1 JPEG used as cover data for the test book.
    img = '\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x01\x00d\x00d\x00\x00\xff\xdb\x00C\x00\x05\x03\x04\x04\x04\x03\x05\x04\x04\x04\x05\x05\x05\x06\x07\x0c\x08\x07\x07\x07\x07\x0f\x0b\x0b\t\x0c\x11\x0f\x12\x12\x11\x0f\x11\x11\x13\x16\x1c\x17\x13\x14\x1a\x15\x11\x11\x18!\x18\x1a\x1d\x1d\x1f\x1f\x1f\x13\x17"$"\x1e$\x1c\x1e\x1f\x1e\xff\xdb\x00C\x01\x05\x05\x05\x07\x06\x07\x0e\x08\x08\x0e\x1e\x14\x11\x14\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\xff\xc0\x00\x11\x08\x00\x01\x00\x01\x03\x01\x11\x00\x02\x11\x01\x03\x11\x01\xff\xc4\x00\x14\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\xff\xc4\x00\x14\x10\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xc4\x00\x14\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\xff\xc4\x00\x14\x11\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xda\x00\x0c\x03\x01\x00\x02\x11\x03\x11\x00?\x00p\xf9+\xff\xd9'

    def setUp(self):
        # Fresh library in a temporary directory for every test.
        self.tdir = PersistentTemporaryDirectory('_calibre_dbtest')
        self.db = LibraryDatabase2(self.tdir)
        f = open(os.path.join(self.tdir, 'test.txt'), 'w+b')
        f.write('test')
        paths = list(repeat(f, 3))
        formats = list(repeat('txt', 3))
        m1 = MetaInformation('Test Ebook 1', ['Test Author 1'])
        m1.tags = ['tag1', 'tag2']
        m1.publisher = 'Test Publisher 1'
        m1.rating = 2
        m1.series = 'Test Series 1'
        m1.series_index = 3
        m1.author_sort = 'as1'
        m1.isbn = 'isbn1'
        m1.cover_data = ('jpg', self.img)
        m2 = MetaInformation('Test Ebook 2', ['Test Author 2'])
        m2.tags = ['tag3', 'tag4']
        m2.publisher = 'Test Publisher 2'
        m2.rating = 3
        m2.series = 'Test Series 2'
        m2.series_index = 1
        m2.author_sort = 'as1'
        m2.isbn = 'isbn1'
        # m2 is added twice on purpose to exercise add_duplicates=True.
        self.db.add_books(paths, formats, [m1, m2, m2], add_duplicates=True)
        self.m1, self.m2 = m1, m2

    def testAdding(self):
        m1, m2 = self.db.get_metadata(1, True), self.db.get_metadata(2, True)
        for p in ('title', 'authors', 'publisher', 'rating', 'series',
                  'series_index', 'author_sort', 'isbn', 'tags'):
            def ga(mi, p):
                # Compare list-valued attributes order-insensitively.
                val = getattr(mi, p)
                if isinstance(val, list):
                    val = set(val)
                return val
            self.assertEqual(ga(self.m1, p), ga(m1, p))
            self.assertEqual(ga(self.m2, p), ga(m2, p))

        self.assertEqual(self.db.format(1, 'txt', index_is_id=True), 'test')
        self.assertEqual(self.db.formats(1, index_is_id=True), 'TXT')
        self.db.add_format(1, 'html', cStringIO.StringIO('<html/>'),
                           index_is_id=True)
        self.assertEqual(self.db.formats(1, index_is_id=True), 'HTML,TXT')
        self.db.remove_format(1, 'html', index_is_id=True)
        self.assertEqual(self.db.formats(1, index_is_id=True), 'TXT')
        self.assertNotEqual(self.db.cover(1, index_is_id=True), None)
        self.assertEqual(self.db.cover(2, index_is_id=True), None)

    def testMetadata(self):
        self.db.refresh('timestamp', True)
        for x in ('title', 'author_sort', 'series', 'publisher', 'isbn',
                  'series_index', 'rating'):
            val = 3 if x in ['rating', 'series_index'] else 'dummy'
            # Setters take a book id (3); accessors take a row index (2).
            getattr(self.db, 'set_' + x)(3, val)
            self.db.refresh_ids([3])
            self.assertEqual(getattr(self.db, x)(2), val)

        self.db.set_authors(3, ['new auth'])
        self.db.refresh_ids([3])
        self.assertEqual('new auth', self.db.authors(2))
        self.assertEqual(self.db.format(3, 'txt', index_is_id=True), 'test')

    def testSorting(self):
        self.db.sort('authors', True)
        self.assertEqual(self.db.authors(0), 'Test Author 1')
        self.db.sort('rating', False)
        self.assertEqual(self.db.rating(0), 3)


def suite():
    return unittest.TestLoader().loadTestsFromTestCase(DBTest)


def test():
    unittest.TextTestRunner(verbosity=2).run(suite())


def main(args=sys.argv):
    test()
    return 0


if __name__ == '__main__':
    sys.exit(main())
gpl-3.0
Werkov/PyQt4
pyuic/uic/widget-plugins/qscintilla.py
7
1843
#############################################################################
##
## Copyright (c) 2011 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file.  Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at sales@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################


# If pluginType is MODULE, the plugin loader will call moduleInformation.  The
# variable MODULE is inserted into the local namespace by the plugin loader.
pluginType = MODULE


# moduleInformation() must return a tuple (module, widget_list).  If "module"
# is "A" and any widget from this module is used, the code generator will
# write "import A".  If "module" is "A[.B].C", the code generator will write
# "from A[.B] import C".  Each entry in "widget_list" must be unique.
def moduleInformation():
    return "PyQt4.Qsci", ("QsciScintilla", )
gpl-2.0
indashnet/InDashNet.Open.UN2000
android/external/chromium_org/chrome/test/functional/passwords.py
65
15740
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os from urlparse import urlparse import pyauto_functional # Must be imported before pyauto import pyauto import test_utils from webdriver_pages import settings class PasswordTest(pyauto.PyUITest): """Tests that passwords work correctly.""" INFOBAR_TYPE = 'password_infobar' URL = 'https://accounts.google.com/ServiceLogin' URL_HTTPS = 'https://accounts.google.com/Login' URL_LOGOUT = 'https://accounts.google.com/Logout' HOSTNAME = 'https://' + urlparse(URL).netloc + '/' USERNAME_ELEM = 'Email' PASSWORD_ELEM = 'Passwd' USERNAME = 'test@google.com' PASSWORD = 'test.password' def Debug(self): """Test method for experimentation. This method will not run automatically. """ while True: raw_input('Interact with the browser and hit <enter> to dump passwords. ') print '*' * 20 self.pprint(self.GetSavedPasswords()) def setUp(self): pyauto.PyUITest.setUp(self) self.assertFalse(self.GetSavedPasswords()) def _AssertWithinOneSecond(self, time1, time2): self.assertTrue(abs(time1 - time2) < 1.0, 'Times not within an acceptable range. 
' 'First was %lf, second was %lf' % (time1, time2)) def _ConstructPasswordDictionary(self, username_value, password_value, signon_realm, origin_url, username_element, password_element, action_target, time=1279650942.0, submit_element='submit', blacklist=False): """Construct a password dictionary with all the required fields.""" return {'username_value': username_value, 'password_value': password_value, 'signon_realm': signon_realm, 'time': time, 'origin_url': origin_url, 'username_element': username_element, 'password_element': password_element, 'submit_element': submit_element, 'action_target': action_target, 'blacklist': blacklist} def _ClickOnLoginPage(self, window_index, tab_index): # In some cases (such as on Windows) the current page displays an account # name and e-mail, rather than an e-mail and password. Clicking on a # particular DOM element causes the e-mail and password to be displayed. click_js = """ var elements = document.getElementsByClassName("accounts"); if (elements && elements.length > 0) { elements = elements[0].getElementsByTagName("p"); if (elements && elements.length > 0) elements[0].onclick(); } window.domAutomationController.send("done"); """ self.ExecuteJavascript(click_js, tab_index, window_index) # Wait until username/password is filled by the Password manager on the # login page. 
js_template = """ var value = ""; var element = document.getElementById("%s"); if (element) value = element.value; window.domAutomationController.send(value); """ self.assertTrue(self.WaitUntil( lambda: self.ExecuteJavascript(js_template % self.USERNAME_ELEM, tab_index, window_index) != '' and self.ExecuteJavascript(js_template % self.PASSWORD_ELEM, tab_index, window_index) != '')) def testSavePassword(self): """Test saving a password and getting saved passwords.""" password1 = self._ConstructPasswordDictionary( 'user@example.com', 'test.password', 'https://www.example.com/', 'https://www.example.com/login', 'username', 'password', 'https://www.example.com/login/') self.assertTrue(self.AddSavedPassword(password1)) self.assertEqual(self.GetSavedPasswords(), [password1]) def testRemovePasswords(self): """Verify that saved passwords can be removed.""" password1 = self._ConstructPasswordDictionary( 'user1@example.com', 'test1.password', 'https://www.example.com/', 'https://www.example.com/login', 'username1', 'password', 'https://www.example.com/login/') password2 = self._ConstructPasswordDictionary( 'user2@example.com', 'test2.password', 'https://www.example.com/', 'https://www.example.com/login', 'username2', 'password2', 'https://www.example.com/login/') self.AddSavedPassword(password1) self.AddSavedPassword(password2) self.assertEquals(2, len(self.GetSavedPasswords())) self.assertEquals([password1, password2], self.GetSavedPasswords()) self.RemoveSavedPassword(password1) self.assertEquals(1, len(self.GetSavedPasswords())) self.assertEquals([password2], self.GetSavedPasswords()) self.RemoveSavedPassword(password2) # TODO: GetSavedPasswords() doesn't return anything when empty. 
# http://crbug.com/64603 # self.assertFalse(self.GetSavedPasswords()) def testDisplayAndSavePasswordInfobar(self): """Verify password infobar displays and able to save password.""" creds = self.GetPrivateInfo()['test_google_account'] username = creds['username'] password = creds['password'] # Disable one-click login infobar for sync. self.SetPrefs(pyauto.kReverseAutologinEnabled, False) test_utils.GoogleAccountsLogin(self, username, password) # Wait until page completes loading. self.WaitUntil( lambda: self.GetDOMValue('document.readyState'), expect_retval='complete') self.PerformActionOnInfobar( 'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE)) self.NavigateToURL(self.URL_LOGOUT) self.NavigateToURL(self.URL_HTTPS) self._ClickOnLoginPage(0, 0) test_utils.VerifyGoogleAccountCredsFilled(self, username, password, tab_index=0, windex=0) def testNeverSavePasswords(self): """Verify passwords not saved/deleted when 'never for this site' chosen.""" creds1 = self.GetPrivateInfo()['test_google_account'] # Disable one-click login infobar for sync. self.SetPrefs(pyauto.kReverseAutologinEnabled, False) test_utils.GoogleAccountsLogin( self, creds1['username'], creds1['password']) self.PerformActionOnInfobar( 'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE)) self.assertEquals(1, len(self.GetSavedPasswords())) self.AppendTab(pyauto.GURL(creds1['logout_url'])) creds2 = self.GetPrivateInfo()['test_google_account_2'] test_utils.GoogleAccountsLogin( self, creds2['username'], creds2['password'], tab_index=1) # Selecting 'Never for this site' option on password infobar. self.PerformActionOnInfobar( 'cancel', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE, tab_index=1), tab_index=1) # TODO: GetSavedPasswords() doesn't return anything when empty. 
# http://crbug.com/64603 # self.assertFalse(self.GetSavedPasswords()) # TODO: Check the exceptions list def testSavedPasswordInTabsAndWindows(self): """Verify saved username/password shows in window and tab.""" creds = self.GetPrivateInfo()['test_google_account'] username = creds['username'] password = creds['password'] # Disable one-click login infobar for sync. self.SetPrefs(pyauto.kReverseAutologinEnabled, False) # Login to Google a/c test_utils.GoogleAccountsLogin(self, username, password) self.PerformActionOnInfobar( 'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE)) self.NavigateToURL(self.URL_LOGOUT) self.NavigateToURL(self.URL) self._ClickOnLoginPage(0, 0) test_utils.VerifyGoogleAccountCredsFilled(self, username, password, tab_index=0, windex=0) self.AppendTab(pyauto.GURL(self.URL)) self._ClickOnLoginPage(0, 1) test_utils.VerifyGoogleAccountCredsFilled(self, username, password, tab_index=1, windex=0) def testLoginCredsNotShownInIncognito(self): """Verify login creds are not shown in Incognito mode.""" creds = self.GetPrivateInfo()['test_google_account'] username = creds['username'] password = creds['password'] # Disable one-click login infobar for sync. self.SetPrefs(pyauto.kReverseAutologinEnabled, False) # Login to Google account. test_utils.GoogleAccountsLogin(self, username, password) self.PerformActionOnInfobar( 'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE)) self.NavigateToURL(self.URL_LOGOUT) self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW) self.NavigateToURL(self.URL, 1, 0) email_value = self.GetDOMValue('document.getElementById("Email").value', tab_index=0, windex=1) passwd_value = self.GetDOMValue('document.getElementById("Passwd").value', tab_index=0, windex=1) self.assertEqual(email_value, '', msg='Email creds displayed %s.' 
% email_value) self.assertEqual(passwd_value, '', msg='Password creds displayed.') def testPasswordAutofilledInIncognito(self): """Verify saved password is autofilled in Incognito mode. Saved passwords should be autofilled once the username is entered in incognito mode. """ action_target = self.HOSTNAME driver = self.NewWebDriver() password_dict = self._ConstructPasswordDictionary( self.USERNAME, self.PASSWORD, self.HOSTNAME, self.URL, self.USERNAME_ELEM, self.PASSWORD_ELEM, action_target) self.AddSavedPassword(password_dict) self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW) self.NavigateToURL(self.URL, 1, 0) # Switch to window 1. driver.switch_to_window(driver.window_handles[1]) driver.find_element_by_id( self.USERNAME_ELEM).send_keys(self.USERNAME + '\t') incognito_passwd = self.GetDOMValue( 'document.getElementById("Passwd").value', tab_index=0, windex=1) self.assertEqual(incognito_passwd, self.PASSWORD, msg='Password creds did not autofill in incognito mode.') def testInfoBarDisappearByNavigatingPage(self): """Test password infobar is dismissed when navigating to different page.""" creds = self.GetPrivateInfo()['test_google_account'] # Disable one-click login infobar for sync. self.SetPrefs(pyauto.kReverseAutologinEnabled, False) # Login to Google account. test_utils.GoogleAccountsLogin(self, creds['username'], creds['password']) self.PerformActionOnInfobar( 'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE)) self.NavigateToURL('chrome://version') self.assertTrue(self.WaitForInfobarCount(0)) # To make sure user is navigated to Version page. self.assertTrue(self.WaitUntil(self.GetActiveTabTitle, expect_retval='About Version')) test_utils.AssertInfobarTypeDoesNotAppear(self, self.INFOBAR_TYPE) def testInfoBarDisappearByReload(self): """Test that Password infobar disappears by the page reload.""" creds = self.GetPrivateInfo()['test_google_account'] # Disable one-click login infobar for sync. 
self.SetPrefs(pyauto.kReverseAutologinEnabled, False) # Login to Google a/c test_utils.GoogleAccountsLogin(self, creds['username'], creds['password']) self.PerformActionOnInfobar( 'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE)) self.ReloadTab() test_utils.AssertInfobarTypeDoesNotAppear(self, self.INFOBAR_TYPE) def testPasswdInfoNotStoredWhenAutocompleteOff(self): """Verify that password infobar does not appear when autocomplete is off. If the password field has autocomplete turned off, then the password infobar should not offer to save the password info. """ password_info = {'Email': self.USERNAME, 'Passwd': self.PASSWORD} # Disable one-click login infobar for sync. self.SetPrefs(pyauto.kReverseAutologinEnabled, False) url = self.GetHttpURLForDataPath( os.path.join('password', 'password_autocomplete_off_test.html')) self.NavigateToURL(url) for key, value in password_info.iteritems(): script = ('document.getElementById("%s").value = "%s"; ' 'window.domAutomationController.send("done");') % (key, value) self.ExecuteJavascript(script, 0, 0) self.assertTrue(self.SubmitForm('loginform')) test_utils.AssertInfobarTypeDoesNotAppear(self, self.INFOBAR_TYPE) def _SendCharToPopulateField(self, char, tab_index=0, windex=0): """Simulate a char being typed into a field. Args: char: the char value to be typed into the field. tab_index: tab index to work on. Defaults to 0 (first tab). windex: window index to work on. Defaults to 0 (first window). """ CHAR_KEYPRESS = ord((char).upper()) # ASCII char key press. KEY_DOWN_TYPE = 0 # kRawKeyDownType KEY_UP_TYPE = 3 # kKeyUpType self.SendWebkitKeyEvent(KEY_DOWN_TYPE, CHAR_KEYPRESS, tab_index, windex) self.SendWebkitCharEvent(char, tab_index, windex) self.SendWebkitKeyEvent(KEY_UP_TYPE, CHAR_KEYPRESS, tab_index, windex) def testClearFetchedCredForNewUserName(self): """Verify that the fetched credentials are cleared for a new username. 
This test requires sending key events rather than pasting a new username into the Email field. """ creds = self.GetPrivateInfo()['test_google_account'] username = creds['username'] password = creds['password'] # Disable one-click login infobar for sync. self.SetPrefs(pyauto.kReverseAutologinEnabled, False) # Login to Google a/c test_utils.GoogleAccountsLogin(self, username, password) self.PerformActionOnInfobar( 'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex( self, self.INFOBAR_TYPE)) self.NavigateToURL(self.URL_LOGOUT) self.NavigateToURL(self.URL) self._ClickOnLoginPage(0, 0) test_utils.VerifyGoogleAccountCredsFilled(self, username, password, tab_index=0, windex=0) clear_username_field = ( 'document.getElementById("Email").value = ""; ' 'window.domAutomationController.send("done");') set_focus = ( 'document.getElementById("Email").focus(); ' 'window.domAutomationController.send("done");') self.ExecuteJavascript(clear_username_field, 0, 0) self.ExecuteJavascript(set_focus, 0, 0) self._SendCharToPopulateField('t', tab_index=0, windex=0) passwd_value = self.GetDOMValue('document.getElementById("Passwd").value') self.assertFalse(passwd_value, msg='Password field not empty for new username.') def testPasswordInfobarShowsForBlockedDomain(self): """Verify that password infobar shows when cookies are blocked. Password infobar should be shown if cookies are blocked for Google accounts domain. """ creds = self.GetPrivateInfo()['test_google_account'] username = creds['username'] password = creds['password'] # Block cookies for Google accounts domain. self.SetPrefs(pyauto.kContentSettingsPatternPairs, {'https://accounts.google.com/': {'cookies': 2}}) test_utils.GoogleAccountsLogin(self, username, password) test_utils.WaitForInfobarTypeAndGetIndex(self, self.INFOBAR_TYPE) if __name__ == '__main__': pyauto_functional.Main()
apache-2.0
aisipos/django
tests/file_storage/test_generate_filename.py
37
4090
import os
import warnings

from django.core.files.base import ContentFile
from django.core.files.storage import Storage
from django.db.models import FileField
from django.test import SimpleTestCase


class AWSS3Storage(Storage):
    """
    Simulate an AWS S3 storage which uses Unix-like paths and allows any
    characters in file names but where there aren't actual folders but just
    keys.
    """
    prefix = 'mys3folder/'

    def _save(self, name, content):
        """
        This method is important to test that Storage.save() doesn't replace
        '\' with '/' (rather FileSystemStorage.save() does).
        """
        return name

    def get_valid_name(self, name):
        return name

    def get_available_name(self, name, max_length=None):
        return name

    def generate_filename(self, filename):
        """
        This is the method that's important to override when using S3 so that
        os.path() isn't called, which would break S3 keys.
        """
        return self.prefix + self.get_valid_name(filename)


class GenerateFilenameStorageTests(SimpleTestCase):

    def test_filefield_get_directory_deprecation(self):
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            f = FileField(upload_to='some/folder/')
            self.assertEqual(f.get_directory_name(),
                             os.path.normpath('some/folder/'))

        self.assertEqual(len(warns), 1)
        self.assertEqual(
            warns[0].message.args[0],
            'FileField now delegates file name and folder processing to the '
            'storage. get_directory_name() will be removed in Django 2.0.'
        )

    def test_filefield_get_filename_deprecation(self):
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            f = FileField(upload_to='some/folder/')
            self.assertEqual(f.get_filename('some/folder/test.txt'),
                             'test.txt')

        self.assertEqual(len(warns), 1)
        self.assertEqual(
            warns[0].message.args[0],
            'FileField now delegates file name and folder processing to the '
            'storage. get_filename() will be removed in Django 2.0.'
        )

    def test_filefield_generate_filename(self):
        f = FileField(upload_to='some/folder/')
        self.assertEqual(
            f.generate_filename(None, 'test with space.txt'),
            os.path.normpath('some/folder/test_with_space.txt')
        )

    def test_filefield_generate_filename_with_upload_to(self):
        def upload_to(instance, filename):
            return 'some/folder/' + filename

        f = FileField(upload_to=upload_to)
        self.assertEqual(
            f.generate_filename(None, 'test with space.txt'),
            os.path.normpath('some/folder/test_with_space.txt')
        )

    def test_filefield_awss3_storage(self):
        """
        Simulate a FileField with an S3 storage which uses keys rather than
        folders and names. FileField and Storage shouldn't have any os.path()
        calls that break the key.
        """
        storage = AWSS3Storage()
        folder = 'not/a/folder/'

        f = FileField(upload_to=folder, storage=storage)
        key = 'my-file-key\\with odd characters'
        data = ContentFile('test')
        expected_key = AWSS3Storage.prefix + folder + key

        # Simulate call to f.save()
        result_key = f.generate_filename(None, key)
        self.assertEqual(result_key, expected_key)

        result_key = storage.save(result_key, data)
        self.assertEqual(result_key, expected_key)

        # Repeat test with a callable.
        def upload_to(instance, filename):
            # Return a non-normalized path on purpose.
            return folder + filename

        f = FileField(upload_to=upload_to, storage=storage)

        # Simulate call to f.save()
        result_key = f.generate_filename(None, key)
        self.assertEqual(result_key, expected_key)

        result_key = storage.save(result_key, data)
        self.assertEqual(result_key, expected_key)
bsd-3-clause
gregdek/ansible
lib/ansible/modules/cloud/ovirt/ovirt_disk.py
4
29219
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2016 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ovirt_disk short_description: "Module to manage Virtual Machine and floating disks in oVirt/RHV" version_added: "2.2" author: "Ondra Machacek (@machacekondra)" description: - "Module to manage Virtual Machine and floating disks in oVirt/RHV." options: id: description: - "ID of the disk to manage. Either C(id) or C(name) is required." name: description: - "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required." aliases: ['alias'] description: description: - "Description of the disk image to manage." version_added: "2.5" vm_name: description: - "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)." vm_id: description: - "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)." state: description: - "Should the Virtual Machine disk be present/absent/attached/detached." choices: ['present', 'absent', 'attached', 'detached'] default: 'present' download_image_path: description: - "Path on a file system where disk should be downloaded." - "Note that you must have an valid oVirt/RHV engine CA in your system trust store or you must provide it in C(ca_file) parameter." - "Note that the disk is not downloaded when the file already exists, but you can forcibly download the disk when using C(force) I (true)." version_added: "2.3" upload_image_path: description: - "Path to disk image, which should be uploaded." - "Note that currently we support only compatibility version 0.10 of the qcow disk." - "Note that you must have an valid oVirt/RHV engine CA in your system trust store or you must provide it in C(ca_file) parameter." 
- "Note that there is no reliable way to achieve idempotency, so if you want to upload the disk even if the disk with C(id) or C(name) exists, then please use C(force) I(true). If you will use C(force) I(false), which is default, then the disk image won't be uploaded." version_added: "2.3" size: description: - "Size of the disk. Size should be specified using IEC standard units. For example 10GiB, 1024MiB, etc." - "Size can be only increased, not decreased." interface: description: - "Driver of the storage interface." - "It's required parameter when creating the new disk." choices: ['virtio', 'ide', 'virtio_scsi'] default: 'virtio' format: description: - Specify format of the disk. - Note that this option isn't idempotent as it's not currently possible to change format of the disk via API. choices: ['raw', 'cow'] sparse: required: False type: bool version_added: "2.5" description: - "I(True) if the disk should be sparse (also known as I(thin provision)). If the parameter is omitted, cow disks will be created as sparse and raw disks as I(preallocated)" - Note that this option isn't idempotent as it's not currently possible to change sparseness of the disk via API. storage_domain: description: - "Storage domain name where disk should be created. By default storage is chosen by oVirt/RHV engine." storage_domains: description: - "Storage domain names where disk should be copied." - "C(**IMPORTANT**)" - "There is no reliable way to achieve idempotency, so every time you specify this parameter the disks are copied, so please handle your playbook accordingly to not copy the disks all the time. This is valid only for VM and floating disks, template disks works as expected." version_added: "2.3" force: description: - "Please take a look at C(image_path) documentation to see the correct usage of this parameter." version_added: "2.3" type: bool profile: description: - "Disk profile name to be attached to disk. By default profile is chosen by oVirt/RHV engine." 
quota_id: description: - "Disk quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine." version_added: "2.5" bootable: description: - "I(True) if the disk should be bootable. By default when disk is created it isn't bootable." type: bool shareable: description: - "I(True) if the disk should be shareable. By default when disk is created it isn't shareable." type: bool logical_unit: description: - "Dictionary which describes LUN to be directly attached to VM:" - "C(address) - Address of the storage server. Used by iSCSI." - "C(port) - Port of the storage server. Used by iSCSI." - "C(target) - iSCSI target." - "C(lun_id) - LUN id." - "C(username) - CHAP Username to be used to access storage server. Used by iSCSI." - "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI." - "C(storage_type) - Storage type either I(fcp) or I(iscsi)." sparsify: description: - "I(True) if the disk should be sparsified." - "Sparsification frees space in the disk image that is not used by its filesystem. As a result, the image will occupy less space on the storage." - "Note that this parameter isn't idempotent, as it's not possible to check if the disk should be or should not be sparsified." version_added: "2.4" type: bool openstack_volume_type: description: - "Name of the openstack volume type. This is valid when working with cinder." version_added: "2.4" image_provider: description: - "When C(state) is I(exported) disk is exported to given Glance image provider." - "C(**IMPORTANT**)" - "There is no reliable way to achieve idempotency, so every time you specify this parameter the disk is exported, so please handle your playbook accordingly to not export the disk all the time. This option is valid only for template disks." 
version_added: "2.4" host: description: - "When the hypervisor name is specified the newly created disk or an existing disk will refresh its information about the underlying storage( Disk size, Serial, Product ID, Vendor ID ...) The specified host will be used for gathering the storage related information. This option is only valid for passthrough disks. This option requires at least the logical_unit.id to be specified" version_added: "2.8" wipe_after_delete: description: - "If the disk's Wipe After Delete is enabled, then the disk is first wiped." type: bool activate: description: - I(True) if the disk should be activated. version_added: "2.8" type: bool extends_documentation_fragment: ovirt ''' EXAMPLES = ''' # Examples don't contain auth parameter for simplicity, # look at ovirt_auth module to see how to reuse authentication: # Create and attach new disk to VM - ovirt_disk: name: myvm_disk vm_name: rhel7 size: 10GiB format: cow interface: virtio storage_domain: data # Attach logical unit to VM rhel7 - ovirt_disk: vm_name: rhel7 logical_unit: target: iqn.2016-08-09.brq.str-01:omachace id: 1IET_000d0001 address: 10.34.63.204 interface: virtio # Detach disk from VM - ovirt_disk: state: detached name: myvm_disk vm_name: rhel7 size: 10GiB format: cow interface: virtio # Change Disk Name - ovirt_disk: id: 00000000-0000-0000-0000-000000000000 storage_domain: data name: "new_disk_name" vm_name: rhel7 # Upload local image to disk and attach it to vm: # Since Ansible 2.3 - ovirt_disk: name: mydisk vm_name: myvm interface: virtio size: 10GiB format: cow image_path: /path/to/mydisk.qcow2 storage_domain: data # Download disk to local file system: # Since Ansible 2.3 - ovirt_disk: id: 7de90f31-222c-436c-a1ca-7e655bd5b60c download_image_path: /home/user/mydisk.qcow2 # Export disk as image to Glance domain # Since Ansible 2.4 - ovirt_disks: id: 7de90f31-222c-436c-a1ca-7e655bd5b60c image_provider: myglance state: exported # Defining a specific quota while creating a disk image: 
# Since Ansible 2.5 - ovirt_quotas_facts: data_center: Default name: myquota - ovirt_disk: name: mydisk size: 10GiB storage_domain: data description: somedescriptionhere quota_id: "{{ ovirt_quotas[0]['id'] }}" ''' RETURN = ''' id: description: "ID of the managed disk" returned: "On success if disk is found." type: str sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c disk: description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt/RHV instance at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk." returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed." type: dict disk_attachment: description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found on your oVirt/RHV instance at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk_attachment." returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found." type: dict ''' import os import time import traceback import ssl from ansible.module_utils.six.moves.http_client import HTTPSConnection, IncompleteRead from ansible.module_utils.six.moves.urllib.parse import urlparse try: import ovirtsdk4.types as otypes except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ovirt import ( BaseModule, check_sdk, check_params, create_connection, convert_to_bytes, equal, follow_link, get_id_by_name, ovirt_full_argument_spec, search_by_name, wait, ) def _search_by_lun(disks_service, lun_id): """ Find disk by LUN ID. 
""" res = [ disk for disk in disks_service.list(search='disk_type=lun') if ( disk.lun_storage.id == lun_id ) ] return res[0] if res else None def transfer(connection, module, direction, transfer_func): transfers_service = connection.system_service().image_transfers_service() transfer = transfers_service.add( otypes.ImageTransfer( image=otypes.Image( id=module.params['id'], ), direction=direction, ) ) transfer_service = transfers_service.image_transfer_service(transfer.id) try: # After adding a new transfer for the disk, the transfer's status will be INITIALIZING. # Wait until the init phase is over. The actual transfer can start when its status is "Transferring". while transfer.phase == otypes.ImageTransferPhase.INITIALIZING: time.sleep(module.params['poll_interval']) transfer = transfer_service.get() proxy_url = urlparse(transfer.proxy_url) context = ssl.create_default_context() auth = module.params['auth'] if auth.get('insecure'): context.check_hostname = False context.verify_mode = ssl.CERT_NONE elif auth.get('ca_file'): context.load_verify_locations(cafile=auth.get('ca_file')) proxy_connection = HTTPSConnection( proxy_url.hostname, proxy_url.port, context=context, ) transfer_func( transfer_service, proxy_connection, proxy_url, transfer.signed_ticket ) return True finally: transfer_service.finalize() while transfer.phase in [ otypes.ImageTransferPhase.TRANSFERRING, otypes.ImageTransferPhase.FINALIZING_SUCCESS, ]: time.sleep(module.params['poll_interval']) transfer = transfer_service.get() if transfer.phase in [ otypes.ImageTransferPhase.UNKNOWN, otypes.ImageTransferPhase.FINISHED_FAILURE, otypes.ImageTransferPhase.FINALIZING_FAILURE, otypes.ImageTransferPhase.CANCELLED, ]: raise Exception( "Error occurred while uploading image. 
The transfer is in %s" % transfer.phase ) if module.params.get('logical_unit'): disks_service = connection.system_service().disks_service() wait( service=disks_service.service(module.params['id']), condition=lambda d: d.status == otypes.DiskStatus.OK, wait=module.params['wait'], timeout=module.params['timeout'], ) def download_disk_image(connection, module): def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket): BUF_SIZE = 128 * 1024 transfer_headers = { 'Authorization': transfer_ticket, } proxy_connection.request( 'GET', proxy_url.path, headers=transfer_headers, ) r = proxy_connection.getresponse() path = module.params["download_image_path"] image_size = int(r.getheader('Content-Length')) with open(path, "wb") as mydisk: pos = 0 while pos < image_size: to_read = min(image_size - pos, BUF_SIZE) chunk = r.read(to_read) if not chunk: raise RuntimeError("Socket disconnected") mydisk.write(chunk) pos += len(chunk) return transfer( connection, module, otypes.ImageTransferDirection.DOWNLOAD, transfer_func=_transfer, ) def upload_disk_image(connection, module): def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket): BUF_SIZE = 128 * 1024 path = module.params['upload_image_path'] image_size = os.path.getsize(path) proxy_connection.putrequest("PUT", proxy_url.path) proxy_connection.putheader('Content-Length', "%d" % (image_size,)) proxy_connection.endheaders() with open(path, "rb") as disk: pos = 0 while pos < image_size: to_read = min(image_size - pos, BUF_SIZE) chunk = disk.read(to_read) if not chunk: transfer_service.pause() raise RuntimeError("Unexpected end of file at pos=%d" % pos) proxy_connection.send(chunk) pos += len(chunk) return transfer( connection, module, otypes.ImageTransferDirection.UPLOAD, transfer_func=_transfer, ) class DisksModule(BaseModule): def build_entity(self): logical_unit = self._module.params.get('logical_unit') disk = otypes.Disk( id=self._module.params.get('id'), 
name=self._module.params.get('name'), description=self._module.params.get('description'), format=otypes.DiskFormat( self._module.params.get('format') ) if self._module.params.get('format') else None, sparse=self._module.params.get( 'sparse' ) if self._module.params.get( 'sparse' ) is not None else self._module.params.get('format') != 'raw', openstack_volume_type=otypes.OpenStackVolumeType( name=self.param('openstack_volume_type') ) if self.param('openstack_volume_type') else None, provisioned_size=convert_to_bytes( self._module.params.get('size') ), storage_domains=[ otypes.StorageDomain( name=self._module.params.get('storage_domain'), ), ], quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') else None, shareable=self._module.params.get('shareable'), wipe_after_delete=self.param('wipe_after_delete'), lun_storage=otypes.HostStorage( type=otypes.StorageType( logical_unit.get('storage_type', 'iscsi') ), logical_units=[ otypes.LogicalUnit( address=logical_unit.get('address'), port=logical_unit.get('port', 3260), target=logical_unit.get('target'), id=logical_unit.get('id'), username=logical_unit.get('username'), password=logical_unit.get('password'), ) ], ) if logical_unit else None, ) if hasattr(disk, 'initial_size'): disk.initial_size = convert_to_bytes( self._module.params.get('size') ) return disk def update_storage_domains(self, disk_id): changed = False disk_service = self._service.service(disk_id) disk = disk_service.get() sds_service = self._connection.system_service().storage_domains_service() # We don't support move&copy for non file based storages: if disk.storage_type != otypes.DiskStorageType.IMAGE: return changed # Initiate move: if self._module.params['storage_domain']: new_disk_storage_id = get_id_by_name(sds_service, self._module.params['storage_domain']) changed = self.action( action='move', entity=disk, action_condition=lambda d: new_disk_storage_id != d.storage_domains[0].id, wait_condition=lambda d: d.status == 
otypes.DiskStatus.OK, storage_domain=otypes.StorageDomain( id=new_disk_storage_id, ), post_action=lambda _: time.sleep(self._module.params['poll_interval']), )['changed'] if self._module.params['storage_domains']: for sd in self._module.params['storage_domains']: new_disk_storage = search_by_name(sds_service, sd) changed = changed or self.action( action='copy', entity=disk, action_condition=( lambda disk: new_disk_storage.id not in [sd.id for sd in disk.storage_domains] ), wait_condition=lambda disk: disk.status == otypes.DiskStatus.OK, storage_domain=otypes.StorageDomain( id=new_disk_storage.id, ), )['changed'] return changed def _update_check(self, entity): return ( equal(self._module.params.get('name'), entity.name) and equal(self._module.params.get('description'), entity.description) and equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and equal(self._module.params.get('shareable'), entity.shareable) and equal(self.param('wipe_after_delete'), entity.wipe_after_delete) ) class DiskAttachmentsModule(DisksModule): def build_entity(self): return otypes.DiskAttachment( disk=super(DiskAttachmentsModule, self).build_entity(), interface=otypes.DiskInterface( self._module.params.get('interface') ) if self._module.params.get('interface') else None, bootable=self._module.params.get('bootable'), active=self.param('activate'), ) def update_check(self, entity): return ( super(DiskAttachmentsModule, self)._update_check(follow_link(self._connection, entity.disk)) and equal(self._module.params.get('interface'), str(entity.interface)) and equal(self._module.params.get('bootable'), entity.bootable) and equal(self.param('activate'), entity.active) ) def searchable_attributes(module): """ Return all searchable disk attributes passed to module. 
""" attributes = { 'name': module.params.get('name'), 'Storage.name': module.params.get('storage_domain'), 'vm_names': module.params.get('vm_name'), } return dict((k, v) for k, v in attributes.items() if v is not None) def main(): argument_spec = ovirt_full_argument_spec( state=dict( choices=['present', 'absent', 'attached', 'detached', 'exported'], default='present' ), id=dict(default=None), name=dict(default=None, aliases=['alias']), description=dict(default=None), vm_name=dict(default=None), vm_id=dict(default=None), size=dict(default=None), interface=dict(default=None,), storage_domain=dict(default=None), storage_domains=dict(default=None, type='list'), profile=dict(default=None), quota_id=dict(default=None), format=dict(default='cow', choices=['raw', 'cow']), sparse=dict(default=None, type='bool'), bootable=dict(default=None, type='bool'), shareable=dict(default=None, type='bool'), logical_unit=dict(default=None, type='dict'), download_image_path=dict(default=None), upload_image_path=dict(default=None, aliases=['image_path']), force=dict(default=False, type='bool'), sparsify=dict(default=None, type='bool'), openstack_volume_type=dict(default=None), image_provider=dict(default=None), host=dict(default=None), wipe_after_delete=dict(type='bool', default=None), activate=dict(default=None, type='bool'), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) lun = module.params.get('logical_unit') host = module.params['host'] # Fail when host is specified with the LUN id. Lun id is needed to identify # an existing disk if already available inthe environment. 
if (host and lun is None) or (host and lun.get("id") is None): module.fail_json( msg="Can not use parameter host ({0!s}) without " "specifying the logical_unit id".format(host) ) check_sdk(module) check_params(module) try: disk = None state = module.params['state'] auth = module.params.get('auth') connection = create_connection(auth) disks_service = connection.system_service().disks_service() disks_module = DisksModule( connection=connection, module=module, service=disks_service, ) if lun: disk = _search_by_lun(disks_service, lun.get('id')) ret = None # First take care of creating the VM, if needed: if state in ('present', 'detached', 'attached'): ret = disks_module.create( entity=disk, search_params=searchable_attributes(module), result_state=otypes.DiskStatus.OK if lun is None else None, fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False, ) is_new_disk = ret['changed'] ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id']) # We need to pass ID to the module, so in case we want detach/attach disk # we have this ID specified to attach/detach method: module.params['id'] = ret['id'] if disk is None else disk.id # Upload disk image in case it's new disk or force parameter is passed: if module.params['upload_image_path'] and (is_new_disk or module.params['force']): uploaded = upload_disk_image(connection, module) ret['changed'] = ret['changed'] or uploaded # Download disk image in case it's file don't exist or force parameter is passed: if ( module.params['download_image_path'] and (not os.path.isfile(module.params['download_image_path']) or module.params['force']) ): downloaded = download_disk_image(connection, module) ret['changed'] = ret['changed'] or downloaded # Disk sparsify, only if disk is of image type: disk = disks_service.disk_service(module.params['id']).get() if disk.storage_type == otypes.DiskStorageType.IMAGE: ret = disks_module.action( action='sparsify', action_condition=lambda d: 
module.params['sparsify'], wait_condition=lambda d: d.status == otypes.DiskStatus.OK, ) # Export disk as image to glance domain elif state == 'exported': disk = disks_module.search_entity() if disk is None: module.fail_json( msg="Can not export given disk '%s', it doesn't exist" % module.params.get('name') or module.params.get('id') ) if disk.storage_type == otypes.DiskStorageType.IMAGE: ret = disks_module.action( action='export', action_condition=lambda d: module.params['image_provider'], wait_condition=lambda d: d.status == otypes.DiskStatus.OK, storage_domain=otypes.StorageDomain(name=module.params['image_provider']), ) elif state == 'absent': ret = disks_module.remove() # If VM was passed attach/detach disks to/from the VM: if module.params.get('vm_id') is not None or module.params.get('vm_name') is not None and state != 'absent': vms_service = connection.system_service().vms_service() # If `vm_id` isn't specified, find VM by name: vm_id = module.params['vm_id'] if vm_id is None: vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None) if vm_id is None: module.fail_json( msg="VM don't exists, please create it first." ) disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service() disk_attachments_module = DiskAttachmentsModule( connection=connection, module=module, service=disk_attachments_service, changed=ret['changed'] if ret else False, ) if state == 'present' or state == 'attached': ret = disk_attachments_module.create() if lun is None: wait( service=disk_attachments_service.service(ret['id']), condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK, wait=module.params['wait'], timeout=module.params['timeout'], ) elif state == 'detached': ret = disk_attachments_module.remove() # When the host parameter is specified and the disk is not being # removed, refresh the information about the LUN. 
if state != 'absent' and host: hosts_service = connection.system_service().hosts_service() host_id = get_id_by_name(hosts_service, host) disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id)) module.exit_json(**ret) except Exception as e: module.fail_json(msg=str(e), exception=traceback.format_exc()) finally: connection.close(logout=auth.get('token') is None) if __name__ == "__main__": main()
gpl-3.0
rcomer/iris
lib/iris/tests/unit/analysis/test_COUNT.py
5
3683
# Copyright Iris contributors # # This file is part of Iris and is released under the LGPL license. # See COPYING and COPYING.LESSER in the root of the repository for full # licensing details. """Unit tests for the :data:`iris.analysis.COUNT` aggregator.""" # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests import numpy as np import numpy.ma as ma from iris.analysis import COUNT from iris.cube import Cube from iris.coords import DimCoord from iris._lazy_data import as_lazy_data, is_lazy_data class Test_basics(tests.IrisTest): def setUp(self): data = np.array([1, 2, 3, 4, 5]) coord = DimCoord([6, 7, 8, 9, 10], long_name="foo") self.cube = Cube(data) self.cube.add_dim_coord(coord, 0) self.lazy_cube = Cube(as_lazy_data(data)) self.lazy_cube.add_dim_coord(coord, 0) self.func = lambda x: x >= 3 def test_name(self): self.assertEqual(COUNT.name(), "count") def test_no_function(self): exp_emsg = r"function must be a callable. 
Got <.* 'NoneType'>" with self.assertRaisesRegex(TypeError, exp_emsg): COUNT.lazy_aggregate(self.lazy_cube.lazy_data(), axis=0) def test_not_callable(self): with self.assertRaisesRegex(TypeError, "function must be a callable"): COUNT.aggregate(self.cube.data, axis=0, function="wibble") def test_lazy_not_callable(self): with self.assertRaisesRegex(TypeError, "function must be a callable"): COUNT.lazy_aggregate( self.lazy_cube.lazy_data(), axis=0, function="wibble" ) def test_collapse(self): data = COUNT.aggregate(self.cube.data, axis=0, function=self.func) self.assertArrayEqual(data, [3]) def test_lazy(self): lazy_data = COUNT.lazy_aggregate( self.lazy_cube.lazy_data(), axis=0, function=self.func ) self.assertTrue(is_lazy_data(lazy_data)) def test_lazy_collapse(self): lazy_data = COUNT.lazy_aggregate( self.lazy_cube.lazy_data(), axis=0, function=self.func ) self.assertArrayEqual(lazy_data.compute(), [3]) class Test_units_func(tests.IrisTest): def test(self): self.assertIsNotNone(COUNT.units_func) new_units = COUNT.units_func(None) self.assertEqual(new_units, 1) class Test_masked(tests.IrisTest): def setUp(self): self.cube = Cube(ma.masked_equal([1, 2, 3, 4, 5], 3)) self.cube.add_dim_coord(DimCoord([6, 7, 8, 9, 10], long_name="foo"), 0) self.func = lambda x: x >= 3 def test_ma(self): data = COUNT.aggregate(self.cube.data, axis=0, function=self.func) self.assertArrayEqual(data, [2]) class Test_lazy_masked(tests.IrisTest): def setUp(self): lazy_data = as_lazy_data(ma.masked_equal([1, 2, 3, 4, 5], 3)) self.lazy_cube = Cube(lazy_data) self.lazy_cube.add_dim_coord( DimCoord([6, 7, 8, 9, 10], long_name="foo"), 0 ) self.func = lambda x: x >= 3 def test_ma(self): lazy_data = COUNT.lazy_aggregate( self.lazy_cube.lazy_data(), axis=0, function=self.func ) self.assertTrue(is_lazy_data(lazy_data)) self.assertArrayEqual(lazy_data.compute(), [2]) class Test_aggregate_shape(tests.IrisTest): def test(self): shape = () kwargs = dict() 
self.assertTupleEqual(COUNT.aggregate_shape(**kwargs), shape) kwargs = dict(wibble="wobble") self.assertTupleEqual(COUNT.aggregate_shape(**kwargs), shape) if __name__ == "__main__": tests.main()
lgpl-3.0
reverendjkn/build-a-bot
imports/setup.py
1
2948
# build-a-bot # REVEREND JKN 2015 # Imports import sources.source as source import random import sys MAX_TRIES_PER_PAGE = -2 MAX_TWEETS = -2 MAX_MINS = -2 MAX_MINS_BETWEEN_TWEETS = -2 f = open('imports/user/genSettings.txt', 'r') for line in f: lineWords = line.split(':') if(len(lineWords) > 1): lineWords[1] = lineWords[1].split('\n')[0] if (lineWords[0] == 'MAX_TRIES_PER_PAGE'): MAX_TRIES_PER_PAGE = int(lineWords[1]) elif(lineWords[0] == 'MAX_TWEETS'): MAX_TWEETS = int(lineWords[1]) elif(lineWords[0] == 'MAX_MINS'): MAX_MINS = int(lineWords[1]) elif(lineWords[0] == 'MAX_MINS_BETWEEN_TWEETS'): MAX_MINS_BETWEEN_TWEETS = int(lineWords[1]) f.close() if(MAX_TWEETS == -1): MAX_TWEETS = sys.maxint if(MAX_MINS == -1): MAX_MINS = sys.maxint if(MAX_TRIES_PER_PAGE == -2): print('MAX_TRIES_PER_PAGE not set. Using default.') MAX_TRIES_PER_PAGE = 20 if(MAX_TWEETS == -2): print('MAX_TWEETS not set. Using default.') MAX_TWEETS = sys.maxint if(MAX_MINS == -2): print('MAX_MINS not set. Using default.') MAX_MINS = sys.maxint if(MAX_MINS_BETWEEN_TWEETS == -2): print('MAX_MINS_BETWEEN_TWEETS not set. 
Using default.') MAX_MINS_BETWEEN_TWEETS = 60 # TODO: Dynamically add sources based on source text files # Youtube setup YoutubeSplitArg1 = '' YoutubeSplitArg2 = '' YoutubeURLFormatString = '' YoutubeVidURLs = [] f = open('imports/user/source_youtube.txt', 'r') for line in f: lineWords = line.split('==') if(len(lineWords) > 1): lineWords[1] = lineWords[1].split('\n')[0] if (lineWords[0] == 'splitArg1'): YoutubeSplitArg1 = lineWords[1] elif(lineWords[0] == 'splitArg2'): YoutubeSplitArg2 = lineWords[1] elif(lineWords[0] == 'urlFormatString'): YoutubeURLFormatString = lineWords[1] else: lineWords[0] = lineWords[0].split('\n')[0] if(len(lineWords[0]) == 11): YoutubeVidURLs.append(lineWords[0]) f.close() if(YoutubeSplitArg1 == ''): raise Exception("Setup Error: YoutubeSplitArg1 not found") if(YoutubeSplitArg2 == ''): raise Exception("Setup Error: YoutubeSplitArg2 not found") if(YoutubeURLFormatString == ''): raise Exception("Setup Error: YoutubeURLFormatString not found") if(YoutubeVidURLs == []): raise Exception("Setup Error: No YoutubeVidURLs defined") YoutubeSplitArgs = [ YoutubeSplitArg1, YoutubeSplitArg2 ] Youtube = source.Source( YoutubeURLFormatString, YoutubeSplitArgs, YoutubeVidURLs) sourceObjects = [ Youtube ] def getSourceObject(): index = random.randint( 0, len(sourceObjects) - 1 ) return sourceObjects[index] def getMaxTweets(): return MAX_TWEETS def getMaxMins(): return MAX_MINS def getMaxMinsBetweenTweets(): return MAX_MINS_BETWEEN_TWEETS def getMaxTriesPerPage(): return MAX_TRIES_PER_PAGE
gpl-2.0
John-Hart/autorest
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/ParameterFlattening/setup.py
28
1109
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- # coding: utf-8 from setuptools import setup, find_packages NAME = "autorestparameterflattening" VERSION = "1.0.0" # To install the library, run the following # # python setup.py install # # prerequisite: setuptools # http://pypi.python.org/pypi/setuptools REQUIRES = ["msrest>=0.2.0"] setup( name=NAME, version=VERSION, description="AutoRestParameterFlattening", author_email="", url="", keywords=["Swagger", "AutoRestParameterFlattening"], install_requires=REQUIRES, packages=find_packages(), include_package_data=True, long_description="""\ Resource Flattening for AutoRest """ )
mit
btabibian/scikit-learn
examples/feature_selection/plot_f_test_vs_mi.py
82
1671
""" =========================================== Comparison of F-test and mutual information =========================================== This example illustrates the differences between univariate F-test statistics and mutual information. We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the target depends on them as follows: y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third features is completely irrelevant. The code below plots the dependency of y against individual x_i and normalized values of univariate F-tests statistics and mutual information. As F-test captures only linear dependency, it rates x_1 as the most discriminative feature. On the other hand, mutual information can capture any kind of dependency between variables and it rates x_2 as the most discriminative feature, which probably agrees better with our intuitive perception for this example. Both methods correctly marks x_3 as irrelevant. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.feature_selection import f_regression, mutual_info_regression np.random.seed(0) X = np.random.rand(1000, 3) y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000) f_test, _ = f_regression(X, y) f_test /= np.max(f_test) mi = mutual_info_regression(X, y) mi /= np.max(mi) plt.figure(figsize=(15, 5)) for i in range(3): plt.subplot(1, 3, i + 1) plt.scatter(X[:, i], y, edgecolor='black', s=20) plt.xlabel("$x_{}$".format(i + 1), fontsize=14) if i == 0: plt.ylabel("$y$", fontsize=14) plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]), fontsize=16) plt.show()
bsd-3-clause
newemailjdm/scipy
scipy/io/arff/arffread.py
25
21335
#! /usr/bin/env python # Last Change: Mon Aug 20 08:00 PM 2007 J from __future__ import division, print_function, absolute_import import re import itertools import datetime from functools import partial import numpy as np from scipy._lib.six import next """A module to read arff files.""" __all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError'] # An Arff file is basically two parts: # - header # - data # # A header has each of its components starting by @META where META is one of # the keyword (attribute of relation, for now). # TODO: # - both integer and reals are treated as numeric -> the integer info is lost ! # - Replace ValueError by ParseError or something # We know can handle the following: # - numeric and nominal attributes # - missing values for numeric attributes r_meta = re.compile('^\s*@') # Match a comment r_comment = re.compile(r'^%') # Match an empty line r_empty = re.compile(r'^\s+$') # Match a header line, that is a line which starts by @ + a word r_headerline = re.compile(r'^@\S*') r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]') r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)') r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)') # To get attributes name enclosed with '' r_comattrval = re.compile(r"'(..+)'\s+(..+$)") # To get attributes name enclosed with '', possibly spread across multilines r_mcomattrval = re.compile(r"'([..\n]+)'\s+(..+$)") # To get normal attributes r_wcomattrval = re.compile(r"(\S+)\s+(..+$)") #------------------------- # Module defined exception #------------------------- class ArffError(IOError): pass class ParseArffError(ArffError): pass #------------------ # Various utilities #------------------ # An attribute is defined as @attribute name value def parse_type(attrtype): """Given an arff attribute value (meta data), returns its type. 
Expect the value to be a name.""" uattribute = attrtype.lower().strip() if uattribute[0] == '{': return 'nominal' elif uattribute[:len('real')] == 'real': return 'numeric' elif uattribute[:len('integer')] == 'integer': return 'numeric' elif uattribute[:len('numeric')] == 'numeric': return 'numeric' elif uattribute[:len('string')] == 'string': return 'string' elif uattribute[:len('relational')] == 'relational': return 'relational' elif uattribute[:len('date')] == 'date': return 'date' else: raise ParseArffError("unknown attribute %s" % uattribute) def get_nominal(attribute): """If attribute is nominal, returns a list of the values""" return attribute.split(',') def read_data_list(ofile): """Read each line of the iterable and put it in a list.""" data = [next(ofile)] if data[0].strip()[0] == '{': raise ValueError("This looks like a sparse ARFF: not supported yet") data.extend([i for i in ofile]) return data def get_ndata(ofile): """Read the whole file to get number of data attributes.""" data = [next(ofile)] loc = 1 if data[0].strip()[0] == '{': raise ValueError("This looks like a sparse ARFF: not supported yet") for i in ofile: loc += 1 return loc def maxnomlen(atrv): """Given a string containing a nominal type definition, returns the string len of the biggest component. A nominal type is defined as seomthing framed between brace ({}). Parameters ---------- atrv : str Nominal type definition Returns ------- slen : int length of longest component Examples -------- maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of ratata, the longest nominal value). >>> maxnomlen("{floup, bouga, fl, ratata}") 6 """ nomtp = get_nom_val(atrv) return max(len(i) for i in nomtp) def get_nom_val(atrv): """Given a string containing a nominal type, returns a tuple of the possible values. A nominal type is defined as something framed between braces ({}). 
Parameters ---------- atrv : str Nominal type definition Returns ------- poss_vals : tuple possible values Examples -------- >>> get_nom_val("{floup, bouga, fl, ratata}") ('floup', 'bouga', 'fl', 'ratata') """ r_nominal = re.compile('{(.+)}') m = r_nominal.match(atrv) if m: return tuple(i.strip() for i in m.group(1).split(',')) else: raise ValueError("This does not look like a nominal string") def get_date_format(atrv): r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$") m = r_date.match(atrv) if m: pattern = m.group(1).strip() # convert time pattern from Java's SimpleDateFormat to C's format datetime_unit = None if "yyyy" in pattern: pattern = pattern.replace("yyyy", "%Y") datetime_unit = "Y" elif "yy": pattern = pattern.replace("yy", "%y") datetime_unit = "Y" if "MM" in pattern: pattern = pattern.replace("MM", "%m") datetime_unit = "M" if "dd" in pattern: pattern = pattern.replace("dd", "%d") datetime_unit = "D" if "HH" in pattern: pattern = pattern.replace("HH", "%H") datetime_unit = "h" if "mm" in pattern: pattern = pattern.replace("mm", "%M") datetime_unit = "m" if "ss" in pattern: pattern = pattern.replace("ss", "%S") datetime_unit = "s" if "z" in pattern or "Z" in pattern: raise ValueError("Date type attributes with time zone not supported, yet") if datetime_unit is None: raise ValueError("Invalid or unsupported date format") return pattern, datetime_unit else: raise ValueError("Invalid or no date format") def go_data(ofile): """Skip header. the first next() call of the returned iterator will be the @data line""" return itertools.dropwhile(lambda x: not r_datameta.match(x), ofile) #---------------- # Parsing header #---------------- def tokenize_attribute(iterable, attribute): """Parse a raw string in header (eg starts by @attribute). Given a raw string attribute, try to get the name and type of the attribute. 
Constraints: * The first line must start with @attribute (case insensitive, and space like characters before @attribute are allowed) * Works also if the attribute is spread on multilines. * Works if empty lines or comments are in between Parameters ---------- attribute : str the attribute string. Returns ------- name : str name of the attribute value : str value of the attribute next : str next line to be parsed Examples -------- If attribute is a string defined in python as r"floupi real", will return floupi as name, and real as value. >>> iterable = iter([0] * 10) # dummy iterator >>> tokenize_attribute(iterable, r"@attribute floupi real") ('floupi', 'real', 0) If attribute is r"'floupi 2' real", will return 'floupi 2' as name, and real as value. >>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real ") ('floupi 2', 'real', 0) """ sattr = attribute.strip() mattr = r_attribute.match(sattr) if mattr: # atrv is everything after @attribute atrv = mattr.group(1) if r_comattrval.match(atrv): name, type = tokenize_single_comma(atrv) next_item = next(iterable) elif r_wcomattrval.match(atrv): name, type = tokenize_single_wcomma(atrv) next_item = next(iterable) else: # Not sure we should support this, as it does not seem supported by # weka. raise ValueError("multi line not supported yet") #name, type, next_item = tokenize_multilines(iterable, atrv) else: raise ValueError("First line unparsable: %s" % sattr) if type == 'relational': raise ValueError("relational attributes not supported yet") return name, type, next_item def tokenize_single_comma(val): # XXX we match twice the same string (here and at the caller level). It is # stupid, but it is easier for now... 
m = r_comattrval.match(val) if m: try: name = m.group(1).strip() type = m.group(2).strip() except IndexError: raise ValueError("Error while tokenizing attribute") else: raise ValueError("Error while tokenizing single %s" % val) return name, type def tokenize_single_wcomma(val): # XXX we match twice the same string (here and at the caller level). It is # stupid, but it is easier for now... m = r_wcomattrval.match(val) if m: try: name = m.group(1).strip() type = m.group(2).strip() except IndexError: raise ValueError("Error while tokenizing attribute") else: raise ValueError("Error while tokenizing single %s" % val) return name, type def read_header(ofile): """Read the header of the iterable ofile.""" i = next(ofile) # Pass first comments while r_comment.match(i): i = next(ofile) # Header is everything up to DATA attribute ? relation = None attributes = [] while not r_datameta.match(i): m = r_headerline.match(i) if m: isattr = r_attribute.match(i) if isattr: name, type, i = tokenize_attribute(ofile, i) attributes.append((name, type)) else: isrel = r_relation.match(i) if isrel: relation = isrel.group(1) else: raise ValueError("Error parsing line %s" % i) i = next(ofile) else: i = next(ofile) return relation, attributes #-------------------- # Parsing actual data #-------------------- def safe_float(x): """given a string x, convert it to a float. If the stripped string is a ?, return a Nan (missing value). Parameters ---------- x : str string to convert Returns ------- f : float where float can be nan Examples -------- >>> safe_float('1') 1.0 >>> safe_float('1\\n') 1.0 >>> safe_float('?\\n') nan """ if '?' 
in x: return np.nan else: return float(x) def safe_nominal(value, pvalue): svalue = value.strip() if svalue in pvalue: return svalue elif svalue == '?': return svalue else: raise ValueError("%s value not in %s" % (str(svalue), str(pvalue))) def safe_date(value, date_format, datetime_unit): date_str = value.strip().strip("'").strip('"') if date_str == '?': return np.datetime64('NaT', datetime_unit) else: dt = datetime.datetime.strptime(date_str, date_format) return np.datetime64(dt).astype("datetime64[%s]" % datetime_unit) def get_delim(line): """Given a string representing a line of data, check whether the delimiter is ',' or space. Parameters ---------- line : str line of data Returns ------- delim : {',', ' '} Examples -------- >>> get_delim(',') ',' >>> get_delim(' ') ' ' >>> get_delim(', ') ',' >>> get_delim('x') Traceback (most recent call last): ... ValueError: delimiter not understood: x """ if ',' in line: return ',' if ' ' in line: return ' ' raise ValueError("delimiter not understood: " + line) class MetaData(object): """Small container to keep useful informations on a ARFF dataset. Knows about attributes names and types. Examples -------- :: data, meta = loadarff('iris.arff') # This will print the attributes names of the iris.arff dataset for i in meta: print i # This works too meta.names() # Getting attribute type types = meta.types() Notes ----- Also maintains the list of attributes in order, i.e. doing for i in meta, where meta is an instance of MetaData, will return the different attribute names in the order they were defined. 
""" def __init__(self, rel, attr): self.name = rel # We need the dictionary to be ordered # XXX: may be better to implement an ordered dictionary self._attributes = {} self._attrnames = [] for name, value in attr: tp = parse_type(value) self._attrnames.append(name) if tp == 'nominal': self._attributes[name] = (tp, get_nom_val(value)) elif tp == 'date': self._attributes[name] = (tp, get_date_format(value)[0]) else: self._attributes[name] = (tp, None) def __repr__(self): msg = "" msg += "Dataset: %s\n" % self.name for i in self._attrnames: msg += "\t%s's type is %s" % (i, self._attributes[i][0]) if self._attributes[i][1]: msg += ", range is %s" % str(self._attributes[i][1]) msg += '\n' return msg def __iter__(self): return iter(self._attrnames) def __getitem__(self, key): return self._attributes[key] def names(self): """Return the list of attribute names.""" return self._attrnames def types(self): """Return the list of attribute types.""" attr_types = [self._attributes[name][0] for name in self._attrnames] return attr_types def loadarff(f): """ Read an arff file. The data is returned as a record array, which can be accessed much like a dictionary of numpy arrays. For example, if one of the attributes is called 'pressure', then its first 10 data points can be accessed from the ``data`` record array like so: ``data['pressure'][0:10]`` Parameters ---------- f : file-like or str File-like object to read from, or filename to open. Returns ------- data : record array The data of the arff file, accessible by attribute names. meta : `MetaData` Contains information about the arff file such as name and type of attributes, the relation (name of the dataset), etc... Raises ------ ParseArffError This is raised if the given file is not ARFF-formatted. NotImplementedError The ARFF file has an attribute which is not supported yet. Notes ----- This function should be able to read most arff files. 
Not implemented functionality include: * date type attributes * string type attributes It can read files with numeric and nominal attributes. It cannot read files with sparse data ({} in the file). However, this function can read files with missing data (? in the file), representing the data points as NaNs. Examples -------- >>> from scipy.io import arff >>> from cStringIO import StringIO >>> content = \"\"\" ... @relation foo ... @attribute width numeric ... @attribute height numeric ... @attribute color {red,green,blue,yellow,black} ... @data ... 5.0,3.25,blue ... 4.5,3.75,green ... 3.0,4.00,red ... \"\"\" >>> f = StringIO(content) >>> data, meta = arff.loadarff(f) >>> data array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')], dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')]) >>> meta Dataset: foo \twidth's type is numeric \theight's type is numeric \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black') """ if hasattr(f, 'read'): ofile = f else: ofile = open(f, 'rt') try: return _loadarff(ofile) finally: if ofile is not f: # only close what we opened ofile.close() def _loadarff(ofile): # Parse the header file try: rel, attr = read_header(ofile) except ValueError as e: msg = "Error while parsing header, error was: " + str(e) raise ParseArffError(msg) # Check whether we have a string attribute (not supported yet) hasstr = False for name, value in attr: type = parse_type(value) if type == 'string': hasstr = True meta = MetaData(rel, attr) # XXX The following code is not great # Build the type descriptor descr and the list of convertors to convert # each attribute to the suitable type (which should match the one in # descr). # This can be used once we want to support integer as integer values and # not as numeric anymore (using masked arrays ?). 
acls2dtype = {'real': float, 'integer': float, 'numeric': float} acls2conv = {'real': safe_float, 'integer': safe_float, 'numeric': safe_float} descr = [] convertors = [] if not hasstr: for name, value in attr: type = parse_type(value) if type == 'date': date_format, datetime_unit = get_date_format(value) descr.append((name, "datetime64[%s]" % datetime_unit)) convertors.append(partial(safe_date, date_format=date_format, datetime_unit=datetime_unit)) elif type == 'nominal': n = maxnomlen(value) descr.append((name, 'S%d' % n)) pvalue = get_nom_val(value) convertors.append(partial(safe_nominal, pvalue=pvalue)) else: descr.append((name, acls2dtype[type])) convertors.append(safe_float) #dc.append(acls2conv[type]) #sdescr.append((name, acls2sdtype[type])) else: # How to support string efficiently ? Ideally, we should know the max # size of the string before allocating the numpy array. raise NotImplementedError("String attributes not supported yet, sorry") ni = len(convertors) # Get the delimiter from the first line of data: def next_data_line(row_iter): """Assumes we are already in the data part (eg after @data).""" raw = next(row_iter) while r_empty.match(raw) or r_comment.match(raw): raw = next(row_iter) return raw try: try: dtline = next_data_line(ofile) delim = get_delim(dtline) except ValueError as e: raise ParseArffError("Error while parsing delimiter: " + str(e)) finally: ofile.seek(0, 0) ofile = go_data(ofile) # skip the @data line next(ofile) def generator(row_iter, delim=','): # TODO: this is where we are spending times (~80%). I think things # could be made more efficiently: # - We could for example "compile" the function, because some values # do not change here. # - The function to convert a line to dtyped values could also be # generated on the fly from a string and be executed instead of # looping. 
# - The regex are overkill: for comments, checking that a line starts # by % should be enough and faster, and for empty lines, same thing # --> this does not seem to change anything. # We do not abstract skipping comments and empty lines for performances # reason. raw = next(row_iter) while r_empty.match(raw) or r_comment.match(raw): raw = next(row_iter) # 'compiling' the range since it does not change # Note, I have already tried zipping the converters and # row elements and got slightly worse performance. elems = list(range(ni)) row = raw.split(delim) yield tuple([convertors[i](row[i]) for i in elems]) for raw in row_iter: while r_comment.match(raw) or r_empty.match(raw): raw = next(row_iter) row = raw.split(delim) yield tuple([convertors[i](row[i]) for i in elems]) a = generator(ofile, delim=delim) # No error should happen here: it is a bug otherwise data = np.fromiter(a, descr) return data, meta #----- # Misc #----- def basic_stats(data): nbfac = data.size * 1. / (data.size - 1) return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac def print_attribute(name, tp, data): type = tp[0] if type == 'numeric' or type == 'real' or type == 'integer': min, max, mean, std = basic_stats(data) print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std)) else: msg = name + ",{" for i in range(len(tp[1])-1): msg += tp[1][i] + "," msg += tp[1][-1] msg += "}" print(msg) def test_weka(filename): data, meta = loadarff(filename) print(len(data.dtype)) print(data.size) for i in meta: print_attribute(i,meta[i],data[i]) # make sure nose does not find this as a test test_weka.__test__ = False if __name__ == '__main__': import sys filename = sys.argv[1] test_weka(filename)
bsd-3-clause
onitake/ansible
lib/ansible/modules/cloud/vmware/vmware_guest_snapshot_facts.py
39
4970
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: vmware_guest_snapshot_facts short_description: Gather facts about virtual machine's snapshots in vCenter description: - This module can be used to gather facts about virtual machine's snapshots. version_added: 2.6 author: - Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com> notes: - Tested on vSphere 6.0 and 6.5 requirements: - "python >= 2.6" - PyVmomi options: name: description: - Name of the VM to work with. - This is required if C(uuid) is not supplied. uuid: description: - UUID of the instance to manage if known, this value is VMware's unique identifier. - This is required if C(name) is not supplied. - The C(folder) is ignored, if C(uuid) is provided. folder: description: - Destination folder, absolute or relative path to find an existing guest. - This is required only, if multiple virtual machines with same name are found on given vCenter. - The folder should include the datacenter. ESX's datacenter is ha-datacenter - 'Examples:' - ' folder: /ha-datacenter/vm' - ' folder: ha-datacenter/vm' - ' folder: /datacenter1/vm' - ' folder: datacenter1/vm' - ' folder: /datacenter1/vm/folder1' - ' folder: datacenter1/vm/folder1' - ' folder: /folder1/datacenter1/vm' - ' folder: folder1/datacenter1/vm' - ' folder: /folder1/datacenter1/vm/folder2' datacenter: description: - Name of the datacenter. 
required: True extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Gather snapshot facts about the virtual machine in the given vCenter vmware_guest_snapshot_facts: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter_name }}" name: "{{ guest_name }}" delegate_to: localhost register: snapshot_facts ''' RETURN = """ guest_snapshots: description: metadata about the snapshot facts returned: always type: dict sample: { "current_snapshot": { "creation_time": "2018-02-10T14:48:31.999459+00:00", "description": "", "id": 28, "name": "snap_0003", "state": "poweredOff" }, "snapshots": [ { "creation_time": "2018-02-10T14:48:31.999459+00:00", "description": "", "id": 28, "name": "snap_0003", "state": "poweredOff" } ] } """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import PyVmomi, list_snapshots, vmware_argument_spec class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) @staticmethod def gather_guest_snapshot_facts(vm_obj=None): """ Function to return snpashot related facts about given virtual machine Args: vm_obj: Virtual Machine Managed object Returns: Dictionary containing snapshot facts """ if vm_obj is None: return {} return list_snapshots(vm=vm_obj) def main(): argument_spec = vmware_argument_spec() argument_spec.update( name=dict(type='str'), uuid=dict(type='str'), folder=dict(type='str'), datacenter=dict(required=True, type='str'), ) module = AnsibleModule(argument_spec=argument_spec, required_together=[['name', 'folder']], required_one_of=[['name', 'uuid']], ) if module.params['folder']: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified module.params['folder'] = module.params['folder'].rstrip('/') pyv = PyVmomiHelper(module) # Check if the VM exists before continuing vm = pyv.get_vm() if not vm: # If UUID is set, 
getvm select UUID, show error message accordingly. module.fail_json(msg="Unable to gather facts about snapshots for" " non-existing VM ['%s']" % (module.params.get('uuid') or module.params.get('name'))) results = dict(changed=False, guest_snapshots=pyv.gather_guest_snapshot_facts(vm_obj=vm)) module.exit_json(**results) if __name__ == '__main__': main()
gpl-3.0
quinot/ansible
lib/ansible/modules/system/iptables.py
18
22594
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se> # Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: iptables short_description: Modify the systems iptables version_added: "2.0" author: - Linus Unnebäck (@LinusU) <linus@folkdatorn.se> - Sébastien DA ROCHA (@sebastiendarocha) description: - Iptables is used to set up, maintain, and inspect the tables of IP packet filter rules in the Linux kernel. - This module does not handle the saving and/or loading of rules, but rather only manipulates the current rules that are present in memory. This is the same as the behaviour of the C(iptables) and C(ip6tables) command which this module uses internally. notes: - This module just deals with individual rules. If you need advanced chaining of rules the recommended way is to template the iptables restore file. options: table: description: - This option specifies the packet matching table which the command should operate on. If the kernel is configured with automatic module loading, an attempt will be made to load the appropriate module for that table if it is not already there. choices: [ filter, nat, mangle, raw, security ] default: filter state: description: - Whether the rule should be absent or present. choices: [ absent, present ] default: present action: description: - Whether the rule should be appended at the bottom or inserted at the top. - If the rule already exists the chain won't be modified. choices: [ append, insert ] default: append version_added: "2.2" rule_num: description: - Insert the rule as the given rule number. This works only with action = 'insert'. 
version_added: "2.5" ip_version: description: - Which version of the IP protocol this rule should apply to. choices: [ ipv4, ipv6 ] default: ipv4 chain: description: - Chain to operate on. - "This option can either be the name of a user defined chain or any of the builtin chains: 'INPUT', 'FORWARD', 'OUTPUT', 'PREROUTING', 'POSTROUTING', 'SECMARK', 'CONNSECMARK'." protocol: description: - The protocol of the rule or of the packet to check. - The specified protocol can be one of tcp, udp, udplite, icmp, esp, ah, sctp or the special keyword "all", or it can be a numeric value, representing one of these protocols or a different one. A protocol name from /etc/protocols is also allowed. A "!" argument before the protocol inverts the test. The number zero is equivalent to all. "all" will match with all protocols and is taken as default when this option is omitted. source: description: - Source specification. - Address can be either a network name, a hostname, a network IP address (with /mask), or a plain IP address. - Hostnames will be resolved once only, before the rule is submitted to the kernel. Please note that specifying any name to be resolved with a remote query such as DNS is a really bad idea. - The mask can be either a network mask or a plain number, specifying the number of 1's at the left side of the network mask. Thus, a mask of 24 is equivalent to 255.255.255.0. A "!" argument before the address specification inverts the sense of the address. destination: description: - Destination specification. - Address can be either a network name, a hostname, a network IP address (with /mask), or a plain IP address. - Hostnames will be resolved once only, before the rule is submitted to the kernel. Please note that specifying any name to be resolved with a remote query such as DNS is a really bad idea. - The mask can be either a network mask or a plain number, specifying the number of 1's at the left side of the network mask. 
Thus, a mask of 24 is equivalent to 255.255.255.0. A "!" argument before the address specification inverts the sense of the address. tcp_flags: description: - TCP flags specification. - C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set). - The C(flags) list is the mask, a list of flags you want to examine. - The C(flags_set) list tells which one(s) should be set. If one of the two values is missing, the --tcp-flags option will be ignored. default: {} version_added: "2.4" match: description: - Specifies a match to use, that is, an extension module that tests for a specific property. The set of matches make up the condition under which a target is invoked. Matches are evaluated first to last if specified as an array and work in short-circuit fashion, i.e. if one extension yields false, evaluation will stop. default: [] jump: description: - This specifies the target of the rule; i.e., what to do if the packet matches it. The target can be a user-defined chain (other than the one this rule is in), one of the special builtin targets which decide the fate of the packet immediately, or an extension (see EXTENSIONS below). If this option is omitted in a rule (and the goto parameter is not used), then matching the rule will have no effect on the packet's fate, but the counters on the rule will be incremented. log_prefix: description: - Specifies a log text for the rule. Only make sense with a LOG jump. version_added: "2.5" goto: description: - This specifies that the processing should continue in a user specified chain. Unlike the jump argument return will not continue processing in this chain but instead in the chain that called us via jump. in_interface: description: - Name of an interface via which a packet was received (only for packets entering the INPUT, FORWARD and PREROUTING chains). When the "!" argument is used before the interface name, the sense is inverted. 
If the interface name ends in a "+", then any interface which begins with this name will match. If this option is omitted, any interface name will match. out_interface: description: - Name of an interface via which a packet is going to be sent (for packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the "!" argument is used before the interface name, the sense is inverted. If the interface name ends in a "+", then any interface which begins with this name will match. If this option is omitted, any interface name will match. fragment: description: - This means that the rule only refers to second and further fragments of fragmented packets. Since there is no way to tell the source or destination ports of such a packet (or ICMP type), such a packet will not match any rules which specify them. When the "!" argument precedes fragment argument, the rule will only match head fragments, or unfragmented packets. set_counters: description: - This enables the administrator to initialize the packet and byte counters of a rule (during INSERT, APPEND, REPLACE operations). source_port: description: - Source port or port range specification. This can either be a service name or a port number. An inclusive range can also be specified, using the format first:last. If the first port is omitted, '0' is assumed; if the last is omitted, '65535' is assumed. If the first port is greater than the second one they will be swapped. destination_port: description: - Destination port or port range specification. This can either be a service name or a port number. An inclusive range can also be specified, using the format first:last. If the first port is omitted, '0' is assumed; if the last is omitted, '65535' is assumed. If the first port is greater than the second one they will be swapped. to_ports: description: - "This specifies a destination port or range of ports to use: without this, the destination port is never altered. 
This is only valid if the rule also specifies one of the following protocols: tcp, udp, dccp or sctp." to_destination: description: - This specifies a destination address to use with DNAT. - Without this, the destination address is never altered. version_added: "2.1" to_source: description: - This specifies a source address to use with SNAT. - Without this, the source address is never altered. version_added: "2.2" syn: description: - This allows matching packets that have the SYN bit set and the ACK and RST bits unset. - When negated, this matches all packets with the RST or the ACK bits set. choices: [ ignore, match, negate ] default: ignore version_added: "2.5" set_dscp_mark: description: - This allows specifying a DSCP mark to be added to packets. It takes either an integer or hex value. - Mutually exclusive with C(set_dscp_mark_class). version_added: "2.1" set_dscp_mark_class: description: - This allows specifying a predefined DiffServ class which will be translated to the corresponding DSCP mark. - Mutually exclusive with C(set_dscp_mark). version_added: "2.1" comment: description: - This specifies a comment that will be added to the rule. ctstate: description: - "C(ctstate) is a list of the connection states to match in the conntrack module. Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED', 'UNTRACKED', 'SNAT', 'DNAT'" choices: [ DNAT, ESTABLISHED, INVALID, NEW, RELATED, SNAT, UNTRACKED ] default: [] limit: description: - Specifies the maximum average number of matches to allow per second. - The number can specify units explicitly, using `/second', `/minute', `/hour' or `/day', or parts of them (so `5/second' is the same as `5/s'). limit_burst: description: - Specifies the maximum burst before the above limit kicks in. version_added: "2.1" uid_owner: description: - Specifies the UID or username to use in match by owner rule. version_added: "2.1" reject_with: description: - 'Specifies the error packet type to return while rejecting. 
It implies "jump: REJECT"' version_added: "2.1" icmp_type: description: - This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command 'iptables -p icmp -h' version_added: "2.2" flush: description: - Flushes the specified table and chain of all rules. - If no chain is specified then the entire table is purged. - Ignores all other parameters. version_added: "2.2" policy: description: - Set the policy for the chain to the given target. - Only built-in chains can have policies. - This parameter requires the C(chain) parameter. - Ignores all other parameters. choices: [ ACCEPT, DROP, QUEUE, RETURN ] version_added: "2.2" ''' EXAMPLES = ''' # Block specific IP - iptables: chain: INPUT source: 8.8.8.8 jump: DROP become: yes # Forward port 80 to 8600 - iptables: table: nat chain: PREROUTING in_interface: eth0 protocol: tcp match: tcp destination_port: 80 jump: REDIRECT to_ports: 8600 comment: Redirect web traffic to port 8600 become: yes # Allow related and established connections - iptables: chain: INPUT ctstate: ESTABLISHED,RELATED jump: ACCEPT become: yes # Allow new incoming SYN packets on TCP port 22 (SSH). - iptables: chain: INPUT protocol: tcp destination_port: 22 ctstate: NEW syn: match jump: ACCEPT comment: Accept new SSH connections. 
# Tag all outbound tcp packets with DSCP mark 8 - iptables: chain: OUTPUT jump: DSCP table: mangle set_dscp_mark: 8 protocol: tcp # Tag all outbound tcp packets with DSCP DiffServ class CS1 - iptables: chain: OUTPUT jump: DSCP table: mangle set_dscp_mark_class: CS1 protocol: tcp # Insert a rule on line 5 - iptables: chain: INPUT protocol: tcp destination_port: 8080 jump: ACCEPT rule_num: 5 # Set the policy for the INPUT chain to DROP - iptables: chain: INPUT policy: DROP # Reject tcp with tcp-reset - iptables: chain: INPUT protocol: tcp reject_with: tcp-reset ip_version: ipv4 ''' import re from ansible.module_utils.basic import AnsibleModule BINS = dict( ipv4='iptables', ipv6='ip6tables', ) ICMP_TYPE_OPTIONS = dict( ipv4='--icmp-type', ipv6='--icmpv6-type', ) def append_param(rule, param, flag, is_list): if is_list: for item in param: append_param(rule, item, flag, False) else: if param is not None: if param[0] == '!': rule.extend(['!', flag, param[1:]]) else: rule.extend([flag, param]) def append_tcp_flags(rule, param, flag): if param: if 'flags' in param and 'flags_set' in param: rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])]) def append_match_flag(rule, param, flag, negatable): if param == 'match': rule.extend([flag]) elif negatable and param == 'negate': rule.extend(['!', flag]) def append_csv(rule, param, flag): if param: rule.extend([flag, ','.join(param)]) def append_match(rule, param, match): if param: rule.extend(['-m', match]) def append_jump(rule, param, jump): if param: rule.extend(['-j', jump]) def construct_rule(params): rule = [] append_param(rule, params['protocol'], '-p', False) append_param(rule, params['source'], '-s', False) append_param(rule, params['destination'], '-d', False) append_param(rule, params['match'], '-m', True) append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags') append_param(rule, params['jump'], '-j', False) append_param(rule, params['log_prefix'], '--log-prefix', False) append_param(rule, 
params['to_destination'], '--to-destination', False) append_param(rule, params['to_source'], '--to-source', False) append_param(rule, params['goto'], '-g', False) append_param(rule, params['in_interface'], '-i', False) append_param(rule, params['out_interface'], '-o', False) append_param(rule, params['fragment'], '-f', False) append_param(rule, params['set_counters'], '-c', False) append_param(rule, params['source_port'], '--source-port', False) append_param(rule, params['destination_port'], '--destination-port', False) append_param(rule, params['to_ports'], '--to-ports', False) append_param(rule, params['set_dscp_mark'], '--set-dscp', False) append_param( rule, params['set_dscp_mark_class'], '--set-dscp-class', False) append_match_flag(rule, params['syn'], '--syn', True) append_match(rule, params['comment'], 'comment') append_param(rule, params['comment'], '--comment', False) if 'conntrack' in params['match']: append_csv(rule, params['ctstate'], '--ctstate') elif 'state' in params['match']: append_csv(rule, params['ctstate'], '--state') elif params['ctstate']: append_match(rule, params['ctstate'], 'conntrack') append_csv(rule, params['ctstate'], '--ctstate') append_match(rule, params['limit'] or params['limit_burst'], 'limit') append_param(rule, params['limit'], '--limit', False) append_param(rule, params['limit_burst'], '--limit-burst', False) append_match(rule, params['uid_owner'], 'owner') append_param(rule, params['uid_owner'], '--uid-owner', False) if params['jump'] is None: append_jump(rule, params['reject_with'], 'REJECT') append_param(rule, params['reject_with'], '--reject-with', False) append_param( rule, params['icmp_type'], ICMP_TYPE_OPTIONS[params['ip_version']], False) return rule def push_arguments(iptables_path, action, params, make_rule=True): cmd = [iptables_path] cmd.extend(['-t', params['table']]) cmd.extend([action, params['chain']]) if action == '-I' and params['rule_num']: cmd.extend([params['rule_num']]) if make_rule: 
cmd.extend(construct_rule(params)) return cmd def check_present(iptables_path, module, params): cmd = push_arguments(iptables_path, '-C', params) rc, _, __ = module.run_command(cmd, check_rc=False) return (rc == 0) def append_rule(iptables_path, module, params): cmd = push_arguments(iptables_path, '-A', params) module.run_command(cmd, check_rc=True) def insert_rule(iptables_path, module, params): cmd = push_arguments(iptables_path, '-I', params) module.run_command(cmd, check_rc=True) def remove_rule(iptables_path, module, params): cmd = push_arguments(iptables_path, '-D', params) module.run_command(cmd, check_rc=True) def flush_table(iptables_path, module, params): cmd = push_arguments(iptables_path, '-F', params, make_rule=False) module.run_command(cmd, check_rc=True) def set_chain_policy(iptables_path, module, params): cmd = push_arguments(iptables_path, '-P', params, make_rule=False) cmd.append(params['policy']) module.run_command(cmd, check_rc=True) def get_chain_policy(iptables_path, module, params): cmd = push_arguments(iptables_path, '-L', params) rc, out, _ = module.run_command(cmd, check_rc=True) chain_header = out.split("\n")[0] result = re.search(r'\(policy ([A-Z]+)\)', chain_header) if result: return result.group(1) return None def main(): module = AnsibleModule( supports_check_mode=True, argument_spec=dict( table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']), state=dict(type='str', default='present', choices=['absent', 'present']), action=dict(type='str', default='append', choices=['append', 'insert']), ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']), chain=dict(type='str'), rule_num=dict(type='str'), protocol=dict(type='str'), source=dict(type='str'), to_source=dict(type='str'), destination=dict(type='str'), to_destination=dict(type='str'), match=dict(type='list', default=[]), tcp_flags=dict(type='dict', default={}), jump=dict(type='str'), log_prefix=dict(type='str'), 
goto=dict(type='str'), in_interface=dict(type='str'), out_interface=dict(type='str'), fragment=dict(type='str'), set_counters=dict(type='str'), source_port=dict(type='str'), destination_port=dict(type='str'), to_ports=dict(type='str'), set_dscp_mark=dict(type='str'), set_dscp_mark_class=dict(type='str'), comment=dict(type='str'), ctstate=dict(type='list', default=[]), limit=dict(type='str'), limit_burst=dict(type='str'), uid_owner=dict(type='str'), reject_with=dict(type='str'), icmp_type=dict(type='str'), syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']), flush=dict(type='bool', default=False), policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']), ), mutually_exclusive=( ['set_dscp_mark', 'set_dscp_mark_class'], ['flush', 'policy'], ), ) args = dict( changed=False, failed=False, ip_version=module.params['ip_version'], table=module.params['table'], chain=module.params['chain'], flush=module.params['flush'], rule=' '.join(construct_rule(module.params)), state=module.params['state'], ) ip_version = module.params['ip_version'] iptables_path = module.get_bin_path(BINS[ip_version], True) # Check if chain option is required if args['flush'] is False and args['chain'] is None: module.fail_json(msg="Either chain or flush parameter must be specified.") # Flush the table if args['flush'] is True: args['changed'] = True if not module.check_mode: flush_table(iptables_path, module, module.params) # Set the policy elif module.params['policy']: current_policy = get_chain_policy(iptables_path, module, module.params) if not current_policy: module.fail_json(msg='Can\'t detect current policy') changed = current_policy != module.params['policy'] args['changed'] = changed if changed and not module.check_mode: set_chain_policy(iptables_path, module, module.params) else: insert = (module.params['action'] == 'insert') rule_is_present = check_present(iptables_path, module, module.params) should_be_present = (args['state'] == 'present') # Check 
if target is up to date args['changed'] = (rule_is_present != should_be_present) if args['changed'] is False: # Target is already up to date module.exit_json(**args) # Check only; don't modify if not module.check_mode: if should_be_present: if insert: insert_rule(iptables_path, module, module.params) else: append_rule(iptables_path, module, module.params) else: remove_rule(iptables_path, module, module.params) module.exit_json(**args) if __name__ == '__main__': main()
gpl-3.0
TVLuke/FressenImPC
python/fressenimpc.py
1
6738
#fressenimpc.py - twitter bot that announces the UKSH (Luebeck) canteen menu,
#which repeats on a fixed 4-week rotation starting 2012-09-28.
#
#Fixed in this revision:
# * tweet(sondermenue()) in the 7:30 branch crashed with a TypeError
#   (missing argument) - now called as sondermenue(d).
# * getTommorowsMeal() never advanced the day for Monday-Thursday, so the
#   17:00 "Morgen ..." tweet repeated today's meal - now returns the next
#   working day's menu.
# * the week number is forced to int so the foodDictionary keys ("11".."05")
#   are built correctly on Python 2 as well (math.floor returns float there).
# * Twitter/credential imports happen lazily inside tweet() and the polling
#   loop is guarded by __main__, so the module can be imported (e.g. for
#   testing) without credentials and without blocking forever.

from datetime import datetime
import math
import time

sondermenu_url = "http://www.uk-sh.de/uksh_media/Speisepl%C3%A4ne/L%C3%BCbeck+_+Sondermen%C3%BC+/Sondermen%C3%BC+KW+"

#Week 1
t_11="1. Schinkennudeln + Tomatensauce; 2. Kartoffelauflauf, Schnittlauchsauce, Broccoli. Suppe: Gemüse"
t_12="1. Seelachsfilet, Dillsauce, Blattspinat, Kartoffeln. 2. Champignons, Spagetti, Käsesahnesauce. Suppe: Kräuter"
t_13="1. Schweinabraten, Rotkohl, Kartoffel; 2. Blumenkohl-Käsebratling. Kartoffeln. Kräutersauce. Suppe: Broccoli"
t_14="1. Rindergeschnetzeltes, Champignons, Spätzle; 2. Fagottini, Gemüsestreifen, Broccoli, Tomatensauce. Suppe: Champ."
t_15="1. Putenbraten, Rustika-Gemüse; 2. Milchreis mit Kirschsauce. Suppe: Kartoffelsuppe."
#Week 2
t_21="1. Tortellini mit Rind und Paprikastreifen; 2. Sellerie-Knusperschnitzel mit Reis & Tomatensauce. Suppe: Möhren."
t_22="1. Hackbraten vom Rind mit Bohnen; 2. Saccotini Rucola mit Blattspinat. Tagessuppe. Blumenkohl"
t_23="1. Hähnchenbrustfiletspieß & Champignonsauce mit Gemüse; 2. Grießbrei mit Fruchtsauce. Tagessuppe: Gemüse"
t_24="1. Sauerbraten, Rosinensauce, Rotkohl, Kartoffelklöße; 2. Buonfatti Mediterran buntes Gemüse, Tomatensauce. Suppe: Spinat"
t_25="1. ged. Fischfilet, Senfsauce, Petersilienkartoffeln; 2. Maultaschen mit Spinat im Gemüsesud. Tagessuppe: Spargel"
#Week 3
t_31="1. Hähnchenbrustfilet, S. Hollandaise, Romanogemüse; 2. Kartoffeltaschen mit Frischkäse, Broccoli. Suppe: Gemüse."
t_32="1. Königsberger Klopse, Kapernsauce, Petersilienkart. 2. Vegetarisches Chili"
t_33="1. Bolognese Gabelspaghetti; 2. Veg. Bratwurst, Erbsen-Maisgemüse in Rahm, Kartoffelpü. Suppe: Broccoli"
t_34="1. Geflügelfrikadelle, Kohlrabi in Rahm, Petersilienkartoffeln. 2. Frühlingsrolle mit Reis. Tagessuppe: Champignon"
t_35="1. Bratwurst, Majoransauce, Erbsen-Karotten-Püree; 2. Spätzle-Gemüsepfanne mit Käsesauce. Suppe: Kartoffel"
#Week 4 (also Week 0)
t_01="1. Hähnchenbrustfilet, Kaisergemüse, Petersilienkart.; 2. Vollkorn-Pilzbratling, Zitronensauc. Gemüsereis. Suppe: Möhren."
t_02="1. Fischfilet, Remouladensauce, Majonäse-Kartoffelsalat; 2. Spagetti Napoli mit Parmesan. Suppe: Blumenkohl."
t_03="1. Paniertes Schweineschnitzel mit Nudeln; 2. Spinat-Omlett mir Zwiebelsauce, Röstkartoffeln. Suppe: Gemüse"
t_04="1. Gulasch vom Schwein, Erbsen, Spirelli; 2. Gnocci mit Tomaten- Paprikasauce. Suppe: Spinat"
t_05="1. Hackbällchen, Pilzrahmsauce, Gemüsereis, Kartoffeln; 2. Champignon-Blätterteig-Tasche. Suppe: Spargel."

#Keys are "<week><weekday>", week in 0..3, weekday in 1..5 (Mon..Fri).
foodDictionary = {"11":t_11, "12":t_12, "13":t_13, "14":t_14, "15":t_15,
                  "21":t_21, "22":t_22, "23":t_23, "24":t_24, "25":t_25,
                  "31":t_31, "32":t_32, "33":t_33, "34":t_34, "35":t_35,
                  "01":t_01, "02":t_02, "03":t_03, "04":t_04, "05":t_05}
number_of_weeks=4

#tweets any text given to it to the preconfigured twitter account
def tweet(text):
    print(text)
    #Imported lazily so the module can be used without twython/credentials;
    #the actual posting below is disabled anyway.
    from twython import Twython
    from twitterdata import (oauth_consumer_key, oauth_consumer_secret,
                             oauth_access_token, oauth_access_token_secret)
    twitter = Twython(oauth_consumer_key, oauth_consumer_secret,
                      oauth_access_token, oauth_access_token_secret)
    #twitter.update_status(status=text)

#gets the number of the week ranging from 0 to numberofweeks-1
def getWeekNumber(d):
    now = datetime(d.tm_year, d.tm_mon, d.tm_mday)
    start = datetime(2012, 9, 28)  #this was the first week with the new food-plan
    #int(): on Python 2 math.floor returns a float, which would corrupt the
    #string keys of foodDictionary ("1.01" instead of "11").
    difdays = int(math.floor((now - start).days / 7.0)) + 1
    return difdays % number_of_weeks

#gets the calendar week, i.e. the ISO week number within the year
def kw(d):
    dt = datetime(d.tm_year, d.tm_mon, d.tm_mday)
    return dt.isocalendar()[1]

def minuteofthehour(d):
    return d.tm_min

def houroftheday(d):
    return d.tm_hour

#Monday=1 .. Sunday=7 (struct_time uses Monday=0)
def dayoftheweek(d):
    return d.tm_wday + 1

def getTodaysMeal(d):
    return foodDictionary[str(getWeekNumber(d)) + str(dayoftheweek(d))]

def getTommorowsMeal(d):
    """Return tomorrow's menu; Sunday rolls over to Monday of the next rotation week."""
    day = dayoftheweek(d)
    week = getWeekNumber(d)
    #if checked on sunday we have to set day to 1 and go to the next week
    if day == 7:
        day = 1
        week = week + 1
        if week == number_of_weeks:
            week = 0
    else:
        #BUGFIX: the original returned the same day, so the evening
        #"Morgen ..." tweet repeated today's meal.
        day = day + 1
    return foodDictionary[str(week) + str(day)]

def isWeekEnd(d):
    if dayoftheweek(d) == 6 or dayoftheweek(d) == 7:
        return 1
    return 0

def tommorowIsWeekEnd(d):
    if dayoftheweek(d) == 5:
        return 1
    return 0

def isHollyday(d):
    """Return 1 for the fixed German public holidays handled by the bot."""
    #german reunification day
    if d.tm_mday == 3 and d.tm_mon == 10:
        return 1
    #christmas
    if d.tm_mday == 24 and d.tm_mon == 12:
        return 1
    #more christmas
    if d.tm_mday == 25 and d.tm_mon == 12:
        return 1
    #still christmas
    if d.tm_mday == 26 and d.tm_mon == 12:
        return 1
    #new years eve
    if d.tm_mday == 31 and d.tm_mon == 12:
        return 1
    #new year
    if d.tm_mday == 1 and d.tm_mon == 1:
        return 1
    #german labour day
    if d.tm_mday == 1 and d.tm_mon == 5:
        return 1
    return 0

#very unelegant...
def daybeforehollyday(d):
    #get the epoch from current date object
    x = time.mktime(d)
    #add 86400 seconds (one day)
    x = x + 86400
    #create a struct time object from that
    d2 = time.localtime(x)
    #check if that date is a holiday
    return isHollyday(d2)

def sondermenue(d):
    return "Sondermenü: " + sondermenu_url + str(kw(d)) + ".pdf"

def mainfunction(d):
    """Fire the scheduled tweets appropriate for the given local time."""
    #april fools joke
    if d.tm_mon == 4 and d.tm_mday == 1:
        if houroftheday(d) == 7 and minuteofthehour(d) == 30:
            #its 7:30 in the morning
            tweet("Heute kocht Jamie Oliver nur für @Flasher1984!")
            tweet("Sondermenü: http://www.youtube.com/watch?v=oHg5SJYRHA0")
    #happy holydays
    if d.tm_mon == 12 and d.tm_mday == 20:
        if houroftheday(d) == 13 and minuteofthehour(d) == 0:
            tweet("Ich bin raus. Happy Hollidays Bitches.")
    #actual checks
    if not isHollyday(d) and not isWeekEnd(d):
        if houroftheday(d) == 7 and minuteofthehour(d) == 30:
            tweet("Heute " + getTodaysMeal(d))
            #BUGFIX: sondermenue() was called without its required argument.
            tweet(sondermenue(d))
        if houroftheday(d) == 11 and minuteofthehour(d) == 0:
            tweet("11am Update " + getTodaysMeal(d))
            tweet(sondermenue(d))
        if not daybeforehollyday(d) and not tommorowIsWeekEnd(d):
            if houroftheday(d) == 17 and minuteofthehour(d) == 0:
                tweet("Morgen " + getTommorowsMeal(d))

def run():
    """Poll the local time roughly once a minute forever."""
    while 1:
        #Get local time
        d = time.localtime()
        #Call the main function
        mainfunction(d)
        #sleep for about 1 minute
        time.sleep(59.555)

if __name__ == "__main__":
    run()
apache-2.0
suncycheng/intellij-community
python/helpers/pycharm/django_test_manage.py
4
5219
#!/usr/bin/env python
"""PyCharm's wrapper around Django's ``test`` management command.

The IDE appends the project directory as the LAST command-line argument;
it is popped off sys.argv before Django parses the arguments. The ``test``
subcommand is dispatched to a PyCharm-specific runner so results can be
reported back to the IDE / TeamCity; every other subcommand falls through
to Django's stock ManagementUtility.

NOTE(review): the module-level statements below are order-sensitive
(sys.path surgery, imports with side effects) - do not reorder.
"""
import os
import sys

from django.core.management import ManagementUtility

from pycharm_run_utils import import_system_module
from teamcity import teamcity_presence_env_var

inspect = import_system_module("inspect")

# Project directory is passed by the IDE as the last argument.
project_directory = sys.argv.pop()

#ensure project directory is given priority when looking for settings files
sys.path.insert(0, project_directory)

#import settings to prevent circular dependencies later on
import django.db

try:
    from django.conf import settings
    apps = settings.INSTALLED_APPS
except:
    # Settings may be absent or broken at this point; later code re-checks.
    pass

from django.core import management
from django.core.management.commands.test import Command

try:
    # setup environment
    # this stuff was done earlier by setup_environ() which was removed in 1.4
    sys.path.append(os.path.join(project_directory, os.pardir))
    project_name = os.path.basename(project_directory)
    __import__(project_name)
except ImportError:
    # project has custom structure (project directory is not importable)
    pass
finally:
    # Undo the sys.path.append above in every case.
    sys.path.pop()

# The manage module can be overridden via the environment; defaults to 'manage'.
manage_file = os.getenv('PYCHARM_DJANGO_MANAGE_MODULE')
if not manage_file:
    manage_file = 'manage'

try:
    __import__(manage_file)
except ImportError as e:
    print ("Failed to import" + str(manage_file) + " in ["+ ",".join(sys.path) +"] " + str(e))

settings_file = os.getenv('DJANGO_SETTINGS_MODULE')
if not settings_file:
    settings_file = 'settings'

import django
if django.VERSION[0:2] >= (1, 7):
    # Django >= 1.7 requires explicit setup before the app registry is usable.
    if not settings.configured:
        settings.configure()
    django.setup()


class PycharmTestCommand(Command):
    """Variant of the stock ``test`` command that uses PyCharm's runner."""

    def get_runner(self):
        # Resolve the callable named by the dotted path below at runtime.
        TEST_RUNNER = 'django_test_runner.run_tests'
        test_path = TEST_RUNNER.split('.')
        # Allow for Python 2.5 relative paths
        if len(test_path) > 1:
            test_module_name = '.'.join(test_path[:-1])
        else:
            test_module_name = '.'
        test_module = __import__(test_module_name, {}, {}, test_path[-1])
        test_runner = getattr(test_module, test_path[-1])
        return test_runner

    def handle(self, *test_labels, **options):
        # handle south migration in tests
        commands = management.get_commands()
        if hasattr(settings, "SOUTH_TESTS_MIGRATE") and not settings.SOUTH_TESTS_MIGRATE:
            # point at the core syncdb command when creating tests
            # tests should always be up to date with the most recent model structure
            commands['syncdb'] = 'django.core'
        elif 'south' in settings.INSTALLED_APPS:
            try:
                from south.management.commands import MigrateAndSyncCommand
                commands['syncdb'] = MigrateAndSyncCommand()
                from south.hacks import hacks
                if hasattr(hacks, "patch_flush_during_test_db_creation"):
                    hacks.patch_flush_during_test_db_creation()
            except ImportError:
                # South not importable after all: fall back to core syncdb.
                commands['syncdb'] = 'django.core'
        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive', True)
        failfast = options.get('failfast', False)
        TestRunner = self.get_runner()

        # Function-style runners take the options dict; class-based runners
        # (methods) are instantiated and then run.
        if not inspect.ismethod(TestRunner):
            our_options = {"verbosity": int(verbosity), "interactive": interactive, "failfast": failfast}
            options.update(our_options)
            failures = TestRunner(test_labels, **options)
        else:
            test_runner = TestRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
            failures = test_runner.run_tests(test_labels)

        if failures:
            sys.exit(bool(failures))


class PycharmTestManagementUtility(ManagementUtility):
    """ManagementUtility that routes execution through PycharmTestCommand."""

    def __init__(self, argv=None):
        ManagementUtility.__init__(self, argv)

    def execute(self):
        from django_test_runner import is_nosetest
        if is_nosetest(settings) and "_JB_USE_OLD_RUNNERS" not in os.environ:
            # New way to run django-nose is to install teamcity-runners plugin
            # there is no easy way to get qname in 2.7 so string is used
            name = "teamcity.nose_report.TeamcityReport"
            # emulate TC to enable plugin
            os.environ.update({teamcity_presence_env_var: "1"})
            # NOSE_PLUGINS could be list or tuple. Adding teamcity plugin to it
            try:
                settings.NOSE_PLUGINS += [name]
            except TypeError:
                settings.NOSE_PLUGINS += (name, )
            except AttributeError:
                settings.NOSE_PLUGINS = [name]
            # This file is required to init and monkeypatch new runners
            # noinspection PyUnresolvedReferences
            import _jb_runner_tools
            super(PycharmTestManagementUtility, self).execute()
        else:
            PycharmTestCommand().run_from_argv(self.argv)


if __name__ == "__main__":

    try:
        # Import the settings module; dotted paths ("pkg.settings") need the
        # extra __import__/getattr dance to reach the leaf module.
        custom_settings = __import__(settings_file)
        splitted_settings = settings_file.split('.')
        if len(splitted_settings) != 1:
            settings_name = '.'.join(splitted_settings[:-1])
            settings_module = __import__(settings_name, globals(), locals(), [splitted_settings[-1]])
            custom_settings = getattr(settings_module, splitted_settings[-1])
    except ImportError:
        print ("There is no such settings file " + str(settings_file) + "\n")

    try:
        subcommand = sys.argv[1]
    except IndexError:
        subcommand = 'help'  # Display help if no arguments were given.

    if subcommand == 'test':
        utility = PycharmTestManagementUtility(sys.argv)
    else:
        utility = ManagementUtility()

    utility.execute()
apache-2.0
rishuatgithub/MLPy
fruits_with_colors_KNNlearn.py
1
1673
#Building a Model to predict fruits data with colors using SKlearn and KNN
# Author: Rishu Shrivastava (rishu.shrivastava@gmail.com)
# Date : June 4, 2017

import numpy as np
import matplotlib.pyplot as mp
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import cm
from sklearn.neighbors import KNeighborsClassifier

#reading the tab-separated fruit dataset
fruits = pd.read_table('./data/fruit_data_with_colors.txt')

print("Displaying sample rows of fruit data set")
print(fruits.head())

#create a mapping from fruit label value to fruit name to make results easier to interpret
print("Lookup fruit names to make it easier to interpret the prediction")
lookup_fruit_name = dict(zip(fruits.fruit_label.unique(), fruits.fruit_name.unique()))
print(lookup_fruit_name)

#feature matrix (column order matters for predictions below) and target labels
X = fruits[['height', 'width', 'mass']]
y = fruits['fruit_label']

#creating a train and test data set. Split it in 75%/25%
print("Generating train and test dataset")
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

#plotting scatter matrix of the training features, coloured by label
cmap = cm.get_cmap('gnuplot')  # NOTE(review): cm.get_cmap is deprecated in matplotlib >= 3.7
# BUGFIX: pd.scatter_matrix was removed from the pandas top level (pandas 1.0);
# the supported location is pandas.plotting.scatter_matrix.
scatter = pd.plotting.scatter_matrix(X_train, c=y_train, marker='o', s=40,
                                     hist_kwds={'bins': 15}, figsize=(9, 9), cmap=cmap)
mp.show()

# Training the Dataset using KNN algorithm | neighbours=5
print("Training KNNeighbour Classifier")
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train, y_train)

print("The ACCURACY score = ",knn.score(X_test,y_test))

# first example: a small fruit with mass 20g, width 4.3 cm, height 5.5 cm
# BUGFIX: the feature order is (height, width, mass), so the query row must be
# [5.5, 4.3, 20]; the original [20, 4.3, 5.5] described a 20 cm fruit of 5.5 g.
fruit_prediction = knn.predict([[5.5, 4.3, 20]])
print("PREDICTING fruit with mass 20g, width 4.3 cm, height 5.5 cm : ",lookup_fruit_name[fruit_prediction[0]])
apache-2.0
marcioweck/PSSLib
reference/deap/examples/ga/onemax.py
9
3711
#    This file is part of DEAP.
#
#    DEAP is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Lesser General Public License as
#    published by the Free Software Foundation, either version 3 of
#    the License, or (at your option) any later version.
#
#    DEAP is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Lesser General Public License for more details.
#
#    You should have received a copy of the GNU Lesser General Public
#    License along with DEAP. If not, see <http://www.gnu.org/licenses/>.

# OneMax example: evolve a 100-bit individual toward all ones with a simple
# generational GA (tournament selection, two-point crossover, bit-flip
# mutation). NOTE(review): the run is seeded with random.seed(64), so the
# order of every call that consumes random numbers is significant for
# reproducing the published output - do not reorder.

import random

from deap import base
from deap import creator
from deap import tools

# Single-objective maximisation (positive weight = maximise).
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
# An individual is a plain list of bits carrying a FitnessMax fitness.
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()

# Attribute generator: one random bit.
toolbox.register("attr_bool", random.randint, 0, 1)

# Structure initializers: a 100-bit individual and a list-based population.
toolbox.register("individual", tools.initRepeat, creator.Individual,
    toolbox.attr_bool, 100)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

def evalOneMax(individual):
    """Fitness = number of ones; returned as a 1-tuple as DEAP requires."""
    return sum(individual),

# Operator registering
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)

def main():
    """Run the GA for NGEN generations, printing statistics per generation."""
    random.seed(64)  # fixed seed => reproducible run

    pop = toolbox.population(n=300)
    # Crossover probability, mutation probability, number of generations.
    CXPB, MUTPB, NGEN = 0.5, 0.2, 40

    print("Start of evolution")

    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    print("  Evaluated %i individuals" % len(pop))

    # Begin the evolution
    for g in range(NGEN):
        print("-- Generation %i --" % g)

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                # Invalidate fitnesses of modified children.
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        print("  Evaluated %i individuals" % len(invalid_ind))

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5

        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)

    print("-- End of (successful) evolution --")

    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))

if __name__ == "__main__":
    main()
lgpl-3.0
hopple/mdviewer
mdviewer.py
1
2962
from jinja2 import Environment, FileSystemLoader
import select
import markdown
import codecs
import argparse
import os
import sys

#This script waits for save events on a markdown file, transforms the modified
#markdown file into html using python markdown, and renders the content using a
#python Jinja2 template ('base.html' next to this script).
#This script is mainly written for Mac OS X.
#The purpose of using select and kqueue is to avoid the loop-check-sleep pattern.
#O_EVTONLY is defined as 0x8000 in the OS X header files.
#kqueue functionality in watchdog for Mac OS:
#https://github.com/gorakhargosh/watchdog/blob/master/src/watchdog/observers/kqueue.py
#documentation for select:
#http://docs.python.org/2.7/library/select.html

def md_to_html(filename, rtime):
    """Render `filename` (markdown) into an .html file alongside it.

    rtime is passed through to the template as the browser refresh interval.
    """
    # BUGFIX: the original used `name, ext = filename.split(".")`, which raised
    # ValueError for names with more than one dot (or none); splitext handles
    # arbitrary paths correctly.
    root, _ext = os.path.splitext(filename)
    html_filename = root + ".html"
    THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    input_file = codecs.open(filename, mode="r", encoding="utf-8")
    text = input_file.read()
    html = markdown.markdown(text)
    j2_env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
    renderVar = {"content": html, "rtime": rtime}
    html = j2_env.get_template('base.html').render(renderVar)
    output_file = codecs.open(html_filename, "w", encoding="utf-8",
                              errors="xmlcharrefreplace")
    output_file.write(html)
    input_file.close()
    output_file.close()

def main(filename, time=3):
    """Watch `filename` with kqueue and re-render on every write event.

    Note: the `time` parameter (browser refresh seconds) shadows the stdlib
    module name; the signature is kept for backward compatibility.
    """
    rtime = time
    O_EVTONLY = 0x8000  # open for event notifications only (OS X)
    fd = os.open(filename, O_EVTONLY)
    kq = select.kqueue()
    KQ_FILTER = select.KQ_FILTER_VNODE
    KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR
    KQ_FFLAGS = select.KQ_NOTE_WRITE | select.KQ_NOTE_EXTEND
    event = [select.kevent(fd, filter=KQ_FILTER, flags=KQ_EV_FLAGS, fflags=KQ_FFLAGS)]
    try:
        while True:
            print("waiting for events")
            #This call will block till the write or extend events occur
            r_events = kq.control(event, 1, None)
            for ev in r_events:
                if ev.fflags & select.KQ_NOTE_WRITE:
                    #a write event occurred; re-render the html
                    md_to_html(filename, rtime)
    except KeyboardInterrupt:
        print("\n Keyboard Interruption with Ctrl C, \n finishing...")
        kq.close()
        os.close(fd)
        print("finished")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Check the changes of markdown, transform the changed markdown file into html and display the latest markdown content in browser")
    parser.add_argument("filename", help="markdown file name you are editing")
    parser.add_argument("-t", "--time", type=int, choices=[1, 2, 3, 4, 5],
                        help="the browser refresh time, default is 3")
    args = parser.parse_args()
    print("Press Ctrl and C to stop...")
    if args.time:
        sys.exit(main(args.filename, args.time))
    else:
        sys.exit(main(args.filename))
mit
sgraham/nope
chrome/test/mini_installer/file_verifier.py
125
1116
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

import verifier


class FileVerifier(verifier.Verifier):
  """Checks that files on disk match the expectation dictionaries."""

  def _VerifyExpectation(self, expectation_name, expectation,
                         variable_expander):
    """Overridden from verifier.Verifier.

    Raises an AssertionError when the on-disk state of the file does not
    match |expectation|.

    Args:
      expectation_name: Path to the file being verified. It is expanded using
          Expand.
      expectation: A dictionary with the following key and value:
          'exists' a boolean indicating whether the file should exist.
      variable_expander: A VariableExpander object.
    """
    path = variable_expander.Expand(expectation_name)
    actually_exists = os.path.exists(path)
    if actually_exists:
      message = 'File %s exists' % path
    else:
      message = 'File %s is missing' % path
    assert expectation['exists'] == actually_exists, message
bsd-3-clause
MingdaZhou/gnuradio
docs/doxygen/doxyxml/generated/compoundsuper.py
348
359948
#!/usr/bin/env python
"""Support layer for the generateDS.py-generated Doxygen XML bindings.

Python 2 code (uses `basestring` and `except ..., exp` syntax). Contains the
fallback GeneratedsSuper formatter, XML quoting helpers and the containers
used by the generated classes further down the file.
"""

#
# Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
#

import sys
import getopt
from string import lower as str_lower
from xml.dom import minidom
from xml.dom import Node

#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.

try:
    from generatedssuper import GeneratedsSuper
except ImportError, exp:

    # Fallback formatter used when no user-supplied generatedssuper module
    # exists: formats scalar values for XML output.
    class GeneratedsSuper:
        def format_string(self, input_data, input_name=''):
            return input_data
        def format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def format_float(self, input_data, input_name=''):
            return '%f' % input_data
        def format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def format_boolean(self, input_data, input_name=''):
            return '%s' % input_data

#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#

## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
##     banner = 'Dropping into IPython',
##     exit_msg = 'Leaving Interpreter, back to program.')

# Then use the following line where and when you want to drop into the
# IPython shell:
#    ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')

#
# Globals
#

ExternalEncoding = 'ascii'

#
# Support/utility functions.
#

def showIndent(outfile, level):
    """Write `level` levels of 4-space indentation to outfile."""
    for idx in range(level):
        outfile.write('    ')

def quote_xml(inStr):
    """Escape &, < and > for use as XML element text."""
    s1 = (isinstance(inStr, basestring) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1

def quote_attrib(inStr):
    """Escape a value and wrap it in quotes suitable for an XML attribute."""
    s1 = (isinstance(inStr, basestring) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            # Contains both quote kinds: keep double quotes, escape them.
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1

def quote_python(inStr):
    """Return a Python source literal for inStr, choosing a quote style that
    needs the least escaping (single/double, tripled for multi-line)."""
    s1 = inStr
    if s1.find("'") == -1:
        if s1.find('\n') == -1:
            return "'%s'" % s1
        else:
            return "'''%s'''" % s1
    else:
        if s1.find('"') != -1:
            s1 = s1.replace('"', '\\"')
        if s1.find('\n') == -1:
            return '"%s"' % s1
        else:
            return '"""%s"""' % s1


class MixedContainer:
    """Holds one piece of mixed XML content (text, simple value or nested
    element) together with enough type information to re-export it."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the argument is unused; kept as generated.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        """Write this content item as XML to outfile."""
        if self.category == MixedContainer.CategoryText:
            outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace,name)
    def exportSimple(self, outfile, level, name):
        # Choose the printf conversion matching the declared content type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
            self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
            self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        """Write this content item as a Python constructor expression."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')


class _MemberSpec(object):
    """Describes one member of a generated class: name, data type and
    whether it is a container (list) member."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name): self.name = name
    def get_name(self): return self.name
    def set_data_type(self, data_type): self.data_type = data_type
    def get_data_type(self): return self.data_type
    def set_container(self, container): self.container = container
    def get_container(self): return self.container


#
# Data representation classes.
# class DoxygenType(GeneratedsSuper): subclass = None superclass = None def __init__(self, version=None, compounddef=None): self.version = version self.compounddef = compounddef def factory(*args_, **kwargs_): if DoxygenType.subclass: return DoxygenType.subclass(*args_, **kwargs_) else: return DoxygenType(*args_, **kwargs_) factory = staticmethod(factory) def get_compounddef(self): return self.compounddef def set_compounddef(self, compounddef): self.compounddef = compounddef def get_version(self): return self.version def set_version(self, version): self.version = version def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): outfile.write(' version=%s' % (quote_attrib(self.version), )) def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): if self.compounddef: self.compounddef.export(outfile, level, namespace_, name_='compounddef') def hasContent_(self): if ( self.compounddef is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='DoxygenType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.version is not None: showIndent(outfile, level) outfile.write('version = "%s",\n' % (self.version,)) def exportLiteralChildren(self, outfile, level, name_): if self.compounddef: showIndent(outfile, level) outfile.write('compounddef=model_.compounddefType(\n') 
self.compounddef.exportLiteral(outfile, level, name_='compounddef') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('version'): self.version = attrs.get('version').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'compounddef': obj_ = compounddefType.factory() obj_.build(child_) self.set_compounddef(obj_) # end class DoxygenType class compounddefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): self.kind = kind self.prot = prot self.id = id self.compoundname = compoundname self.title = title if basecompoundref is None: self.basecompoundref = [] else: self.basecompoundref = basecompoundref if derivedcompoundref is None: self.derivedcompoundref = [] else: self.derivedcompoundref = derivedcompoundref if includes is None: self.includes = [] else: self.includes = includes if includedby is None: self.includedby = [] else: self.includedby = includedby self.incdepgraph = incdepgraph self.invincdepgraph = invincdepgraph if innerdir is None: self.innerdir = [] else: self.innerdir = innerdir if innerfile is None: self.innerfile = [] else: self.innerfile = innerfile if innerclass is None: self.innerclass = [] else: self.innerclass = innerclass if innernamespace is None: 
self.innernamespace = [] else: self.innernamespace = innernamespace if innerpage is None: self.innerpage = [] else: self.innerpage = innerpage if innergroup is None: self.innergroup = [] else: self.innergroup = innergroup self.templateparamlist = templateparamlist if sectiondef is None: self.sectiondef = [] else: self.sectiondef = sectiondef self.briefdescription = briefdescription self.detaileddescription = detaileddescription self.inheritancegraph = inheritancegraph self.collaborationgraph = collaborationgraph self.programlisting = programlisting self.location = location self.listofallmembers = listofallmembers def factory(*args_, **kwargs_): if compounddefType.subclass: return compounddefType.subclass(*args_, **kwargs_) else: return compounddefType(*args_, **kwargs_) factory = staticmethod(factory) def get_compoundname(self): return self.compoundname def set_compoundname(self, compoundname): self.compoundname = compoundname def get_title(self): return self.title def set_title(self, title): self.title = title def get_basecompoundref(self): return self.basecompoundref def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref def add_basecompoundref(self, value): self.basecompoundref.append(value) def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value def get_derivedcompoundref(self): return self.derivedcompoundref def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value def get_includes(self): return self.includes def set_includes(self, includes): self.includes = includes def add_includes(self, value): self.includes.append(value) def insert_includes(self, index, value): self.includes[index] = value def get_includedby(self): return self.includedby def set_includedby(self, includedby): self.includedby 
= includedby def add_includedby(self, value): self.includedby.append(value) def insert_includedby(self, index, value): self.includedby[index] = value def get_incdepgraph(self): return self.incdepgraph def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph def get_invincdepgraph(self): return self.invincdepgraph def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph def get_innerdir(self): return self.innerdir def set_innerdir(self, innerdir): self.innerdir = innerdir def add_innerdir(self, value): self.innerdir.append(value) def insert_innerdir(self, index, value): self.innerdir[index] = value def get_innerfile(self): return self.innerfile def set_innerfile(self, innerfile): self.innerfile = innerfile def add_innerfile(self, value): self.innerfile.append(value) def insert_innerfile(self, index, value): self.innerfile[index] = value def get_innerclass(self): return self.innerclass def set_innerclass(self, innerclass): self.innerclass = innerclass def add_innerclass(self, value): self.innerclass.append(value) def insert_innerclass(self, index, value): self.innerclass[index] = value def get_innernamespace(self): return self.innernamespace def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace def add_innernamespace(self, value): self.innernamespace.append(value) def insert_innernamespace(self, index, value): self.innernamespace[index] = value def get_innerpage(self): return self.innerpage def set_innerpage(self, innerpage): self.innerpage = innerpage def add_innerpage(self, value): self.innerpage.append(value) def insert_innerpage(self, index, value): self.innerpage[index] = value def get_innergroup(self): return self.innergroup def set_innergroup(self, innergroup): self.innergroup = innergroup def add_innergroup(self, value): self.innergroup.append(value) def insert_innergroup(self, index, value): self.innergroup[index] = value def get_templateparamlist(self): return self.templateparamlist def 
set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist def get_sectiondef(self): return self.sectiondef def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef def add_sectiondef(self, value): self.sectiondef.append(value) def insert_sectiondef(self, index, value): self.sectiondef[index] = value def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_inheritancegraph(self): return self.inheritancegraph def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph def get_collaborationgraph(self): return self.collaborationgraph def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph def get_programlisting(self): return self.programlisting def set_programlisting(self, programlisting): self.programlisting = programlisting def get_location(self): return self.location def set_location(self, location): self.location = location def get_listofallmembers(self): return self.listofallmembers def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='compounddefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) 
outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'): if self.compoundname is not None: showIndent(outfile, level) outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) if self.title is not None: showIndent(outfile, level) outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) for basecompoundref_ in self.basecompoundref: basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref') for derivedcompoundref_ in self.derivedcompoundref: derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref') for includes_ in self.includes: includes_.export(outfile, level, namespace_, name_='includes') for includedby_ in self.includedby: includedby_.export(outfile, level, namespace_, name_='includedby') if self.incdepgraph: self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph') if self.invincdepgraph: self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph') for innerdir_ in self.innerdir: innerdir_.export(outfile, level, namespace_, name_='innerdir') for innerfile_ in self.innerfile: innerfile_.export(outfile, level, namespace_, name_='innerfile') for innerclass_ in self.innerclass: innerclass_.export(outfile, level, namespace_, name_='innerclass') for innernamespace_ in self.innernamespace: 
innernamespace_.export(outfile, level, namespace_, name_='innernamespace') for innerpage_ in self.innerpage: innerpage_.export(outfile, level, namespace_, name_='innerpage') for innergroup_ in self.innergroup: innergroup_.export(outfile, level, namespace_, name_='innergroup') if self.templateparamlist: self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') for sectiondef_ in self.sectiondef: sectiondef_.export(outfile, level, namespace_, name_='sectiondef') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') if self.inheritancegraph: self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph') if self.collaborationgraph: self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph') if self.programlisting: self.programlisting.export(outfile, level, namespace_, name_='programlisting') if self.location: self.location.export(outfile, level, namespace_, name_='location') if self.listofallmembers: self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers') def hasContent_(self): if ( self.compoundname is not None or self.title is not None or self.basecompoundref is not None or self.derivedcompoundref is not None or self.includes is not None or self.includedby is not None or self.incdepgraph is not None or self.invincdepgraph is not None or self.innerdir is not None or self.innerfile is not None or self.innerclass is not None or self.innernamespace is not None or self.innerpage is not None or self.innergroup is not None or self.templateparamlist is not None or self.sectiondef is not None or self.briefdescription is not None or self.detaileddescription is not None or self.inheritancegraph is not None or self.collaborationgraph is not None or self.programlisting is not None or self.location is not None 
or self.listofallmembers is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='compounddefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding)) if self.title: showIndent(outfile, level) outfile.write('title=model_.xsd_string(\n') self.title.exportLiteral(outfile, level, name_='title') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('basecompoundref=[\n') level += 1 for basecompoundref in self.basecompoundref: showIndent(outfile, level) outfile.write('model_.basecompoundref(\n') basecompoundref.exportLiteral(outfile, level, name_='basecompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('derivedcompoundref=[\n') level += 1 for derivedcompoundref in self.derivedcompoundref: showIndent(outfile, level) outfile.write('model_.derivedcompoundref(\n') derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('includes=[\n') level += 1 for includes in self.includes: showIndent(outfile, level) outfile.write('model_.includes(\n') includes.exportLiteral(outfile, level, name_='includes') showIndent(outfile, level) 
outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('includedby=[\n') level += 1 for includedby in self.includedby: showIndent(outfile, level) outfile.write('model_.includedby(\n') includedby.exportLiteral(outfile, level, name_='includedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.incdepgraph: showIndent(outfile, level) outfile.write('incdepgraph=model_.graphType(\n') self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph') showIndent(outfile, level) outfile.write('),\n') if self.invincdepgraph: showIndent(outfile, level) outfile.write('invincdepgraph=model_.graphType(\n') self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('innerdir=[\n') level += 1 for innerdir in self.innerdir: showIndent(outfile, level) outfile.write('model_.innerdir(\n') innerdir.exportLiteral(outfile, level, name_='innerdir') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerfile=[\n') level += 1 for innerfile in self.innerfile: showIndent(outfile, level) outfile.write('model_.innerfile(\n') innerfile.exportLiteral(outfile, level, name_='innerfile') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerclass=[\n') level += 1 for innerclass in self.innerclass: showIndent(outfile, level) outfile.write('model_.innerclass(\n') innerclass.exportLiteral(outfile, level, name_='innerclass') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innernamespace=[\n') level += 1 for innernamespace in self.innernamespace: 
showIndent(outfile, level) outfile.write('model_.innernamespace(\n') innernamespace.exportLiteral(outfile, level, name_='innernamespace') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerpage=[\n') level += 1 for innerpage in self.innerpage: showIndent(outfile, level) outfile.write('model_.innerpage(\n') innerpage.exportLiteral(outfile, level, name_='innerpage') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innergroup=[\n') level += 1 for innergroup in self.innergroup: showIndent(outfile, level) outfile.write('model_.innergroup(\n') innergroup.exportLiteral(outfile, level, name_='innergroup') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('sectiondef=[\n') level += 1 for sectiondef in self.sectiondef: showIndent(outfile, level) outfile.write('model_.sectiondef(\n') sectiondef.exportLiteral(outfile, level, name_='sectiondef') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) outfile.write('detaileddescription=model_.descriptionType(\n') self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') showIndent(outfile, level) 
outfile.write('),\n') if self.inheritancegraph: showIndent(outfile, level) outfile.write('inheritancegraph=model_.graphType(\n') self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph') showIndent(outfile, level) outfile.write('),\n') if self.collaborationgraph: showIndent(outfile, level) outfile.write('collaborationgraph=model_.graphType(\n') self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph') showIndent(outfile, level) outfile.write('),\n') if self.programlisting: showIndent(outfile, level) outfile.write('programlisting=model_.listingType(\n') self.programlisting.exportLiteral(outfile, level, name_='programlisting') showIndent(outfile, level) outfile.write('),\n') if self.location: showIndent(outfile, level) outfile.write('location=model_.locationType(\n') self.location.exportLiteral(outfile, level, name_='location') showIndent(outfile, level) outfile.write('),\n') if self.listofallmembers: showIndent(outfile, level) outfile.write('listofallmembers=model_.listofallmembersType(\n') self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'compoundname': compoundname_ = '' for text__content_ in child_.childNodes: compoundname_ += text__content_.nodeValue self.compoundname = compoundname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == 
Node.ELEMENT_NODE and \ nodeName_ == 'basecompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.basecompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'derivedcompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.derivedcompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'includes': obj_ = incType.factory() obj_.build(child_) self.includes.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'includedby': obj_ = incType.factory() obj_.build(child_) self.includedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'incdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_incdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'invincdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_invincdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerdir': obj_ = refType.factory() obj_.build(child_) self.innerdir.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerfile': obj_ = refType.factory() obj_.build(child_) self.innerfile.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerclass': obj_ = refType.factory() obj_.build(child_) self.innerclass.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innernamespace': obj_ = refType.factory() obj_.build(child_) self.innernamespace.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerpage': obj_ = refType.factory() obj_.build(child_) self.innerpage.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innergroup': obj_ = refType.factory() obj_.build(child_) self.innergroup.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == 
Node.ELEMENT_NODE and \ nodeName_ == 'sectiondef': obj_ = sectiondefType.factory() obj_.build(child_) self.sectiondef.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'inheritancegraph': obj_ = graphType.factory() obj_.build(child_) self.set_inheritancegraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'collaborationgraph': obj_ = graphType.factory() obj_.build(child_) self.set_collaborationgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'programlisting': obj_ = listingType.factory() obj_.build(child_) self.set_programlisting(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'listofallmembers': obj_ = listofallmembersType.factory() obj_.build(child_) self.set_listofallmembers(obj_) # end class compounddefType class listofallmembersType(GeneratedsSuper): subclass = None superclass = None def __init__(self, member=None): if member is None: self.member = [] else: self.member = member def factory(*args_, **kwargs_): if listofallmembersType.subclass: return listofallmembersType.subclass(*args_, **kwargs_) else: return listofallmembersType(*args_, **kwargs_) factory = staticmethod(factory) def get_member(self): return self.member def set_member(self, member): self.member = member def add_member(self, value): self.member.append(value) def insert_member(self, index, value): self.member[index] = value def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''): 
showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'): pass def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'): for member_ in self.member: member_.export(outfile, level, namespace_, name_='member') def hasContent_(self): if ( self.member is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='listofallmembersType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('member=[\n') level += 1 for member in self.member: showIndent(outfile, level) outfile.write('model_.member(\n') member.exportLiteral(outfile, level, name_='member') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'member': obj_ = memberRefType.factory() obj_.build(child_) self.member.append(obj_) # end class listofallmembersType class memberRefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None): 
self.virt = virt self.prot = prot self.refid = refid self.ambiguityscope = ambiguityscope self.scope = scope self.name = name def factory(*args_, **kwargs_): if memberRefType.subclass: return memberRefType.subclass(*args_, **kwargs_) else: return memberRefType(*args_, **kwargs_) factory = staticmethod(factory) def get_scope(self): return self.scope def set_scope(self, scope): self.scope = scope def get_name(self): return self.name def set_name(self, name): self.name = name def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_ambiguityscope(self): return self.ambiguityscope def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='memberRefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.ambiguityscope is not None: outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), )) def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'): if 
self.scope is not None: showIndent(outfile, level) outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_)) if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) def hasContent_(self): if ( self.scope is not None or self.name is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='memberRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.ambiguityscope is not None: showIndent(outfile, level) outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('ambiguityscope'): self.ambiguityscope = attrs.get('ambiguityscope').value def buildChildren(self, child_, 
nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'scope': scope_ = '' for text__content_ in child_.childNodes: scope_ += text__content_.nodeValue self.scope = scope_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ # end class memberRefType class scope(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if scope.subclass: return scope.subclass(*args_, **kwargs_) else: return scope(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='scope') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='scope'): pass def exportChildren(self, outfile, level, namespace_='', name_='scope'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='scope'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): 
showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class scope class name(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if name.subclass: return name.subclass(*args_, **kwargs_) else: return name(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='name') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='name'): pass def exportChildren(self, outfile, level, namespace_='', name_='name'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='name'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): 
self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class name class compoundRefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.virt = virt self.prot = prot self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if compoundRefType.subclass: return compoundRefType.subclass(*args_, **kwargs_) else: return compoundRefType(*args_, **kwargs_) factory = staticmethod(factory) def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='compoundRefType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, 
name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='compoundRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, 
nodeName_):
        # Tail of compoundRefType.buildChildren (class header is above this
        # chunk): text and CDATA children are accumulated both as
        # MixedContainer entries (content_) and as raw text in valueOf_.
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class compoundRefType


class reimplementType(GeneratedsSuper):
    """Binding for the Doxygen <reimplement>/<reimplementedby> element:
    mixed text content plus a 'refid' attribute.

    Generated-style class: build() populates it from a DOM node, export()
    writes it back out as XML, exportLiteral() writes a Python-literal dump.
    """
    subclass = None
    superclass = None
    def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): the valueOf_ parameter is accepted but never assigned
        # to self here; self.valueOf_ is only initialized in build(). Confirm
        # instances are always populated via build() before export().
        self.refid = refid
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        # Indirect constructor: lets a user-supplied subclass override
        # instantiation by assigning reimplementType.subclass.
        if reimplementType.subclass:
            return reimplementType.subclass(*args_, **kwargs_)
        else:
            return reimplementType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_refid(self): return self.refid
    def set_refid(self, refid): self.refid = refid
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''):
        # Write this element (open tag, attributes, children, close tag).
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='reimplementType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'):
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'):
        # Re-wrap any CDATA marker that build() flattened into valueOf_.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        # NOTE(review): valueOf_ defaults to '' so this is effectively
        # always True; generated code, kept as-is.
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='reimplementType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        # Populate this instance from a minidom element node.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the child's tag name.
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            # CDATA is flattened to a textual marker; exportChildren()
            # reconstitutes the <![CDATA[...]]> wrapper.
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class reimplementType


class incType(GeneratedsSuper):
    """Binding for the Doxygen <includes>/<includedby> element: mixed text
    content plus 'local' and 'refid' attributes.
    """
    subclass = None
    superclass = None
    def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): valueOf_ parameter not assigned here; see
        # reimplementType.__init__ for the same pattern.
        self.local = local
        self.refid = refid
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if incType.subclass:
            return incType.subclass(*args_, **kwargs_)
        else:
            return incType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_local(self): return self.local
    def set_local(self, local): self.local = local
    def get_refid(self): return self.refid
    def set_refid(self, refid): self.refid = refid
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='incType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='incType'):
        if self.local is not None:
            outfile.write(' local=%s' % (quote_attrib(self.local), ))
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='incType'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='incType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.local is not None:
            showIndent(outfile, level)
            outfile.write('local = "%s",\n' % (self.local,))
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('local'):
            self.local = attrs.get('local').value
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class incType


class refType(GeneratedsSuper):
    """Binding for a Doxygen reference element with 'prot' (protection
    level) and 'refid' attributes plus mixed text content.
    """
    subclass = None
    superclass = None
    def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): valueOf_ parameter not assigned here; see
        # reimplementType.__init__ for the same pattern.
        self.prot = prot
        self.refid = refid
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if refType.subclass:
            return refType.subclass(*args_, **kwargs_)
        else:
            return refType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_prot(self): return self.prot
    def set_prot(self, prot): self.prot = prot
    def get_refid(self): return self.refid
    def set_refid(self, refid): self.refid = refid
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='refType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='refType'):
        if self.prot is not None:
            outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding),
                input_name='refid'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='refType'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='refType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.prot is not None:
            showIndent(outfile, level)
            outfile.write('prot = "%s",\n' % (self.prot,))
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('prot'):
            self.prot = attrs.get('prot').value
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class refType


class refTextType(GeneratedsSuper):
    """Binding for the Doxygen <ref> inline element: mixed text content plus
    'refid', 'kindref' and 'external' attributes.
    """
    subclass = None
    superclass = None
    def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): valueOf_ parameter not assigned here; see
        # reimplementType.__init__ for the same pattern.
        self.refid = refid
        self.kindref = kindref
        self.external = external
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if refTextType.subclass:
            return refTextType.subclass(*args_, **kwargs_)
        else:
            return refTextType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_refid(self): return self.refid
    def set_refid(self, refid): self.refid = refid
    def get_kindref(self): return self.kindref
    def set_kindref(self, kindref): self.kindref = kindref
    def get_external(self): return self.external
    def set_external(self, external): self.external = external
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='refTextType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'):
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
        if self.kindref is not None:
            outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
        if self.external is not None:
            outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='refTextType'):
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='refTextType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
        if self.kindref is not None:
            showIndent(outfile, level)
            outfile.write('kindref = "%s",\n' % (self.kindref,))
        if self.external is not None:
            showIndent(outfile, level)
            outfile.write('external = %s,\n' % (self.external,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
        if attrs.get('kindref'):
            self.kindref = attrs.get('kindref').value
        if attrs.get('external'):
            self.external = attrs.get('external').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class refTextType


class sectiondefType(GeneratedsSuper):
    """Binding for the Doxygen <sectiondef> element: a 'kind' attribute, an
    optional <header> string, an optional <description> subtree and a list
    of <memberdef> children.
    """
    subclass = None
    superclass = None
    def __init__(self, kind=None, header=None, description=None, memberdef=None):
        self.kind = kind
        self.header = header
        self.description = description
        if memberdef is None:
            self.memberdef = []
        else:
            self.memberdef = memberdef
    def factory(*args_, **kwargs_):
        if sectiondefType.subclass:
            return sectiondefType.subclass(*args_, **kwargs_)
        else:
            return sectiondefType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_header(self): return self.header
    def set_header(self, header): self.header = header
    def get_description(self): return self.description
    def set_description(self, description): self.description = description
    def get_memberdef(self): return self.memberdef
    def set_memberdef(self, memberdef): self.memberdef = memberdef
    def add_memberdef(self, value): self.memberdef.append(value)
    # NOTE(review): despite the name, this replaces the item at `index`
    # rather than inserting before it (generated code, kept as-is).
    def insert_memberdef(self, index, value): self.memberdef[index] = value
    def get_kind(self): return self.kind
    def set_kind(self, kind): self.kind = kind
    def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='sectiondefType')
        # Self-closing tag when there are no children to write.
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'):
        if self.kind is not None:
            outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
    def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'):
        if self.header is not None:
            showIndent(outfile, level)
            outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
        if self.description:
            self.description.export(outfile, level, namespace_, name_='description')
        for memberdef_ in self.memberdef:
            memberdef_.export(outfile, level, namespace_, name_='memberdef')
    def hasContent_(self):
        # NOTE(review): self.memberdef is a list and never None, so the last
        # test is effectively always True; generated code, kept as-is.
        if (
            self.header is not None or
            self.description is not None or
            self.memberdef is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='sectiondefType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.kind is not None:
            showIndent(outfile, level)
            outfile.write('kind = "%s",\n' % (self.kind,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding))
        if self.description:
            showIndent(outfile, level)
            outfile.write('description=model_.descriptionType(\n')
            self.description.exportLiteral(outfile, level, name_='description')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('memberdef=[\n')
        level += 1
        for memberdef in self.memberdef:
            showIndent(outfile, level)
            outfile.write('model_.memberdef(\n')
            memberdef.exportLiteral(outfile, level, name_='memberdef')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('kind'):
            self.kind = attrs.get('kind').value
    def buildChildren(self, child_, nodeName_):
        # Dispatch on the child element's (namespace-stripped) tag name.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'header':
            header_ = ''
            for text__content_ in child_.childNodes:
                header_ += text__content_.nodeValue
            self.header = header_
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'description':
            obj_ = descriptionType.factory()
            obj_.build(child_)
            self.set_description(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'memberdef':
            obj_ = memberdefType.factory()
            obj_.build(child_)
            self.memberdef.append(obj_)
# end class sectiondefType


class memberdefType(GeneratedsSuper):
    # Binding for the Doxygen <memberdef> element; the definition continues
    # past this chunk.
    subclass = None
    superclass = None
    def __init__(self, initonly=None, kind=None, volatile=None, const=None,
        raisexx=None, virt=None, readable=None, prot=None, explicit=None,
        new=None,
final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): self.initonly = initonly self.kind = kind self.volatile = volatile self.const = const self.raisexx = raisexx self.virt = virt self.readable = readable self.prot = prot self.explicit = explicit self.new = new self.final = final self.writable = writable self.add = add self.static = static self.remove = remove self.sealed = sealed self.mutable = mutable self.gettable = gettable self.inline = inline self.settable = settable self.id = id self.templateparamlist = templateparamlist self.type_ = type_ self.definition = definition self.argsstring = argsstring self.name = name self.read = read self.write = write self.bitfield = bitfield if reimplements is None: self.reimplements = [] else: self.reimplements = reimplements if reimplementedby is None: self.reimplementedby = [] else: self.reimplementedby = reimplementedby if param is None: self.param = [] else: self.param = param if enumvalue is None: self.enumvalue = [] else: self.enumvalue = enumvalue self.initializer = initializer self.exceptions = exceptions self.briefdescription = briefdescription self.detaileddescription = detaileddescription self.inbodydescription = inbodydescription self.location = location if references is None: self.references = [] else: self.references = references if referencedby is None: self.referencedby = [] else: self.referencedby = referencedby def factory(*args_, **kwargs_): if memberdefType.subclass: return memberdefType.subclass(*args_, **kwargs_) else: return memberdefType(*args_, **kwargs_) factory = 
staticmethod(factory) def get_templateparamlist(self): return self.templateparamlist def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_definition(self): return self.definition def set_definition(self, definition): self.definition = definition def get_argsstring(self): return self.argsstring def set_argsstring(self, argsstring): self.argsstring = argsstring def get_name(self): return self.name def set_name(self, name): self.name = name def get_read(self): return self.read def set_read(self, read): self.read = read def get_write(self): return self.write def set_write(self, write): self.write = write def get_bitfield(self): return self.bitfield def set_bitfield(self, bitfield): self.bitfield = bitfield def get_reimplements(self): return self.reimplements def set_reimplements(self, reimplements): self.reimplements = reimplements def add_reimplements(self, value): self.reimplements.append(value) def insert_reimplements(self, index, value): self.reimplements[index] = value def get_reimplementedby(self): return self.reimplementedby def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby def add_reimplementedby(self, value): self.reimplementedby.append(value) def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value def get_param(self): return self.param def set_param(self, param): self.param = param def add_param(self, value): self.param.append(value) def insert_param(self, index, value): self.param[index] = value def get_enumvalue(self): return self.enumvalue def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue def add_enumvalue(self, value): self.enumvalue.append(value) def insert_enumvalue(self, index, value): self.enumvalue[index] = value def get_initializer(self): return self.initializer def set_initializer(self, initializer): self.initializer = initializer def 
get_exceptions(self): return self.exceptions def set_exceptions(self, exceptions): self.exceptions = exceptions def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_inbodydescription(self): return self.inbodydescription def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription def get_location(self): return self.location def set_location(self, location): self.location = location def get_references(self): return self.references def set_references(self, references): self.references = references def add_references(self, value): self.references.append(value) def insert_references(self, index, value): self.references[index] = value def get_referencedby(self): return self.referencedby def set_referencedby(self, referencedby): self.referencedby = referencedby def add_referencedby(self, value): self.referencedby.append(value) def insert_referencedby(self, index, value): self.referencedby[index] = value def get_initonly(self): return self.initonly def set_initonly(self, initonly): self.initonly = initonly def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_volatile(self): return self.volatile def set_volatile(self, volatile): self.volatile = volatile def get_const(self): return self.const def set_const(self, const): self.const = const def get_raise(self): return self.raisexx def set_raise(self, raisexx): self.raisexx = raisexx def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_readable(self): return self.readable def set_readable(self, readable): self.readable = readable def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_explicit(self): return self.explicit 
def set_explicit(self, explicit): self.explicit = explicit def get_new(self): return self.new def set_new(self, new): self.new = new def get_final(self): return self.final def set_final(self, final): self.final = final def get_writable(self): return self.writable def set_writable(self, writable): self.writable = writable def get_add(self): return self.add def set_add(self, add): self.add = add def get_static(self): return self.static def set_static(self, static): self.static = static def get_remove(self): return self.remove def set_remove(self, remove): self.remove = remove def get_sealed(self): return self.sealed def set_sealed(self, sealed): self.sealed = sealed def get_mutable(self): return self.mutable def set_mutable(self, mutable): self.mutable = mutable def get_gettable(self): return self.gettable def set_gettable(self, gettable): self.gettable = gettable def get_inline(self): return self.inline def set_inline(self, inline): self.inline = inline def get_settable(self): return self.settable def set_settable(self, settable): self.settable = settable def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='memberdefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'): if self.initonly is not None: outfile.write(' initonly=%s' % (quote_attrib(self.initonly), )) if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) if self.volatile is not None: outfile.write(' volatile=%s' % (quote_attrib(self.volatile), )) if self.const is not None: outfile.write(' 
const=%s' % (quote_attrib(self.const), )) if self.raisexx is not None: outfile.write(' raise=%s' % (quote_attrib(self.raisexx), )) if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.readable is not None: outfile.write(' readable=%s' % (quote_attrib(self.readable), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.explicit is not None: outfile.write(' explicit=%s' % (quote_attrib(self.explicit), )) if self.new is not None: outfile.write(' new=%s' % (quote_attrib(self.new), )) if self.final is not None: outfile.write(' final=%s' % (quote_attrib(self.final), )) if self.writable is not None: outfile.write(' writable=%s' % (quote_attrib(self.writable), )) if self.add is not None: outfile.write(' add=%s' % (quote_attrib(self.add), )) if self.static is not None: outfile.write(' static=%s' % (quote_attrib(self.static), )) if self.remove is not None: outfile.write(' remove=%s' % (quote_attrib(self.remove), )) if self.sealed is not None: outfile.write(' sealed=%s' % (quote_attrib(self.sealed), )) if self.mutable is not None: outfile.write(' mutable=%s' % (quote_attrib(self.mutable), )) if self.gettable is not None: outfile.write(' gettable=%s' % (quote_attrib(self.gettable), )) if self.inline is not None: outfile.write(' inline=%s' % (quote_attrib(self.inline), )) if self.settable is not None: outfile.write(' settable=%s' % (quote_attrib(self.settable), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'): if self.templateparamlist: self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.definition is not None: showIndent(outfile, level) outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, 
self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) if self.argsstring is not None: showIndent(outfile, level) outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) if self.read is not None: showIndent(outfile, level) outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) if self.write is not None: showIndent(outfile, level) outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) if self.bitfield is not None: showIndent(outfile, level) outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) for reimplements_ in self.reimplements: reimplements_.export(outfile, level, namespace_, name_='reimplements') for reimplementedby_ in self.reimplementedby: reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby') for param_ in self.param: param_.export(outfile, level, namespace_, name_='param') for enumvalue_ in self.enumvalue: enumvalue_.export(outfile, level, namespace_, name_='enumvalue') if self.initializer: self.initializer.export(outfile, level, namespace_, name_='initializer') if self.exceptions: self.exceptions.export(outfile, level, namespace_, name_='exceptions') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: self.detaileddescription.export(outfile, level, namespace_, 
name_='detaileddescription') if self.inbodydescription: self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription') if self.location: self.location.export(outfile, level, namespace_, name_='location', ) for references_ in self.references: references_.export(outfile, level, namespace_, name_='references') for referencedby_ in self.referencedby: referencedby_.export(outfile, level, namespace_, name_='referencedby') def hasContent_(self): if ( self.templateparamlist is not None or self.type_ is not None or self.definition is not None or self.argsstring is not None or self.name is not None or self.read is not None or self.write is not None or self.bitfield is not None or self.reimplements is not None or self.reimplementedby is not None or self.param is not None or self.enumvalue is not None or self.initializer is not None or self.exceptions is not None or self.briefdescription is not None or self.detaileddescription is not None or self.inbodydescription is not None or self.location is not None or self.references is not None or self.referencedby is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='memberdefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.initonly is not None: showIndent(outfile, level) outfile.write('initonly = "%s",\n' % (self.initonly,)) if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.volatile is not None: showIndent(outfile, level) outfile.write('volatile = "%s",\n' % (self.volatile,)) if self.const is not None: showIndent(outfile, level) outfile.write('const = "%s",\n' % (self.const,)) if self.raisexx is not None: showIndent(outfile, level) outfile.write('raisexx = "%s",\n' % (self.raisexx,)) if self.virt is not None: showIndent(outfile, level) outfile.write('virt = 
"%s",\n' % (self.virt,)) if self.readable is not None: showIndent(outfile, level) outfile.write('readable = "%s",\n' % (self.readable,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.explicit is not None: showIndent(outfile, level) outfile.write('explicit = "%s",\n' % (self.explicit,)) if self.new is not None: showIndent(outfile, level) outfile.write('new = "%s",\n' % (self.new,)) if self.final is not None: showIndent(outfile, level) outfile.write('final = "%s",\n' % (self.final,)) if self.writable is not None: showIndent(outfile, level) outfile.write('writable = "%s",\n' % (self.writable,)) if self.add is not None: showIndent(outfile, level) outfile.write('add = "%s",\n' % (self.add,)) if self.static is not None: showIndent(outfile, level) outfile.write('static = "%s",\n' % (self.static,)) if self.remove is not None: showIndent(outfile, level) outfile.write('remove = "%s",\n' % (self.remove,)) if self.sealed is not None: showIndent(outfile, level) outfile.write('sealed = "%s",\n' % (self.sealed,)) if self.mutable is not None: showIndent(outfile, level) outfile.write('mutable = "%s",\n' % (self.mutable,)) if self.gettable is not None: showIndent(outfile, level) outfile.write('gettable = "%s",\n' % (self.gettable,)) if self.inline is not None: showIndent(outfile, level) outfile.write('inline = "%s",\n' % (self.inline,)) if self.settable is not None: showIndent(outfile, level) outfile.write('settable = "%s",\n' % (self.settable,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') if self.type_: showIndent(outfile, level) 
outfile.write('type_=model_.linkedTextType(\n') self.type_.exportLiteral(outfile, level, name_='type') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('reimplements=[\n') level += 1 for reimplements in self.reimplements: showIndent(outfile, level) outfile.write('model_.reimplements(\n') reimplements.exportLiteral(outfile, level, name_='reimplements') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('reimplementedby=[\n') level += 1 for reimplementedby in self.reimplementedby: showIndent(outfile, level) outfile.write('model_.reimplementedby(\n') reimplementedby.exportLiteral(outfile, level, name_='reimplementedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('param=[\n') level += 1 for param in self.param: showIndent(outfile, level) outfile.write('model_.param(\n') param.exportLiteral(outfile, level, name_='param') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('enumvalue=[\n') level += 1 for enumvalue in self.enumvalue: showIndent(outfile, level) 
outfile.write('model_.enumvalue(\n') enumvalue.exportLiteral(outfile, level, name_='enumvalue') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.initializer: showIndent(outfile, level) outfile.write('initializer=model_.linkedTextType(\n') self.initializer.exportLiteral(outfile, level, name_='initializer') showIndent(outfile, level) outfile.write('),\n') if self.exceptions: showIndent(outfile, level) outfile.write('exceptions=model_.linkedTextType(\n') self.exceptions.exportLiteral(outfile, level, name_='exceptions') showIndent(outfile, level) outfile.write('),\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) outfile.write('detaileddescription=model_.descriptionType(\n') self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') showIndent(outfile, level) outfile.write('),\n') if self.inbodydescription: showIndent(outfile, level) outfile.write('inbodydescription=model_.descriptionType(\n') self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription') showIndent(outfile, level) outfile.write('),\n') if self.location: showIndent(outfile, level) outfile.write('location=model_.locationType(\n') self.location.exportLiteral(outfile, level, name_='location') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('references=[\n') level += 1 for references in self.references: showIndent(outfile, level) outfile.write('model_.references(\n') references.exportLiteral(outfile, level, name_='references') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('referencedby=[\n') level += 1 for 
referencedby in self.referencedby: showIndent(outfile, level) outfile.write('model_.referencedby(\n') referencedby.exportLiteral(outfile, level, name_='referencedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('initonly'): self.initonly = attrs.get('initonly').value if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('volatile'): self.volatile = attrs.get('volatile').value if attrs.get('const'): self.const = attrs.get('const').value if attrs.get('raise'): self.raisexx = attrs.get('raise').value if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('readable'): self.readable = attrs.get('readable').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('explicit'): self.explicit = attrs.get('explicit').value if attrs.get('new'): self.new = attrs.get('new').value if attrs.get('final'): self.final = attrs.get('final').value if attrs.get('writable'): self.writable = attrs.get('writable').value if attrs.get('add'): self.add = attrs.get('add').value if attrs.get('static'): self.static = attrs.get('static').value if attrs.get('remove'): self.remove = attrs.get('remove').value if attrs.get('sealed'): self.sealed = attrs.get('sealed').value if attrs.get('mutable'): self.mutable = attrs.get('mutable').value if attrs.get('gettable'): self.gettable = attrs.get('gettable').value if attrs.get('inline'): self.inline = attrs.get('inline').value if attrs.get('settable'): self.settable = attrs.get('settable').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() 
obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'definition': definition_ = '' for text__content_ in child_.childNodes: definition_ += text__content_.nodeValue self.definition = definition_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'argsstring': argsstring_ = '' for text__content_ in child_.childNodes: argsstring_ += text__content_.nodeValue self.argsstring = argsstring_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'read': read_ = '' for text__content_ in child_.childNodes: read_ += text__content_.nodeValue self.read = read_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'write': write_ = '' for text__content_ in child_.childNodes: write_ += text__content_.nodeValue self.write = write_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'bitfield': bitfield_ = '' for text__content_ in child_.childNodes: bitfield_ += text__content_.nodeValue self.bitfield = bitfield_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'reimplements': obj_ = reimplementType.factory() obj_.build(child_) self.reimplements.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'reimplementedby': obj_ = reimplementType.factory() obj_.build(child_) self.reimplementedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'param': obj_ = paramType.factory() obj_.build(child_) self.param.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'enumvalue': obj_ = enumvalueType.factory() obj_.build(child_) self.enumvalue.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 
'initializer': obj_ = linkedTextType.factory() obj_.build(child_) self.set_initializer(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'exceptions': obj_ = linkedTextType.factory() obj_.build(child_) self.set_exceptions(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'inbodydescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_inbodydescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'references': obj_ = referenceType.factory() obj_.build(child_) self.references.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'referencedby': obj_ = referenceType.factory() obj_.build(child_) self.referencedby.append(obj_) # end class memberdefType class definition(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if definition.subclass: return definition.subclass(*args_, **kwargs_) else: return definition(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='definition') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) 
showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='definition'): pass def exportChildren(self, outfile, level, namespace_='', name_='definition'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='definition'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class definition class argsstring(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if argsstring.subclass: return argsstring.subclass(*args_, **kwargs_) else: return argsstring(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''): showIndent(outfile, level) 
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='argsstring') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'): pass def exportChildren(self, outfile, level, namespace_='', name_='argsstring'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='argsstring'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class argsstring class read(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if read.subclass: return read.subclass(*args_, **kwargs_) else: return read(*args_, **kwargs_) factory = staticmethod(factory) def 
getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='read') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='read'): pass def exportChildren(self, outfile, level, namespace_='', name_='read'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='read'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class read class write(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ 
    # (continuation of class `write`, whose header and __init__ end the
    #  previous chunk)
    def factory(*args_, **kwargs_):
        # Allow a registered subclass to override instantiation.
        if write.subclass:
            return write.subclass(*args_, **kwargs_)
        else:
            return write(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
        # Serialize this element (and its text content) as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='write')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='write'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='write'):
        # Re-expand the CDATA marker stored by buildChildren(), else
        # emit XML-escaped text.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='write'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class write


class bitfield(GeneratedsSuper):
    """Generated binding for the Doxygen XML 'bitfield' element (plain text content)."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if bitfield.subclass:
            return bitfield.subclass(*args_, **kwargs_)
        else:
            return bitfield(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='bitfield')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
        # Re-expand the CDATA marker stored by buildChildren(), else
        # emit XML-escaped text.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='bitfield'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class bitfield


class descriptionType(GeneratedsSuper):
    """Generated binding for Doxygen 'descriptionType'.

    Mixed content: interleaved text plus title/para/sect1/internal child
    elements, all kept in document order in self.content_ as
    MixedContainer wrappers.
    """
    subclass = None
    superclass = None
    def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
        # NOTE(review): __init__ never assigns self.title/para/sect1/internal,
        # so hasContent_() raises AttributeError unless the set_* methods ran
        # first — generateDS quirk, confirm callers always use build()/setters.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if descriptionType.subclass:
            return descriptionType.subclass(*args_, **kwargs_)
        else:
            return descriptionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_title(self): return self.title
    def set_title(self, title): self.title = title
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    def insert_para(self, index, value): self.para[index] = value
    def get_sect1(self): return self.sect1
    def set_sect1(self, sect1): self.sect1 = sect1
    def add_sect1(self, value): self.sect1.append(value)
    def insert_sect1(self, index, value): self.sect1[index] = value
    def get_internal(self): return self.internal
    def set_internal(self, internal): self.internal = internal
    def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''):
        # Mixed-content export: always open/close the tag and let the
        # ordered content_ list write itself.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='descriptionType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'):
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.title is not None or
            self.para is not None or
            self.sect1 is not None or
            self.internal is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='descriptionType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # NOTE(review): the same content_ block is emitted four times —
        # generateDS emits one copy per mixed-content member (title, para,
        # sect1, internal); output is intentionally repetitive.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        # Wrap each recognized child element (and bare text) in a
        # MixedContainer so document order is preserved in content_.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'title':
            childobj_ = docTitleType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'title', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            childobj_ = docParaType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'para', childobj_)
            # (the append for this branch and the remaining branches of
            #  buildChildren continue in the next chunk)
            # (continuation of descriptionType.buildChildren: the 'para'
            #  branch from the previous chunk finishes here, followed by
            #  the remaining element branches)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'sect1':
            childobj_ = docSect1Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'sect1', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'internal':
            childobj_ = docInternalType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'internal', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            # Bare text between elements is kept in order as a text item.
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class descriptionType


class enumvalueType(GeneratedsSuper):
    """Generated binding for Doxygen 'enumvalueType'.

    Attributes 'prot' and 'id'; mixed content holding name, initializer,
    briefdescription and detaileddescription children in document order
    (stored in self.content_ as MixedContainer wrappers).
    """
    subclass = None
    superclass = None
    def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
        # NOTE(review): name/initializer/briefdescription/detaileddescription
        # are accepted but never assigned here (generateDS quirk); hasContent_()
        # relies on the set_* methods having run — confirm callers use build().
        self.prot = prot
        self.id = id
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if enumvalueType.subclass:
            return enumvalueType.subclass(*args_, **kwargs_)
        else:
            return enumvalueType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_initializer(self): return self.initializer
    def set_initializer(self, initializer): self.initializer = initializer
    def get_briefdescription(self): return self.briefdescription
    def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
    def get_detaileddescription(self): return self.detaileddescription
    def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
    def get_prot(self): return self.prot
    def set_prot(self, prot): self.prot = prot
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''):
        # Mixed-content export: always open/close the tag and let the
        # ordered content_ list write itself.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='enumvalueType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'):
        if self.prot is not None:
            outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'):
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.name is not None or
            self.initializer is not None or
            self.briefdescription is not None or
            self.detaileddescription is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='enumvalueType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.prot is not None:
            showIndent(outfile, level)
            outfile.write('prot = "%s",\n' % (self.prot,))
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        # NOTE(review): the content_ block repeats four times — generateDS
        # emits one copy per mixed-content member; output is intentionally
        # repetitive.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('prot'):
            self.prot = attrs.get('prot').value
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'name':
            # Simple text child: flatten its text nodes into one string.
            value_ = []
            for text_ in child_.childNodes:
                value_.append(text_.nodeValue)
            valuestr_ = ''.join(value_)
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeString, 'name', valuestr_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'initializer':
            childobj_ = linkedTextType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'initializer', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'briefdescription':
            childobj_ = descriptionType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'briefdescription', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'detaileddescription':
            childobj_ = descriptionType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'detaileddescription', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class enumvalueType
# (the stray 'enumvalueType' token at this point in the original is the tail
#  of the '# end class enumvalueType' comment closed in the previous chunk)


class templateparamlistType(GeneratedsSuper):
    """Generated binding for Doxygen 'templateparamlistType': a sequence of <param> children."""
    subclass = None
    superclass = None
    def __init__(self, param=None):
        # Default to a fresh list per instance (avoids shared mutable default).
        if param is None:
            self.param = []
        else:
            self.param = param
    def factory(*args_, **kwargs_):
        if templateparamlistType.subclass:
            return templateparamlistType.subclass(*args_, **kwargs_)
        else:
            return templateparamlistType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_param(self): return self.param
    def set_param(self, param): self.param = param
    def add_param(self, value): self.param.append(value)
    def insert_param(self, index, value): self.param[index] = value
    def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'):
        for param_ in self.param:
            param_.export(outfile, level, namespace_, name_='param')
    def hasContent_(self):
        if (
            self.param is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='templateparamlistType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Emit the param list as a Python-literal constructor call list.
        showIndent(outfile, level)
        outfile.write('param=[\n')
        level += 1
        for param in self.param:
            showIndent(outfile, level)
            outfile.write('model_.param(\n')
            param.exportLiteral(outfile, level, name_='param')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'param':
            obj_ = paramType.factory()
            obj_.build(child_)
            self.param.append(obj_)
# end class templateparamlistType


class paramType(GeneratedsSuper):
    """Generated binding for Doxygen 'paramType'.

    Child elements: type (linkedTextType), declname/defname/array (text),
    defval (linkedTextType), briefdescription (descriptionType).
    """
    subclass = None
    superclass = None
    def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None):
        self.type_ = type_
        self.declname = declname
        self.defname = defname
        self.array = array
        self.defval = defval
        self.briefdescription = briefdescription
    def factory(*args_, **kwargs_):
        if paramType.subclass:
            return paramType.subclass(*args_, **kwargs_)
        else:
            return paramType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_type(self): return self.type_
    def set_type(self, type_): self.type_ = type_
    def get_declname(self): return self.declname
    def set_declname(self, declname): self.declname = declname
    def get_defname(self): return self.defname
    def set_defname(self, defname): self.defname = defname
    def get_array(self): return self.array
    def set_array(self, array): self.array = array
    def get_defval(self): return self.defval
    def set_defval(self, defval): self.defval = defval
    def get_briefdescription(self): return self.briefdescription
    def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
    def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='paramType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            # (the closing-tag write and the empty-element else-branch of
            #  export() continue in the next chunk)
showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='paramType'): pass def exportChildren(self, outfile, level, namespace_='', name_='paramType'): if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.declname is not None: showIndent(outfile, level) outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) if self.defname is not None: showIndent(outfile, level) outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) if self.array is not None: showIndent(outfile, level) outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) if self.defval: self.defval.export(outfile, level, namespace_, name_='defval') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') def hasContent_(self): if ( self.type_ is not None or self.declname is not None or self.defname is not None or self.array is not None or self.defval is not None or self.briefdescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='paramType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.type_: showIndent(outfile, level) outfile.write('type_=model_.linkedTextType(\n') self.type_.exportLiteral(outfile, level, name_='type') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('declname=%s,\n' % 
quote_python(self.declname).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding)) if self.defval: showIndent(outfile, level) outfile.write('defval=model_.linkedTextType(\n') self.defval.exportLiteral(outfile, level, name_='defval') showIndent(outfile, level) outfile.write('),\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'declname': declname_ = '' for text__content_ in child_.childNodes: declname_ += text__content_.nodeValue self.declname = declname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'defname': defname_ = '' for text__content_ in child_.childNodes: defname_ += text__content_.nodeValue self.defname = defname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'array': array_ = '' for text__content_ in child_.childNodes: array_ += text__content_.nodeValue self.array = array_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'defval': obj_ = linkedTextType.factory() obj_.build(child_) self.set_defval(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) # end 
class declname(GeneratedsSuper):
    """Text-only element holding a parameter's declared name.

    The value is accumulated from the text/CDATA children of the XML
    node in build() and written back out, escaped, by export().
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=''):
        # Raw character data of the element.
        self.valueOf_ = valueOf_

    def factory(*args_, **kwargs_):
        # Honour a registered subclass, if any.
        cls = declname.subclass or declname
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def getValueOf_(self):
        return self.valueOf_

    def setValueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''):
        # Emit the element; use the self-closing form when there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='declname')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='declname'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='declname'):
        # Restore CDATA markers that buildChildren() flattened; otherwise
        # emit plain XML-quoted text.
        if '![CDATA' in self.valueOf_:
            text = quote_xml('%s' % self.valueOf_)
            text = text.replace('![CDATA', '<![CDATA')
            text = text.replace(']]', ']]>')
            outfile.write(text)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))

    def hasContent_(self):
        return self.valueOf_ is not None

    def exportLiteral(self, outfile, level, name_='declname'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        # Concatenate text; keep CDATA sections marked so export() can
        # re-open them.
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class declname
class defname(GeneratedsSuper):
    """Text-only element holding a parameter's definition name.

    Mirrors the other simple text binding classes: build() collects
    text/CDATA children into ``valueOf_`` and export() writes it back.
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=''):
        # Raw character data of the element.
        self.valueOf_ = valueOf_

    def factory(*args_, **kwargs_):
        cls = defname.subclass or defname
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def getValueOf_(self):
        return self.valueOf_

    def setValueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='defname')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='defname'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='defname'):
        # CDATA markers inserted by buildChildren() are re-opened here.
        if '![CDATA' in self.valueOf_:
            text = quote_xml('%s' % self.valueOf_)
            text = text.replace('![CDATA', '<![CDATA')
            text = text.replace(']]', ']]>')
            outfile.write(text)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))

    def hasContent_(self):
        return self.valueOf_ is not None

    def exportLiteral(self, outfile, level, name_='defname'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class defname
class array(GeneratedsSuper):
    """Text-only element holding a parameter's array specifier.

    Same shape as the other leaf text classes: ``valueOf_`` carries the
    accumulated character data.
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_

    def factory(*args_, **kwargs_):
        cls = array.subclass or array
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def getValueOf_(self):
        return self.valueOf_

    def setValueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='array')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='array'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='array'):
        # Re-open flattened CDATA sections before writing.
        if '![CDATA' in self.valueOf_:
            text = quote_xml('%s' % self.valueOf_)
            text = text.replace('![CDATA', '<![CDATA')
            text = text.replace(']]', ']]>')
            outfile.write(text)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))

    def hasContent_(self):
        return self.valueOf_ is not None

    def exportLiteral(self, outfile, level, name_='array'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class array
class linkedTextType(GeneratedsSuper):
    """Mixed-content text that may contain <ref> elements.

    Content is kept, in document order, as MixedContainer items in
    ``content_`` (plain text and docRefTextType children alike).
    """
    subclass = None
    superclass = None

    def __init__(self, ref=None, mixedclass_=None, content_=None):
        # BUGFIX: ``ref`` was accepted but never stored, so get_ref(),
        # add_ref(), insert_ref() and hasContent_() all raised
        # AttributeError on a freshly constructed instance.
        self.ref = ref
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_

    def factory(*args_, **kwargs_):
        if linkedTextType.subclass:
            return linkedTextType.subclass(*args_, **kwargs_)
        else:
            return linkedTextType(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_ref(self):
        return self.ref

    def set_ref(self, ref):
        self.ref = ref

    def add_ref(self, value):
        self.ref.append(value)

    def insert_ref(self, index, value):
        self.ref[index] = value

    def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
        # Mixed content: always emit open/children/close with no
        # indentation around the children.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='linkedTextType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'):
        # NOTE(review): item_.name is passed in the namespace_ position —
        # this matches the generated MixedContainer.export convention.
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)

    def hasContent_(self):
        if (
            self.ref is not None
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='linkedTextType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        # <ref> children become complex MixedContainer items; bare text
        # nodes become text items, preserving document order.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'ref':
            childobj_ = docRefTextType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'ref', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class linkedTextType
class graphType(GeneratedsSuper):
    """Container for a doxygen dependency graph: a list of node elements."""
    subclass = None
    superclass = None

    def __init__(self, node=None):
        # List of nodeType children.
        self.node = [] if node is None else node

    def factory(*args_, **kwargs_):
        cls = graphType.subclass or graphType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_node(self):
        return self.node

    def set_node(self, node):
        self.node = node

    def add_node(self, value):
        self.node.append(value)

    def insert_node(self, index, value):
        self.node[index] = value

    def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='graphType')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='graphType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='graphType'):
        for node_ in self.node:
            node_.export(outfile, level, namespace_, name_='node')

    def hasContent_(self):
        # NOTE: the list itself is never None, so this is always true;
        # kept as-is to match generated-code behaviour.
        return self.node is not None

    def exportLiteral(self, outfile, level, name_='graphType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('node=[\n')
        level += 1
        for node in self.node:
            showIndent(outfile, level)
            outfile.write('model_.node(\n')
            node.exportLiteral(outfile, level, name_='node')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'node':
            obj_ = nodeType.factory()
            obj_.build(child_)
            self.node.append(obj_)
# end class graphType
class nodeType(GeneratedsSuper):
    """One node of a doxygen graph.

    Carries an ``id`` attribute, a textual ``label``, an optional
    ``link`` and any number of ``childnode`` references.
    """
    subclass = None
    superclass = None

    def __init__(self, id=None, label=None, link=None, childnode=None):
        self.id = id
        self.label = label
        self.link = link
        self.childnode = [] if childnode is None else childnode

    def factory(*args_, **kwargs_):
        cls = nodeType.subclass or nodeType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_label(self):
        return self.label

    def set_label(self, label):
        self.label = label

    def get_link(self):
        return self.link

    def set_link(self, link):
        self.link = link

    def get_childnode(self):
        return self.childnode

    def set_childnode(self, childnode):
        self.childnode = childnode

    def add_childnode(self, value):
        self.childnode.append(value)

    def insert_childnode(self, index, value):
        self.childnode[index] = value

    def get_id(self):
        return self.id

    def set_id(self, id):
        self.id = id

    def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='nodeType')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'):
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))

    def exportChildren(self, outfile, level, namespace_='', name_='nodeType'):
        if self.label is not None:
            showIndent(outfile, level)
            outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
        if self.link:
            self.link.export(outfile, level, namespace_, name_='link')
        for childnode_ in self.childnode:
            childnode_.export(outfile, level, namespace_, name_='childnode')

    def hasContent_(self):
        return (
            self.label is not None or
            self.link is not None or
            self.childnode is not None
        )

    def exportLiteral(self, outfile, level, name_='nodeType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding))
        if self.link:
            showIndent(outfile, level)
            outfile.write('link=model_.linkType(\n')
            self.link.exportLiteral(outfile, level, name_='link')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('childnode=[\n')
        level += 1
        for childnode in self.childnode:
            showIndent(outfile, level)
            outfile.write('model_.childnode(\n')
            childnode.exportLiteral(outfile, level, name_='childnode')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        id_attr = attrs.get('id')
        if id_attr:
            self.id = id_attr.value

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'label':
            # Accumulate all text children into the label string.
            label_ = ''
            for text__content_ in child_.childNodes:
                label_ += text__content_.nodeValue
            self.label = label_
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'link':
            obj_ = linkType.factory()
            obj_.build(child_)
            self.set_link(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'childnode':
            obj_ = childnodeType.factory()
            obj_.build(child_)
            self.childnode.append(obj_)
# end class nodeType
class label(GeneratedsSuper):
    """Text-only element holding a graph node's label string."""
    subclass = None
    superclass = None

    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_

    def factory(*args_, **kwargs_):
        cls = label.subclass or label
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def getValueOf_(self):
        return self.valueOf_

    def setValueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='label')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='label'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='label'):
        # Re-open CDATA markers that build() flattened.
        if '![CDATA' in self.valueOf_:
            text = quote_xml('%s' % self.valueOf_)
            text = text.replace('![CDATA', '<![CDATA')
            text = text.replace(']]', ']]>')
            outfile.write(text)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))

    def hasContent_(self):
        return self.valueOf_ is not None

    def exportLiteral(self, outfile, level, name_='label'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class label
class childnodeType(GeneratedsSuper):
    """Graph edge: references a child node by refid with edge labels.

    ``relation`` and ``refid`` are attributes; ``edgelabel`` is a list
    of plain strings.
    """
    subclass = None
    superclass = None

    def __init__(self, relation=None, refid=None, edgelabel=None):
        self.relation = relation
        self.refid = refid
        self.edgelabel = [] if edgelabel is None else edgelabel

    def factory(*args_, **kwargs_):
        cls = childnodeType.subclass or childnodeType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_edgelabel(self):
        return self.edgelabel

    def set_edgelabel(self, edgelabel):
        self.edgelabel = edgelabel

    def add_edgelabel(self, value):
        self.edgelabel.append(value)

    def insert_edgelabel(self, index, value):
        self.edgelabel[index] = value

    def get_relation(self):
        return self.relation

    def set_relation(self, relation):
        self.relation = relation

    def get_refid(self):
        return self.refid

    def set_refid(self, refid):
        self.refid = refid

    def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='childnodeType')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'):
        if self.relation is not None:
            outfile.write(' relation=%s' % (quote_attrib(self.relation), ))
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))

    def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'):
        for edgelabel_ in self.edgelabel:
            showIndent(outfile, level)
            outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))

    def hasContent_(self):
        return self.edgelabel is not None

    def exportLiteral(self, outfile, level, name_='childnodeType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        if self.relation is not None:
            showIndent(outfile, level)
            outfile.write('relation = "%s",\n' % (self.relation,))
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('edgelabel=[\n')
        level += 1
        for edgelabel in self.edgelabel:
            showIndent(outfile, level)
            outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding))
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        relation_attr = attrs.get('relation')
        if relation_attr:
            self.relation = relation_attr.value
        refid_attr = attrs.get('refid')
        if refid_attr:
            self.refid = refid_attr.value

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'edgelabel':
            # Flatten the element's text children into one string.
            edgelabel_ = ''
            for text__content_ in child_.childNodes:
                edgelabel_ += text__content_.nodeValue
            self.edgelabel.append(edgelabel_)
# end class childnodeType
class edgelabel(GeneratedsSuper):
    """Text-only element holding an edge label string."""
    subclass = None
    superclass = None

    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_

    def factory(*args_, **kwargs_):
        cls = edgelabel.subclass or edgelabel
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def getValueOf_(self):
        return self.valueOf_

    def setValueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='edgelabel')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'):
        # Re-open CDATA markers that build() flattened.
        if '![CDATA' in self.valueOf_:
            text = quote_xml('%s' % self.valueOf_)
            text = text.replace('![CDATA', '<![CDATA')
            text = text.replace(']]', ']]>')
            outfile.write(text)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))

    def hasContent_(self):
        return self.valueOf_ is not None

    def exportLiteral(self, outfile, level, name_='edgelabel'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class edgelabel
class linkType(GeneratedsSuper):
    """Hyperlink element: ``refid``/``external`` attributes plus text."""
    subclass = None
    superclass = None

    def __init__(self, refid=None, external=None, valueOf_=''):
        self.refid = refid
        self.external = external
        self.valueOf_ = valueOf_

    def factory(*args_, **kwargs_):
        cls = linkType.subclass or linkType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_refid(self):
        return self.refid

    def set_refid(self, refid):
        self.refid = refid

    def get_external(self):
        return self.external

    def set_external(self, external):
        self.external = external

    def getValueOf_(self):
        return self.valueOf_

    def setValueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='linkType')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='linkType'):
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
        if self.external is not None:
            outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))

    def exportChildren(self, outfile, level, namespace_='', name_='linkType'):
        # Re-open flattened CDATA sections before writing.
        if '![CDATA' in self.valueOf_:
            text = quote_xml('%s' % self.valueOf_)
            text = text.replace('![CDATA', '<![CDATA')
            text = text.replace(']]', ']]>')
            outfile.write(text)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))

    def hasContent_(self):
        return self.valueOf_ is not None

    def exportLiteral(self, outfile, level, name_='linkType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
        if self.external is not None:
            showIndent(outfile, level)
            outfile.write('external = %s,\n' % (self.external,))

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        refid_attr = attrs.get('refid')
        if refid_attr:
            self.refid = refid_attr.value
        external_attr = attrs.get('external')
        if external_attr:
            self.external = external_attr.value

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'
# end class linkType
class listingType(GeneratedsSuper):
    """Program listing: an ordered list of codeline elements."""
    subclass = None
    superclass = None

    def __init__(self, codeline=None):
        self.codeline = [] if codeline is None else codeline

    def factory(*args_, **kwargs_):
        cls = listingType.subclass or listingType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_codeline(self):
        return self.codeline

    def set_codeline(self, codeline):
        self.codeline = codeline

    def add_codeline(self, value):
        self.codeline.append(value)

    def insert_codeline(self, index, value):
        self.codeline[index] = value

    def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='listingType')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
        for codeline_ in self.codeline:
            codeline_.export(outfile, level, namespace_, name_='codeline')

    def hasContent_(self):
        return self.codeline is not None

    def exportLiteral(self, outfile, level, name_='listingType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('codeline=[\n')
        level += 1
        for codeline in self.codeline:
            showIndent(outfile, level)
            outfile.write('model_.codeline(\n')
            codeline.exportLiteral(outfile, level, name_='codeline')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node_):
        self.buildAttributes(node_.attributes)
        for child_ in node_.childNodes:
            self.buildChildren(child_, child_.nodeName.split(':')[-1])

    def buildAttributes(self, attrs):
        pass

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'codeline':
            obj_ = codelineType.factory()
            obj_.build(child_)
            self.codeline.append(obj_)
# end class listingType
class codelineType(GeneratedsSuper):
    """One line of a program listing.

    Attributes: ``external``, ``lineno`` (integer), ``refkind``,
    ``refid``.  Children: a list of highlight elements.
    """
    subclass = None
    superclass = None

    def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
        self.external = external
        self.lineno = lineno
        self.refkind = refkind
        self.refid = refid
        if highlight is None:
            self.highlight = []
        else:
            self.highlight = highlight

    def factory(*args_, **kwargs_):
        if codelineType.subclass:
            return codelineType.subclass(*args_, **kwargs_)
        else:
            return codelineType(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_highlight(self):
        return self.highlight

    def set_highlight(self, highlight):
        self.highlight = highlight

    def add_highlight(self, value):
        self.highlight.append(value)

    def insert_highlight(self, index, value):
        self.highlight[index] = value

    def get_external(self):
        return self.external

    def set_external(self, external):
        self.external = external

    def get_lineno(self):
        return self.lineno

    def set_lineno(self, lineno):
        self.lineno = lineno

    def get_refkind(self):
        return self.refkind

    def set_refkind(self, refkind):
        self.refkind = refkind

    def get_refid(self):
        return self.refid

    def set_refid(self, refid):
        self.refid = refid

    def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='codelineType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')

    def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
        if self.external is not None:
            outfile.write(' external=%s' % (quote_attrib(self.external), ))
        if self.lineno is not None:
            outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno'))
        if self.refkind is not None:
            outfile.write(' refkind=%s' % (quote_attrib(self.refkind), ))
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))

    def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
        for highlight_ in self.highlight:
            highlight_.export(outfile, level, namespace_, name_='highlight')

    def hasContent_(self):
        if (
            self.highlight is not None
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='codelineType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        if self.external is not None:
            showIndent(outfile, level)
            outfile.write('external = "%s",\n' % (self.external,))
        if self.lineno is not None:
            showIndent(outfile, level)
            outfile.write('lineno = %s,\n' % (self.lineno,))
        if self.refkind is not None:
            showIndent(outfile, level)
            outfile.write('refkind = "%s",\n' % (self.refkind,))
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))

    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('highlight=[\n')
        level += 1
        for highlight in self.highlight:
            showIndent(outfile, level)
            outfile.write('model_.highlight(\n')
            highlight.exportLiteral(outfile, level, name_='highlight')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)

    def buildAttributes(self, attrs):
        if attrs.get('external'):
            self.external = attrs.get('external').value
        if attrs.get('lineno'):
            try:
                self.lineno = int(attrs.get('lineno').value)
            # FIX: 'except ValueError, exp:' is Python-2-only syntax; the
            # 'as' form is valid on Python 2.6+ and Python 3.
            except ValueError as exp:
                raise ValueError('Bad integer attribute (lineno): %s' % exp)
        if attrs.get('refkind'):
            self.refkind = attrs.get('refkind').value
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'highlight':
            obj_ = highlightType.factory()
            obj_.build(child_)
            self.highlight.append(obj_)
# end class codelineType
class highlightType(GeneratedsSuper):
    """Syntax-highlight span inside a codeline (mixed content).

    ``classxx`` maps the XML 'class' attribute (renamed to avoid the
    keyword).  Text, <sp> and <ref> children are kept in document order
    as MixedContainer items in ``content_``.
    """
    subclass = None
    superclass = None

    def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None):
        self.classxx = classxx
        # BUGFIX: ``sp`` and ``ref`` were accepted but never stored, so
        # get_sp()/get_ref()/hasContent_() raised AttributeError on a
        # freshly constructed instance.
        self.sp = sp
        self.ref = ref
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_

    def factory(*args_, **kwargs_):
        if highlightType.subclass:
            return highlightType.subclass(*args_, **kwargs_)
        else:
            return highlightType(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_sp(self):
        return self.sp

    def set_sp(self, sp):
        self.sp = sp

    def add_sp(self, value):
        self.sp.append(value)

    def insert_sp(self, index, value):
        self.sp[index] = value

    def get_ref(self):
        return self.ref

    def set_ref(self, ref):
        self.ref = ref

    def add_ref(self, value):
        self.ref.append(value)

    def insert_ref(self, index, value):
        self.ref[index] = value

    def get_class(self):
        return self.classxx

    def set_class(self, classxx):
        self.classxx = classxx

    def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''):
        # Mixed content: always open/children/close, no inner indentation.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='highlightType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'):
        if self.classxx is not None:
            outfile.write(' class=%s' % (quote_attrib(self.classxx), ))

    def exportChildren(self, outfile, level, namespace_='', name_='highlightType'):
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)

    def hasContent_(self):
        if (
            self.sp is not None or
            self.ref is not None
            ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='highlightType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, name_):
        if self.classxx is not None:
            showIndent(outfile, level)
            outfile.write('classxx = "%s",\n' % (self.classxx,))

    def exportLiteralChildren(self, outfile, level, name_):
        # BUGFIX: the generated code emitted the content_ list twice
        # (verbatim copy-paste duplication); write it once.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)

    def buildAttributes(self, attrs):
        if attrs.get('class'):
            self.classxx = attrs.get('class').value

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'sp':
            # <sp> elements carry whitespace; flatten their text children.
            value_ = []
            for text_ in child_.childNodes:
                value_.append(text_.nodeValue)
            valuestr_ = ''.join(value_)
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeString, 'sp', valuestr_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'ref':
            childobj_ = docRefTextType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'ref', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class highlightType
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        # The XML attribute is literally named 'class'; stored as classxx.
        if attrs.get('class'):
            self.classxx = attrs.get('class').value
    def buildChildren(self, child_, nodeName_):
        # Collect <sp>, <ref> and text nodes in document order into content_.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'sp':
            value_ = []
            for text_ in child_.childNodes:
                value_.append(text_.nodeValue)
            valuestr_ = ''.join(value_)
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeString, 'sp', valuestr_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'ref':
            childobj_ = docRefTextType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'ref', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class highlightType


class sp(GeneratedsSuper):
    """Generated binding for the Doxygen <sp> (space) element.

    Simple text-content element; the character data is kept in valueOf_.
    """
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if sp.subclass:
            return sp.subclass(*args_, **kwargs_)
        else:
            return sp(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='sp')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='sp'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='sp'):
        # Turn the '![CDATA[...]]' sentinel written by buildChildren()
        # back into a real CDATA section on output.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='sp'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        # Accumulate text; CDATA sections are wrapped in sentinel markers
        # that exportChildren() later converts back to real CDATA.
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class sp


class referenceType(GeneratedsSuper):
    """Generated binding for Doxygen <references>/<referencedby> elements."""
    subclass = None
    superclass = None
    def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): the valueOf_ argument is not stored here; build()
        # resets self.valueOf_ before parsing.  Generator artifact.
        self.endline = endline
        self.startline = startline
        self.refid = refid
        self.compoundref = compoundref
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if referenceType.subclass:
            return referenceType.subclass(*args_, **kwargs_)
        else:
            return referenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_endline(self): return self.endline
    def set_endline(self, endline): self.endline = endline
    def get_startline(self): return self.startline
    def set_startline(self, startline): self.startline = startline
    def get_refid(self): return self.refid
    def set_refid(self, refid): self.refid = refid
    def get_compoundref(self): return self.compoundref
    def set_compoundref(self, compoundref): self.compoundref = compoundref
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='referenceType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'):
        if self.endline is not None:
            outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline'))
        if self.startline is not None:
            outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline'))
        if self.refid is not None:
            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
        if self.compoundref is not None:
            outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='referenceType'):
        # Same CDATA sentinel handling as class sp above.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='referenceType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.endline is not None:
            showIndent(outfile, level)
            outfile.write('endline = %s,\n' % (self.endline,))
        if self.startline is not None:
            showIndent(outfile, level)
            outfile.write('startline = %s,\n' % (self.startline,))
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
        if self.compoundref is not None:
            showIndent(outfile, level)
            outfile.write('compoundref = %s,\n' % (self.compoundref,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('endline'):
            try:
                self.endline = int(attrs.get('endline').value)
            except ValueError, exp:
                raise ValueError('Bad integer attribute (endline): %s' % exp)
        if attrs.get('startline'):
            try:
                self.startline = int(attrs.get('startline').value)
            except ValueError, exp:
                raise ValueError('Bad integer attribute (startline): %s' % exp)
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
        if attrs.get('compoundref'):
            self.compoundref = attrs.get('compoundref').value
    def buildChildren(self, child_, nodeName_):
        # NOTE(review): text nodes are recorded twice on purpose by the
        # generator -- once into content_ (mixed content) and once appended
        # to valueOf_.  Preserved verbatim.
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class referenceType


class locationType(GeneratedsSuper):
    """Generated binding for the Doxygen <location> element."""
    subclass = None
    superclass = None
    def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
        self.bodystart = bodystart
        self.line = line
        self.bodyend = bodyend
        self.bodyfile = bodyfile
        self.file = file
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if locationType.subclass:
            return locationType.subclass(*args_, **kwargs_)
        else:
            return locationType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_bodystart(self): return self.bodystart
    def set_bodystart(self, bodystart): self.bodystart = bodystart
    def get_line(self): return self.line
    def set_line(self, line): self.line = line
    def get_bodyend(self): return self.bodyend
    def set_bodyend(self, bodyend): self.bodyend = bodyend
    def get_bodyfile(self): return self.bodyfile
    def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile
    def get_file(self): return self.file
    def set_file(self, file): self.file = file
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='locationType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='locationType'):
        if self.bodystart is not None:
            outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart'))
        if self.line is not None:
            outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line'))
        if self.bodyend is not None:
            outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend'))
        if self.bodyfile is not None:
            outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
        if self.file is not None:
            outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='locationType'):
        # Same CDATA sentinel handling as class sp above.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='locationType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.bodystart is not None:
            showIndent(outfile, level)
            outfile.write('bodystart = %s,\n' % (self.bodystart,))
        if self.line is not None:
            showIndent(outfile, level)
            outfile.write('line = %s,\n' % (self.line,))
        if self.bodyend is not None:
            showIndent(outfile, level)
            outfile.write('bodyend = %s,\n' % (self.bodyend,))
        if self.bodyfile is not None:
            showIndent(outfile, level)
            outfile.write('bodyfile = %s,\n' % (self.bodyfile,))
        if self.file is not None:
            showIndent(outfile, level)
            outfile.write('file = %s,\n' % (self.file,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        # Integer attributes are validated; Python 2 except syntax as used
        # throughout this generated module.
        if attrs.get('bodystart'):
            try:
                self.bodystart = int(attrs.get('bodystart').value)
            except ValueError, exp:
                raise ValueError('Bad integer attribute (bodystart): %s' % exp)
        if attrs.get('line'):
            try:
                self.line = int(attrs.get('line').value)
            except ValueError, exp:
                raise ValueError('Bad integer attribute (line): %s' % exp)
        if attrs.get('bodyend'):
            try:
                self.bodyend = int(attrs.get('bodyend').value)
            except ValueError, exp:
                raise ValueError('Bad integer attribute (bodyend): %s' % exp)
        if attrs.get('bodyfile'):
            self.bodyfile = attrs.get('bodyfile').value
        if attrs.get('file'):
            self.file = attrs.get('file').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class locationType


class docSect1Type(GeneratedsSuper):
    """Generated binding for the Doxygen <sect1> documentation section."""
    subclass = None
    superclass = None
    def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
        # Only 'id' is stored; title/para/sect2/internal arrive as mixed
        # content and are collected in self.content_ by buildChildren().
        self.id = id
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docSect1Type.subclass:
            return docSect1Type.subclass(*args_, **kwargs_)
        else:
            return docSect1Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_title(self): return self.title
    def set_title(self, title): self.title = title
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    def insert_para(self, index, value): self.para[index] = value
    def get_sect2(self): return self.sect2
    def set_sect2(self, sect2): self.sect2 = sect2
    def add_sect2(self, value): self.sect2.append(value)
    def insert_sect2(self, index, value): self.sect2[index] = value
    def get_internal(self): return self.internal
    def set_internal(self, internal): self.internal = internal
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docSect1Type')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'):
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'):
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        # NOTE(review): reads members (title/para/sect2/internal) that
        # __init__ never assigns; generator artifact, preserved.
        if (
            self.title is not None or
            self.para is not None or
            self.sect2 is not None or
            self.internal is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docSect1Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        # NOTE(review): the generator emitted this block once per member
        # (title, para, sect2, internal), so content_ is written out four
        # times.  Preserved verbatim.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        # Wrap each child (title/para/sect2/internal/text) in a
        # MixedContainer and append in document order.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'title':
            childobj_ = docTitleType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'title', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            childobj_ = docParaType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'para', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'sect2':
            childobj_ = docSect2Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'sect2', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'internal':
            childobj_ = docInternalS1Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'internal', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docSect1Type


class docSect2Type(GeneratedsSuper):
    """Generated binding for the Doxygen <sect2> documentation section."""
    subclass = None
    superclass = None
    def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
        # Only 'id' is stored; other members arrive as mixed content.
        self.id = id
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docSect2Type.subclass:
            return docSect2Type.subclass(*args_, **kwargs_)
        else:
            return docSect2Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_title(self): return self.title
    def set_title(self, title): self.title = title
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    def insert_para(self, index, value): self.para[index] = value
    def get_sect3(self): return self.sect3
    def set_sect3(self, sect3): self.sect3 = sect3
    def add_sect3(self, value): self.sect3.append(value)
    def insert_sect3(self, index, value): self.sect3[index] = value
    def get_internal(self): return self.internal
    def set_internal(self, internal): self.internal = internal
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docSect2Type')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'):
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'):
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.title is not None or
            self.para is not None or
            self.sect3 is not None or
            self.internal is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docSect2Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        # NOTE(review): the generator emitted this block once per member,
        # so content_ is written out four times.  Preserved verbatim.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        # Wrap each child (title/para/sect3/internal/text) in a
        # MixedContainer and append in document order.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'title':
            childobj_ = docTitleType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'title', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            childobj_ = docParaType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'para', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'sect3':
            childobj_ = docSect3Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'sect3', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'internal':
            childobj_ = docInternalS2Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'internal', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docSect2Type


class docSect3Type(GeneratedsSuper):
    """Generated binding for the Doxygen <sect3> documentation section."""
    subclass = None
    superclass = None
    def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
        # Only 'id' is stored; other members arrive as mixed content.
        self.id = id
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docSect3Type.subclass:
            return docSect3Type.subclass(*args_, **kwargs_)
        else:
            return docSect3Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_title(self): return self.title
    def set_title(self, title): self.title = title
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    def insert_para(self, index, value): self.para[index] = value
    def get_sect4(self): return self.sect4
    def set_sect4(self, sect4): self.sect4 = sect4
    def add_sect4(self, value): self.sect4.append(value)
    def insert_sect4(self, index, value): self.sect4[index] = value
    def get_internal(self): return self.internal
    def set_internal(self, internal): self.internal = internal
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docSect3Type')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'):
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'):
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.title is not None or
            self.para is not None or
            self.sect4 is not None or
            self.internal is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docSect3Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        # NOTE(review): the generator emitted this block once per member,
        # so content_ is written out four times.  Preserved verbatim.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'title':
            childobj_ = docTitleType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'title', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            childobj_ = docParaType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'para', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'sect4':
            childobj_ = docSect4Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'sect4', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'internal':
            childobj_ = docInternalS3Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'internal', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docSect3Type


class docSect4Type(GeneratedsSuper):
    """Generated binding for the Doxygen <sect4> documentation section."""
    subclass = None
    superclass = None
    def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None):
        # Only 'id' is stored; other members arrive as mixed content.
        self.id = id
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docSect4Type.subclass:
            return docSect4Type.subclass(*args_, **kwargs_)
        else:
            return docSect4Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_title(self): return self.title
    def set_title(self, title): self.title = title
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    def insert_para(self, index, value): self.para[index] = value
    def get_internal(self): return self.internal
    def set_internal(self, internal): self.internal = internal
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docSect4Type')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'):
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'):
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.title is not None or
            self.para is not None or
            self.internal is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docSect4Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        # NOTE(review): the generator emitted this block once per member
        # (title, para, internal), so content_ is written out three times.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'title':
            childobj_ = docTitleType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'title', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            childobj_ = docParaType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'para', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'internal':
            childobj_ = docInternalS4Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'internal', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docSect4Type


class docInternalType(GeneratedsSuper):
    """Generated binding for the top-level Doxygen <internal> element."""
    subclass = None
    superclass = None
    def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
        # para/sect1 arguments are accepted but not stored; mixed content
        # is collected in self.content_.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docInternalType.subclass:
            return docInternalType.subclass(*args_, **kwargs_)
        else:
            return docInternalType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # NOTE(review): these accessors read self.para/self.sect1, which
    # __init__ never assigns -- generator artifact, preserved.
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    def insert_para(self, index, value): self.para[index] = value
    def get_sect1(self): return self.sect1
    def set_sect1(self, sect1): self.sect1 = sect1
    def add_sect1(self, value): self.sect1.append(value)
    def insert_sect1(self, index, value): self.sect1[index] = value
def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect1 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ 
nodeName_ == 'sect1': childobj_ = docSect1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect1', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalType class docInternalS1Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS1Type.subclass: return docInternalS1Type.subclass(*args_, **kwargs_) else: return docInternalS1Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect2(self): return self.sect2 def set_sect2(self, sect2): self.sect2 = sect2 def add_sect2(self, value): self.sect2.append(value) def insert_sect2(self, index, value): self.sect2[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para 
is not None or self.sect2 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS1Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect2': childobj_ = docSect2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect2', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS1Type class docInternalS2Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: 
self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS2Type.subclass: return docInternalS2Type.subclass(*args_, **kwargs_) else: return docInternalS2Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect3 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS2Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: 
item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect3': childobj_ = docSect3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS2Type class docInternalS3Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS3Type.subclass: return docInternalS3Type.subclass(*args_, **kwargs_) else: return docInternalS3Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def export(self, 
outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect3 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS3Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ 
            nodeName_ == 'sect3':
            # NOTE(review): the element name matched here is 'sect3' but the
            # child built is a docSect4Type -- looks like a generateDS
            # artifact; confirm against the Doxygen compound.xsd schema.
            childobj_ = docSect4Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'sect3', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            # Plain character data becomes a text-category mixed-content item.
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docInternalS3Type


class docInternalS4Type(GeneratedsSuper):
    """Generated binding for an <internal> element at sect4 depth.

    Children (<para> elements and bare text) are collected in document
    order in ``self.content_`` as MixedContainer wrappers.
    """
    subclass = None
    superclass = None
    def __init__(self, para=None, mixedclass_=None, content_=None):
        # NOTE(review): the ``para`` argument is accepted but never stored;
        # ``hasContent_`` reads ``self.para``, which is only ever set via
        # ``set_para`` -- confirm callers never rely on the ctor argument.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if docInternalS4Type.subclass:
            return docInternalS4Type.subclass(*args_, **kwargs_)
        else:
            return docInternalS4Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    def insert_para(self, index, value): self.para[index] = value
    def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''):
        # Serialize this element and its mixed content as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'):
        # No attributes are defined for this element type.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'):
        # Emit the mixed content in document order.
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.para is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docInternalS4Type'):
        # Write a Python-literal representation of this instance.
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        # Populate this instance from a DOM element node.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            # Strip any namespace prefix before dispatching on the tag name.
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            childobj_ = docParaType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'para', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docInternalS4Type


class docTitleType(GeneratedsSuper):
    """Generated binding for a <title> element holding character data."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): ``valueOf_`` is accepted but not stored here;
        # ``build`` initializes ``self.valueOf_`` before accumulating text.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docTitleType.subclass:
            return docTitleType.subclass(*args_, **kwargs_)
        else:
            return docTitleType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
        # Serialize this element as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docTitleType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
        # No attributes are defined for this element type.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
        # Text is XML-escaped via quote_xml; if the accumulated value holds
        # a '![CDATA' marker (inserted by buildChildren), re-open it as a
        # real CDATA section after escaping.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docTitleType'):
        # Write a Python-literal representation of this instance.
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        # Populate from a DOM node; text content accumulates in valueOf_.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        # Text nodes are recorded twice: once as an ordered mixed-content
        # item and once appended to the flat valueOf_ string.  CDATA
        # sections are folded into valueOf_ with '![CDATA[...]]' markers
        # that exportChildren later restores.
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docTitleType


class docParaType(GeneratedsSuper):
    """Generated binding for a <para> element holding character data."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): ``valueOf_`` is accepted but not stored here;
        # ``build`` initializes ``self.valueOf_`` before accumulating text.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docParaType.subclass:
            return docParaType.subclass(*args_, **kwargs_)
        else:
            return docParaType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
        # Serialize this element as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docParaType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
        # Escape text, restoring any '![CDATA' marker as a CDATA section.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docParaType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        # Populate from a DOM node; text content accumulates in valueOf_.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        # Text goes both into the ordered content_ list and the flat
        # valueOf_ string; CDATA is folded in with '![CDATA[...]]' markers.
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docParaType


class docMarkupType(GeneratedsSuper):
    """Generated character-data binding for the docMarkupType XML type."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        # NOTE(review): ``valueOf_`` is accepted but not stored here;
        # ``build`` initializes ``self.valueOf_`` before accumulating text.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docMarkupType.subclass:
            return docMarkupType.subclass(*args_, **kwargs_)
        else:
            return docMarkupType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
        # Serialize this element as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
        # Escape text, restoring any '![CDATA' marker as a CDATA section.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docMarkupType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        # Populate this instance from a DOM element node.
        attrs = node_.attributes
self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docMarkupType class docURLLink(GeneratedsSuper): subclass = None superclass = None def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): self.url = url if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docURLLink.subclass: return docURLLink.subclass(*args_, **kwargs_) else: return docURLLink(*args_, **kwargs_) factory = staticmethod(factory) def get_url(self): return self.url def set_url(self, url): self.url = url def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docURLLink') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'): if self.url is not None: outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), )) def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'): if self.valueOf_.find('![CDATA')>-1: 
value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docURLLink'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.url is not None: showIndent(outfile, level) outfile.write('url = %s,\n' % (self.url,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('url'): self.url = attrs.get('url').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docURLLink class docAnchorType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docAnchorType.subclass: return docAnchorType.subclass(*args_, **kwargs_) else: return docAnchorType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return 
self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docAnchorType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == 
Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docAnchorType class docFormulaType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docFormulaType.subclass: return docFormulaType.subclass(*args_, **kwargs_) else: return docFormulaType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return 
True else: return False def exportLiteral(self, outfile, level, name_='docFormulaType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docFormulaType class docIndexEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, primaryie=None, secondaryie=None): self.primaryie = primaryie self.secondaryie = secondaryie def factory(*args_, **kwargs_): if docIndexEntryType.subclass: return docIndexEntryType.subclass(*args_, **kwargs_) else: return docIndexEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_primaryie(self): return self.primaryie def set_primaryie(self, primaryie): self.primaryie = primaryie def get_secondaryie(self): return self.secondaryie def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, 
name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): if self.primaryie is not None: showIndent(outfile, level) outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) if self.secondaryie is not None: showIndent(outfile, level) outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) def hasContent_(self): if ( self.primaryie is not None or self.secondaryie is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docIndexEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'primaryie': primaryie_ = '' for 
text__content_ in child_.childNodes: primaryie_ += text__content_.nodeValue self.primaryie = primaryie_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'secondaryie': secondaryie_ = '' for text__content_ in child_.childNodes: secondaryie_ += text__content_.nodeValue self.secondaryie = secondaryie_ # end class docIndexEntryType class docListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, listitem=None): if listitem is None: self.listitem = [] else: self.listitem = listitem def factory(*args_, **kwargs_): if docListType.subclass: return docListType.subclass(*args_, **kwargs_) else: return docListType(*args_, **kwargs_) factory = staticmethod(factory) def get_listitem(self): return self.listitem def set_listitem(self, listitem): self.listitem = listitem def add_listitem(self, value): self.listitem.append(value) def insert_listitem(self, index, value): self.listitem[index] = value def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docListType'): for listitem_ in self.listitem: listitem_.export(outfile, level, namespace_, name_='listitem') def hasContent_(self): if ( self.listitem is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass 
def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('listitem=[\n') level += 1 for listitem in self.listitem: showIndent(outfile, level) outfile.write('model_.listitem(\n') listitem.exportLiteral(outfile, level, name_='listitem') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'listitem': obj_ = docListItemType.factory() obj_.build(child_) self.listitem.append(obj_) # end class docListType class docListItemType(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None): if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docListItemType.subclass: return docListItemType.subclass(*args_, **kwargs_) else: return docListItemType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docListItemType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): pass def exportChildren(self, outfile, level, namespace_='', 
name_='docListItemType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docListItemType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docListItemType class docSimpleSectType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, title=None, para=None): self.kind = kind self.title = title if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docSimpleSectType.subclass: return docSimpleSectType.subclass(*args_, **kwargs_) else: return docSimpleSectType(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_kind(self): return self.kind def 
set_kind(self, kind): self.kind = kind def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.title: self.title.export(outfile, level, namespace_, name_='title') for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.title is not None or self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSimpleSectType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) def exportLiteralChildren(self, outfile, level, name_): if self.title: showIndent(outfile, level) outfile.write('title=model_.docTitleType(\n') self.title.exportLiteral(outfile, level, name_='title') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes 
self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docSimpleSectType class docVarListEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, term=None): self.term = term def factory(*args_, **kwargs_): if docVarListEntryType.subclass: return docVarListEntryType.subclass(*args_, **kwargs_) else: return docVarListEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_term(self): return self.term def set_term(self, term): self.term = term def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): if self.term: self.term.export(outfile, level, namespace_, name_='term', ) def hasContent_(self): if ( self.term is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docVarListEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, 
level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.term: showIndent(outfile, level) outfile.write('term=model_.docTitleType(\n') self.term.exportLiteral(outfile, level, name_='term') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'term': obj_ = docTitleType.factory() obj_.build(child_) self.set_term(obj_) # end class docVarListEntryType class docVariableListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docVariableListType.subclass: return docVariableListType.subclass(*args_, **kwargs_) else: return docVariableListType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') 
value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docVariableListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docVariableListType class docRefTextType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid self.kindref = kindref self.external = external if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docRefTextType.subclass: return docRefTextType.subclass(*args_, **kwargs_) else: return docRefTextType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_kindref(self): return self.kindref def set_kindref(self, kindref): self.kindref = kindref def get_external(self): return self.external def set_external(self, external): self.external = external def 
getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.kindref is not None: outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) if self.external is not None: outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docRefTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.kindref is not None: showIndent(outfile, level) outfile.write('kindref = "%s",\n' % (self.kindref,)) if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) def exportLiteralChildren(self, outfile, level, name_): 
showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('kindref'): self.kindref = attrs.get('kindref').value if attrs.get('external'): self.external = attrs.get('external').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docRefTextType class docTableType(GeneratedsSuper): subclass = None superclass = None def __init__(self, rows=None, cols=None, row=None, caption=None): self.rows = rows self.cols = cols if row is None: self.row = [] else: self.row = row self.caption = caption def factory(*args_, **kwargs_): if docTableType.subclass: return docTableType.subclass(*args_, **kwargs_) else: return docTableType(*args_, **kwargs_) factory = staticmethod(factory) def get_row(self): return self.row def set_row(self, row): self.row = row def add_row(self, value): self.row.append(value) def insert_row(self, index, value): self.row[index] = value def get_caption(self): return self.caption def set_caption(self, caption): self.caption = caption def get_rows(self): return self.rows def set_rows(self, rows): self.rows = rows def get_cols(self): return self.cols def set_cols(self, cols): self.cols = cols def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) 
self.exportAttributes(outfile, level, namespace_, name_='docTableType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): if self.rows is not None: outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) if self.cols is not None: outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): for row_ in self.row: row_.export(outfile, level, namespace_, name_='row') if self.caption: self.caption.export(outfile, level, namespace_, name_='caption') def hasContent_(self): if ( self.row is not None or self.caption is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTableType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.rows is not None: showIndent(outfile, level) outfile.write('rows = %s,\n' % (self.rows,)) if self.cols is not None: showIndent(outfile, level) outfile.write('cols = %s,\n' % (self.cols,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('row=[\n') level += 1 for row in self.row: showIndent(outfile, level) outfile.write('model_.row(\n') row.exportLiteral(outfile, level, name_='row') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.caption: showIndent(outfile, level) outfile.write('caption=model_.docCaptionType(\n') self.caption.exportLiteral(outfile, level, name_='caption') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes 
self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('rows'): try: self.rows = int(attrs.get('rows').value) except ValueError, exp: raise ValueError('Bad integer attribute (rows): %s' % exp) if attrs.get('cols'): try: self.cols = int(attrs.get('cols').value) except ValueError, exp: raise ValueError('Bad integer attribute (cols): %s' % exp) def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'row': obj_ = docRowType.factory() obj_.build(child_) self.row.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'caption': obj_ = docCaptionType.factory() obj_.build(child_) self.set_caption(obj_) # end class docTableType class docRowType(GeneratedsSuper): subclass = None superclass = None def __init__(self, entry=None): if entry is None: self.entry = [] else: self.entry = entry def factory(*args_, **kwargs_): if docRowType.subclass: return docRowType.subclass(*args_, **kwargs_) else: return docRowType(*args_, **kwargs_) factory = staticmethod(factory) def get_entry(self): return self.entry def set_entry(self, entry): self.entry = entry def add_entry(self, value): self.entry.append(value) def insert_entry(self, index, value): self.entry[index] = value def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docRowType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): for entry_ in 
self.entry: entry_.export(outfile, level, namespace_, name_='entry') def hasContent_(self): if ( self.entry is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docRowType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('entry=[\n') level += 1 for entry in self.entry: showIndent(outfile, level) outfile.write('model_.entry(\n') entry.exportLiteral(outfile, level, name_='entry') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'entry': obj_ = docEntryType.factory() obj_.build(child_) self.entry.append(obj_) # end class docRowType class docEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, thead=None, para=None): self.thead = thead if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docEntryType.subclass: return docEntryType.subclass(*args_, **kwargs_) else: return docEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_thead(self): return self.thead def set_thead(self, thead): self.thead = thead def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): showIndent(outfile, level) 
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): if self.thead is not None: outfile.write(' thead=%s' % (quote_attrib(self.thead), )) def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.thead is not None: showIndent(outfile, level) outfile.write('thead = "%s",\n' % (self.thead,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('thead'): self.thead = attrs.get('thead').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docEntryType class 
docCaptionType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docCaptionType.subclass: return docCaptionType.subclass(*args_, **kwargs_) else: return docCaptionType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docCaptionType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = 
child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docCaptionType class docHeadingType(GeneratedsSuper): subclass = None superclass = None def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): self.level = level if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docHeadingType.subclass: return docHeadingType.subclass(*args_, **kwargs_) else: return docHeadingType(*args_, **kwargs_) factory = staticmethod(factory) def get_level(self): return self.level def set_level(self, level): self.level = level def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): if self.level is not None: outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) 
value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docHeadingType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.level is not None: showIndent(outfile, level) outfile.write('level = %s,\n' % (self.level,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('level'): try: self.level = int(attrs.get('level').value) except ValueError, exp: raise ValueError('Bad integer attribute (level): %s' % exp) def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docHeadingType class docImageType(GeneratedsSuper): subclass = None superclass = None def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): self.width = width self.type_ = type_ self.name = name self.height = height if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): 
if docImageType.subclass: return docImageType.subclass(*args_, **kwargs_) else: return docImageType(*args_, **kwargs_) factory = staticmethod(factory) def get_width(self): return self.width def set_width(self, width): self.width = width def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_name(self): return self.name def set_name(self, name): self.name = name def get_height(self): return self.height def set_height(self, height): self.height = height def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docImageType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('</%s%s>\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'): if self.width is not None: outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) if self.type_ is not None: outfile.write(' type=%s' % (quote_attrib(self.type_), )) if self.name is not None: outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) if self.height is not None: outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) def exportChildren(self, outfile, level, namespace_='', name_='docImageType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','<![CDATA') value=value.replace(']]',']]>') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def 
exportLiteral(self, outfile, level, name_='docImageType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.width is not None: showIndent(outfile, level) outfile.write('width = %s,\n' % (self.width,)) if self.type_ is not None: showIndent(outfile, level) outfile.write('type_ = "%s",\n' % (self.type_,)) if self.name is not None: showIndent(outfile, level) outfile.write('name = %s,\n' % (self.name,)) if self.height is not None: showIndent(outfile, level) outfile.write('height = %s,\n' % (self.height,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('width'): self.width = attrs.get('width').value if attrs.get('type'): self.type_ = attrs.get('type').value if attrs.get('name'): self.name = attrs.get('name').value if attrs.get('height'): self.height = attrs.get('height').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docImageType class docDotFileType(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): self.name = name if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: 
            self.content_ = content_
    # factory(): indirection hook — assigning docDotFileType.subclass makes
    # every construction site return that subclass instead.
    def factory(*args_, **kwargs_):
        if docDotFileType.subclass:
            return docDotFileType.subclass(*args_, **kwargs_)
        else:
            return docDotFileType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    # Serialize this node as XML: open tag + attributes, text content, close tag.
    def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docDotFileType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'):
        if self.name is not None:
            outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'):
        # If the text holds a CDATA marker, un-escape the delimiters that
        # quote_xml mangled so a literal '<![CDATA[...]]>' section is emitted.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    # exportLiteral*: emit the node as a Python constructor-literal snippet
    # (used for generating sample code, not XML).
    def exportLiteral(self, outfile, level, name_='docDotFileType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.name is not None:
            showIndent(outfile, level)
            outfile.write('name = %s,\n' % (self.name,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    # build(): populate this instance from a minidom element node.
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('name'):
            self.name = attrs.get('name').value
    def buildChildren(self, child_, nodeName_):
        # Text children are recorded twice: as MixedContainer entries in
        # content_ (ordered mixed content) and concatenated into valueOf_.
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docDotFileType


# DOM binding for the Doxygen 'docTocItemType' element: an 'id' attribute
# plus mixed text/CDATA content.
class docTocItemType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
        self.id = id
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docTocItemType.subclass:
            return docTocItemType.subclass(*args_, **kwargs_)
        else:
            return docTocItemType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docTocItemType')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'):
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'):
        # Restore CDATA delimiters escaped by quote_xml (same scheme as the
        # other mixed-content classes in this module).
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        if (
            self.valueOf_ is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docTocItemType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docTocItemType


# DOM binding for 'docTocListType': a container of <tocitem> children,
# no attributes of its own.
class docTocListType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, tocitem=None):
        if tocitem is None:
            self.tocitem = []
        else:
            self.tocitem = tocitem
    def factory(*args_, **kwargs_):
        if docTocListType.subclass:
            return docTocListType.subclass(*args_, **kwargs_)
        else:
            return docTocListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_tocitem(self): return self.tocitem
    def set_tocitem(self, tocitem): self.tocitem = tocitem
    def add_tocitem(self, value): self.tocitem.append(value)
    # NOTE(review): despite the name this REPLACES the item at 'index'
    # (list __setitem__), it does not insert — generated-code convention.
    def insert_tocitem(self, index, value): self.tocitem[index] = value
    def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docTocListType')
        # Self-closing form when there is nothing to nest.
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'):
        for tocitem_ in self.tocitem:
            tocitem_.export(outfile, level, namespace_, name_='tocitem')
    def hasContent_(self):
        if (
            self.tocitem is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docTocListType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('tocitem=[\n')
        level += 1
        for tocitem in self.tocitem:
            showIndent(outfile, level)
            outfile.write('model_.tocitem(\n')
            tocitem.exportLiteral(outfile, level, name_='tocitem')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'tocitem':
            obj_ = docTocItemType.factory()
            obj_.build(child_)
            self.tocitem.append(obj_)
# end class docTocListType
# DOM binding for 'docLanguageType': a 'langid' attribute plus a list of
# <para> children.
class docLanguageType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, langid=None, para=None):
        self.langid = langid
        if para is None:
            self.para = []
        else:
            self.para = para
    def factory(*args_, **kwargs_):
        if docLanguageType.subclass:
            return docLanguageType.subclass(*args_, **kwargs_)
        else:
            return docLanguageType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    # NOTE(review): replaces at 'index' rather than inserting (list __setitem__).
    def insert_para(self, index, value): self.para[index] = value
    def get_langid(self): return self.langid
    def set_langid(self, langid): self.langid = langid
    def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docLanguageType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'):
        if self.langid is not None:
            outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'):
        for para_ in self.para:
            para_.export(outfile, level, namespace_, name_='para')
    def hasContent_(self):
        if (
            self.para is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docLanguageType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.langid is not None:
            showIndent(outfile, level)
            outfile.write('langid = %s,\n' % (self.langid,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('para=[\n')
        level += 1
        for para in self.para:
            showIndent(outfile, level)
            outfile.write('model_.para(\n')
            para.exportLiteral(outfile, level, name_='para')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('langid'):
            self.langid = attrs.get('langid').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'para':
            obj_ = docParaType.factory()
            obj_.build(child_)
            self.para.append(obj_)
# end class docLanguageType


# DOM binding for 'docParamListType': a 'kind' attribute plus a list of
# <parameteritem> children.
class docParamListType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, kind=None, parameteritem=None):
        self.kind = kind
        if parameteritem is None:
            self.parameteritem = []
        else:
            self.parameteritem = parameteritem
    def factory(*args_, **kwargs_):
        if docParamListType.subclass:
            return docParamListType.subclass(*args_, **kwargs_)
        else:
            return docParamListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_parameteritem(self): return self.parameteritem
    def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem
    def add_parameteritem(self, value): self.parameteritem.append(value)
    # NOTE(review): replaces at 'index' rather than inserting (list __setitem__).
    def insert_parameteritem(self, index, value): self.parameteritem[index] = value
    def get_kind(self): return self.kind
    def set_kind(self, kind): self.kind = kind
    def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docParamListType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'):
        # 'kind' is an enumerated value — quoted but not re-encoded.
        if self.kind is not None:
            outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'):
        for parameteritem_ in self.parameteritem:
            parameteritem_.export(outfile, level, namespace_, name_='parameteritem')
    def hasContent_(self):
        if (
            self.parameteritem is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docParamListType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.kind is not None:
            showIndent(outfile, level)
            outfile.write('kind = "%s",\n' % (self.kind,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('parameteritem=[\n')
        level += 1
        for parameteritem in self.parameteritem:
            showIndent(outfile, level)
            outfile.write('model_.parameteritem(\n')
            parameteritem.exportLiteral(outfile, level, name_='parameteritem')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('kind'):
            self.kind = attrs.get('kind').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'parameteritem':
            obj_ = docParamListItem.factory()
            obj_.build(child_)
            self.parameteritem.append(obj_)
# end class docParamListType


# DOM binding for 'docParamListItem': one or more <parameternamelist>
# children and an optional <parameterdescription>.
class docParamListItem(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, parameternamelist=None, parameterdescription=None):
        if parameternamelist is None:
            self.parameternamelist = []
        else:
            self.parameternamelist = parameternamelist
        self.parameterdescription = parameterdescription
    def factory(*args_, **kwargs_):
        if docParamListItem.subclass:
            return docParamListItem.subclass(*args_, **kwargs_)
        else:
            return docParamListItem(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_parameternamelist(self): return self.parameternamelist
    def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist
    def add_parameternamelist(self, value): self.parameternamelist.append(value)
    # NOTE(review): replaces at 'index' rather than inserting (list __setitem__).
    def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value
    def get_parameterdescription(self): return self.parameterdescription
    def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription
    def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docParamListItem')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'):
        for parameternamelist_ in self.parameternamelist:
            parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist')
        if self.parameterdescription:
            self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', )
    def hasContent_(self):
        if (
            self.parameternamelist is not None or
            self.parameterdescription is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docParamListItem'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('parameternamelist=[\n')
        level += 1
        for parameternamelist in self.parameternamelist:
            showIndent(outfile, level)
            outfile.write('model_.parameternamelist(\n')
            parameternamelist.exportLiteral(outfile, level, name_='parameternamelist')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        if self.parameterdescription:
            showIndent(outfile, level)
            outfile.write('parameterdescription=model_.descriptionType(\n')
            self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'parameternamelist':
            obj_ = docParamNameList.factory()
            obj_.build(child_)
            self.parameternamelist.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'parameterdescription':
            obj_ = descriptionType.factory()
            obj_.build(child_)
            self.set_parameterdescription(obj_)
# end class docParamListItem


# DOM binding for 'docParamNameList': a container of <parametername>
# children, no attributes.
class docParamNameList(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, parametername=None):
        if parametername is None:
            self.parametername = []
        else:
            self.parametername = parametername
    def factory(*args_, **kwargs_):
        if docParamNameList.subclass:
            return docParamNameList.subclass(*args_, **kwargs_)
        else:
            return docParamNameList(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_parametername(self): return self.parametername
    def set_parametername(self, parametername): self.parametername = parametername
    def add_parametername(self, value): self.parametername.append(value)
    # NOTE(review): replaces at 'index' rather than inserting (list __setitem__).
    def insert_parametername(self, index, value): self.parametername[index] = value
    def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
        for parametername_ in self.parametername:
            parametername_.export(outfile, level, namespace_, name_='parametername')
    def hasContent_(self):
        if (
            self.parametername is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docParamNameList'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('parametername=[\n')
        level += 1
        for parametername in self.parametername:
            showIndent(outfile, level)
            outfile.write('model_.parametername(\n')
            parametername.exportLiteral(outfile, level, name_='parametername')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'parametername':
            obj_ = docParamName.factory()
            obj_.build(child_)
            self.parametername.append(obj_)
# end class docParamNameList


# DOM binding for 'docParamName': a 'direction' attribute plus mixed
# content (text interleaved with <ref> elements) held in content_.
class docParamName(GeneratedsSuper):
    subclass = None
    superclass = None
    # NOTE(review): the 'ref' parameter is accepted but never stored here,
    # yet hasContent_ reads self.ref — it only exists after set_ref() is
    # called. TODO confirm callers always set it before hasContent_.
    def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
        self.direction = direction
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        if docParamName.subclass:
            return docParamName.subclass(*args_, **kwargs_)
        else:
            return docParamName(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ref(self): return self.ref
    def set_ref(self, ref): self.ref = ref
    def get_direction(self): return self.direction
    def set_direction(self, direction): self.direction = direction
    def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docParamName')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'):
        if self.direction is not None:
            outfile.write(' direction=%s' % (quote_attrib(self.direction), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docParamName'):
        # Mixed content is exported in document order via the containers.
        for item_ in self.content_:
            item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.ref is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docParamName'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.direction is not None:
            showIndent(outfile, level)
            outfile.write('direction = "%s",\n' % (self.direction,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('direction'):
            self.direction = attrs.get('direction').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'ref':
            childobj_ = docRefTextType.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'ref', childobj_)
            self.content_.append(obj_)
        elif child_.nodeType == Node.TEXT_NODE:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
# end class docParamName


# DOM binding for 'docXRefSectType': an 'id' attribute, a list of plain-text
# <xreftitle> children, and an optional <xrefdescription>.
class docXRefSectType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, id=None, xreftitle=None, xrefdescription=None):
        self.id = id
        if xreftitle is None:
            self.xreftitle = []
        else:
            self.xreftitle = xreftitle
        self.xrefdescription = xrefdescription
    def factory(*args_, **kwargs_):
        if docXRefSectType.subclass:
            return docXRefSectType.subclass(*args_, **kwargs_)
        else:
            return docXRefSectType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_xreftitle(self): return self.xreftitle
    def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle
    def add_xreftitle(self, value): self.xreftitle.append(value)
    # NOTE(review): replaces at 'index' rather than inserting (list __setitem__).
    def insert_xreftitle(self, index, value): self.xreftitle[index] = value
    def get_xrefdescription(self): return self.xrefdescription
    def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
        if self.id is not None:
            outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
        # xreftitle entries are plain strings, so the element is written inline.
        for xreftitle_ in self.xreftitle:
            showIndent(outfile, level)
            outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
        if self.xrefdescription:
            self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', )
    def hasContent_(self):
        if (
            self.xreftitle is not None or
            self.xrefdescription is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docXRefSectType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.id is not None:
            showIndent(outfile, level)
            outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('xreftitle=[\n')
        level += 1
        for xreftitle in self.xreftitle:
            showIndent(outfile, level)
            outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding))
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        if self.xrefdescription:
            showIndent(outfile, level)
            outfile.write('xrefdescription=model_.descriptionType(\n')
            self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node_):
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'xreftitle':
            # Concatenate all text fragments into a single title string.
            xreftitle_ = ''
            for text__content_ in child_.childNodes:
                xreftitle_ += text__content_.nodeValue
            self.xreftitle.append(xreftitle_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'xrefdescription':
            obj_ = descriptionType.factory()
            obj_.build(child_)
            self.set_xrefdescription(obj_)
# end class docXRefSectType


# DOM binding for 'docCopyType': a 'link' attribute plus <para>, <sect1>
# and optional <internal> children.
class docCopyType(GeneratedsSuper):
    subclass = None
    superclass = None
    def __init__(self, link=None, para=None, sect1=None, internal=None):
        self.link = link
        if para is None:
            self.para = []
        else:
            self.para = para
        if sect1 is None:
            self.sect1 = []
        else:
            self.sect1 = sect1
        self.internal = internal
    def factory(*args_, **kwargs_):
        if docCopyType.subclass:
            return docCopyType.subclass(*args_, **kwargs_)
        else:
            return docCopyType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_para(self): return self.para
    def set_para(self, para): self.para = para
    def add_para(self, value): self.para.append(value)
    # NOTE(review): the insert_* accessors replace at 'index' (list
    # __setitem__), they do not insert — generated-code convention.
    def insert_para(self, index, value): self.para[index] = value
    def get_sect1(self): return self.sect1
    def set_sect1(self, sect1): self.sect1 = sect1
    def add_sect1(self, value): self.sect1.append(value)
    def insert_sect1(self, index, value): self.sect1[index] = value
    def get_internal(self): return self.internal
    def set_internal(self, internal): self.internal = internal
    def get_link(self): return self.link
    def set_link(self, link): self.link = link
    def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docCopyType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'):
        if self.link is not None:
            outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
        for para_ in self.para:
            para_.export(outfile, level, namespace_, name_='para')
        for sect1_ in self.sect1:
            sect1_.export(outfile, level, namespace_, name_='sect1')
        if self.internal:
            self.internal.export(outfile, level, namespace_, name_='internal')
    def hasContent_(self):
        if (
            self.para is not None or
            self.sect1 is not None or
            self.internal is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docCopyType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.link is not None:
            showIndent(outfile, level)
            outfile.write('link = %s,\n' % (self.link,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('para=[\n')
        level += 1
        for para in self.para:
            showIndent(outfile, level)
            outfile.write('model_.para(\n')
            para.exportLiteral(outfile, level, name_='para')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('sect1=[\n')
        level += 1
        for sect1 in self.sect1:
            showIndent(outfile, level)
            outfile.write('model_.sect1(\n')
sect1.exportLiteral(outfile, level, name_='sect1') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.internal: showIndent(outfile, level) outfile.write('internal=model_.docInternalType(\n') self.internal.exportLiteral(outfile, level, name_='internal') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('link'): self.link = attrs.get('link').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect1': obj_ = docSect1Type.factory() obj_.build(child_) self.sect1.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': obj_ = docInternalType.factory() obj_.build(child_) self.set_internal(obj_) # end class docCopyType class docCharType(GeneratedsSuper): subclass = None superclass = None def __init__(self, char=None, valueOf_=''): self.char = char self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docCharType.subclass: return docCharType.subclass(*args_, **kwargs_) else: return docCharType(*args_, **kwargs_) factory = staticmethod(factory) def get_char(self): return self.char def set_char(self, char): self.char = char def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCharType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 
1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No content at all: close as an empty (self-closing) element.
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'):
        # Emit the optional 'char' XML attribute, quoted for attribute context.
        if self.char is not None:
            outfile.write(' char=%s' % (quote_attrib(self.char), ))
    def exportChildren(self, outfile, level, namespace_='', name_='docCharType'):
        # Write the element's character data.  quote_xml() escapes the CDATA
        # sentinel markers stored by buildChildren(), so real CDATA syntax is
        # restored afterwards with str.replace().
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        # True when there is text content to place between open/close tags.
        if ( self.valueOf_ is not None ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docCharType'):
        # Emit a Python-literal rendering of this object (see parseLiteral).
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.char is not None:
            showIndent(outfile, level)
            outfile.write('char = "%s",\n' % (self.char,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        # Populate this object from a minidom element node: first the
        # attributes, then the accumulated text of all child nodes.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the node name.
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('char'):
            self.char = attrs.get('char').value
    def buildChildren(self, child_, nodeName_):
        # Accumulate text content; CDATA sections are stored with sentinel
        # markers that exportChildren() later turns back into CDATA syntax.
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docCharType


class docEmptyType(GeneratedsSuper):
    """Binding class for an element that carries no XML attributes; its only
    payload is its text content, held in ``valueOf_``.  Presumably maps the
    Doxygen XML 'docEmpty' schema type -- TODO confirm against the schema.
    """
    # Hook for installing an application subclass; factory() honors it.
    subclass = None
    superclass = None
    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed,
        # otherwise this class itself.
        if docEmptyType.subclass:
            return docEmptyType.subclass(*args_, **kwargs_)
        else:
            return docEmptyType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getValueOf_(self): return self.valueOf_
    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''):
        # Serialize this object to 'outfile' as an XML element named
        # namespace_ + name_, indented to 'level'.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='docEmptyType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No content at all: close as an empty (self-closing) element.
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'):
        # This element type carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'):
        # Same CDATA round-trip as docCharType.exportChildren above.
        if self.valueOf_.find('![CDATA')>-1:
            value=quote_xml('%s' % self.valueOf_)
            value=value.replace('![CDATA','<![CDATA')
            value=value.replace(']]',']]>')
            outfile.write(value)
        else:
            outfile.write(quote_xml('%s' % self.valueOf_))
    def hasContent_(self):
        # True when there is text content to place between open/close tags.
        if ( self.valueOf_ is not None ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docEmptyType'):
        # Emit a Python-literal rendering of this object (see parseLiteral).
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        # No attributes to render for this element type.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
    def build(self, node_):
        # Populate this object from a minidom element node.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        self.valueOf_ = ''
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the node name.
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        # No attributes to read for this element type.
        pass
    def buildChildren(self, child_, nodeName_):
        # Accumulate text content; CDATA sections are stored with sentinel
        # markers that exportChildren() later turns back into CDATA syntax.
        if child_.nodeType == Node.TEXT_NODE:
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docEmptyType


USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
    -s        Use the SAX parser, not the minidom parser.
"""


def usage():
    # Print command-line help and exit with a failure status.
    # NOTE: this module uses the Python 2 print statement throughout.
    print USAGE_TEXT
    sys.exit(1)


def parse(inFileName):
    # Parse the XML file with minidom, build the object tree rooted at
    # DoxygenType, and echo it back to stdout as XML.  Returns the root
    # object for further use by the caller.
    doc = minidom.parse(inFileName)
    rootNode = doc.documentElement
    rootObj = DoxygenType.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_="doxygen", namespacedef_='')
    return rootObj


def parseString(inString):
    # Same as parse(), but reads the XML document from an in-memory string.
    doc = minidom.parseString(inString)
    rootNode = doc.documentElement
    rootObj = DoxygenType.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_="doxygen", namespacedef_='')
    return rootObj


def parseLiteral(inFileName):
    # Like parse(), but writes a Python-literal rendering of the object tree
    # (via exportLiteral) instead of XML, as an importable snippet.
    doc = minidom.parse(inFileName)
    rootNode = doc.documentElement
    rootObj = DoxygenType.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('from compound import *\n\n')
    sys.stdout.write('rootObj = doxygen(\n')
    rootObj.exportLiteral(sys.stdout, 0, name_="doxygen")
    sys.stdout.write(')\n')
    return rootObj


def main():
    # Entry point: expects exactly one argument, the input XML file name;
    # anything else prints usage and exits.
    args = sys.argv[1:]
    if len(args) == 1:
        parse(args[0])
    else:
        usage()


if __name__ == '__main__':
    main()
    #import pdb
    #pdb.run('main()')
# license: gpl-3.0