repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
nke001/attention-lvcsr
libs/fuel/tests/test_config_parser.py
11
1534
import os import tempfile from numpy.testing import assert_raises from fuel.config_parser import Configuration, ConfigurationError def test_config_parser(): _environ = dict(os.environ) try: with tempfile.NamedTemporaryFile(mode='w', delete=False) as f: f.write('data_path: yaml_path') filename = f.name os.environ['FUEL_CONFIG'] = filename if 'FUEL_DATA_PATH' in os.environ: del os.environ['FUEL_DATA_PATH'] config = Configuration() config.add_config('data_path', str, env_var='FUEL_DATA_PATH') config.add_config('config_with_default', int, default='1', env_var='FUEL_CONFIG_TEST') config.add_config('config_without_default', str) config.load_yaml() assert config.data_path == 'yaml_path' os.environ['FUEL_DATA_PATH'] = 'env_path' assert config.data_path == 'env_path' assert config.config_with_default == 1 os.environ['FUEL_CONFIG_TEST'] = '2' assert config.config_with_default == 2 assert_raises(AttributeError, getattr, config, 'non_existing_config') assert_raises(ConfigurationError, getattr, config, 'config_without_default') config.data_path = 'manual_path' assert config.data_path == 'manual_path' config.new_config = 'new_config' assert config.new_config == 'new_config' finally: os.environ.clear() os.environ.update(_environ)
mit
amenonsen/ansible
lib/ansible/modules/identity/ipa/ipa_config.py
72
4030
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ipa_config author: Fran Fitzpatrick (@fxfitz) short_description: Manage Global FreeIPA Configuration Settings description: - Modify global configuration settings of a FreeIPA Server. options: ipadefaultloginshell: description: Default shell for new users. aliases: ["loginshell"] ipadefaultemaildomain: description: Default e-mail domain for new users. aliases: ["emaildomain"] extends_documentation_fragment: ipa.documentation version_added: "2.7" ''' EXAMPLES = ''' # Ensure the default login shell is bash. - ipa_config: ipadefaultloginshell: /bin/bash ipa_host: localhost ipa_user: admin ipa_pass: supersecret # Ensure the default e-mail domain is ansible.com. - ipa_config: ipadefaultemaildomain: ansible.com ipa_host: localhost ipa_user: admin ipa_pass: supersecret ''' RETURN = ''' config: description: Configuration as returned by IPA API. 
returned: always type: dict ''' import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ipa import IPAClient, ipa_argument_spec from ansible.module_utils._text import to_native class ConfigIPAClient(IPAClient): def __init__(self, module, host, port, protocol): super(ConfigIPAClient, self).__init__(module, host, port, protocol) def config_show(self): return self._post_json(method='config_show', name=None) def config_mod(self, name, item): return self._post_json(method='config_mod', name=name, item=item) def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None): config = {} if ipadefaultloginshell is not None: config['ipadefaultloginshell'] = ipadefaultloginshell if ipadefaultemaildomain is not None: config['ipadefaultemaildomain'] = ipadefaultemaildomain return config def get_config_diff(client, ipa_config, module_config): return client.get_diff(ipa_data=ipa_config, module_data=module_config) def ensure(module, client): module_config = get_config_dict( ipadefaultloginshell=module.params.get('ipadefaultloginshell'), ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), ) ipa_config = client.config_show() diff = get_config_diff(client, ipa_config, module_config) changed = False new_config = {} for module_key in diff: if module_config.get(module_key) != ipa_config.get(module_key, None): changed = True new_config.update({module_key: module_config.get(module_key)}) if changed and not module.check_mode: client.config_mod(name=None, item=new_config) return changed, client.config_show() def main(): argument_spec = ipa_argument_spec() argument_spec.update( ipadefaultloginshell=dict(type='str', aliases=['loginshell']), ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True ) client = ConfigIPAClient( module=module, host=module.params['ipa_host'], port=module.params['ipa_port'], protocol=module.params['ipa_prot'] ) try: 
client.login( username=module.params['ipa_user'], password=module.params['ipa_pass'] ) changed, user = ensure(module, client) module.exit_json(changed=changed, user=user) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
gpl-3.0
jnordling/cabin
onadata/libs/utils/mongo_sync.py
5
5725
import sys from django.conf import settings from pymongo import MongoClient from onadata.apps.logger.models import Instance from onadata.apps.logger.models import XForm from onadata.apps.viewer.models import ParsedInstance from onadata.libs.utils import common_tags from onadata.libs.utils.model_tools import queryset_iterator def get_mongo_connection(): MONGO_DATABASE = settings.MONGO_DATABASE if MONGO_DATABASE.get('USER') and MONGO_DATABASE.get('PASSWORD'): MONGO_CONNECTION_URL = ( "mongodb://%(USER)s:%(PASSWORD)s@%(HOST)s:%(PORT)s") \ % MONGO_DATABASE else: MONGO_CONNECTION_URL = "mongodb://%(HOST)s:%(PORT)s" % MONGO_DATABASE MONGO_CONNECTION = MongoClient( MONGO_CONNECTION_URL, safe=True, j=True, tz_aware=True) return MONGO_CONNECTION[MONGO_DATABASE['NAME']] def update_mongo_for_xform(xform, only_update_missing=True): mongo_instances = get_mongo_connection().instances instance_ids = set( [i.id for i in Instance.objects.only('id').filter(xform=xform)]) sys.stdout.write("Total no of instances: %d\n" % len(instance_ids)) mongo_ids = set() user = xform.user userform_id = "%s_%s" % (user.username, xform.id_string) if only_update_missing: sys.stdout.write("Only updating missing mongo instances\n") mongo_ids = set( [rec[common_tags.ID] for rec in mongo_instances.find( {common_tags.USERFORM_ID: userform_id}, {common_tags.ID: 1})]) sys.stdout.write("Total no of mongo instances: %d\n" % len(mongo_ids)) # get the difference instance_ids = instance_ids.difference(mongo_ids) else: # clear mongo records mongo_instances.remove({common_tags.USERFORM_ID: userform_id}) # get instances sys.stdout.write( "Total no of instances to update: %d\n" % len(instance_ids)) instances = Instance.objects.only('id').in_bulk( [id for id in instance_ids]) total = len(instances) done = 0 for id, instance in instances.items(): (pi, created) = ParsedInstance.objects.get_or_create(instance=instance) pi.save(async=False) done += 1 # if 1000 records are done, flush mongo if (done % 1000) == 0: 
sys.stdout.write( 'Updated %d records, flushing MongoDB...\n' % done) settings.MONGO_CONNECTION.admin.command({'fsync': 1}) progress = "\r%.2f %% done..." % ((float(done) / float(total)) * 100) sys.stdout.write(progress) sys.stdout.flush() # flush mongo again when done settings.MONGO_CONNECTION.admin.command({'fsync': 1}) sys.stdout.write( "\nUpdated %s\n------------------------------------------\n" % xform.id_string) def mongo_sync_status(remongo=False, update_all=False, user=None, xform=None): """Check the status of records in the mysql db versus mongodb. At a minimum, return a report (string) of the results. Optionally, take action to correct the differences, based on these parameters, if present and defined: remongo -> if True, update the records missing in mongodb (default: False) update_all -> if True, update all the relevant records (default: False) user -> if specified, apply only to the forms for the given user (default: None) xform -> if specified, apply only to the given form (default: None) """ mongo_instances = get_mongo_connection().instances qs = XForm.objects.only('id_string', 'user').select_related('user') if user and not xform: qs = qs.filter(user=user) elif user and xform: qs = qs.filter(user=user, id_string=xform.id_string) else: qs = qs.all() total = qs.count() found = 0 done = 0 total_to_remongo = 0 report_string = "" for xform in queryset_iterator(qs, 100): # get the count user = xform.user instance_count = Instance.objects.filter(xform=xform).count() userform_id = "%s_%s" % (user.username, xform.id_string) mongo_count = mongo_instances.find( {common_tags.USERFORM_ID: userform_id}).count() if instance_count != mongo_count or update_all: line = "user: %s, id_string: %s\nInstance count: %d\t"\ "Mongo count: %d\n---------------------------------"\ "-----\n" % ( user.username, xform.id_string, instance_count, mongo_count) report_string += line found += 1 total_to_remongo += (instance_count - mongo_count) # should we remongo if remongo or (remongo 
and update_all): if update_all: sys.stdout.write( "Updating all records for %s\n--------------------" "---------------------------\n" % xform.id_string) else: sys.stdout.write( "Updating missing records for %s\n----------------" "-------------------------------\n" % xform.id_string) update_mongo_for_xform( xform, only_update_missing=not update_all) done += 1 sys.stdout.write( "%.2f %% done ...\r" % ((float(done) / float(total)) * 100)) # only show stats if we are not updating mongo, the update function # will show progress if not remongo: line = "Total # of forms out of sync: %d\n" \ "Total # of records to remongo: %d\n" % (found, total_to_remongo) report_string += line return report_string
bsd-2-clause
druuu/pietrack
project/tasks.py
3
1095
from celery import task from django.core.mail import send_mail from django.template import loader from django.contrib.auth.forms import PasswordResetForm from django.core.mail import EmailMultiAlternatives, send_mail @task() def add(): return 2 + 2 @task() def send_mail_old_user(email): send_mail('invitation for project', 'time to code', 'dineshmcmf@gmail.com', [email]) @task() def celery_send_mail(subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None): """ Sends a django.core.mail.EmailMultiAlternatives to `to_email`. """ subject = loader.render_to_string(subject_template_name, context) subject = ''.join(subject.splitlines()) body = loader.render_to_string(email_template_name, context) email_message = EmailMultiAlternatives(subject, body, from_email, [to_email]) if html_email_template_name is not None: html_email = loader.render_to_string(html_email_template_name, context) email_message.attach_alternative(html_email, 'text/html') email_message.send()
mit
0x0all/scikit-learn
sklearn/utils/setup.py
296
2884
import os from os.path import join from sklearn._build_utils import get_blas_info def configuration(parent_package='', top_path=None): import numpy from numpy.distutils.misc_util import Configuration config = Configuration('utils', parent_package, top_path) config.add_subpackage('sparsetools') cblas_libs, blas_info = get_blas_info() cblas_compile_args = blas_info.pop('extra_compile_args', []) cblas_includes = [join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])] libraries = [] if os.name == 'posix': libraries.append('m') cblas_libs.append('m') config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'], libraries=libraries) config.add_extension('arrayfuncs', sources=['arrayfuncs.c'], depends=[join('src', 'cholesky_delete.h')], libraries=cblas_libs, include_dirs=cblas_includes, extra_compile_args=cblas_compile_args, **blas_info ) config.add_extension( 'murmurhash', sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')], include_dirs=['src']) config.add_extension('lgamma', sources=['lgamma.c', join('src', 'gamma.c')], include_dirs=['src'], libraries=libraries) config.add_extension('graph_shortest_path', sources=['graph_shortest_path.c'], include_dirs=[numpy.get_include()]) config.add_extension('fast_dict', sources=['fast_dict.cpp'], language="c++", include_dirs=[numpy.get_include()], libraries=libraries) config.add_extension('seq_dataset', sources=['seq_dataset.c'], include_dirs=[numpy.get_include()]) config.add_extension('weight_vector', sources=['weight_vector.c'], include_dirs=cblas_includes, libraries=cblas_libs, **blas_info) config.add_extension("_random", sources=["_random.c"], include_dirs=[numpy.get_include()], libraries=libraries) config.add_extension("_logistic_sigmoid", sources=["_logistic_sigmoid.c"], include_dirs=[numpy.get_include()], libraries=libraries) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
bsd-3-clause
tonypconway/scrabobs
app/board/refactor_rows.py
1
1894
from pprint import pprint letters = [{ "letter": "A", "points": 1, "remaining": 9 }, { "letter": "B", "points": 3, "remaining": 2 }, { "letter": "C", "points": 3, "remaining": 2 }, { "letter": "D", "points": 2, "remaining": 4 }, { "letter": "E", "points": 1, "remaining": 12 }, { "letter": "F", "points": 4, "remaining": 2 }, { "letter": "G", "points": 2, "remaining": 2 }, { "letter": "H", "points": 4, "remaining": 2 }, { "letter": "I", "points": 1, "remaining": 9 }, { "letter": "J", "points": 8, "remaining": 1 }, { "letter": "K", "points": 5, "remaining": 1 }, { "letter": "L", "points": 1, "remaining": 4 }, { "letter": "M", "points": 3, "remaining": 2 }, { "letter": "N", "points": 1, "remaining": 6 }, { "letter": "O", "points": 1, "remaining": 8 }, { "letter": "P", "points": 3, "remaining": 2 }, { "letter": "Q", "points": 10, "remaining": 1 }, { "letter": "R", "points": 1, "remaining": 6 }, { "letter": "S", "points": 1, "remaining": 4 }, { "letter": "T", "points": 1, "remaining": 6 }, { "letter": "U", "points": 1, "remaining": 4 }, { "letter": "V", "points": 4, "remaining": 2 }, { "letter": "W", "points": 4, "remaining": 2 }, { "letter": "X", "points": 8, "remaining": 1 }, { "letter": "Y", "points": 4, "remaining": 2 }, { "letter": "Z", "points": 10, "remaining": 1 }, { "letter": " ", "points": 0, "remaining": 2 }] new_letters = {} for letter in letters: new_letters[letter["letter"]] = {} new_letters[letter["letter"]]["points"] = letter["points"] new_letters[letter["letter"]]["remaining"] = letter["remaining"] pprint(new_letters)
mit
toshihirock/repo
color.py
36
4089
# # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import pager COLORS = {None :-1, 'normal' :-1, 'black' : 0, 'red' : 1, 'green' : 2, 'yellow' : 3, 'blue' : 4, 'magenta': 5, 'cyan' : 6, 'white' : 7} ATTRS = {None :-1, 'bold' : 1, 'dim' : 2, 'ul' : 4, 'blink' : 5, 'reverse': 7} RESET = "\033[m" # pylint: disable=W1401 # backslash is not anomalous def is_color(s): return s in COLORS def is_attr(s): return s in ATTRS def _Color(fg = None, bg = None, attr = None): fg = COLORS[fg] bg = COLORS[bg] attr = ATTRS[attr] if attr >= 0 or fg >= 0 or bg >= 0: need_sep = False code = "\033[" #pylint: disable=W1401 if attr >= 0: code += chr(ord('0') + attr) need_sep = True if fg >= 0: if need_sep: code += ';' need_sep = True if fg < 8: code += '3%c' % (ord('0') + fg) else: code += '38;5;%d' % fg if bg >= 0: if need_sep: code += ';' need_sep = True if bg < 8: code += '4%c' % (ord('0') + bg) else: code += '48;5;%d' % bg code += 'm' else: code = '' return code class Coloring(object): def __init__(self, config, section_type): self._section = 'color.%s' % section_type self._config = config self._out = sys.stdout on = self._config.GetString(self._section) if on is None: on = self._config.GetString('color.ui') if on == 'auto': if pager.active or os.isatty(1): self._on = True else: self._on = False elif on in ('true', 'always'): self._on = True else: self._on = False def redirect(self, out): self._out = out @property def 
is_on(self): return self._on def write(self, fmt, *args): self._out.write(fmt % args) def flush(self): self._out.flush() def nl(self): self._out.write('\n') def printer(self, opt=None, fg=None, bg=None, attr=None): s = self c = self.colorer(opt, fg, bg, attr) def f(fmt, *args): s._out.write(c(fmt, *args)) return f def nofmt_printer(self, opt=None, fg=None, bg=None, attr=None): s = self c = self.nofmt_colorer(opt, fg, bg, attr) def f(fmt): s._out.write(c(fmt)) return f def colorer(self, opt=None, fg=None, bg=None, attr=None): if self._on: c = self._parse(opt, fg, bg, attr) def f(fmt, *args): output = fmt % args return ''.join([c, output, RESET]) return f else: def f(fmt, *args): return fmt % args return f def nofmt_colorer(self, opt=None, fg=None, bg=None, attr=None): if self._on: c = self._parse(opt, fg, bg, attr) def f(fmt): return ''.join([c, fmt, RESET]) return f else: def f(fmt): return fmt return f def _parse(self, opt, fg, bg, attr): if not opt: return _Color(fg, bg, attr) v = self._config.GetString('%s.%s' % (self._section, opt)) if v is None: return _Color(fg, bg, attr) v = v.strip().lower() if v == "reset": return RESET elif v == '': return _Color(fg, bg, attr) have_fg = False for a in v.split(' '): if is_color(a): if have_fg: bg = a else: fg = a elif is_attr(a): attr = a return _Color(fg, bg, attr)
apache-2.0
tomtor/QGIS
python/plugins/processing/tests/ToolsTest.py
45
3227
# -*- coding: utf-8 -*- """ *************************************************************************** ToolsTest --------------------- Date : July 2016 Copyright : (C) 2016 by Nyall Dawson Email : nyall dot dawson at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Nyall Dawson' __date__ = 'July 2016' __copyright__ = '(C) 2016, Nyall Dawson' import os import shutil from qgis.core import NULL, QgsVectorLayer from qgis.testing import start_app, unittest from processing.tests.TestData import points from processing.tools import vector testDataPath = os.path.join(os.path.dirname(__file__), 'testdata') start_app() class VectorTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.cleanup_paths = [] @classmethod def tearDownClass(cls): for path in cls.cleanup_paths: shutil.rmtree(path) def testValues(self): test_data = points() test_layer = QgsVectorLayer(test_data, 'test', 'ogr') # field by index res = vector.values(test_layer, 1) self.assertEqual(res[1], [1, 2, 3, 4, 5, 6, 7, 8, 9]) # field by name res = vector.values(test_layer, 'id') self.assertEqual(res['id'], [1, 2, 3, 4, 5, 6, 7, 8, 9]) # two fields res = vector.values(test_layer, 1, 2) self.assertEqual(res[1], [1, 2, 3, 4, 5, 6, 7, 8, 9]) self.assertEqual(res[2], [2, 1, 0, 2, 1, 0, 0, 0, 0]) # two fields by name res = vector.values(test_layer, 'id', 'id2') self.assertEqual(res['id'], [1, 2, 3, 4, 5, 6, 7, 8, 9]) self.assertEqual(res['id2'], [2, 1, 0, 2, 1, 0, 0, 0, 0]) # two fields by name and index res = vector.values(test_layer, 'id', 2) self.assertEqual(res['id'], [1, 2, 3, 4, 5, 6, 7, 8, 9]) 
self.assertEqual(res[2], [2, 1, 0, 2, 1, 0, 0, 0, 0]) def testConvertNulls(self): self.assertEqual(vector.convert_nulls([]), []) self.assertEqual(vector.convert_nulls([], '_'), []) self.assertEqual(vector.convert_nulls([NULL]), [None]) self.assertEqual(vector.convert_nulls([NULL], '_'), ['_']) self.assertEqual(vector.convert_nulls([NULL], -1), [-1]) self.assertEqual(vector.convert_nulls([1, 2, 3]), [1, 2, 3]) self.assertEqual(vector.convert_nulls([1, None, 3]), [1, None, 3]) self.assertEqual(vector.convert_nulls([1, NULL, 3, NULL]), [1, None, 3, None]) self.assertEqual(vector.convert_nulls([1, NULL, 3, NULL], '_'), [1, '_', 3, '_']) if __name__ == '__main__': unittest.main()
gpl-2.0
asm-products/movie-database-service
ani/lib/python2.7/site-packages/selenium/webdriver/chrome/service.py
25
3676
#!/usr/bin/python # # Copyright 2011 Webdriver_name committers # Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess from subprocess import PIPE import time from selenium.common.exceptions import WebDriverException from selenium.webdriver.common import utils class Service(object): """ Object that manages the starting and stopping of the ChromeDriver """ def __init__(self, executable_path, port=0, service_args=None, log_path=None, env=None): """ Creates a new instance of the Service :Args: - executable_path : Path to the ChromeDriver - port : Port the service is running on - service_args : List of args to pass to the chromedriver service - log_path : Path for the chromedriver service to log to""" self.port = port self.path = executable_path self.service_args = service_args or [] if log_path: self.service_args.append('--log-path=%s' % log_path) if self.port == 0: self.port = utils.free_port() self.env = env def start(self): """ Starts the ChromeDriver Service. :Exceptions: - WebDriverException : Raised either when it can't start the service or when it can't connect to the service """ env = self.env or os.environ try: self.process = subprocess.Popen([ self.path, "--port=%d" % self.port] + self.service_args, env=env, stdout=PIPE, stderr=PIPE) except: raise WebDriverException( "ChromeDriver executable needs to be available in the path. 
\ Please download from http://chromedriver.storage.googleapis.com/index.html\ and read up at http://code.google.com/p/selenium/wiki/ChromeDriver") count = 0 while not utils.is_connectable(self.port): count += 1 time.sleep(1) if count == 30: raise WebDriverException("Can not connect to the ChromeDriver") @property def service_url(self): """ Gets the url of the ChromeDriver Service """ return "http://localhost:%d" % self.port def stop(self): """ Tells the ChromeDriver to stop and cleans up the process """ #If its dead dont worry if self.process is None: return #Tell the Server to die! try: from urllib import request as url_request except ImportError: import urllib2 as url_request url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port) count = 0 while utils.is_connectable(self.port): if count == 30: break count += 1 time.sleep(1) #Tell the Server to properly die in case try: if self.process: self.process.kill() self.process.wait() except OSError: # kill may not be available under windows environment pass
agpl-3.0
mheap/ansible
lib/ansible/modules/network/f5/bigip_irule.py
11
12636
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: bigip_irule short_description: Manage iRules across different modules on a BIG-IP description: - Manage iRules across different modules on a BIG-IP. version_added: 2.2 options: content: description: - When used instead of 'src', sets the contents of an iRule directly to the specified value. This is for simple values, but can be used with lookup plugins for anything complex or with formatting. Either one of C(src) or C(content) must be provided. module: description: - The BIG-IP module to add the iRule to. required: True choices: - ltm - gtm name: description: - The name of the iRule. required: True src: description: - The iRule file to interpret and upload to the BIG-IP. Either one of C(src) or C(content) must be provided. required: True state: description: - Whether the iRule should exist or not. default: present choices: - present - absent partition: description: - Device partition to manage resources on. 
default: Common version_added: 2.5 extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Add the iRule contained in template irule.tcl to the LTM module bigip_irule: content: "{{ lookup('template', 'irule.tcl') }}" module: ltm name: MyiRule password: secret server: lb.mydomain.com state: present user: admin delegate_to: localhost - name: Add the iRule contained in static file irule.tcl to the LTM module bigip_irule: module: ltm name: MyiRule password: secret server: lb.mydomain.com src: irule.tcl state: present user: admin delegate_to: localhost ''' RETURN = r''' module: description: The module that the iRule was added to returned: changed and success type: string sample: gtm src: description: The filename that included the iRule source returned: changed and success, when provided type: string sample: /opt/src/irules/example1.tcl content: description: The content of the iRule that was managed returned: changed and success type: string sample: "when LB_FAILED { set wipHost [LB::server addr] }" ''' import os from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import HAS_F5SDK from library.module_utils.network.f5.bigip import F5Client from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import f5_argument_spec try: from library.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False except ImportError: from ansible.module_utils.network.f5.bigip import HAS_F5SDK from ansible.module_utils.network.f5.bigip import F5Client from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from 
ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import f5_argument_spec try: from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False class Parameters(AnsibleF5Parameters): api_map = { 'apiAnonymous': 'content' } updatables = [ 'content' ] api_attributes = [ 'apiAnonymous' ] returnables = [ 'content', 'src', 'module' ] def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result @property def content(self): if self._values['content'] is None: result = self.src_content else: result = self._values['content'] return str(result).strip() @property def src(self): if self._values['src'] is None: return None return self._values['src'] @property def src_content(self): if not os.path.exists(self._values['src']): raise F5ModuleError( "The specified 'src' was not found." 
) with open(self._values['src']) as f: result = f.read() return result class ModuleManager(object): def __init__(self, *args, **kwargs): self.client = kwargs.get('client', None) self.module = kwargs.get('module', None) self.kwargs = kwargs def exec_module(self): if self.module.params['module'] == 'ltm': manager = self.get_manager('ltm') elif self.module.params['module'] == 'gtm': manager = self.get_manager('gtm') else: raise F5ModuleError( "An unknown iRule module type was specified" ) return manager.exec_module() def get_manager(self, type): if type == 'ltm': return LtmManager(**self.kwargs) elif type == 'gtm': return GtmManager(**self.kwargs) class BaseManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = Parameters(params=self.module.params) self.changes = Parameters() def exec_module(self): changed = False result = dict() state = self.want.state try: if state == "present": changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = Parameters(params=changed) def _update_changed_options(self): changed = {} for key in Parameters.updatables: if getattr(self.want, key) is not None: attr1 = getattr(self.want, key) attr2 = getattr(self.have, key) if attr1 != attr2: changed[key] = attr1 if changed: self.changes = Parameters(params=changed) return True return False def present(self): if not self.want.content and not self.want.src: raise F5ModuleError( "Either 'content' or 'src' must be provided" ) if self.exists(): return self.update() else: return self.create() def create(self): 
self._set_changed_options() if self.module.check_mode: return True self.create_on_device() if not self.exists(): raise F5ModuleError("Failed to create the iRule") return True def should_update(self): result = self._update_changed_options() if result: return True return False def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def absent(self): if self.exists(): return self.remove() return False def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the iRule") return True class LtmManager(BaseManager): def exists(self): result = self.client.api.tm.ltm.rules.rule.exists( name=self.want.name, partition=self.want.partition ) return result def update_on_device(self): params = self.changes.api_params() resource = self.client.api.tm.ltm.rules.rule.load( name=self.want.name, partition=self.want.partition ) resource.update(**params) def create_on_device(self): params = self.want.api_params() resource = self.client.api.tm.ltm.rules.rule resource.create( name=self.want.name, partition=self.want.partition, **params ) def read_current_from_device(self): resource = self.client.api.tm.ltm.rules.rule.load( name=self.want.name, partition=self.want.partition ) result = resource.attrs return Parameters(params=result) def remove_from_device(self): resource = self.client.api.tm.ltm.rules.rule.load( name=self.want.name, partition=self.want.partition ) resource.delete() class GtmManager(BaseManager): def read_current_from_device(self): resource = self.client.api.tm.gtm.rules.rule.load( name=self.want.name, partition=self.want.partition ) result = resource.attrs return Parameters(params=result) def remove_from_device(self): resource = self.client.api.tm.gtm.rules.rule.load( name=self.want.name, partition=self.want.partition ) resource.delete() def exists(self): result = 
self.client.api.tm.gtm.rules.rule.exists( name=self.want.name, partition=self.want.partition ) return result def update_on_device(self): params = self.changes.api_params() resource = self.client.api.tm.gtm.rules.rule.load( name=self.want.name, partition=self.want.partition ) resource.update(**params) def create_on_device(self): params = self.want.api_params() resource = self.client.api.tm.gtm.rules.rule resource.create( name=self.want.name, partition=self.want.partition, **params ) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( content=dict( required=False, default=None ), src=dict( required=False, default=None ), name=dict(required=True), module=dict( required=True, choices=['gtm', 'ltm'] ), state=dict( default='present', choices=['present', 'absent'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ['content', 'src'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive ) if not HAS_F5SDK: module.fail_json(msg="The python f5-sdk module is required") try: client = F5Client(**module.params) mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) module.exit_json(**results) except F5ModuleError as e: cleanup_tokens(client) module.fail_json(msg=str(e)) if __name__ == '__main__': main()
gpl-3.0
theoryno3/pylearn2
pylearn2/utils/tests/test_string_utils.py
39
4610
""" Tests for string_utils.py """ from __future__ import print_function import os import uuid from theano.compat.six.moves import xrange from pylearn2.utils.string_utils import find_number from pylearn2.utils.string_utils import preprocess from pylearn2.utils.string_utils import tokenize_by_number from pylearn2.utils.string_utils import number_aware_alphabetical_key def test_preprocess(): """ Tests that `preprocess` fills in environment variables using various interfaces and raises a ValueError if a needed environment variable definition is missing. """ try: keys = ["PYLEARN2_" + str(uuid.uuid1())[:8] for _ in xrange(3)] strs = ["${%s}" % k for k in keys] os.environ[keys[0]] = keys[1] # Test with os.environ only. assert preprocess(strs[0]) == keys[1] # Test with provided dict only. assert preprocess(strs[1], environ={keys[1]: keys[2]}) == keys[2] # Provided overrides os.environ. assert preprocess(strs[0], environ={keys[0]: keys[2]}) == keys[2] raised = False try: preprocess(strs[2], environ={keys[1]: keys[0]}) except ValueError: raised = True assert raised finally: for key in keys: if key in os.environ: del os.environ[key] def test_find_number_0(): "test that we find no number in a string with no numbers" r = find_number('sss') assert r is None def test_find_number_1(): "tests that we find an int among letters" s = 'jashlhl123sfs' r = find_number(s) assert s[r[0]:r[1]] == '123' def test_find_number_2(): "tests that we find an int with a negative sign" s = 'aghwirougiuhfajlsopka"-987?' 
r = find_number(s) assert s[r[0]:r[1]] == '-987' def test_find_number_3(): "tests that we find the first of two numbers" s = 'jq% misdirect/ 82ghn 931' r = find_number(s) assert s[r[0]:r[1]] == '82' def test_find_number_4(): "tests that we find decimal numbers" s = 'the quick brown fox 54.6 jumped' r = find_number(s) assert s[r[0]:r[1]] == '54.6' def test_find_number_5(): "tests that we find decimal numbers with negative signs" s = 'over the laz-91.2y dog' r = find_number(s) assert s[r[0]:r[1]] == '-91.2' def test_find_number_6(): "tests that we find numbers with exponents" s = 'query1e5 not found' r = find_number(s) assert s[r[0]:r[1]] == '1e5' def test_find_number_7(): "tests that we find decimal numbers with exponents" s = 'sdglk421.e6' r = find_number(s) assert s[r[0]:r[1]] == '421.e6', s[r[0]:r[1]] def test_find_number_8(): "tests that we find numbers with exponents and negative signs" s = 'ryleh -14e7$$!$' r = find_number(s) assert s[r[0]:r[1]] == '-14e7' def test_find_number_false_exponent(): """tests that we don't include an e after a number, mistaking it for an exponent.""" s = '2e' r = find_number(s) assert s[r[0]:r[1]] == '2', s[r[0]:r[1]] def token_lists_match(l, r): """ Returns true if the lists of tokens match. Parameters ---------- l : list A list whose elements are either strings, ints, or floats. r : list Same format as l Returns ------- output : bool True if `l` and `r` have the same length and element i of `l` matches element i of `r`, False otherwise. """ if len(l) != len(r): print("lengths don't match") print(len(l)) print(len(r)) return False for l_elem, r_elem in zip(l, r): assert isinstance(l_elem, (str, float, int)), type(l_elem) assert isinstance(r_elem, (str, float, int)), type(r_elem) if l_elem != r_elem: print('"' + l_elem + '" doesn\'t match "' + r_elem + '"') return False return True def test_tokenize_0(): """ Tests that tokenize_by_numbers matches a manually generated output on a specific example. 
""" s = ' 123 klsdgh 56.7?98.2---\%-1e3' true_tokens = [' ', 123, ' klsdgh ', 56.7, '?', 98.2, '---\%', -1e3] tokens = tokenize_by_number(s) assert token_lists_match(tokens, true_tokens) def test_number_aware_alphabetical_key(): """ Tests that number-aware sorting matches a manually generated output on a specific example. """ l = ['0', 'mystr_1', 'mystr_10', 'mystr_2', 'mystr_1_a', 'mystr'] l.sort(key=number_aware_alphabetical_key) print(l) assert l == ['0', 'mystr', 'mystr_1', 'mystr_1_a', 'mystr_2', 'mystr_10']
bsd-3-clause
Autostew/autostew
autostew_web_account/urls.py
1
1584
from django.conf.urls import url from autostew_web_account.views import login_view, account_view, logout_view, register_view, settings_view, \ rotation_view, queue_view, add_view, remove_rotated_setup, add_setup_to_rotation, remove_queued_setup, \ add_setup_to_queue, toggle_kicks_view, set_crash_points_limit, set_custom_motd urlpatterns = [ url(r'^add$', add_view, name='add'), url(r'^toggle_kicks/(?P<pk>[0-9]+)/?$', toggle_kicks_view, name='toggle_kicks'), url(r'^set_crash_points_limit/(?P<pk>[0-9]+)/?$', set_crash_points_limit, name='set_crash_points_limit'), url(r'^set_custom_motd/(?P<pk>[0-9]+)/?$', set_custom_motd, name='set_custom_motd'), url(r'^settings/(?P<pk>[0-9]+)/?$', settings_view, name='settings'), url(r'^rotation/(?P<pk>[0-9]+)/?$', rotation_view, name='rotation'), url(r'^rotation/(?P<server_pk>[0-9]+)/remove/(?P<entry_pk>[0-9]+)/?$', remove_rotated_setup, name='remove_rotated_setup'), url(r'^rotation/(?P<server_pk>[0-9]+)/add/(?P<setup_pk>[0-9]+)/?$', add_setup_to_rotation, name='add_setup_to_rotation'), url(r'^queue/(?P<pk>[0-9]+)/?$', queue_view, name='queue'), url(r'^queue/(?P<server_pk>[0-9]+)/remove/(?P<entry_pk>[0-9]+)/?$', remove_queued_setup, name='remove_queued_setup'), url(r'^queue/(?P<server_pk>[0-9]+)/add/(?P<setup_pk>[0-9]+)/?$', add_setup_to_queue, name='add_setup_to_queue'), url(r'^register/?$', register_view, name='register'), url(r'^login/?$', login_view, name='login'), url(r'^logout/?$', logout_view, name='logout'), url(r'^$', account_view, name='home'), ]
agpl-3.0
CYBERBUGJR/Diamond
src/collectors/jcollectd/jcollectd.py
30
5977
# coding=utf-8 """ The JCollectdCollector is capable of receiving Collectd network traffic as sent by the JCollectd jvm agent (and child Collectd processes). Reason for developing this collector is allowing to use JCollectd, without the need for Collectd. A few notes: This collector starts a UDP server to receive data. This server runs in a separate thread and puts it on a queue, waiting for the collect() method to pull. Because of this setup, the collector interval parameter is of less importance. What matters is the 'sendinterval' JCollectd parameter. See https://github.com/emicklei/jcollectd for an up-to-date jcollect fork. #### Dependencies * jcollectd sending metrics """ import threading import re import Queue import diamond.collector import diamond.metric import collectd_network ALIVE = True class JCollectdCollector(diamond.collector.Collector): def __init__(self, *args, **kwargs): super(JCollectdCollector, self).__init__(*args, **kwargs) self.listener_thread = None def get_default_config(self): """ Returns the default collector settings """ config = super(JCollectdCollector, self).get_default_config() config.update({ 'path': 'jvm', 'listener_host': '127.0.0.1', 'listener_port': 25826, }) return config def collect(self): if not self.listener_thread: self.start_listener() q = self.listener_thread.queue while True: try: dp = q.get(False) metric = self.make_metric(dp) except Queue.Empty: break self.publish_metric(metric) def start_listener(self): self.listener_thread = ListenerThread(self.config['listener_host'], self.config['listener_port'], self.log) self.listener_thread.start() def stop_listener(self): global ALIVE ALIVE = False self.listener_thread.join() self.log.error('Listener thread is shut down.') def make_metric(self, dp): path = ".".join((dp.host, self.config['path'], dp.name)) if 'path_prefix' in self.config: prefix = self.config['path_prefix'] if prefix: path = ".".join((prefix, path)) if 'path_suffix' in self.config: suffix = 
self.config['path_suffix'] if suffix: path = ".".join((path, suffix)) if dp.is_counter: metric_type = "COUNTER" else: metric_type = "GAUGE" metric = diamond.metric.Metric(path, dp.value, dp.time, metric_type=metric_type) return metric def __del__(self): if self.listener_thread: self.stop_listener() class ListenerThread(threading.Thread): def __init__(self, host, port, log, poll_interval=0.4): super(ListenerThread, self).__init__() self.name = 'JCollectdListener' # thread name self.host = host self.port = port self.log = log self.poll_interval = poll_interval self.queue = Queue.Queue() def run(self): self.log.info('ListenerThread started on {0}:{1}(udp)'.format( self.host, self.port)) rdr = collectd_network.Reader(self.host, self.port) try: while ALIVE: try: items = rdr.interpret(poll_interval=self.poll_interval) self.send_to_collector(items) except ValueError, e: self.log.warn('Dropping bad packet: {0}'.format(e)) except Exception, e: self.log.error('caught exception: type={0}, exc={1}'.format(type(e), e)) self.log.info('ListenerThread - stop') def send_to_collector(self, items): if items is None: return for item in items: try: metric = self.transform(item) self.queue.put(metric) except Queue.Full: self.log.error('Queue to collector is FULL') except Exception, e: self.log.error('B00M! type={0}, exception={1}'.format(type(e), e)) def transform(self, item): parts = [] path = item.plugininstance # extract jvm name from 'logstash-MemoryPool Eden Space' if '-' in path: (jvm, tail) = path.split('-', 1) path = tail else: jvm = 'unnamed' # add JVM name parts.append(jvm) # add mbean name (e.g. 
'java_lang') parts.append(item.plugin) # get typed mbean: 'MemoryPool Eden Space' if ' ' in path: (mb_type, mb_name) = path.split(' ', 1) parts.append(mb_type) parts.append(mb_name) else: parts.append(path) # add property name parts.append(item.typeinstance) # construct full path, from safe parts name = '.'.join([sanitize_word(part) for part in parts]) if item[0][0] == 0: is_counter = True else: is_counter = False dp = Datapoint(item.host, item.time, name, item[0][1], is_counter) return dp def sanitize_word(s): """Remove non-alphanumerical characters from metric word. And trim excessive underscores. """ s = re.sub('[^\w-]+', '_', s) s = re.sub('__+', '_', s) return s.strip('_') class Datapoint(object): def __init__(self, host, time, name, value, is_counter): self.host = host self.time = time self.name = name self.value = value self.is_counter = is_counter
mit
rbaindourov/v8-inspector
Source/chrome/tools/gyp/test/generator-output/gyptest-top-all.py
216
1437
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies building a project hierarchy created when the --generator-output= option is used to put the build configuration files in a separate directory tree. """ import TestGyp # Android doesn't support --generator-output. test = TestGyp.TestGyp(formats=['!android']) test.writable(test.workpath('src'), False) test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--generator-output=' + test.workpath('gypfiles'), chdir='src') test.writable(test.workpath('src/build'), True) test.writable(test.workpath('src/subdir2/build'), True) test.writable(test.workpath('src/subdir3/build'), True) test.build('prog1.gyp', test.ALL, chdir='gypfiles') chdir = 'gypfiles' expect = """\ Hello from %s Hello from inc.h Hello from inc1/include1.h Hello from inc2/include2.h Hello from inc3/include3.h Hello from subdir2/deeper/deeper.h """ if test.format == 'xcode': chdir = 'src' test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c') if test.format == 'xcode': chdir = 'src/subdir2' test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c') if test.format == 'xcode': chdir = 'src/subdir3' test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c') test.pass_test()
bsd-3-clause
alflanagan/pyxbee
tools/annotations.py
1
1104
# vim: fileencoding=utf-8 from functools import wraps import inspect def print_call(f): sig = inspect.signature(f) called_name = f.__name__ called_params = sig.parameters #oh, this is beautiful @wraps(f) def wrapper(*args, **kwargs): #Prints the function name of its caller, with args #arg to stack() is # of lines of context to include (not well documented) s = inspect.stack(0) #print(s[1]) caller_name = s[1][3] caller_frame = s[1][0] #print([x for x, y in inspect.getmembers(caller_frame)]) params = [str(x) for x in args] kwdparams = [] for key in kwargs: kwdparams.append("{}={}".format(str(key), str(kwargs[key]))) kwd_string = ', '.join(kwdparams) outstr = caller_name + " called " + called_name + "(" + ', '.join(params) if kwdparams: if params: outstr += ", " + kwd_string else: outstr += kwd_string outstr += ")" print(outstr) return f(*args, **kwargs) return wrapper
gpl-3.0
tjsavage/sfcsdatabase
sfcs/django/contrib/localflavor/sk/forms.py
344
1439
""" Slovak-specific form helpers """ from django.forms.fields import Select, RegexField from django.utils.translation import ugettext_lazy as _ class SKRegionSelect(Select): """ A select widget widget with list of Slovak regions as choices. """ def __init__(self, attrs=None): from sk_regions import REGION_CHOICES super(SKRegionSelect, self).__init__(attrs, choices=REGION_CHOICES) class SKDistrictSelect(Select): """ A select widget with list of Slovak districts as choices. """ def __init__(self, attrs=None): from sk_districts import DISTRICT_CHOICES super(SKDistrictSelect, self).__init__(attrs, choices=DISTRICT_CHOICES) class SKPostalCodeField(RegexField): """ A form field that validates its input as Slovak postal code. Valid form is XXXXX or XXX XX, where X represents integer. """ default_error_messages = { 'invalid': _(u'Enter a postal code in the format XXXXX or XXX XX.'), } def __init__(self, *args, **kwargs): super(SKPostalCodeField, self).__init__(r'^\d{5}$|^\d{3} \d{2}$', max_length=None, min_length=None, *args, **kwargs) def clean(self, value): """ Validates the input and returns a string that contains only numbers. Returns an empty string for empty values. """ v = super(SKPostalCodeField, self).clean(value) return v.replace(' ', '')
bsd-3-clause
kouaw/CouchPotatoServer
libs/apscheduler/jobstores/shelve_store.py
92
1974
""" Stores jobs in a file governed by the :mod:`shelve` module. """ import shelve import pickle import random import logging from apscheduler.jobstores.base import JobStore from apscheduler.job import Job from apscheduler.util import itervalues logger = logging.getLogger(__name__) class ShelveJobStore(JobStore): MAX_ID = 1000000 def __init__(self, path, pickle_protocol=pickle.HIGHEST_PROTOCOL): self.jobs = [] self.path = path self.pickle_protocol = pickle_protocol self._open_store() def _open_store(self): self.store = shelve.open(self.path, 'c', self.pickle_protocol) def _generate_id(self): id = None while not id: id = str(random.randint(1, self.MAX_ID)) if not id in self.store: return id def add_job(self, job): job.id = self._generate_id() self.store[job.id] = job.__getstate__() self.store.close() self._open_store() self.jobs.append(job) def update_job(self, job): job_dict = self.store[job.id] job_dict['next_run_time'] = job.next_run_time job_dict['runs'] = job.runs self.store[job.id] = job_dict self.store.close() self._open_store() def remove_job(self, job): del self.store[job.id] self.store.close() self._open_store() self.jobs.remove(job) def load_jobs(self): jobs = [] for job_dict in itervalues(self.store): try: job = Job.__new__(Job) job.__setstate__(job_dict) jobs.append(job) except Exception: job_name = job_dict.get('name', '(unknown)') logger.exception('Unable to restore job "%s"', job_name) self.jobs = jobs def close(self): self.store.close() def __repr__(self): return '<%s (path=%s)>' % (self.__class__.__name__, self.path)
gpl-3.0
jolyboy/ardupilot
Tools/LogAnalyzer/tests/TestVCC.py
218
1278
from LogAnalyzer import Test,TestResult import DataflashLog import collections class TestVCC(Test): '''test for VCC within recommendations, or abrupt end to log in flight''' def __init__(self): Test.__init__(self) self.name = "VCC" def run(self, logdata, verbose): self.result = TestResult() self.result.status = TestResult.StatusType.GOOD if not "CURR" in logdata.channels: self.result.status = TestResult.StatusType.UNKNOWN self.result.statusMessage = "No CURR log data" return # just a naive min/max test for now vccMin = logdata.channels["CURR"]["Vcc"].min() vccMax = logdata.channels["CURR"]["Vcc"].max() vccDiff = vccMax - vccMin; vccMinThreshold = 4.6 * 1000; vccMaxDiff = 0.3 * 1000; if vccDiff > vccMaxDiff: self.result.status = TestResult.StatusType.WARN self.result.statusMessage = "VCC min/max diff %sv, should be <%sv" % (vccDiff/1000.0, vccMaxDiff/1000.0) elif vccMin < vccMinThreshold: self.result.status = TestResult.StatusType.FAIL self.result.statusMessage = "VCC below minimum of %sv (%sv)" % (`vccMinThreshold/1000.0`,`vccMin/1000.0`)
gpl-3.0
Exa-Networks/exaproxy
lib/exaproxy/reactor/redirector/serialize/icap.py
2
2328
# encoding: utf-8 class ICAPSerializer (object): def __init__ (self, configuration, protocol): self.configuration = configuration self.protocol = protocol def serialize (self, accept_addr, accept_port, peer, message, icap_message, http_header, path, icap_host): if icap_message is not None and icap_message.method == 'OPTIONS': res = self.createOptionsRequest(accept_addr, accept_port, peer, icap_message, path) return res return self.createRequest(accept_addr, accept_port, peer, message, icap_message, http_header, path, icap_host) def createOptionsRequest (self, accept_addr, accept_port, peer, icap_message, path): return """\ OPTIONS %s ICAP/1.0 Pragma: transport= Pragma: proxy=test Pragma: scheme= Pragma: accept=%s Pragma: accept-port=%s Pragma: client=%s Pragma: host= Pragma: path= Pragma: method= Encapsulated: req-hdr=0, null-body=0 """ % (path, accept_addr, accept_port, peer) def createRequest (self, accept_addr, accept_port, peer, message, icap_message, http_header, path, icap_host): username = icap_message.headers.get('x-authenticated-user', '').strip() if icap_message else None groups = icap_message.headers.get('x-authenticated-groups', '').strip() if icap_message else None ip_addr = icap_message.headers.get('x-client-ip', '').strip() if icap_message else None customer = icap_message.headers.get('x-customer-name', '').strip() if icap_message else None allow = icap_message.headers.get('allow', '').strip() if icap_message else None icap_request = """\ REQMOD %s ICAP/1.0 Host: %s Pragma: transport=%s Pragma: proxy=test Pragma: scheme=http Pragma: accept=%s Pragma: accept-port=%s Pragma: client=%s Pragma: host=%s Pragma: path=%s Pragma: method=%s""" % ( path, icap_host, message.request.protocol, accept_addr, accept_port, peer, message.host, message.request.path, message.request.method, ) if ip_addr: icap_request += """ X-Client-IP: %s""" % ip_addr if username: icap_request += """ X-Authenticated-User: %s""" % username if groups: icap_request += """ 
X-Authenticated-Groups: %s""" % groups if customer: icap_request += """ X-Customer-Name: %s""" % customer if allow: icap_request += """ Allow: %s""" % allow return icap_request + """ Encapsulated: req-hdr=0, null-body=%d %s""" % (len(http_header), http_header)
bsd-2-clause
ClearingHouse/clearblockd
lib/events.py
4
8758
import os import re import logging import datetime import time import copy import decimal import json import urllib import StringIO import pymongo import gevent from PIL import Image from lib import config, util, blockchain from lib.components import assets, assets_trading, betting D = decimal.Decimal COMPILE_MARKET_PAIR_INFO_PERIOD = 10 * 60 #in seconds (this is every 10 minutes currently) COMPILE_ASSET_MARKET_INFO_PERIOD = 30 * 60 #in seconds (this is every 30 minutes currently) def check_blockchain_service(): try: blockchain.check() except Exception as e: raise Exception('Could not connect to blockchain service: %s' % e) finally: gevent.spawn_later(5 * 60, check_blockchain_service) #call again in 5 minutes def expire_stale_prefs(): """ Every day, clear out preferences objects that haven't been touched in > 30 days, in order to reduce abuse risk/space consumed """ mongo_db = config.mongo_db min_last_updated = time.mktime((datetime.datetime.utcnow() - datetime.timedelta(days=30)).timetuple()) num_stale_records = config.mongo_db.preferences.find({'last_touched': {'$lt': min_last_updated}}).count() mongo_db.preferences.remove({'last_touched': {'$lt': min_last_updated}}) if num_stale_records: logging.warn("REMOVED %i stale preferences objects" % num_stale_records) #call again in 1 day gevent.spawn_later(86400, expire_stale_prefs) def expire_stale_btc_open_order_records(): mongo_db = config.mongo_db min_when_created = time.mktime((datetime.datetime.utcnow() - datetime.timedelta(days=15)).timetuple()) num_stale_records = config.mongo_db.btc_open_orders.find({'when_created': {'$lt': min_when_created}}).count() mongo_db.btc_open_orders.remove({'when_created': {'$lt': min_when_created}}) if num_stale_records: logging.warn("REMOVED %i stale BTC open order objects" % num_stale_records) #call again in 1 day gevent.spawn_later(86400, expire_stale_btc_open_order_records) def generate_wallet_stats(): """ Every 30 minutes, from the login history, update and generate wallet stats 
""" mongo_db = config.mongo_db def gen_stats_for_network(network): assert network in ('mainnet', 'testnet') #get the latest date in the stats table present now = datetime.datetime.utcnow() latest_stat = mongo_db.wallet_stats.find({'network': network}).sort('when', pymongo.DESCENDING).limit(1) latest_stat = latest_stat[0] if latest_stat.count() else None new_entries = {} #the queries below work with data that happened on or after the date of the latest stat present #aggregate over the same period for new logins, adding the referrers to a set match_criteria = {'when': {"$gte": latest_stat['when']}, 'network': network, 'action': 'create'} \ if latest_stat else {'when': {"$lte": now}, 'network': network, 'action': 'create'} new_wallets = mongo_db.login_history.aggregate([ {"$match": match_criteria }, {"$project": { "year": {"$year": "$when"}, "month": {"$month": "$when"}, "day": {"$dayOfMonth": "$when"} }}, {"$group": { "_id": {"year": "$year", "month": "$month", "day": "$day"}, "new_count": {"$sum": 1} }} ]) new_wallets = [] if not new_wallets['ok'] else new_wallets['result'] for e in new_wallets: ts = time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day']).timetuple()) new_entries[ts] = { #a future wallet_stats entry 'when': datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day']), 'network': network, 'new_count': e['new_count'], } referer_counts = mongo_db.login_history.aggregate([ {"$match": match_criteria }, {"$project": { "year": {"$year": "$when"}, "month": {"$month": "$when"}, "day": {"$dayOfMonth": "$when"}, "referer": 1 }}, {"$group": { "_id": {"year": "$year", "month": "$month", "day": "$day", "referer": "$referer"}, #"uniqueReferers": {"$addToSet": "$_id"}, "count": {"$sum": 1} }} ]) referer_counts = [] if not referer_counts['ok'] else referer_counts['result'] for e in referer_counts: ts = time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day']).timetuple()) assert ts in new_entries 
referer_key = urllib.quote(e['_id']['referer']).replace('.', '%2E') if 'referers' not in new_entries[ts]: new_entries[ts]['referers'] = {} if e['_id']['referer'] not in new_entries[ts]['referers']: new_entries[ts]['referers'][referer_key] = 0 new_entries[ts]['referers'][referer_key] += 1 #logins (not new wallets) - generate stats match_criteria = {'when': {"$gte": latest_stat['when']}, 'network': network, 'action': 'login'} \ if latest_stat else {'when': {"$lte": now}, 'network': network, 'action': 'login'} logins = mongo_db.login_history.aggregate([ {"$match": match_criteria }, {"$project": { "year": {"$year": "$when"}, "month": {"$month": "$when"}, "day": {"$dayOfMonth": "$when"}, "wallet_id": 1 }}, {"$group": { "_id": {"year": "$year", "month": "$month", "day": "$day"}, "login_count": {"$sum": 1}, "distinct_wallets": {"$addToSet": "$wallet_id"}, }} ]) logins = [] if not logins['ok'] else logins['result'] for e in logins: ts = time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day']).timetuple()) if ts not in new_entries: new_entries[ts] = { #a future wallet_stats entry 'when': datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day']), 'network': network, 'new_count': 0, 'referers': [] } new_entries[ts]['login_count'] = e['login_count'] new_entries[ts]['distinct_login_count'] = len(e['distinct_wallets']) #add/replace the wallet_stats data if latest_stat: updated_entry_ts = time.mktime(datetime.datetime( latest_stat['when'].year, latest_stat['when'].month, latest_stat['when'].day).timetuple()) if updated_entry_ts in new_entries: updated_entry = new_entries[updated_entry_ts] del new_entries[updated_entry_ts] assert updated_entry['when'] == latest_stat['when'] del updated_entry['when'] #not required for the upsert logging.info("Revised wallet statistics for partial day %s-%s-%s: %s" % ( latest_stat['when'].year, latest_stat['when'].month, latest_stat['when'].day, updated_entry)) mongo_db.wallet_stats.update({'when': 
latest_stat['when']}, {"$set": updated_entry}, upsert=True) if new_entries: #insert the rest #logging.info("Stats, new entries: %s" % new_entries.values()) mongo_db.wallet_stats.insert(new_entries.values()) logging.info("Added wallet statistics for %i full days" % len(new_entries.values())) gen_stats_for_network('mainnet') gen_stats_for_network('testnet') #call again in 30 minutes gevent.spawn_later(30 * 60, generate_wallet_stats) def compile_asset_pair_market_info(): assets_trading.compile_asset_pair_market_info() #all done for this run...call again in a bit gevent.spawn_later(COMPILE_MARKET_PAIR_INFO_PERIOD, compile_asset_pair_market_info) def compile_extended_asset_info(): assets.fetch_all_asset_info(config.mongo_db) #call again in 60 minutes gevent.spawn_later(60 * 60, compile_extended_asset_info) def compile_extended_feed_info(): betting.fetch_all_feed_info(config.mongo_db) #call again in 5 minutes gevent.spawn_later(60 * 5, compile_extended_feed_info) def compile_asset_market_info(): assets_trading.compile_asset_market_info() #all done for this run...call again in a bit gevent.spawn_later(COMPILE_ASSET_MARKET_INFO_PERIOD, compile_asset_market_info)
mit
Stane1983/u-boot
tools/buildman/bsettings.py
8
2540
# SPDX-License-Identifier: GPL-2.0+ # Copyright (c) 2012 The Chromium OS Authors. import configparser import os import io def Setup(fname=''): """Set up the buildman settings module by reading config files Args: config_fname: Config filename to read ('' for default) """ global settings global config_fname settings = configparser.SafeConfigParser() if fname is not None: config_fname = fname if config_fname == '': config_fname = '%s/.buildman' % os.getenv('HOME') if not os.path.exists(config_fname): print('No config file found ~/.buildman\nCreating one...\n') CreateBuildmanConfigFile(config_fname) print('To install tool chains, please use the --fetch-arch option') if config_fname: settings.read(config_fname) def AddFile(data): settings.readfp(io.StringIO(data)) def GetItems(section): """Get the items from a section of the config. Args: section: name of section to retrieve Returns: List of (name, value) tuples for the section """ try: return settings.items(section) except configparser.NoSectionError as e: return [] except: raise def SetItem(section, tag, value): """Set an item and write it back to the settings file""" global settings global config_fname settings.set(section, tag, value) if config_fname is not None: with open(config_fname, 'w') as fd: settings.write(fd) def CreateBuildmanConfigFile(config_fname): """Creates a new config file with no tool chain information. Args: config_fname: Config filename to create Returns: None """ try: f = open(config_fname, 'w') except IOError: print("Couldn't create buildman config file '%s'\n" % config_fname) raise print('''[toolchain] # name = path # e.g. x86 = /opt/gcc-4.6.3-nolibc/x86_64-linux [toolchain-prefix] # name = path to prefix # e.g. 
x86 = /opt/gcc-4.6.3-nolibc/x86_64-linux/bin/x86_64-linux- [toolchain-alias] # arch = alias # Indicates which toolchain should be used to build for that arch x86 = i386 blackfin = bfin nds32 = nds32le openrisc = or1k [make-flags] # Special flags to pass to 'make' for certain boards, e.g. to pass a test # flag and build tag to snapper boards: # snapper-boards=ENABLE_AT91_TEST=1 # snapper9260=${snapper-boards} BUILD_TAG=442 # snapper9g45=${snapper-boards} BUILD_TAG=443 ''', file=f) f.close();
gpl-2.0
AutorestCI/azure-sdk-for-python
azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/update_encryption_config.py
2
1120
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class UpdateEncryptionConfig(Model): """The encryption configuration used to update a user managed Key Vault key. :param key_vault_meta_info: The updated Key Vault key to use in user managed key rotation. :type key_vault_meta_info: ~azure.mgmt.datalake.store.models.UpdateKeyVaultMetaInfo """ _attribute_map = { 'key_vault_meta_info': {'key': 'keyVaultMetaInfo', 'type': 'UpdateKeyVaultMetaInfo'}, } def __init__(self, key_vault_meta_info=None): super(UpdateEncryptionConfig, self).__init__() self.key_vault_meta_info = key_vault_meta_info
mit
Davidjohnwilson/sympy
sympy/functions/elementary/tests/test_interface.py
117
1890
# This test file tests the SymPy function interface, that people use to create # their own new functions. It should be as easy as possible. from sympy import Function, sympify, sin, cos, limit, tanh from sympy.abc import x def test_function_series1(): """Create our new "sin" function.""" class my_function(Function): def fdiff(self, argindex=1): return cos(self.args[0]) @classmethod def eval(cls, arg): arg = sympify(arg) if arg == 0: return sympify(0) #Test that the taylor series is correct assert my_function(x).series(x, 0, 10) == sin(x).series(x, 0, 10) assert limit(my_function(x)/x, x, 0) == 1 def test_function_series2(): """Create our new "cos" function.""" class my_function2(Function): def fdiff(self, argindex=1): return -sin(self.args[0]) @classmethod def eval(cls, arg): arg = sympify(arg) if arg == 0: return sympify(1) #Test that the taylor series is correct assert my_function2(x).series(x, 0, 10) == cos(x).series(x, 0, 10) def test_function_series3(): """ Test our easy "tanh" function. This test tests two things: * that the Function interface works as expected and it's easy to use * that the general algorithm for the series expansion works even when the derivative is defined recursively in terms of the original function, since tanh(x).diff(x) == 1-tanh(x)**2 """ class mytanh(Function): def fdiff(self, argindex=1): return 1 - mytanh(self.args[0])**2 @classmethod def eval(cls, arg): arg = sympify(arg) if arg == 0: return sympify(0) e = tanh(x) f = mytanh(x) assert tanh(x).series(x, 0, 6) == mytanh(x).series(x, 0, 6)
bsd-3-clause
pitpalme/volunteer_planner
common/management/commands/check_db_connection.py
5
1439
# coding=utf-8 from time import sleep from django.core.management import BaseCommand, CommandError from django.db import connections from django.db.utils import OperationalError class Command(BaseCommand): help = 'This command checks availability of configured database.' requires_system_checks = False def add_arguments(self, parser): parser.add_argument('--count', type=int, default=5, help='Number of connections attempts before failing') parser.add_argument('--sleep', type=int, default=1, help='Seconds to wait after every failed attempt') def handle(self, *args, **options): if 0 > options['count']: raise CommandError(u'No negative count allowed. ' u'It''s not I''m picky, but I simply don''t now, how to handle it.') if 0 > options['sleep']: raise CommandError(u'No negative sleep allowed. Bear with me, but I forgot my time machine.') success = False db_conn = connections['default'] for i in range(0, options['count']): try: c = db_conn.cursor() success = True break except OperationalError as e: self.stderr.write(self.style.WARNING(u'Database connection failed: %s' % e)) sleep(options['sleep']) if not success: raise CommandError(u'Database connection check failed')
agpl-3.0
abhiQmar/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py
499
1854
# Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Wrong web_socket_transfer_data() signature. """ def web_socket_do_extra_handshake(request): pass def no_web_socket_transfer_data(request): request.connection.write( 'sub/wrong_transfer_sig_wsh.py is called for %s, %s' % (request.ws_resource, request.ws_protocol)) # vi:sts=4 sw=4 et
mpl-2.0
geekaia/edx-platform
common/djangoapps/reverification/models.py
66
2118
""" Models for reverification features common to both lms and studio """ from datetime import datetime import pytz from django.core.exceptions import ValidationError from django.db import models from util.validate_on_save import ValidateOnSaveMixin from xmodule_django.models import CourseKeyField class MidcourseReverificationWindow(ValidateOnSaveMixin, models.Model): """ Defines the start and end times for midcourse reverification for a particular course. There can be many MidcourseReverificationWindows per course, but they cannot have overlapping time ranges. This is enforced by this class's clean() method. """ # the course that this window is attached to course_id = CourseKeyField(max_length=255, db_index=True) start_date = models.DateTimeField(default=None, null=True, blank=True) end_date = models.DateTimeField(default=None, null=True, blank=True) def clean(self): """ Gives custom validation for the MidcourseReverificationWindow model. Prevents overlapping windows for any particular course. """ query = MidcourseReverificationWindow.objects.filter( course_id=self.course_id, end_date__gte=self.start_date, start_date__lte=self.end_date ) if query.count() > 0: raise ValidationError('Reverification windows cannot overlap for a given course.') @classmethod def window_open_for_course(cls, course_id): """ Returns a boolean, True if the course is currently asking for reverification, else False. """ now = datetime.now(pytz.UTC) return cls.get_window(course_id, now) is not None @classmethod def get_window(cls, course_id, date): """ Returns the window that is open for a particular course for a particular date. If no such window is open, or if more than one window is open, returns None. """ try: return cls.objects.get(course_id=course_id, start_date__lte=date, end_date__gte=date) except cls.DoesNotExist: return None
agpl-3.0
rishikksh20/scikit-learn
doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py
73
2264
"""Build a language detector model The goal of this exercise is to train a linear classifier on text features that represent sequences of up to 3 consecutive characters so as to be recognize natural languages by using the frequencies of short character sequences as 'fingerprints'. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import Perceptron from sklearn.pipeline import Pipeline from sklearn.datasets import load_files from sklearn.model_selection import train_test_split from sklearn import metrics # The training data folder must be passed as first argument languages_data_folder = sys.argv[1] dataset = load_files(languages_data_folder) # Split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.5) # TASK: Build a vectorizer that splits strings into sequence of 1 to 3 # characters instead of word tokens vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char', use_idf=False) # TASK: Build a vectorizer / classifier pipeline using the previous analyzer # the pipeline instance should stored in a variable named clf clf = Pipeline([ ('vec', vectorizer), ('clf', Perceptron()), ]) # TASK: Fit the pipeline on the training set clf.fit(docs_train, y_train) # TASK: Predict the outcome on the testing set in a variable named y_predicted y_predicted = clf.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) #import matlotlib.pyplot as plt #plt.matshow(cm, cmap=plt.cm.jet) #plt.show() # Predict the result on some short new sentences: sentences = [ u'This is a language detection test.', u'Ceci est un test de d\xe9tection de la langue.', u'Dies ist ein Test, um die Sprache zu 
erkennen.', ] predicted = clf.predict(sentences) for s, p in zip(sentences, predicted): print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
bsd-3-clause
ammaradil/fibonacci
Lib/site-packages/django/conf/locale/sr_Latn/formats.py
1008
2011
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j. F Y.' TIME_FORMAT = 'H:i' DATETIME_FORMAT = 'j. F Y. H:i' YEAR_MONTH_FORMAT = 'F Y.' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'j.m.Y.' SHORT_DATETIME_FORMAT = 'j.m.Y. H:i' FIRST_DAY_OF_WEEK = 1 # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.' '%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.' # '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.' # '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.' # '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.' ] DATETIME_INPUT_FORMATS = [ '%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59' '%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200' '%d.%m.%Y. %H:%M', # '25.10.2006. 14:30' '%d.%m.%Y.', # '25.10.2006.' '%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59' '%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200' '%d.%m.%y. %H:%M', # '25.10.06. 14:30' '%d.%m.%y.', # '25.10.06.' '%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59' '%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200' '%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30' '%d. %m. %Y.', # '25. 10. 2006.' '%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59' '%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200' '%d. %m. %y. %H:%M', # '25. 10. 06. 14:30' '%d. %m. %y.', # '25. 10. 06.' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
mit
py-geek/City-Air
venv/lib/python2.7/site-packages/django/contrib/gis/admin/widgets.py
111
4809
import logging from django.forms.widgets import Textarea from django.template import loader, Context from django.utils import six from django.utils import translation from django.contrib.gis.gdal import OGRException from django.contrib.gis.geos import GEOSGeometry, GEOSException # Creating a template context that contains Django settings # values needed by admin map templates. geo_context = Context({'LANGUAGE_BIDI' : translation.get_language_bidi()}) logger = logging.getLogger('django.contrib.gis') class OpenLayersWidget(Textarea): """ Renders an OpenLayers map using the WKT of the geometry. """ def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs) # Defaulting the WKT value to a blank string -- this # will be tested in the JavaScript and the appropriate # interface will be constructed. self.params['wkt'] = '' # If a string reaches here (via a validation error on another # field) then just reconstruct the Geometry. if isinstance(value, six.string_types): try: value = GEOSGeometry(value) except (GEOSException, ValueError) as err: logger.error( "Error creating geometry from value '%s' (%s)" % ( value, err) ) value = None if (value and value.geom_type.upper() != self.geom_type and self.geom_type != 'GEOMETRY'): value = None # Constructing the dictionary of the map options. self.params['map_options'] = self.map_options() # Constructing the JavaScript module name using the name of # the GeometryField (passed in via the `attrs` keyword). # Use the 'name' attr for the field name (rather than 'field') self.params['name'] = name # note: we must switch out dashes for underscores since js # functions are created using the module variable js_safe_name = self.params['name'].replace('-','_') self.params['module'] = 'geodjango_%s' % js_safe_name if value: # Transforming the geometry to the projection used on the # OpenLayers map. 
srid = self.params['srid'] if value.srid != srid: try: ogr = value.ogr ogr.transform(srid) wkt = ogr.wkt except OGRException as err: logger.error( "Error transforming geometry from srid '%s' to srid '%s' (%s)" % ( value.srid, srid, err) ) wkt = '' else: wkt = value.wkt # Setting the parameter WKT with that of the transformed # geometry. self.params['wkt'] = wkt return loader.render_to_string(self.template, self.params, context_instance=geo_context) def map_options(self): "Builds the map options hash for the OpenLayers template." # JavaScript construction utilities for the Bounds and Projection. def ol_bounds(extent): return 'new OpenLayers.Bounds(%s)' % str(extent) def ol_projection(srid): return 'new OpenLayers.Projection("EPSG:%s")' % srid # An array of the parameter name, the name of their OpenLayers # counterpart, and the type of variable they are. map_types = [('srid', 'projection', 'srid'), ('display_srid', 'displayProjection', 'srid'), ('units', 'units', str), ('max_resolution', 'maxResolution', float), ('max_extent', 'maxExtent', 'bounds'), ('num_zoom', 'numZoomLevels', int), ('max_zoom', 'maxZoomLevels', int), ('min_zoom', 'minZoomLevel', int), ] # Building the map options hash. map_options = {} for param_name, js_name, option_type in map_types: if self.params.get(param_name, False): if option_type == 'srid': value = ol_projection(self.params[param_name]) elif option_type == 'bounds': value = ol_bounds(self.params[param_name]) elif option_type in (float, int): value = self.params[param_name] elif option_type in (str,): value = '"%s"' % self.params[param_name] else: raise TypeError map_options[js_name] = value return map_options
mit
juliantaylor/scipy
scipy/signal/__init__.py
2
8680
""" ======================================= Signal processing (:mod:`scipy.signal`) ======================================= .. module:: scipy.signal Convolution =========== .. autosummary:: :toctree: generated/ convolve -- N-dimensional convolution. correlate -- N-dimensional correlation. fftconvolve -- N-dimensional convolution using the FFT. convolve2d -- 2-dimensional convolution (more options). correlate2d -- 2-dimensional correlation (more options). sepfir2d -- Convolve with a 2-D separable FIR filter. B-splines ========= .. autosummary:: :toctree: generated/ bspline -- B-spline basis function of order n. cubic -- B-spline basis function of order 3. quadratic -- B-spline basis function of order 2. gauss_spline -- Gaussian approximation to the B-spline basis function. cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline. qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline. cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline. qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline. cspline1d_eval -- Evaluate a cubic spline at the given points. cspline1d_eval -- Evaluate a quadratic spline at the given points. spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array. Filtering ========= .. autosummary:: :toctree: generated/ order_filter -- N-dimensional order filter. medfilt -- N-dimensional median filter. medfilt2d -- 2-dimensional median filter (faster). wiener -- N-dimensional wiener filter. symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems). symiirorder2 -- 4th-order IIR filter (cascade of second-order systems). lfilter -- 1-dimensional FIR and IIR digital linear filtering. lfiltic -- Construct initial conditions for `lfilter`. lfilter_zi -- Compute an initial state zi for the lfilter function that -- corresponds to the steady state of the step response. filtfilt -- A forward-backward filter. savgol_filter -- Filter a signal using the Savitzky-Golay filter. 
deconvolve -- 1-d deconvolution using lfilter. hilbert -- Compute the analytic signal of a 1-d signal. get_window -- Create FIR window. decimate -- Downsample a signal. detrend -- Remove linear and/or constant trends from data. resample -- Resample using Fourier method. Filter design ============= .. autosummary:: :toctree: generated/ bilinear -- Digital filter from an analog filter using -- the bilinear transform. firwin -- Windowed FIR filter design, with frequency response -- defined as pass and stop bands. firwin2 -- Windowed FIR filter design, with arbitrary frequency -- response. freqs -- Analog filter frequency response. freqz -- Digital filter frequency response. iirdesign -- IIR filter design given bands and gains. iirfilter -- IIR filter design given order and critical frequencies. kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given -- the number of taps and the transition width at -- discontinuities in the frequency response. kaiser_beta -- Compute the Kaiser parameter beta, given the desired -- FIR filter attenuation. kaiserord -- Design a Kaiser window to limit ripple and width of -- transition region. savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay -- filter. remez -- Optimal FIR filter design. unique_roots -- Unique roots and their multiplicities. residue -- Partial fraction expansion of b(s) / a(s). residuez -- Partial fraction expansion of b(z) / a(z). invres -- Inverse partial fraction expansion. Matlab-style IIR filter design ============================== .. autosummary:: :toctree: generated/ butter -- Butterworth buttord cheby1 -- Chebyshev Type I cheb1ord cheby2 -- Chebyshev Type II cheb2ord ellip -- Elliptic (Cauer) ellipord bessel -- Bessel (no order selection available -- try butterod) Continuous-Time Linear Systems ============================== .. autosummary:: :toctree: generated/ freqresp -- frequency response of a continuous-time LTI system. lti -- linear time invariant system object. 
lsim -- continuous-time simulation of output to linear system. lsim2 -- like lsim, but `scipy.integrate.odeint` is used. impulse -- impulse response of linear, time-invariant (LTI) system. impulse2 -- like impulse, but `scipy.integrate.odeint` is used. step -- step response of continous-time LTI system. step2 -- like step, but `scipy.integrate.odeint` is used. bode -- Calculate Bode magnitude and phase data. Discrete-Time Linear Systems ============================ .. autosummary:: :toctree: generated/ dlsim -- simulation of output to a discrete-time linear system. dimpulse -- impulse response of a discrete-time LTI system. dstep -- step response of a discrete-time LTI system. LTI Representations =================== .. autosummary:: :toctree: generated/ tf2zpk -- transfer function to zero-pole-gain. zpk2tf -- zero-pole-gain to transfer function. tf2ss -- transfer function to state-space. ss2tf -- state-pace to transfer function. zpk2ss -- zero-pole-gain to state-space. ss2zpk -- state-space to pole-zero-gain. cont2discrete -- continuous-time to discrete-time LTI conversion. Waveforms ========= .. autosummary:: :toctree: generated/ chirp -- Frequency swept cosine signal, with several freq functions. gausspulse -- Gaussian modulated sinusoid sawtooth -- Periodic sawtooth square -- Square wave sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial Window functions ================ .. autosummary:: :toctree: generated/ get_window -- Return a window of a given length and type. 
barthann -- Bartlett-Hann window bartlett -- Bartlett window blackman -- Blackman window blackmanharris -- Minimum 4-term Blackman-Harris window bohman -- Bohman window boxcar -- Boxcar window chebwin -- Dolph-Chebyshev window cosine -- Cosine window flattop -- Flat top window gaussian -- Gaussian window general_gaussian -- Generalized Gaussian window hamming -- Hamming window hann -- Hann window kaiser -- Kaiser window nuttall -- Nuttall's minimum 4-term Blackman-Harris window parzen -- Parzen window slepian -- Slepian window triang -- Triangular window Wavelets ======== .. autosummary:: :toctree: generated/ cascade -- compute scaling function and wavelet from coefficients daub -- return low-pass morlet -- Complex Morlet wavelet. qmf -- return quadrature mirror filter from low-pass ricker -- return ricker wavelet cwt -- perform continuous wavelet transform Peak finding ============ .. autosummary:: :toctree: generated/ find_peaks_cwt -- Attempt to find the peaks in the given 1-D array argrelmin -- Calculate the relative minima of data argrelmax -- Calculate the relative maxima of data argrelextrema -- Calculate the relative extrema of data Spectral Analysis ================= .. autosummary:: :toctree: generated/ periodogram -- Computes a (modified) periodogram welch -- Compute a periodogram using Welch's method lombscargle -- Computes the Lomb-Scargle periodogram """ from __future__ import division, print_function, absolute_import from . 
import sigtools from .waveforms import * # The spline module (a C extension) provides: # cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2 from .spline import * from .bsplines import * from .cont2discrete import * from .dltisys import * from .filter_design import * from .fir_filter_design import * from .ltisys import * from .windows import * from .signaltools import * from ._savitzky_golay import savgol_coeffs, savgol_filter from .spectral import * from .wavelets import * from ._peak_finding import * __all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test
bsd-3-clause
iruga090/python-social-auth
social/apps/django_app/me/models.py
77
2124
""" MongoEngine Django models for Social Auth. Requires MongoEngine 0.8.6 or higher. """ from django.conf import settings from mongoengine import Document, ReferenceField from mongoengine.queryset import OperationError from social.utils import setting_name, module_member from social.storage.django_orm import BaseDjangoStorage from social.storage.mongoengine_orm import MongoengineUserMixin, \ MongoengineNonceMixin, \ MongoengineAssociationMixin, \ MongoengineCodeMixin UNUSABLE_PASSWORD = '!' # Borrowed from django 1.4 def _get_user_model(): """ Get the User Document class user for MongoEngine authentication. Use the model defined in SOCIAL_AUTH_USER_MODEL if defined, or defaults to MongoEngine's configured user document class. """ custom_model = getattr(settings, setting_name('USER_MODEL'), None) if custom_model: return module_member(custom_model) try: # Custom user model support with MongoEngine 0.8 from mongoengine.django.mongo_auth.models import get_user_document return get_user_document() except ImportError: return module_member('mongoengine.django.auth.User') USER_MODEL = _get_user_model() class UserSocialAuth(Document, MongoengineUserMixin): """Social Auth association model""" user = ReferenceField(USER_MODEL) @classmethod def user_model(cls): return USER_MODEL class Nonce(Document, MongoengineNonceMixin): """One use numbers""" pass class Association(Document, MongoengineAssociationMixin): """OpenId account association""" pass class Code(Document, MongoengineCodeMixin): """Mail validation single one time use code""" pass class DjangoStorage(BaseDjangoStorage): user = UserSocialAuth nonce = Nonce association = Association code = Code @classmethod def is_integrity_error(cls, exception): return exception.__class__ is OperationError and \ 'E11000' in exception.message
bsd-3-clause
rdsathene/SchoolIdolAPI
api/migrations/0122_auto_20160331_1940.py
3
1439
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('api', '0121_auto_20160331_1710'), ] operations = [ migrations.AddField( model_name='account', name='center_alt_text', field=models.CharField(max_length=100, null=True, blank=True), preserve_default=True, ), migrations.AddField( model_name='account', name='center_card_attribute', field=models.CharField(blank=True, max_length=6, null=True, choices=[(b'Smile', 'Smile'), (b'Pure', 'Pure'), (b'Cool', 'Cool'), (b'All', 'All')]), preserve_default=True, ), migrations.AddField( model_name='account', name='center_card_id', field=models.PositiveIntegerField(default=0), preserve_default=True, ), migrations.AddField( model_name='account', name='center_card_round_image', field=models.CharField(max_length=200, null=True, blank=True), preserve_default=True, ), migrations.AddField( model_name='account', name='center_card_transparent_image', field=models.CharField(max_length=200, null=True, blank=True), preserve_default=True, ), ]
apache-2.0
uri-mog/dreampie
dreampielib/gui/autocomplete_window.py
3
15891
# Copyright 2009 Noam Yorav-Raphael # # This file is part of DreamPie. # # DreamPie is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DreamPie is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with DreamPie. If not, see <http://www.gnu.org/licenses/>. __all__ = ['AutocompleteWindow', 'find_prefix_range'] import gobject import gtk from gtk import gdk from .keyhandler import make_keyhandler_decorator, handle_keypress from .common import beep, get_text N_ROWS = 10 # A decorator for managing sourceview key handlers keyhandlers = {} keyhandler = make_keyhandler_decorator(keyhandlers) class AutocompleteWindow(object): def __init__(self, sourceview, sv_changed, window_main, on_complete): self.sourceview = sourceview sv_changed.append(self.on_sv_changed) self.sourcebuffer = sb = sourceview.get_buffer() self.window_main = window_main self.on_complete = on_complete self.liststore = gtk.ListStore(gobject.TYPE_STRING) self.cellrend = gtk.CellRendererText() self.cellrend.props.ypad = 0 self.col = gtk.TreeViewColumn("col", self.cellrend, text=0) self.col.props.sizing = gtk.TREE_VIEW_COLUMN_FIXED self.treeview = gtk.TreeView(self.liststore) self.treeview.props.headers_visible = False self.treeview.append_column(self.col) self.treeview.props.fixed_height_mode = True # Calculate width and height of treeview self.cellrend.props.text = 'a_quite_lengthy_identifier' _, _, width, height = self.cellrend.get_size(self.treeview, None) self.treeview.set_size_request(width, (height+2)*N_ROWS) self.scrolledwindow = gtk.ScrolledWindow() 
self.scrolledwindow.props.hscrollbar_policy = gtk.POLICY_NEVER self.scrolledwindow.props.vscrollbar_policy = gtk.POLICY_ALWAYS self.scrolledwindow.add(self.treeview) self.window = gtk.Window(gtk.WINDOW_POPUP) self.window.props.resizable = False self.window.add(self.scrolledwindow) self.window_height = None self.mark = sb.create_mark(None, sb.get_start_iter(), True) # We define this handler here so that it will be defined before # the default key-press handler, and so will have higher priority. self.keypress_handler = self.sourceview.connect( 'key-press-event', self.on_keypress) self.sourceview.handler_block(self.keypress_handler) self.keypress_handler_blocked = True self.is_shown = False self.cur_list = None # cur_list_keys is cur_list if not is_case_insen, otherwise, # lowercase strings. self.cur_list_keys = None self.is_case_insen = None self.private_list = None self.showing_private = None self.cur_prefix = None # Indices to self.cur_list - range which is displayed self.start = None self.end = None # A list with (widget, handler) pairs, to be filled with self.connect() self.signals = [] # handler id for on_changed_after_hide self.changed_after_hide_handler = None def on_sv_changed(self, new_sv): if self.is_shown: self.hide() self.sourcebuffer.delete_mark(self.mark) self.sourceview.disconnect(self.keypress_handler) self.sourceview = new_sv self.sourcebuffer = sb = new_sv.get_buffer() self.mark = sb.create_mark(None, sb.get_start_iter(), True) self.keypress_handler = self.sourceview.connect( 'key-press-event', self.on_keypress) self.sourceview.handler_block(self.keypress_handler) def connect(self, widget, *args): handler = widget.connect(*args) self.signals.append((widget, handler)) def disconnect_all(self): for widget, handler in self.signals: widget.disconnect(handler) self.signals[:] = [] def show(self, public, private, is_case_insen, start_len): sb = self.sourcebuffer if self.is_shown: self.hide() self.is_shown = True it = sb.get_iter_at_mark(sb.get_insert()) 
it.backward_chars(start_len) sb.move_mark(self.mark, it) # Update list and check if is empty self.cur_list = public self.is_case_insen = is_case_insen if not is_case_insen: self.cur_list_keys = self.cur_list else: self.cur_list_keys = [s.lower() for s in self.cur_list] self.private_list = private self.showing_private = False self.cur_prefix = None if self.changed_after_hide_handler is not None: sb.disconnect(self.changed_after_hide_handler) self.changed_after_hide_handler = None isnt_empty = self.update_list() if not isnt_empty: return self.place_window() self.connect(sb, 'mark-set', self.on_mark_set) self.connect(sb, 'changed', self.on_changed) self.connect(sb, 'insert-text', self.on_insert_text) self.connect(sb, 'delete-range', self.on_delete_range) self.connect(self.treeview, 'button-press-event', self.on_tv_button_press) self.connect(self.sourceview, 'focus-out-event', self.on_focus_out) self.connect(self.window_main, 'configure-event', self.on_configure) self.sourceview.handler_unblock(self.keypress_handler) self.keypress_handler_blocked = False self.window.show_all() def update_list(self): # Update the ListStore. # Return True if something is shown. # Otherwise, calls hide(), and returns False. if not self.is_shown: # Could be a result of a callback after the list was alrady hidden. # See bug #529939. 
return False sb = self.sourcebuffer prefix = get_text(sb, sb.get_iter_at_mark(self.mark), sb.get_iter_at_mark(sb.get_insert())) if prefix == self.cur_prefix: return True self.cur_prefix = prefix prefix_key = prefix.lower() if self.is_case_insen else prefix start, end = find_prefix_range(self.cur_list_keys, prefix_key) public_list = None if start == end and not self.showing_private: self.showing_private = True public_list = self.cur_list[:] self.cur_list.extend(self.private_list) if self.is_case_insen: self.cur_list.sort(key = lambda s: s.lower()) self.cur_list_keys = [s.lower() for s in self.cur_list] else: self.cur_list.sort() self.cur_list_keys = self.cur_list start, end = find_prefix_range(self.cur_list_keys, prefix_key) self.start, self.end = start, end if start == end: # We check to see if removing the last char (by pressing backspace) # should re-open the list. start2, end2 = find_prefix_range(self.cur_list_keys, prefix_key[:-1]) if start2 != end2: # Re-open the list if the last char is removed if public_list is not None: # We were not showing private public = public_list private = self.private_list else: # We were showing private - now everything is public public = self.cur_list private = [] if public is None or private is None: import pdb; pdb.set_trace() text = get_text(sb, sb.get_start_iter(), sb.get_end_iter()) offset = sb.get_iter_at_mark(sb.get_insert()).get_offset() expected_text = text[:offset-1] + text[offset:] self.changed_after_hide_handler = \ sb.connect('changed', self.on_changed_after_hide, expected_text, public, private, self.is_case_insen, len(prefix)-1) self.hide() return False self.liststore.clear() for i in xrange(end-start): self.liststore.insert(i, [self.cur_list[start+i]]) self.treeview.get_selection().select_path(0) self.treeview.scroll_to_cell((0,)) return True def place_window(self): sv = self.sourceview sb = self.sourcebuffer it = sb.get_iter_at_mark(self.mark) loc = sv.get_iter_location(it) x, y = loc.x, loc.y x, y = 
sv.buffer_to_window_coords(gtk.TEXT_WINDOW_WIDGET, x, y) sv_x, sv_y = sv.get_window(gtk.TEXT_WINDOW_WIDGET).get_origin() x += sv_x; y += sv_y if self.window_height is None: # We have to draw the window in order to calculate window_height. # We do it here, so as not to cause a flicker when the application starts. self.window.move(-2000, -2000) self.window.show_all() self.window_height = self.window.get_size()[1] self.window.hide() self.window.move(x, y-self.window_height) def on_mark_set(self, sb, it, mark): if mark is sb.get_insert(): if it.compare(sb.get_iter_at_mark(self.mark)) < 0: self.hide() else: self.update_list() def on_changed(self, _sb): self.update_list() def on_insert_text(self, sb, it, _text, _length): if it.compare(sb.get_iter_at_mark(self.mark)) < 0: self.hide() def on_delete_range(self, sb, start, _end): if start.compare(sb.get_iter_at_mark(self.mark)) < 0: self.hide() @keyhandler('Escape', 0) def on_esc(self): self.hide() # Don't return True - other things may be escaped too. def select_row(self, row): path = (row,) self.treeview.get_selection().select_path(path) self.treeview.scroll_to_cell(path) @keyhandler('Up', 0) def on_up(self): index = self.treeview.get_selection().get_selected_rows()[1][0][0] if index > 0: self.select_row(index - 1) else: beep() return True @keyhandler('Down', 0) def on_down(self): index = self.treeview.get_selection().get_selected_rows()[1][0][0] if index < len(self.liststore) - 1: self.select_row(index + 1) else: beep() return True @keyhandler('Home', 0) def on_home(self): self.select_row(0) return True @keyhandler('End', 0) def on_end(self): self.select_row(len(self.liststore)-1) return True @keyhandler('Page_Up', 0) def on_page_up(self): # Select the row displayed at top, or, if it is displayed, scroll one # page and then display the row. 
tv = self.treeview sel = tv.get_selection() row = tv.get_path_at_pos(0, 1)[0][0] if sel.path_is_selected((row,)): if row == 0: beep() row = max(row - N_ROWS, 0) self.select_row(row) return True @keyhandler('Page_Down', 0) def on_page_down(self): # Select the row displayed at bottom, or, if it is displayed, scroll one # page and then display the row. tv = self.treeview sel = tv.get_selection() last_row = len(self.liststore) - 1 r = tv.get_path_at_pos(0, tv.get_size_request()[1]) if r is not None: row = r[0][0] else: # nothing is displayed there, too short list row = last_row if sel.path_is_selected((row,)): if row == last_row: beep() row = min(row + N_ROWS, last_row) self.select_row(row) return True @keyhandler('Tab', 0) def tab(self): """ Complete the text to the common prefix, and if there's only one, close the window. """ if len(self.liststore) == 1: self.complete() return True first = self.cur_list_keys[self.start] last = self.cur_list_keys[self.end-1] i = 0 while i < len(first) and i < len(last) and first[i] == last[i]: i += 1 if i > len(self.cur_prefix): toadd = first[len(self.cur_prefix):i] # This updates self.cur_prefix self.sourcebuffer.insert_at_cursor(toadd) return True @keyhandler('Return', 0) @keyhandler('KP_Enter', 0) def complete(self): sel_row = self.treeview.get_selection().get_selected_rows()[1][0][0] text = self.liststore[sel_row][0].decode('utf8') insert = text[len(self.cur_prefix):] self.hide() self.sourcebuffer.insert_at_cursor(insert) self.on_complete() return True def on_keypress(self, _widget, event): return handle_keypress(self, event, keyhandlers) def on_tv_button_press(self, _widget, event): if event.type == gdk._2BUTTON_PRESS: self.complete() return True def on_focus_out(self, _widget, _event): self.hide() def on_configure(self, _widget, _event): self.hide() def hide(self): self.disconnect_all() if not self.keypress_handler_blocked: self.sourceview.handler_block(self.keypress_handler) self.keypress_handler_blocked = True 
self.window.hide() self.is_shown = False self.cur_list = None self.private_list = None self.showing_private = None self.cur_prefix = None def on_changed_after_hide(self, sb, expected_text, public, private, is_case_insen, start_len): """ This is called on the first 'changed' signal after the completion list was hidden because a "wrong" character was typed. If it is deleted, this method opens the list again. """ # Stop handler sb.disconnect(self.changed_after_hide_handler) self.changed_after_hide_handler = None if sb.get_text(sb.get_start_iter(), sb.get_end_iter()) == expected_text: self.show(public, private, is_case_insen, start_len) def find_prefix_range(L, prefix): # Find the range in the list L which begins with prefix, using binary # search. # start. l = 0 r = len(L) while r > l: m = (l + r) // 2 if L[m] == prefix: l = r = m elif L[m] < prefix: l = m + 1 else: r = m start = l # end l = 0 r = len(L) while r > l: m = (l + r) // 2 if L[m][:len(prefix)] > prefix: r = m else: l = m + 1 end = l return start, end class BackspaceUndo(object): """ If the completion list was closed because of a wrong character, we want it to be re-opened if it is deleted by pressing backspace. This class holds the data needed to re-open the list in that case. It waits for a backspace. If it is pressed, it re-opens the window. Otherwise, it stops listening. """ def __init__(self, public, private, is_case_insen, mark): pass #def on_mark
gpl-3.0
NMGRL/pychron
pychron/spectrometer/jobs/magnet_scan.py
2
9533
# =============================================================================== # Copyright 2012 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from __future__ import absolute_import from pyface.timer.do_later import do_after from traits.api import Any, Float, DelegatesTo, List, Bool, Property from traitsui.api import View, Item, EnumEditor, Group, HGroup, spring, ButtonEditor # ============= standard library imports ======================== from numpy import linspace, hstack, array, Inf from numpy.core.umath import exp import random import time from threading import Event # ============= local library imports ========================== from .spectrometer_task import SpectrometerTask from pychron.core.ui.gui import invoke_in_main_thread def multi_peak_generator(values): for v in values: m = 0.1 if 4.8 <= v <= 5.2: m = 3 elif 5.5 <= v <= 5.8: m = 9 elif 6.1 <= v <= 7: m = 6 yield m + random.random() / 5.0 def psuedo_peak(center, start, stop, step, magnitude=500, peak_width=0.008): x = linspace(start, stop, step) gaussian = lambda x: magnitude * exp(-((center - x) / peak_width) ** 2) for i, d in enumerate(gaussian(x)): if abs(center - x[i]) < peak_width: # d = magnitude d = magnitude + magnitude / 50.0 * random.random() yield d class MagnetScan(SpectrometerTask): # graph = Any detectors = DelegatesTo('spectrometer') 
integration_time = DelegatesTo('spectrometer') reference_detector = Any additional_detectors = List active_detectors = Property start_mass = Float(36) stop_mass = Float(40) step_mass = Float(1) normalize = Bool(True) _peak_generator = None verbose = False def _scan_dac(self, values): self.graph.set_x_limits(values[0], values[-1]) if self.spectrometer.simulation: self._peak_generator = psuedo_peak(values[len(values) / 2] + 0.001, values[0], values[-1], len(values)) # self._peak_generator = multi_peak_generator(values) self.integration_time = 0.065536 gen = (vi for vi in values) evt = Event() intensities = [] invoke_in_main_thread(self._iter_dac, next(gen), gen, evt, intensities) while not evt.isSet(): time.sleep(0.01) # self.integration_time = QTEGRA_INTEGRATION_TIMES[4] return True def _get_active_detectors(self): return [self.reference_detector] + self.additional_detectors def _iter_dac(self, di, gen, evt, intensities): # self.debug('iter dac {}'.format(di)) mag = self.spectrometer.magnet mag.set_dac(di, verbose=self.verbose, settling_time=self.integration_time*2) d = self._magnet_step_hook() self._graph_hook(di, d) intensities.append(d) try: di = next(gen) except StopIteration: di = None if di is not None and self.isAlive(): p = int(self.integration_time * 1000 * 0.9) do_after(p, self._iter_dac, di, gen, evt, intensities) else: evt.set() def _update_graph_data(self, plot, di, intensity, **kw): """ add and scale scans """ def set_data(k, v): plot.data.set_data(k, v) def get_data(k): return plot.data.get_data(k) R = None r = None mi, ma = Inf, -Inf for i, v in enumerate(intensity): oys = None k = 'odata{}'.format(i) if hasattr(plot, k): oys = getattr(plot, k) oys = array([v]) if oys is None else hstack((oys, v)) setattr(plot, k, oys) if self.normalize: if i == 0: # calculate ref range miR = min(oys) maR = max(oys) R = maR - miR else: mir = min(oys) mar = max(oys) r = mar - mir if r and R: oys = (oys - mir) * R / r + miR xs = get_data('x{}'.format(i)) xs = 
hstack((xs, di)) set_data('x{}'.format(i), xs) set_data('y{}'.format(i), oys) mi, ma = min(mi, min(oys)), max(ma, max(oys)) self.graph.set_y_limits(min_=mi, max_=ma, pad='0.05', pad_style='upper') def _graph_hook(self, di, intensity, **kw): graph = self.graph if graph: plot = graph.plots[0] self._update_graph_data(plot, di, intensity) def _magnet_step_hook(self): spec = self.spectrometer ds = [str(self.reference_detector)] + self.additional_detectors intensity = spec.get_intensity(ds) # print ds,intensity # intensity = intensity[1] # print self._peak_generator # if self._peak_generator: # # print 'asdfas', intensity # v = self._peak_generator.next() # intensity = [v+random.random() for i in range(len(ds))] # debug # if globalv.experiment_debug: # from numpy import array, ones # # v = self._peak_generator.next() # v = array([v]) # # r = ones(len(ds)) # r = r * v # if len(r) > 1: # r[1] *= 0.5 # if len(r) > 2: # r[2] *= 0.1 # # intensity = r # intensity=[random.random()] return intensity def _execute(self): sm = self.start_mass em = self.stop_mass stm = self.step_mass self.verbose = True if abs(sm - em) > stm: self._do_scan(sm, em, stm) self._alive = False self._post_execute() self.verbose = False def _do_scan(self, sm, em, stm, directions=None, map_mass=True): self.debug('_do_scan') # default to forward scan if directions is None: directions = [1] elif isinstance(directions, str): if directions == 'Decrease': directions = [-1] elif directions == 'Oscillate': def oscillate(): i = 0 while 1: if i % 2 == 0: yield 1 else: yield -1 i += 1 directions = oscillate() else: directions = [1] spec = self.spectrometer mag = spec.magnet if map_mass: detname = self.reference_detector.name ds = spec.correct_dac(self.reference_detector, mag.map_mass_to_dac(sm, detname)) de = spec.correct_dac(self.reference_detector, mag.map_mass_to_dac(em, detname)) massdev = abs(sm - em) dacdev = abs(ds - de) stm = stm / float(massdev) * dacdev sm, em = ds, de for di in directions: if not 
self._alive: return if di == -1: sm, em = em, sm values = self._calc_step_values(sm, em, stm) if not self._scan_dac(values): return return True def _post_execute(self): self.debug('scan finished') def _reference_detector_default(self): return self.detectors[0] def edit_view(self): # v = self.traits_view() v = View( Group( Item('reference_detector', editor=EnumEditor(name='detectors')), Item('integration_time', label='Integration (s)'), label='Magnet Scan', show_border=True)) v.title = self.title v.buttons = ['OK', 'Cancel'] return v def traits_view(self): v = View( Group( Item('reference_detector', editor=EnumEditor(name='detectors')), Item('start_value', label='Start Mass', tooltip='Start scan at this mass'), Item('stop_value', label='Stop Mass', tooltip='Stop scan when magnet reaches this mass'), Item('step_value', label='Step Mass', tooltip='Step from Start to Stop by this amount'), Item('integration_time', label='Integration (s)'), HGroup(spring, Item('execute_button', editor=ButtonEditor(label_value='execute_label'), show_label=False)), label='Magnet Scan', show_border=True)) return v # ============= EOF =============================================
apache-2.0
yoer/hue
desktop/core/ext-py/markdown/setup.py
40
2937
#!/usr/bin/env python import sys, os from distutils.core import setup from distutils.command.install_scripts import install_scripts version = '2.0.3' class md_install_scripts(install_scripts): """ Customized install_scripts. Create markdown.bat for win32. """ def run(self): install_scripts.run(self) if sys.platform == 'win32': try: script_dir = os.path.join(sys.prefix, 'Scripts') script_path = os.path.join(script_dir, 'markdown') bat_str = '@"%s" "%s" %%*' % (sys.executable, script_path) bat_path = os.path.join(self.install_dir, 'markdown.bat') f = file(bat_path, 'w') f.write(bat_str) f.close() print 'Created:', bat_path except Exception, e: print 'ERROR: Unable to create %s: %s' % (bat_path, e) data = dict( name = 'Markdown', version = version, url = 'http://www.freewisdom.org/projects/python-markdown', download_url = 'http://pypi.python.org/packages/source/M/Markdown/Markdown-%s.tar.gz' % version, description = 'Python implementation of Markdown.', author = 'Manfred Stienstra and Yuri takhteyev', author_email = 'yuri [at] freewisdom.org', maintainer = 'Waylan Limberg', maintainer_email = 'waylan [at] gmail.com', license = 'BSD License', packages = ['markdown', 'markdown.extensions'], scripts = ['bin/markdown'], cmdclass = {'install_scripts': md_install_scripts}, classifiers = ['Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.3', 'Programming Language :: Python :: 2.4', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.0', 'Topic :: Communications :: Email :: Filters', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries', 'Topic :: Internet :: WWW/HTTP :: Site Management', 'Topic :: Software Development :: Documentation', 'Topic :: Software Development 
:: Libraries :: Python Modules', 'Topic :: Text Processing :: Filters', 'Topic :: Text Processing :: Markup :: HTML', ], zip_safe = False ) if sys.version[:3] < '2.5': data['install_requires'] = ['elementtree'] setup(**data)
apache-2.0
prolativ/IoBlocks
devices/copernicus/project/copernicus.py
1
11517
# -*- coding: utf-8 -*- """ The MIT License (MIT) Copyright (c) 2015 Krzysztof Śmiałek Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from __future__ import print_function import re import operator import serial import sys __author__ = 'Krzysztof "gronostaj" Smialek' __all__ = ['Copernicus'] class PatternOverlapError(Exception): def __init__(self, msg, pattern1, pattern2): super(PatternOverlapError, self).__init__(msg) self.pattern1 = pattern1 self.pattern2 = pattern2 class BitPattern: def __init__(self, mask): """ :type mask: str """ if len(mask) != 8 or not re.match('^[01]*_+$', mask): raise ValueError('`{0}` is not a valid 8-bit mask'.format(mask)) self._mask = mask self._low = int(mask.replace('_', '0'), 2) self._high = int(mask.replace('_', '1'), 2) self._masked_bits = mask.count('_') @property def mask(self): return self._mask @property def masked_bits(self): return self._masked_bits @property def bounds(self): return self._low, self._high def matches(self, bits): """ :type bits: str """ value = int(bits, 2) return self._low <= value <= self._high def is_subset_of(self, pattern): """ :type pattern: BitPattern """ known_bits = 8 - pattern.masked_bits return self._mask[:known_bits] == pattern.mask[:known_bits] @staticmethod def assert_no_overlaps(patterns): """ :type patterns: list[BitPattern] """ patterns = sorted(patterns, key=lambda pattern_: pattern_.masked_bits, reverse=True) for index, pattern in enumerate(patterns): for following_pattern in patterns[index + 1:]: if following_pattern.is_subset_of(pattern): raise PatternOverlapError('Pattern {0} overlaps with {1}'.format(following_pattern.mask, pattern.mask), following_pattern, pattern) class Event: def __init__(self, name, mask, transform=None): """ :type name: str :type mask: str :type transform: (int) -> T """ self._name = name self._pattern = BitPattern(mask) self._transform = transform if transform is not None else lambda x: x @property def name(self): return self._name @property def pattern(self): return self._pattern def transform(self, value): """ :type value: int :rtype int """ return self._transform(value) def 
extract_arg(self, bits): """ :type bits: str :rtype int """ if not self._pattern.matches(bits): raise ValueError() masked_bits = bits[-self._pattern.masked_bits:] return int(masked_bits, 2) class Command: def __init__(self, mask, transform=None): self._pattern = BitPattern(mask) self._transform = transform if transform is not None else lambda x: x @property def pattern(self): return self._pattern def translate(self, *args): """ :type args: list[int] :rtype chr """ value = self._transform(*args) binary = "{0:b}".format(value).zfill(self._pattern.masked_bits) if len(binary) > self._pattern.masked_bits: raise ValueError("Value too big") cmd_string = re.sub('_+', binary, self._pattern.mask) return chr(int(cmd_string, 2)) class Codecs: def __init__(self): pass _available_events = { 'motion': 1, 'temperature': 2, 'knob': 4, 'button2': 8, 'button1': 16, 'light': 32, '*': 63 } @staticmethod def encode_services(*args): int_values = map(lambda evt: Codecs._available_events[evt], args) return reduce(operator.or_, int_values, 0) @staticmethod def encode_rgb(*args): color_names = { 'off': 0, 'red': 48, 'green': 12, 'blue': 3, 'cyan': 15, 'magenta': 51, 'yellow': 60, 'white': 63 } if len(args) == 1: return color_names[args[0]] else: return args[0] * 16 + args[1] * 4 + args[2] @staticmethod def decode_temperature(temp): return temp / 2.0 + 10 class Copernicus: _default_events = [ Event('light', '00______'), Event('knob', '01______'), Event('temperature', '10______', Codecs.decode_temperature), Event('motion', '1100000_', bool), Event('button1', '1100001_', bool), Event('button2', '1100010_', bool) ] _default_commands = { 'servo': Command('000_____'), 'led': Command('0010000_', int), 'rgb': Command('01______', Codecs.encode_rgb), 'subscribe': Command('10______', Codecs.encode_services), 'query': Command('11______', Codecs.encode_services) } def __init__(self, timeout=None, connection=None, debug=False): """ Creates a new Copernicus API object and loads default events and commands. 
:param timeout: Serial connection timeout for listen() calls. Either this of connection arg must be None. :param connection: Serial object to use for communication with Copernicus. :type connection: serial.Serial """ self._debug = debug assert timeout is None or connection is None if timeout is not None and \ type(timeout) is not int and type(timeout) is not float and \ type(timeout) is type(serial.Serial()): print('Warning: You\'re using the old API call. Instead of this:', file=sys.stderr) print(' api = Copernicus(my_conn)', file=sys.stderr) print('Use this:', file=sys.stderr) print(' api = Copernicus(connection=my_conn)', file=sys.stderr) connection = timeout if connection is None: self._connection = serial.Serial('/dev/ttyS0', 38400, timeout=timeout) else: self._connection = connection self._events = [] self._handlers = {} self._default_handler = None self._commands = {} self.load_events(self._default_events) self.load_commands(self._default_commands) def load_events(self, events): """ Loads new event set that is later used to translate serial responses to API events. Event set is simply a list of Event objects. Calling this method discards all previously registered handlers. :type events: list[Event] """ patterns = [event.pattern for event in events] BitPattern.assert_no_overlaps(patterns) self._events = events self._handlers = dict((event.name, None) for event in events) def set_handler(self, event, handler): """ Registers a handler function for event. This function will be supplied with an argument extracted from serial response and called each time event is fired. Overwrites previously registered handler. 
:param event: Name of event that should be handled with this function :param handler: Function that can handle this event :type event: str :type handler: (T) -> None """ if event not in self._handlers: raise ValueError('Unknown event `{0}`'.format(event)) self._handlers[event] = handler def set_default_handler(self, handler): """ Registers a catch-all handler. This handler is called each time a recognized event fires, but no event-specific handler is registered. Default handler is supplied with event's name and extracted argument. :param handler: Function that should handle events by default :type handler: (str, T) -> None """ self._default_handler = handler def handle(self, value): """ Finds a correct event handler that should fire for provided value and calls it with appropriate argument. :param value: Single byte received from serial device :type value: chr """ value = ord(value) bin_value = '{0:b}'.format(value) this_event = None for event in self._events: if event.pattern.matches(bin_value): this_event = event break if this_event is None: raise KeyError('Unrecognized byte value {0}'.format(value)) event = this_event arg = this_event.extract_arg(bin_value) if self._handlers[event.name] is not None: translated_arg = event.transform(arg) self._handlers[event.name](translated_arg) elif self._default_handler is not None: self._default_handler(event.name, arg) def listen(self): """ Waits for incoming byte and fires appropriate event. :return: Whether event was received (True) or read operation timed out (False). :rtype: bool """ char = self._connection.read(1) if len(char) > 0: if self._debug: print('Byte received: {0:b}'.format(ord(char))) self.handle(char) return True else: if self._debug: print('Timed out') return False def load_commands(self, commands): """ Loads new Copernicus command set that is later used to translate API commands to serial queries. 
:param commands: A dict with command names as keys and Command objects as values :type commands: dict[str, Command] """ patterns = map(lambda cmd: cmd.pattern, commands.values()) BitPattern.assert_no_overlaps(patterns) self._commands = commands def command(self, cmd, *args): """ Sends a serial command to Copernicus. :param cmd: Name of command to be sent :param args: Any number of arguments. Accepted arguments differ between commands. :type cmd: str :type args: list[*] """ if cmd not in self._commands: raise KeyError('Unknown command {0}'.format(cmd)) char = self._commands[cmd].translate(*args) self._connection.write(char) if self._debug: print('Byte sent: {0:b}'.format(ord(char)))
mit
zhouzhenghui/python-for-android
python-build/python-libs/gdata/src/gdata/tlslite/FileObject.py
359
6807
"""Class returned by TLSConnection.makefile().""" class FileObject: """This class provides a file object interface to a L{tlslite.TLSConnection.TLSConnection}. Call makefile() on a TLSConnection to create a FileObject instance. This class was copied, with minor modifications, from the _fileobject class in socket.py. Note that fileno() is not implemented.""" default_bufsize = 16384 #TREV: changed from 8192 def __init__(self, sock, mode='rb', bufsize=-1): self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: bufsize = self.default_bufsize self.bufsize = bufsize self.softspace = False if bufsize == 0: self._rbufsize = 1 elif bufsize == 1: self._rbufsize = self.default_bufsize else: self._rbufsize = bufsize self._wbufsize = bufsize self._rbuf = "" # A string self._wbuf = [] # A list of strings def _getclosed(self): return self._sock is not None closed = property(_getclosed, doc="True if the file is closed") def close(self): try: if self._sock: for result in self._sock._decrefAsync(): #TREV pass finally: self._sock = None def __del__(self): try: self.close() except: # close() may fail if __init__ didn't complete pass def flush(self): if self._wbuf: buffer = "".join(self._wbuf) self._wbuf = [] self._sock.sendall(buffer) #def fileno(self): # raise NotImplementedError() #TREV def write(self, data): data = str(data) # XXX Should really reject non-string non-buffers if not data: return self._wbuf.append(data) if (self._wbufsize == 0 or self._wbufsize == 1 and '\n' in data or self._get_wbuf_len() >= self._wbufsize): self.flush() def writelines(self, list): # XXX We could do better here for very long lists # XXX Should really reject non-string non-buffers self._wbuf.extend(filter(None, map(str, list))) if (self._wbufsize <= 1 or self._get_wbuf_len() >= self._wbufsize): self.flush() def _get_wbuf_len(self): buf_len = 0 for x in self._wbuf: buf_len += len(x) return buf_len def read(self, size=-1): data = self._rbuf if size < 0: # Read until EOF 
buffers = [] if data: buffers.append(data) self._rbuf = "" if self._rbufsize <= 1: recv_size = self.default_bufsize else: recv_size = self._rbufsize while True: data = self._sock.recv(recv_size) if not data: break buffers.append(data) return "".join(buffers) else: # Read until size bytes or EOF seen, whichever comes first buf_len = len(data) if buf_len >= size: self._rbuf = data[size:] return data[:size] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: left = size - buf_len recv_size = max(self._rbufsize, left) data = self._sock.recv(recv_size) if not data: break buffers.append(data) n = len(data) if n >= left: self._rbuf = data[left:] buffers[-1] = data[:left] break buf_len += n return "".join(buffers) def readline(self, size=-1): data = self._rbuf if size < 0: # Read until \n or EOF, whichever comes first if self._rbufsize <= 1: # Speed up unbuffered case assert data == "" buffers = [] recv = self._sock.recv while data != "\n": data = recv(1) if not data: break buffers.append(data) return "".join(buffers) nl = data.find('\n') if nl >= 0: nl += 1 self._rbuf = data[nl:] return data[:nl] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: data = self._sock.recv(self._rbufsize) if not data: break buffers.append(data) nl = data.find('\n') if nl >= 0: nl += 1 self._rbuf = data[nl:] buffers[-1] = data[:nl] break return "".join(buffers) else: # Read until size bytes or \n or EOF seen, whichever comes first nl = data.find('\n', 0, size) if nl >= 0: nl += 1 self._rbuf = data[nl:] return data[:nl] buf_len = len(data) if buf_len >= size: self._rbuf = data[size:] return data[:size] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: data = self._sock.recv(self._rbufsize) if not data: break buffers.append(data) left = size - buf_len nl = data.find('\n', 0, left) if nl >= 0: nl += 1 self._rbuf = data[nl:] buffers[-1] = data[:nl] break n = len(data) if n >= left: self._rbuf = data[left:] buffers[-1] = data[:left] break 
buf_len += n return "".join(buffers) def readlines(self, sizehint=0): total = 0 list = [] while True: line = self.readline() if not line: break list.append(line) total += len(line) if sizehint and total >= sizehint: break return list # Iterator protocols def __iter__(self): return self def next(self): line = self.readline() if not line: raise StopIteration return line
apache-2.0
oVirt/ovirt-node
src/ovirt/node/utils/hooks.py
2
2481
#!/usr/bin/python # -*- coding: utf-8 -*- # # hooks.py - Copyright (C) 2014 Red Hat, Inc. # Written by Ryan Barry <rbarry@redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. A copy of the GNU General Public License is # also available at http://www.gnu.org/copyleft/gpl.html. """ Manage running installer hooks """ import logging import os from ovirt.node import base from ovirt.node.utils import process LOGGER = logging.getLogger(__name__) class Hooks(base.Base): """A utility class which executes files for additional configuration beyond the normal install """ known = ["pre-upgrade", "post-upgrade", "rollback", "on-boot", "on-changed-boot-image"] legacy_hooks_directory = "/etc/ovirt-config-boot.d/" hooks_path_tpl = "/usr/libexec/ovirt-node/hooks/{name}" @staticmethod def post_auto_install(): Hooks.__run(Hooks.legacy_hooks_directory) @staticmethod def emit(name): """Signal that a specific event appeared, and trigger the hook handlers Args: name: Name of the hook (bust be in Hooks.known) """ assert name in Hooks.known path = Hooks.hooks_path_tpl.format(name=name) Hooks.__run(path) @staticmethod def __run(hooks_directory): for hook in os.listdir(hooks_directory): script = os.path.join(hooks_directory, hook) if script.endswith(".pyc") or script.endswith(".pyo"): continue LOGGER.debug("Running hook %s" % script) if script.endswith(".py"): output = 
process.check_output(["python", script]) else: output = process.check_output("%s &> /dev/null" % script, shell=True) [LOGGER.debug("%s: %s" % (script, line)) for line in output]
gpl-2.0
tivvit/devfest-rpg
backend/model/game.py
2
4955
__author__ = 'tivvit' from google.appengine.api import memcache from users import Users from leaderboard import Leaderboard from backend.cdh_m import User_m, UsersCollection_m, FactionStats_m, Stats_m, FactionUsers_m, Leaderboard_entry_m, Leaderboard_m, FactionFull_m, FactionMinPoints_m import logging from google.appengine.ext import ndb class Game(ndb.Model): min_points = ndb.IntegerProperty() def stats(self): data = memcache.get('stats') if data is not None: return data else: self.users = Users() users = [0, 0, 0] points = [0, 0, 0] for user in Users.query().fetch(): if user.faction: users[user.faction-1] += 1 points[user.faction-1] += user.get_points_sum(user.key.id()) faUsers = [] for usr in users: faUsers.append(FactionUsers_m(users=usr)) stats = [] for usr in points: stats.append(Stats_m(points=usr)) logging.info("%s", users) fa_stats = FactionStats_m(users=faUsers, stats=stats) memcache.add(key="stats", value=fa_stats, time=160) return fa_stats def leaderboard(self, limit): data = memcache.get('leaderboard_a') if data is not None: return data else: data = memcache.get('leaderboard') if data is not None: return data else: lb_m = Leaderboard().query().get() memcache.add(key="leaderboard", value=lb_m, time=500) return lb_m # lb_m = Leaderboard().query().get() # return Leaderboard_m(leaderboard=lb_m.leaderboard[:limit]) # leaderboard = [] # # for user in Users.query().fetch(): # leaderboard.append(Leaderboard_entry_m( # user=user.get(user.key.id()), # points=user.get_points_sum(user.key.id()) # )) # # leaderboard.sort(key=lambda x: x.points, reverse=True) # return Leaderboard_m(leaderboard=leaderboard[:limit]) def generateLeaderboard(self): leaderboard = [] leaderboard_a = [] cntr = 0 for user in Users.query().fetch(): u = user.get(user.key.id()) p = user.get_points_sum(user.key.id()) leaderboard.append(Leaderboard_entry_m( user=u, points=p )) if p > 0: cntr += 1 logging.info("total playing count: " + str(cntr)) leaderboard.sort(key=lambda x: x.points, 
reverse=True) lb_m = Leaderboard_m(leaderboard=leaderboard[:20]) if Leaderboard().query().get(): lb = Leaderboard().query().get() lb.leaderboard = lb_m lb.put() else: lb = Leaderboard(leaderboard=lb_m) lb.put() # lb_short = Leaderboard_m(leaderboard=lb_m.leaderboard[:20]) memcache.add(key="leaderboard", value=lb_m, time=500) def leaderboard(self, limit): leaderboard = [] for user in Users.query().fetch(): leaderboard.append(Leaderboard_entry_m( user=user.get(user.key.id()), points=user.get_points_sum(user.key.id()) )) leaderboard.sort(key=lambda x: x.points, reverse=True) return Leaderboard_m(leaderboard=leaderboard[:limit]) def faction_hiring(self, faction_id): limit = 10 game = Game() stats = game.stats() # stats = stats["stats"] print stats faction_min = stats.users[0].users faction_max = 0 # faction_max = stats["users"][0]["users"] for users in stats.users: if faction_max < users.users: faction_max = users.users if faction_min > users.users: faction_min = users.users # print faction_max # print faction_min #print "M" + str(faction_max) + "N" + str(faction_min) + "ID " + str(faction_id) + str(stats.users[faction_id].users) return FactionFull_m(hiring=int(not (faction_max > (faction_min + limit) and stats.users[faction_id-1].users == faction_max))) def get_min_faction_points(self): game = Game.query().get() return game.min_points def get_min_faction_points_m(self): return FactionMinPoints_m(min=self.get_min_faction_points()) def set_min_faction_points(self, new_min): min_points = Game.query().get() if not min_points: set_min_points = Game(min_points=new_min) set_min_points.put() min_points = Game.query().get() min_points.min_points = new_min min_points.put() return FactionMinPoints_m(min=min_points.min_points)
apache-2.0
hale36/SRTV
lib/requests/packages/chardet/mbcsgroupprober.py
2769
1967
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # Proofpoint, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetgroupprober import CharSetGroupProber from .utf8prober import UTF8Prober from .sjisprober import SJISProber from .eucjpprober import EUCJPProber from .gb2312prober import GB2312Prober from .euckrprober import EUCKRProber from .cp949prober import CP949Prober from .big5prober import Big5Prober from .euctwprober import EUCTWProber class MBCSGroupProber(CharSetGroupProber): def __init__(self): CharSetGroupProber.__init__(self) self._mProbers = [ UTF8Prober(), SJISProber(), EUCJPProber(), GB2312Prober(), EUCKRProber(), CP949Prober(), Big5Prober(), EUCTWProber() ] self.reset()
gpl-3.0
sergioSEa/Developed_scripts
primer-generation.py
1
16460
#!/usr/bin/python import math import argparse from string import maketrans #Script used in order to obtain primers. parser = argparse.ArgumentParser() parser.add_argument('-file', action="store", dest = 'File', required = "True") parser.add_argument('-fasta', action = "store", dest = "genome", required = "True") parser.add_argument('-fq', action = "store", dest = "fq_lim") parser.add_argument('-out', action = "store", dest = "output", required = "True") args = parser.parse_args() genome = args.genome #Tm calculation of an oligo. Based on biophp script of Joseba Bikandi https://www.biophp.org/minitools/melting_temperature/demo.php def Tm_calculation(oligo): primer = float(400) #400 nM are supposed as a standard [primer] mg = float(2) #2 mM are supposed as a standard [Mg2+] salt = float(40) #40 mM are supposed as a standard salt concentration s = 0 h = 0 #Enthalpy and entrophy values from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC19045/table/T2/ (SantaLucia, 1998) dic = { "AA": [-7.9,-22.2], "AC": [-8.4,-22.4], "AG": [-7.8, -21.0], "AT": [-7.2,-20.4], "CA": [-8.5,-22.7], "CC": [-8.0, -19.9], "CG": [-10.6,-27.2], "CT": [-7.8,-21.0], "GA": [-8.2,-22.2], "GC": [-9.8, -24.4], "GG": [-8.0, -19.9], "GT": [-8.4, -22.4], "TA": [-7.2,-21.3], "TC": [-8.2,-22.2], "TG": [-8.5,-22.7], "TT": [-7.9,-22.2]} #Effect on entropy by salt correction; von Ahsen et al 1999 #Increase of stability due to presence of Mg salt_effect = (salt/1000)+((mg/1000)*140) #effect on entropy s+=0.368 * (len(oligo)-1)* math.log(salt_effect) #terminal corrections. Santalucia 1998 firstnucleotide= oligo[0] if firstnucleotide=="G" or firstnucleotide=="C": h+=0.1; s+=-2.8 if firstnucleotide=="A" or firstnucleotide=="T": h+=2.3; s+=4.1 lastnucleotide= oligo[-1] if lastnucleotide=="G" or lastnucleotide=="C": h+=0.1; s+=-2.8 if lastnucleotide=="A" or lastnucleotide=="T": h+=2.3; s+=4.1 #compute new H and s based on sequence. 
Santalucia 1998 for i in range(0,len(oligo)-1): f = i+ 2 substring = oligo[i:f] try: h = h + float(dic[substring][0]) s =s + float(dic[substring][1]) except: return 0 tm=((1000*h)/(s+(1.987*math.log(primer/2000000000))))-273.15 return tm def reverse_complementary(oligo): revcomp = oligo.translate(maketrans('ACGT', 'TGCA'))[::-1] return revcomp def rever(oligo): return oligo[::-1] def find_repetitive(oligo,repetitions): for i in range(len(oligo)-repetitions+1): subdivision = oligo[i:i+repetitions] sub_list = list(subdivision) if len(set(sub_list)) == 1: return "yes" return "no" def cross_dimerization(oligo,against): #lista= ["ccgtcggagaaacttacatg","aacggcagagcagatttg","tgccatcagaccaacttttc","gtaaaactctagccagctcag","aaggaagaggaaaggggg","aaaataatcctggaaaccttaatg","cattaaacaattcacaaactcaag","tcaatttttagaagctaaatccatg","aaccaaaacaggggaaattttc","caaatctgagtcggtgacg","cctctcttcaagtccaagttg","ttgtcagcatcaagaccaac","ttccagctgggtaccatc","ggaaataaaagttagcctacctc","ccaccaatatagctcctgc","gagagggatttaccattctttc","caaaacctgatgatgatgatcc","gcttagattgttgtatataagcttag" ,"tgctatactggtgattgttgc","attatagagctttagagcatttttag" ,"ctcaaccacaacctgagg"] #lista=[] #number = 0 #for oligo in lista: # oligo = oligo.upper() if against == "self": backwards = reverse_complementary(oligo) else: backwards = reverse_complementary(against) #oligo = "ctaattataacgaaagagaaacgc".upper() #backwards=reverse_complementary("cccgggaccaagttattatg".upper()) counter_list = [] for j in range(len(oligo)): match = [] counter = 0 former = "no" #######################################One sense for i in range(len(backwards)): try: if oligo[j+i] == backwards[i]: if former == "yes": match[-1]= match[-1]+oligo[j+i] else: match.append(oligo[j+i]) former = "yes" else: match.append("-") former = "no" except: continue ##################################Calculation of value pos = 0 for nuc in match: if len(nuc) == 1: if nuc == "G" or nuc == "C": if pos == 0 or pos == 1: counter += 3*1.2 elif pos == len(match)-1 or pos == 
len(match)-2 or pos == len(match)-3: counter +=3*1.2 else: counter += 3 elif nuc == "T" or nuc == "A": if pos == 0 or pos == 1: counter +=2*1.2 elif pos == len(match)-1 or pos == len(match)-2 or pos == len(match)-2: counter +=2*1.2 else: counter +=2 else: c = 0 for n in nuc: if n == "G" or n == "C": c += 3 elif n == "T" or n == "A": c +=2 if pos == 0 or pos == 1: counter += c * len(nuc) * 1.2 elif pos == len(match)-1 or pos == len(match)-2 or pos == len(match)-3: counter+=c*len(nuc)*1.2 else: counter += c * len(nuc) pos += 1 #if counter > 50: #print match, counter, number counter_list.append(counter) ####################################Other sense match = [] counter = 0 former = "no" for i in range(len(backwards)): try: if i-j < 0: continue if oligo[i-j] == backwards[i]: if former == "yes": match[-1]= match[-1]+oligo[i-j] else: match.append(oligo[i-j]) former = "yes" else: match.append("-") former = "no" except: continue #################################Calculation of value pos = 0 for nuc in match: if len(nuc) == 1: if nuc == "G" or nuc == "C": if pos == 0 or pos == 1: counter += 3*1.2 elif pos == len(match)-1 or pos == len(match)-2 or pos == len(match)-3: counter +=3*1.2 else: counter += 3 elif nuc == "T" or nuc == "A": if pos == 0 or pos == 1: counter +=2*1.2 elif pos == len(match)-1 or pos == len(match)-2 or pos == len(match)-3: counter +=2*1.2 else: counter +=2 else: c = 0 for n in nuc: if n == "G" or n == "C": c += 3 elif n == "T" or n == "A": c +=2 if pos == 0 or pos == 1: counter += c * len(nuc) * 1.2 elif pos == len(match)-1 or pos == len(match)-2 or pos == len(match)-3: counter+=c*len(nuc)*1.2 else: counter += c * len(nuc) pos += 1 counter_list.append(counter) #print counter_list, len(counter_list), max(counter_list) #print max(counter_list), oligo #number += 1 return max(counter_list) # Function to parse fasta file (based on one of the Biopython IOs) def read_fasta(fp): name, seq = None, [] for line in fp: line = line.rstrip() if line.startswith('>'): if 
name: yield (name, ''.join(seq)) name, seq = line, [] else: seq.append(line) if name: yield (name, ''.join(seq)) def genome_selection(contig,genome): with open(genome) as fp: for name_contig, seq_contig in read_fasta(fp): if name_contig[1:].lower() == contig: genome = seq_contig return genome def rule_1(oligo,sense,oligo2): last_element = len(oligo) if sense == "reverse" : oligo = reverse_complementary(oligo) if oligo2 == "-": oligo2 = "self" while True: end_of_primer = 21 begin_of_primer = 0 oligo = oligo[:last_element] ###################################### guanine = oligo.rfind("G") cytosine = oligo.rfind("C") ###################################### It looks for a G/C ending #Checking wheter there are both G and C in the oligo if guanine != -1 and cytosine != -1: last_element = max([guanine,cytosine]) else: if guanine == -1 and cytosine == -1: found = "no" # No more G or C, end of rule 1, go to a non restrictive one. return found, found, "-" break elif guanine == -1 and cytosine != -1: last_element = cytosine elif guanine != -1 and cytosine == -1: last_element = guanine begin = last_element - end_of_primer #Begining of the oligo, Last G/C found as the last position and 21 positions before as the fist position. 
end = last_element while end - begin < 26 and end - begin > 16: #Size of the oligo 25 pb max, 17 pb min if begin < 0: break primer = oligo[begin:end+1] Tm = Tm_calculation(primer) ###################################################### Filter: Tm of the primer, repetitions in the primer, homodimer formation if Tm > 60 and Tm < 64: contain_repetition = find_repetitive(primer,3) if contain_repetition == "no": cross = cross_dimerization(primer,"self") if cross <= 47: if oligo2 != "self": cross = cross_dimerization(primer,oligo2) if cross > 47: break found = "yes" #print cross, primer,oligo2 return found, primer, Tm else: break else: break ###################################################### elif Tm < 60: begin -= 1 elif Tm > 64: begin += 1 def rule_2(oligo,sense,oligo2): #This rule just looks for primers which begin_of_primer = 0 if oligo2 == "-": oligo2 = "self" if sense == "reverse" : oligo = reverse_complementary(oligo) while True: end_of_primer =21 + begin_of_primer if end_of_primer > len(oligo): found = "no" return found, found, "-" while end_of_primer - begin_of_primer < 26 and end_of_primer - begin_of_primer > 16: primer = oligo[begin_of_primer:end_of_primer+1] Tm = Tm_calculation(primer) ###################################################### Filter: Tm of the primer, repetitions in the primer, homodimer formation if Tm > 60 and Tm < 64: contain_repetition = find_repetitive(primer,4) if contain_repetition == "no": cross = cross_dimerization(primer,"self") if cross <= float(47): if oligo2 != "self": cross = cross_dimerization(primer,oligo2) if cross > float(47): break found = "yes" return found, primer, Tm else: break else: break ###################################################### elif Tm < 60: end_of_primer += 1 elif Tm > 64: end_of_primer -= 1 begin_of_primer +=1 def snp_calculation(position,genome): #Total size of the amplification size_i = 300 size_f = 500 oligos = [] Tms = [] #upstream primer up_primer_pos = int(position) - size_i #Starting position: 
mutation - downstream position of the primer oligo = genome[up_primer_pos-1 : up_primer_pos + 200] #This is the window which will be screaned in order to look for the primer result = rule_1(oligo, "upstream","-") #Rule 1: Finish in G/C, does not contain repetitive sequences, does not form homodimers if result[0] == "no": result = rule_2(oligo, "upstream","-") if result[0] == "no": oligos.append("not found") oligos.append("-") Tms.append("-") Tms.append("-") if result[0] == "yes": oligos.append(result[1]) Tms.append(str(result[2])) #downstream primer down_primer_pos = int(position) + size_f oligo = genome[down_primer_pos-1 : down_primer_pos + 100] result = rule_1(oligo,"downstream",oligos[0]) if result[0] == "no": result = rule_2(oligo, "downstream",oligos[0]) if result[0] == "no": oligos.append("not found") Tms.append("-") if result[0] == "yes": oligos.append(result[1]) Tms.append(str(result[2])) return oligos,Tms def insertion_calculation(position,genome,contig_used): size= 600 oligos = [] Tms = [] #selection = "5" #try_size = 100 pos_n_contig = contig_used+"_"+position if pos_n_contig not in consensus_3: oligos.extend(["not found","-","-","-"]) Tms.extend(["-","-","-","-"]) return oligos,Tms #insertion primer #Generation of the oligo from the insertion where the primer will be searched for selection in [5,3]: if selection == 3: lenght_consensus = len(consensus_3[pos_n_contig]) how = "forward" try_oligo = consensus_3[pos_n_contig][:lenght_consensus] other = oligos[0] elif selection == 5: lenght_consensus = len(consensus_5[pos_n_contig]) how = "reverse" try_oligo = consensus_5[pos_n_contig][:lenght_consensus] other = "-" if lenght_consensus < 10: oligos.extend(["not found","-"]) Tms.extend(["-","-"]) result = rule_1(try_oligo,how,other) if result[0] == "no": result = rule_2(try_oligo, how,"-") if result[0] == "no": oligos.extend(["not found","-"]) Tms.extend(["-","-"]) if result[0] == "yes": oligos.append(result[1]) Tms.append(str(result[2])) #Generation of the 
forward and reverse oligos up_primer_pos = int(position) - size try_oligo = genome[up_primer_pos-1 : up_primer_pos + 200] result = rule_1(try_oligo, "forward",oligos[1]) if result[0] == "no": if oligos[1] != "-" and oligos[1] != "not found": result = rule_2(try_oligo, "forward",oligos[1]) else: result = rule_2(try_oligo, "forward","-") if result[0] == "no": oligos.append("not found") oligos.append("-") Tms.append("-") Tms.append("-") if result[0] == "yes": oligos.append(result[1]) Tms.append(str(result[2])) #downstream primer down_primer_pos = int(position) + size try_oligo = genome[down_primer_pos-1 : down_primer_pos + 200] result = rule_1(try_oligo,"reverse",oligos[0]) if result[0] == "no": if oligos[1] != "-" and oligos[1] != "not found": result = rule_2(try_oligo, "reverse",oligos[0]) else: result = rule_2(try_oligo, "reverse",oligos[2]) if result[0] == "no": oligos.append("not found") Tms.append("-") if result[0] == "yes": oligos.append(result[1]) Tms.append(str(result[2])) return oligos,Tms def fastaq_to_dic(fq): #It gets two dictionaries for 3' and 5' sequences. dic_fas_3= {} dic_fas_5= {} #fq = open(fq,"r") i = 0 m = 0 with open(fq,"r") as fq: for line in fq.readlines(): line = line.rstrip() if line.startswith("@user_projects"): gene = line.split("/")[-1] split = gene.split("_") h = split[0]+"_"+split[1] m = 0 #Depending on whether the reads are in the 3' or 5' extreme they will go to one dic or another. 
if split[2].strip()=="3": n = 1 dic_fas_3[h]="" if split[2].strip()=="5": n= 2 dic_fas_5[h]="" continue if m == 0 and not line.startswith("+"): line = line.upper() if n == 1: dic_fas_3[h]+= line if n == 2: dic_fas_5[h]+= line if line.startswith("+"): m =1 return dic_fas_3,dic_fas_5 positions = open(args.File,"r") result = open(args.output,"w") n = 0 first_list = [] for line in positions.readlines(): line = line.split("\t") if n != 0: first_list.append(line) n += 1 former = "" list2= [] contig_used = "" mode = first_list[0][0] if mode == "snp": result.write("@type\tcontig\tposition\tref_base\talt_base\thit\tmrna_start\tmrna_end\tstrand\tgene_model\tgene_element\taa_pos\taa_ref\taa_alt\tgene_funct_annotation\tforward primer\tTm forward\treverse primer\tTm reverse\n") if mode == "lim": #try: consensus = fastaq_to_dic(args.fq_lim) consensus_3 = consensus[0] consensus_5 = consensus[1] #except: # print "Fq file missing" # exit() result.write("@type\tcontig\tposition\tref_base\talt_base\thit\tmrna_start\tmrna_end\tstrand\tgene_model\tgene_element\taa_pos\taa_ref\taa_alt\tgene_funct_annotation\tforward primer\tTm forward\tinsertion primer 5'\tTm insertion primer 5'\tinsertion primer 3'\tTm insertion primer 3'\treverse primer\tTm reverse\n") for line in first_list: if mode == "snp": v = line[0]+"\t"+line[1]+"\t"+line[2]+"\t"+line[3]+"\t"+line[4]+"\t"+line[5]+"\t"+line[6]+"\t"+line[7]+"\t"+line[8]+"\t"+line[9]+"\t"+line[10]+"\t"+line[11]+"\t"+line[12]+"\t"+line[13]+"\t"+line[14].rstrip() else: v = line[0]+"\t"+line[1]+"\t"+line[2]+"\t"+line[3]+"\t"+line[4]+"\t"+line[5]+"\t"+line[6]+"\t"+line[7]+"\t"+line[8]+"\t"+line[9]+"\t"+line[10]+"\t"+line[11]+"\t"+line[12]+"\t"+line[13]+"\t"+line[14].rstrip() #if line[5] == "nh": # if mode == "snp": list2.append(v+"\t-\t-\t-\t-\n") # else: list2.append(v+"\t-\t-\t-\t-\t-\t-\n") #else: a = line[1]+"-"+line[2] if a == former: if mode == "snp": list2.append(v+"\t-\t-\t-\t-\n") else: list2.append(v+"\t-\t-\t-\t-\t-\t-\n") elif a != former: 
if mode == "snp": if line[1] != contig_used: genom = genome_selection(line[1],genome) contig_used = line[1] r = snp_calculation(line[2],genom) list2.append(v+"\t"+r[0][0]+"\t"+r[1][0]+"\t"+r[0][1]+"\t"+r[1][1]+"\n") if mode == "lim": if line[1] != contig_used: genom = genome_selection(line[1],genome) contig_used = line[1] r = insertion_calculation(line[2],genom,contig_used) list2.append(v+"\t"+r[0][2]+"\t"+r[1][2]+"\t"+r[0][0]+"\t"+r[1][0]+"\t"+r[0][1]+"\t"+r[1][1]+"\t"+r[0][3]+"\t"+r[1][3]+"\n") former = a for items in list2: result.write(items)
gpl-3.0
znoland3/zachdemo
venvdir/lib/python3.4/site-packages/sqlalchemy/util/deprecations.py
55
4403
# util/deprecations.py # Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Helpers related to deprecation of functions, methods, classes, other functionality.""" from .. import exc import warnings import re from .langhelpers import decorator def warn_deprecated(msg, stacklevel=3): warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel) def warn_pending_deprecation(msg, stacklevel=3): warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel) def deprecated(version, message=None, add_deprecation_to_docstring=True): """Decorates a function and issues a deprecation warning on use. :param message: If provided, issue message in the warning. A sensible default is used if not provided. :param add_deprecation_to_docstring: Default True. If False, the wrapped function's __doc__ is left as-is. If True, the 'message' is prepended to the docs if provided, or sensible default if message is omitted. """ if add_deprecation_to_docstring: header = ".. deprecated:: %s %s" % \ (version, (message or '')) else: header = None if message is None: message = "Call to deprecated function %(func)s" def decorate(fn): return _decorate_with_warning( fn, exc.SADeprecationWarning, message % dict(func=fn.__name__), header) return decorate def pending_deprecation(version, message=None, add_deprecation_to_docstring=True): """Decorates a function and issues a pending deprecation warning on use. :param version: An approximate future version at which point the pending deprecation will become deprecated. Not used in messaging. :param message: If provided, issue message in the warning. A sensible default is used if not provided. :param add_deprecation_to_docstring: Default True. If False, the wrapped function's __doc__ is left as-is. 
If True, the 'message' is prepended to the docs if provided, or sensible default if message is omitted. """ if add_deprecation_to_docstring: header = ".. deprecated:: %s (pending) %s" % \ (version, (message or '')) else: header = None if message is None: message = "Call to deprecated function %(func)s" def decorate(fn): return _decorate_with_warning( fn, exc.SAPendingDeprecationWarning, message % dict(func=fn.__name__), header) return decorate def _sanitize_restructured_text(text): def repl(m): type_, name = m.group(1, 2) if type_ in ("func", "meth"): name += "()" return name return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text) def _decorate_with_warning(func, wtype, message, docstring_header=None): """Wrap a function with a warnings.warn and augmented docstring.""" message = _sanitize_restructured_text(message) @decorator def warned(fn, *args, **kwargs): warnings.warn(message, wtype, stacklevel=3) return fn(*args, **kwargs) doc = func.__doc__ is not None and func.__doc__ or '' if docstring_header is not None: docstring_header %= dict(func=func.__name__) doc = inject_docstring_text(doc, docstring_header, 1) decorated = warned(func) decorated.__doc__ = doc return decorated import textwrap def _dedent_docstring(text): split_text = text.split("\n", 1) if len(split_text) == 1: return text else: firstline, remaining = split_text if not firstline.startswith(" "): return firstline + "\n" + textwrap.dedent(remaining) else: return textwrap.dedent(text) def inject_docstring_text(doctext, injecttext, pos): doctext = _dedent_docstring(doctext or "") lines = doctext.split('\n') injectlines = textwrap.dedent(injecttext).split("\n") if injectlines[0]: injectlines.insert(0, "") blanks = [num for num, line in enumerate(lines) if not line.strip()] blanks.insert(0, 0) inject_pos = blanks[min(pos, len(blanks) - 1)] lines = lines[0:inject_pos] + injectlines + lines[inject_pos:] return "\n".join(lines)
mit
meabsence/python-for-android
python3-alpha/python3-src/Lib/tkinter/ttk.py
45
56250
"""Ttk wrapper. This module provides classes to allow using Tk themed widget set. Ttk is based on a revised and enhanced version of TIP #48 (http://tip.tcl.tk/48) specified style engine. Its basic idea is to separate, to the extent possible, the code implementing a widget's behavior from the code implementing its appearance. Widget class bindings are primarily responsible for maintaining the widget state and invoking callbacks, all aspects of the widgets appearance lies at Themes. """ __version__ = "0.3.1" __author__ = "Guilherme Polo <ggpolo@gmail.com>" __all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label", "Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow", "PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar", "Separator", "Sizegrip", "Style", "Treeview", # Extensions "LabeledScale", "OptionMenu", # functions "tclobjs_to_py", "setup_master"] import tkinter _flatten = tkinter._flatten # Verify if Tk is new enough to not need the Tile package _REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False def _load_tile(master): if _REQUIRE_TILE: import os tilelib = os.environ.get('TILE_LIBRARY') if tilelib: # append custom tile path to the the list of directories that # Tcl uses when attempting to resolve packages with the package # command master.tk.eval( 'global auto_path; ' 'lappend auto_path {%s}' % tilelib) master.tk.eval('package require tile') # TclError may be raised here master._tile_loaded = True def _format_optdict(optdict, script=False, ignore=None): """Formats optdict to a tuple to pass it to tk.call. E.g. 
(script=False): {'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns: ('-foreground', 'blue', '-padding', '1 2 3 4')""" format = "%s" if not script else "{%s}" opts = [] for opt, value in optdict.items(): if ignore and opt in ignore: continue if isinstance(value, (list, tuple)): v = [] for val in value: if isinstance(val, str): v.append(str(val) if val else '{}') else: v.append(str(val)) # format v according to the script option, but also check for # space in any value in v in order to group them correctly value = format % ' '.join( ('{%s}' if ' ' in val else '%s') % val for val in v) if script and value == '': value = '{}' # empty string in Python is equivalent to {} in Tcl opts.append(("-%s" % opt, value)) # Remember: _flatten skips over None return _flatten(opts) def _format_mapdict(mapdict, script=False): """Formats mapdict to pass it to tk.call. E.g. (script=False): {'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]} returns: ('-expand', '{active selected} grey focus {1, 2, 3, 4}')""" # if caller passes a Tcl script to tk.call, all the values need to # be grouped into words (arguments to a command in Tcl dialect) format = "%s" if not script else "{%s}" opts = [] for opt, value in mapdict.items(): opt_val = [] # each value in mapdict is expected to be a sequence, where each item # is another sequence containing a state (or several) and a value for statespec in value: state, val = statespec[:-1], statespec[-1] if len(state) > 1: # group multiple states state = "{%s}" % ' '.join(state) else: # single state # if it is empty (something that evaluates to False), then # format it to Tcl code to denote the "normal" state state = state[0] or '{}' if isinstance(val, (list, tuple)): # val needs to be grouped val = "{%s}" % ' '.join(map(str, val)) opt_val.append("%s %s" % (state, val)) opts.append(("-%s" % opt, format % ' '.join(opt_val))) return _flatten(opts) def _format_elemcreate(etype, script=False, *args, **kw): """Formats args and kw according 
to the given element factory etype.""" spec = None opts = () if etype in ("image", "vsapi"): if etype == "image": # define an element based on an image # first arg should be the default image name iname = args[0] # next args, if any, are statespec/value pairs which is almost # a mapdict, but we just need the value imagespec = _format_mapdict({None: args[1:]})[1] spec = "%s %s" % (iname, imagespec) else: # define an element whose visual appearance is drawn using the # Microsoft Visual Styles API which is responsible for the # themed styles on Windows XP and Vista. # Availability: Tk 8.6, Windows XP and Vista. class_name, part_id = args[:2] statemap = _format_mapdict({None: args[2:]})[1] spec = "%s %s %s" % (class_name, part_id, statemap) opts = _format_optdict(kw, script) elif etype == "from": # clone an element # it expects a themename and optionally an element to clone from, # otherwise it will clone {} (empty element) spec = args[0] # theme name if len(args) > 1: # elementfrom specified opts = (args[1], ) if script: spec = '{%s}' % spec opts = ' '.join(map(str, opts)) return spec, opts def _format_layoutlist(layout, indent=0, indent_size=2): """Formats a layout list so we can pass the result to ttk::style layout and ttk::style settings. Note that the layout doesn't has to be a list necessarily. 
E.g.: [("Menubutton.background", None), ("Menubutton.button", {"children": [("Menubutton.focus", {"children": [("Menubutton.padding", {"children": [("Menubutton.label", {"side": "left", "expand": 1})] })] })] }), ("Menubutton.indicator", {"side": "right"}) ] returns: Menubutton.background Menubutton.button -children { Menubutton.focus -children { Menubutton.padding -children { Menubutton.label -side left -expand 1 } } } Menubutton.indicator -side right""" script = [] for layout_elem in layout: elem, opts = layout_elem opts = opts or {} fopts = ' '.join(map(str, _format_optdict(opts, True, "children"))) head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '') if "children" in opts: script.append(head + " -children {") indent += indent_size newscript, indent = _format_layoutlist(opts['children'], indent, indent_size) script.append(newscript) indent -= indent_size script.append('%s}' % (' ' * indent)) else: script.append(head) return '\n'.join(script), indent def _script_from_settings(settings): """Returns an appropriate script, based on settings, according to theme_settings definition to be used by theme_settings and theme_create.""" script = [] # a script will be generated according to settings passed, which # will then be evaluated by Tcl for name, opts in settings.items(): # will format specific keys according to Tcl code if opts.get('configure'): # format 'configure' s = ' '.join(map(str, _format_optdict(opts['configure'], True))) script.append("ttk::style configure %s %s;" % (name, s)) if opts.get('map'): # format 'map' s = ' '.join(map(str, _format_mapdict(opts['map'], True))) script.append("ttk::style map %s %s;" % (name, s)) if 'layout' in opts: # format 'layout' which may be empty if not opts['layout']: s = 'null' # could be any other word, but this one makes sense else: s, _ = _format_layoutlist(opts['layout']) script.append("ttk::style layout %s {\n%s\n}" % (name, s)) if opts.get('element create'): # format 'element create' eopts = 
opts['element create'] etype = eopts[0] # find where args end, and where kwargs start argc = 1 # etype was the first one while argc < len(eopts) and not hasattr(eopts[argc], 'items'): argc += 1 elemargs = eopts[1:argc] elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {} spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw) script.append("ttk::style element create %s %s %s %s" % ( name, etype, spec, opts)) return '\n'.join(script) def _dict_from_tcltuple(ttuple, cut_minus=True): """Break tuple in pairs, format it properly, then build the return dict. If cut_minus is True, the supposed '-' prefixing options will be removed. ttuple is expected to contain an even number of elements.""" opt_start = 1 if cut_minus else 0 retdict = {} it = iter(ttuple) for opt, val in zip(it, it): retdict[str(opt)[opt_start:]] = val return tclobjs_to_py(retdict) def _list_from_statespec(stuple): """Construct a list from the given statespec tuple according to the accepted statespec accepted by _format_mapdict.""" nval = [] for val in stuple: typename = getattr(val, 'typename', None) if typename is None: nval.append(val) else: # this is a Tcl object val = str(val) if typename == 'StateSpec': val = val.split() nval.append(val) it = iter(nval) return [_flatten(spec) for spec in zip(it, it)] def _list_from_layouttuple(ltuple): """Construct a list from the tuple returned by ttk::layout, this is somewhat the reverse of _format_layoutlist.""" res = [] indx = 0 while indx < len(ltuple): name = ltuple[indx] opts = {} res.append((name, opts)) indx += 1 while indx < len(ltuple): # grab name's options opt, val = ltuple[indx:indx + 2] if not opt.startswith('-'): # found next name break opt = opt[1:] # remove the '-' from the option indx += 2 if opt == 'children': val = _list_from_layouttuple(val) opts[opt] = val return res def _val_or_dict(options, func, *args): """Format options then call func with args and options and return the appropriate result. 
If no option is specified, a dict is returned. If a option is specified with the None value, the value for that option is returned. Otherwise, the function just sets the passed options and the caller shouldn't be expecting a return value anyway.""" options = _format_optdict(options) res = func(*(args + options)) if len(options) % 2: # option specified without a value, return its value return res return _dict_from_tcltuple(res) def _convert_stringval(value): """Converts a value to, hopefully, a more appropriate Python object.""" value = str(value) try: value = int(value) except (ValueError, TypeError): pass return value def tclobjs_to_py(adict): """Returns adict with its values converted from Tcl objects to Python objects.""" for opt, val in adict.items(): if val and hasattr(val, '__len__') and not isinstance(val, str): if getattr(val[0], 'typename', None) == 'StateSpec': val = _list_from_statespec(val) else: val = list(map(_convert_stringval, val)) elif hasattr(val, 'typename'): # some other (single) Tcl object val = _convert_stringval(val) adict[opt] = val return adict def setup_master(master=None): """If master is not None, itself is returned. If master is None, the default master is returned if there is one, otherwise a new master is created and returned. If it is not allowed to use the default root and master is None, RuntimeError is raised.""" if master is None: if tkinter._support_default_root: master = tkinter._default_root or tkinter.Tk() else: raise RuntimeError( "No master specified and tkinter is " "configured to not support default root") return master class Style(object): """Manipulate style database.""" _name = "ttk::style" def __init__(self, master=None): master = setup_master(master) if not getattr(master, '_tile_loaded', False): # Load tile now, if needed _load_tile(master) self.master = master self.tk = self.master.tk def configure(self, style, query_opt=None, **kw): """Query or sets the default value of the specified option(s) in style. 
Each key in kw is an option and each value is either a string or a sequence identifying the value for that option.""" if query_opt is not None: kw[query_opt] = None return _val_or_dict(kw, self.tk.call, self._name, "configure", style) def map(self, style, query_opt=None, **kw): """Query or sets dynamic values of the specified option(s) in style. Each key in kw is an option and each value should be a list or a tuple (usually) containing statespecs grouped in tuples, or list, or something else of your preference. A statespec is compound of one or more states and then a value.""" if query_opt is not None: return _list_from_statespec( self.tk.call(self._name, "map", style, '-%s' % query_opt)) return _dict_from_tcltuple( self.tk.call(self._name, "map", style, *(_format_mapdict(kw)))) def lookup(self, style, option, state=None, default=None): """Returns the value specified for option in style. If state is specified it is expected to be a sequence of one or more states. If the default argument is set, it is used as a fallback value in case no specification for option is found.""" state = ' '.join(state) if state else '' return self.tk.call(self._name, "lookup", style, '-%s' % option, state, default) def layout(self, style, layoutspec=None): """Define the widget layout for given style. If layoutspec is omitted, return the layout specification for given style. layoutspec is expected to be a list or an object different than None that evaluates to False if you want to "turn off" that style. If it is a list (or tuple, or something else), each item should be a tuple where the first item is the layout name and the second item should have the format described below: LAYOUTS A layout can contain the value None, if takes no options, or a dict of options specifying how to arrange the element. The layout mechanism uses a simplified version of the pack geometry manager: given an initial cavity, each element is allocated a parcel. 
Valid options/values are: side: whichside Specifies which side of the cavity to place the element; one of top, right, bottom or left. If omitted, the element occupies the entire cavity. sticky: nswe Specifies where the element is placed inside its allocated parcel. children: [sublayout... ] Specifies a list of elements to place inside the element. Each element is a tuple (or other sequence) where the first item is the layout name, and the other is a LAYOUT.""" lspec = None if layoutspec: lspec = _format_layoutlist(layoutspec)[0] elif layoutspec is not None: # will disable the layout ({}, '', etc) lspec = "null" # could be any other word, but this may make sense # when calling layout(style) later return _list_from_layouttuple( self.tk.call(self._name, "layout", style, lspec)) def element_create(self, elementname, etype, *args, **kw): """Create a new element in the current theme of given etype.""" spec, opts = _format_elemcreate(etype, False, *args, **kw) self.tk.call(self._name, "element", "create", elementname, etype, spec, *opts) def element_names(self): """Returns the list of elements defined in the current theme.""" return self.tk.call(self._name, "element", "names") def element_options(self, elementname): """Return the list of elementname's options.""" return self.tk.call(self._name, "element", "options", elementname) def theme_create(self, themename, parent=None, settings=None): """Creates a new theme. It is an error if themename already exists. If parent is specified, the new theme will inherit styles, elements and layouts from the specified parent theme. 
If settings are present, they are expected to have the same syntax used for theme_settings.""" script = _script_from_settings(settings) if settings else '' if parent: self.tk.call(self._name, "theme", "create", themename, "-parent", parent, "-settings", script) else: self.tk.call(self._name, "theme", "create", themename, "-settings", script) def theme_settings(self, themename, settings): """Temporarily sets the current theme to themename, apply specified settings and then restore the previous theme. Each key in settings is a style and each value may contain the keys 'configure', 'map', 'layout' and 'element create' and they are expected to have the same format as specified by the methods configure, map, layout and element_create respectively.""" script = _script_from_settings(settings) self.tk.call(self._name, "theme", "settings", themename, script) def theme_names(self): """Returns a list of all known themes.""" return self.tk.call(self._name, "theme", "names") def theme_use(self, themename=None): """If themename is None, returns the theme in use, otherwise, set the current theme to themename, refreshes all widgets and emits a <<ThemeChanged>> event.""" if themename is None: # Starting on Tk 8.6, checking this global is no longer needed # since it allows doing self.tk.call(self._name, "theme", "use") return self.tk.eval("return $ttk::currentTheme") # using "ttk::setTheme" instead of "ttk::style theme use" causes # the variable currentTheme to be updated, also, ttk::setTheme calls # "ttk::style theme use" in order to change theme. self.tk.call("ttk::setTheme", themename) class Widget(tkinter.Widget): """Base class for Tk themed widgets.""" def __init__(self, master, widgetname, kw=None): """Constructs a Ttk Widget with the parent master. 
STANDARD OPTIONS class, cursor, takefocus, style SCROLLABLE WIDGET OPTIONS xscrollcommand, yscrollcommand LABEL WIDGET OPTIONS text, textvariable, underline, image, compound, width WIDGET STATES active, disabled, focus, pressed, selected, background, readonly, alternate, invalid """ master = setup_master(master) if not getattr(master, '_tile_loaded', False): # Load tile now, if needed _load_tile(master) tkinter.Widget.__init__(self, master, widgetname, kw=kw) def identify(self, x, y): """Returns the name of the element at position x, y, or the empty string if the point does not lie within any element. x and y are pixel coordinates relative to the widget.""" return self.tk.call(self._w, "identify", x, y) def instate(self, statespec, callback=None, *args, **kw): """Test the widget's state. If callback is not specified, returns True if the widget state matches statespec and False otherwise. If callback is specified, then it will be invoked with *args, **kw if the widget state matches statespec. statespec is expected to be a sequence.""" ret = self.tk.call(self._w, "instate", ' '.join(statespec)) if ret and callback: return callback(*args, **kw) return bool(ret) def state(self, statespec=None): """Modify or inquire widget state. Widget state is returned if statespec is None, otherwise it is set according to the statespec flags and then a new state spec is returned indicating which flags were changed. statespec is expected to be a sequence.""" if statespec is not None: statespec = ' '.join(statespec) return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec))) class Button(Widget): """Ttk Button widget, displays a textual label and/or image, and evaluates a command when pressed.""" def __init__(self, master=None, **kw): """Construct a Ttk Button widget with the parent master. 
STANDARD OPTIONS class, compound, cursor, image, state, style, takefocus, text, textvariable, underline, width WIDGET-SPECIFIC OPTIONS command, default, width """ Widget.__init__(self, master, "ttk::button", kw) def invoke(self): """Invokes the command associated with the button.""" return self.tk.call(self._w, "invoke") class Checkbutton(Widget): """Ttk Checkbutton widget which is either in on- or off-state.""" def __init__(self, master=None, **kw): """Construct a Ttk Checkbutton widget with the parent master. STANDARD OPTIONS class, compound, cursor, image, state, style, takefocus, text, textvariable, underline, width WIDGET-SPECIFIC OPTIONS command, offvalue, onvalue, variable """ Widget.__init__(self, master, "ttk::checkbutton", kw) def invoke(self): """Toggles between the selected and deselected states and invokes the associated command. If the widget is currently selected, sets the option variable to the offvalue option and deselects the widget; otherwise, sets the option variable to the option onvalue. Returns the result of the associated command.""" return self.tk.call(self._w, "invoke") class Entry(Widget, tkinter.Entry): """Ttk Entry widget displays a one-line text string and allows that string to be edited by the user.""" def __init__(self, master=None, widget=None, **kw): """Constructs a Ttk Entry widget with the parent master. 
STANDARD OPTIONS class, cursor, style, takefocus, xscrollcommand WIDGET-SPECIFIC OPTIONS exportselection, invalidcommand, justify, show, state, textvariable, validate, validatecommand, width VALIDATION MODES none, key, focus, focusin, focusout, all """ Widget.__init__(self, master, widget or "ttk::entry", kw) def bbox(self, index): """Return a tuple of (x, y, width, height) which describes the bounding box of the character given by index.""" return self.tk.call(self._w, "bbox", index) def identify(self, x, y): """Returns the name of the element at position x, y, or the empty string if the coordinates are outside the window.""" return self.tk.call(self._w, "identify", x, y) def validate(self): """Force revalidation, independent of the conditions specified by the validate option. Returns False if validation fails, True if it succeeds. Sets or clears the invalid state accordingly.""" return bool(self.tk.call(self._w, "validate")) class Combobox(Entry): """Ttk Combobox widget combines a text field with a pop-down list of values.""" def __init__(self, master=None, **kw): """Construct a Ttk Combobox widget with the parent master. 
STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS exportselection, justify, height, postcommand, state, textvariable, values, width """ # The "values" option may need special formatting, so leave to # _format_optdict the responsibility to format it if "values" in kw: kw["values"] = _format_optdict({'v': kw["values"]})[1] Entry.__init__(self, master, "ttk::combobox", **kw) def __setitem__(self, item, value): if item == "values": value = _format_optdict({item: value})[1] Entry.__setitem__(self, item, value) def configure(self, cnf=None, **kw): """Custom Combobox configure, created to properly format the values option.""" if "values" in kw: kw["values"] = _format_optdict({'v': kw["values"]})[1] return Entry.configure(self, cnf, **kw) def current(self, newindex=None): """If newindex is supplied, sets the combobox value to the element at position newindex in the list of values. Otherwise, returns the index of the current value in the list of values or -1 if the current value does not appear in the list.""" return self.tk.call(self._w, "current", newindex) def set(self, value): """Sets the value of the combobox to value.""" self.tk.call(self._w, "set", value) class Frame(Widget): """Ttk Frame widget is a container, used to group other widgets together.""" def __init__(self, master=None, **kw): """Construct a Ttk Frame with parent master. STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS borderwidth, relief, padding, width, height """ Widget.__init__(self, master, "ttk::frame", kw) class Label(Widget): """Ttk Label widget displays a textual label and/or image.""" def __init__(self, master=None, **kw): """Construct a Ttk Label with parent master. 
STANDARD OPTIONS class, compound, cursor, image, style, takefocus, text, textvariable, underline, width WIDGET-SPECIFIC OPTIONS anchor, background, font, foreground, justify, padding, relief, text, wraplength """ Widget.__init__(self, master, "ttk::label", kw) class Labelframe(Widget): """Ttk Labelframe widget is a container used to group other widgets together. It has an optional label, which may be a plain text string or another widget.""" def __init__(self, master=None, **kw): """Construct a Ttk Labelframe with parent master. STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS labelanchor, text, underline, padding, labelwidget, width, height """ Widget.__init__(self, master, "ttk::labelframe", kw) LabelFrame = Labelframe # tkinter name compatibility class Menubutton(Widget): """Ttk Menubutton widget displays a textual label and/or image, and displays a menu when pressed.""" def __init__(self, master=None, **kw): """Construct a Ttk Menubutton with parent master. STANDARD OPTIONS class, compound, cursor, image, state, style, takefocus, text, textvariable, underline, width WIDGET-SPECIFIC OPTIONS direction, menu """ Widget.__init__(self, master, "ttk::menubutton", kw) class Notebook(Widget): """Ttk Notebook widget manages a collection of windows and displays a single one at a time. Each child window is associated with a tab, which the user may select to change the currently-displayed window.""" def __init__(self, master=None, **kw): """Construct a Ttk Notebook with parent master. 
STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS height, padding, width TAB OPTIONS state, sticky, padding, text, image, compound, underline TAB IDENTIFIERS (tab_id) The tab_id argument found in several methods may take any of the following forms: * An integer between zero and the number of tabs * The name of a child window * A positional specification of the form "@x,y", which defines the tab * The string "current", which identifies the currently-selected tab * The string "end", which returns the number of tabs (only valid for method index) """ Widget.__init__(self, master, "ttk::notebook", kw) def add(self, child, **kw): """Adds a new tab to the notebook. If window is currently managed by the notebook but hidden, it is restored to its previous position.""" self.tk.call(self._w, "add", child, *(_format_optdict(kw))) def forget(self, tab_id): """Removes the tab specified by tab_id, unmaps and unmanages the associated window.""" self.tk.call(self._w, "forget", tab_id) def hide(self, tab_id): """Hides the tab specified by tab_id. The tab will not be displayed, but the associated window remains managed by the notebook and its configuration remembered. Hidden tabs may be restored with the add command.""" self.tk.call(self._w, "hide", tab_id) def identify(self, x, y): """Returns the name of the tab element at position x, y, or the empty string if none.""" return self.tk.call(self._w, "identify", x, y) def index(self, tab_id): """Returns the numeric index of the tab specified by tab_id, or the total number of tabs if tab_id is the string "end".""" return self.tk.call(self._w, "index", tab_id) def insert(self, pos, child, **kw): """Inserts a pane at the specified position. pos is either the string end, an integer index, or the name of a managed child. 
If child is already managed by the notebook, moves it to the specified position.""" self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw))) def select(self, tab_id=None): """Selects the specified tab. The associated child window will be displayed, and the previously-selected window (if different) is unmapped. If tab_id is omitted, returns the widget name of the currently selected pane.""" return self.tk.call(self._w, "select", tab_id) def tab(self, tab_id, option=None, **kw): """Query or modify the options of the specific tab_id. If kw is not given, returns a dict of the tab option values. If option is specified, returns the value of that option. Otherwise, sets the options to the corresponding values.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "tab", tab_id) def tabs(self): """Returns a list of windows managed by the notebook.""" return self.tk.call(self._w, "tabs") or () def enable_traversal(self): """Enable keyboard traversal for a toplevel window containing this notebook. This will extend the bindings for the toplevel window containing this notebook as follows: Control-Tab: selects the tab following the currently selected one Shift-Control-Tab: selects the tab preceding the currently selected one Alt-K: where K is the mnemonic (underlined) character of any tab, will select that tab. Multiple notebooks in a single toplevel may be enabled for traversal, including nested notebooks. However, notebook traversal only works properly if all panes are direct children of the notebook.""" # The only, and good, difference I see is about mnemonics, which works # after calling this method. Control-Tab and Shift-Control-Tab always # works (here at least). 
self.tk.call("ttk::notebook::enableTraversal", self._w) class Panedwindow(Widget, tkinter.PanedWindow): """Ttk Panedwindow widget displays a number of subwindows, stacked either vertically or horizontally.""" def __init__(self, master=None, **kw): """Construct a Ttk Panedwindow with parent master. STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS orient, width, height PANE OPTIONS weight """ Widget.__init__(self, master, "ttk::panedwindow", kw) forget = tkinter.PanedWindow.forget # overrides Pack.forget def insert(self, pos, child, **kw): """Inserts a pane at the specified positions. pos is either the string end, and integer index, or the name of a child. If child is already managed by the paned window, moves it to the specified position.""" self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw))) def pane(self, pane, option=None, **kw): """Query or modify the options of the specified pane. pane is either an integer index or the name of a managed subwindow. If kw is not given, returns a dict of the pane option values. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "pane", pane) def sashpos(self, index, newpos=None): """If newpos is specified, sets the position of sash number index. May adjust the positions of adjacent sashes to ensure that positions are monotonically increasing. Sash positions are further constrained to be between 0 and the total size of the widget. Returns the new position of sash number index.""" return self.tk.call(self._w, "sashpos", index, newpos) PanedWindow = Panedwindow # tkinter name compatibility class Progressbar(Widget): """Ttk Progressbar widget shows the status of a long-running operation. 
They can operate in two modes: determinate mode shows the amount completed relative to the total amount of work to be done, and indeterminate mode provides an animated display to let the user know that something is happening.""" def __init__(self, master=None, **kw): """Construct a Ttk Progressbar with parent master. STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS orient, length, mode, maximum, value, variable, phase """ Widget.__init__(self, master, "ttk::progressbar", kw) def start(self, interval=None): """Begin autoincrement mode: schedules a recurring timer event that calls method step every interval milliseconds. interval defaults to 50 milliseconds (20 steps/second) if ommited.""" self.tk.call(self._w, "start", interval) def step(self, amount=None): """Increments the value option by amount. amount defaults to 1.0 if omitted.""" self.tk.call(self._w, "step", amount) def stop(self): """Stop autoincrement mode: cancels any recurring timer event initiated by start.""" self.tk.call(self._w, "stop") class Radiobutton(Widget): """Ttk Radiobutton widgets are used in groups to show or change a set of mutually-exclusive options.""" def __init__(self, master=None, **kw): """Construct a Ttk Radiobutton with parent master. STANDARD OPTIONS class, compound, cursor, image, state, style, takefocus, text, textvariable, underline, width WIDGET-SPECIFIC OPTIONS command, value, variable """ Widget.__init__(self, master, "ttk::radiobutton", kw) def invoke(self): """Sets the option variable to the option value, selects the widget, and invokes the associated command. Returns the result of the command, or an empty string if no command is specified.""" return self.tk.call(self._w, "invoke") class Scale(Widget, tkinter.Scale): """Ttk Scale widget is typically used to control the numeric value of a linked variable that varies uniformly over some range.""" def __init__(self, master=None, **kw): """Construct a Ttk Scale with parent master. 
STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS command, from, length, orient, to, value, variable """ Widget.__init__(self, master, "ttk::scale", kw) def configure(self, cnf=None, **kw): """Modify or query scale options. Setting a value for any of the "from", "from_" or "to" options generates a <<RangeChanged>> event.""" if cnf: kw.update(cnf) Widget.configure(self, **kw) if any(['from' in kw, 'from_' in kw, 'to' in kw]): self.event_generate('<<RangeChanged>>') def get(self, x=None, y=None): """Get the current value of the value option, or the value corresponding to the coordinates x, y if they are specified. x and y are pixel coordinates relative to the scale widget origin.""" return self.tk.call(self._w, 'get', x, y) class Scrollbar(Widget, tkinter.Scrollbar): """Ttk Scrollbar controls the viewport of a scrollable widget.""" def __init__(self, master=None, **kw): """Construct a Ttk Scrollbar with parent master. STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS command, orient """ Widget.__init__(self, master, "ttk::scrollbar", kw) class Separator(Widget): """Ttk Separator widget displays a horizontal or vertical separator bar.""" def __init__(self, master=None, **kw): """Construct a Ttk Separator with parent master. STANDARD OPTIONS class, cursor, style, takefocus WIDGET-SPECIFIC OPTIONS orient """ Widget.__init__(self, master, "ttk::separator", kw) class Sizegrip(Widget): """Ttk Sizegrip allows the user to resize the containing toplevel window by pressing and dragging the grip.""" def __init__(self, master=None, **kw): """Construct a Ttk Sizegrip with parent master. STANDARD OPTIONS class, cursor, state, style, takefocus """ Widget.__init__(self, master, "ttk::sizegrip", kw) class Treeview(Widget, tkinter.XView, tkinter.YView): """Ttk Treeview widget displays a hierarchical collection of items. Each item has a textual label, an optional image, and an optional list of data values. 
The data values are displayed in successive columns after the tree label.""" def __init__(self, master=None, **kw): """Construct a Ttk Treeview with parent master. STANDARD OPTIONS class, cursor, style, takefocus, xscrollcommand, yscrollcommand WIDGET-SPECIFIC OPTIONS columns, displaycolumns, height, padding, selectmode, show ITEM OPTIONS text, image, values, open, tags TAG OPTIONS foreground, background, font, image """ Widget.__init__(self, master, "ttk::treeview", kw) def bbox(self, item, column=None): """Returns the bounding box (relative to the treeview widget's window) of the specified item in the form x y width height. If column is specified, returns the bounding box of that cell. If the item is not visible (i.e., if it is a descendant of a closed item or is scrolled offscreen), returns an empty string.""" return self.tk.call(self._w, "bbox", item, column) def get_children(self, item=None): """Returns a tuple of children belonging to item. If item is not specified, returns root children.""" return self.tk.call(self._w, "children", item or '') or () def set_children(self, item, *newchildren): """Replaces item's child with newchildren. Children present in item that are not present in newchildren are detached from tree. No items in newchildren may be an ancestor of item.""" self.tk.call(self._w, "children", item, newchildren) def column(self, column, option=None, **kw): """Query or modify the options for the specified column. If kw is not given, returns a dict of the column option values. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "column", column) def delete(self, *items): """Delete all specified items and all their descendants. The root item may not be deleted.""" self.tk.call(self._w, "delete", items) def detach(self, *items): """Unlinks all of the specified items from the tree. 
The items and all of their descendants are still present, and may be reinserted at another point in the tree, but will not be displayed. The root item may not be detached.""" self.tk.call(self._w, "detach", items) def exists(self, item): """Returns True if the specified item is present in the three, False otherwise.""" return bool(self.tk.call(self._w, "exists", item)) def focus(self, item=None): """If item is specified, sets the focus item to item. Otherwise, returns the current focus item, or '' if there is none.""" return self.tk.call(self._w, "focus", item) def heading(self, column, option=None, **kw): """Query or modify the heading options for the specified column. If kw is not given, returns a dict of the heading option values. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values. Valid options/values are: text: text The text to display in the column heading image: image_name Specifies an image to display to the right of the column heading anchor: anchor Specifies how the heading text should be aligned. One of the standard Tk anchor values command: callback A callback to be invoked when the heading label is pressed. 
To configure the tree column heading, call this with column = "#0" """ cmd = kw.get('command') if cmd and not isinstance(cmd, str): # callback not registered yet, do it now kw['command'] = self.master.register(cmd, self._substitute) if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, 'heading', column) def identify(self, component, x, y): """Returns a description of the specified component under the point given by x and y, or the empty string if no such component is present at that position.""" return self.tk.call(self._w, "identify", component, x, y) def identify_row(self, y): """Returns the item ID of the item at position y.""" return self.identify("row", 0, y) def identify_column(self, x): """Returns the data column identifier of the cell at position x. The tree column has ID #0.""" return self.identify("column", x, 0) def identify_region(self, x, y): """Returns one of: heading: Tree heading area. separator: Space between two columns headings; tree: The tree area. cell: A data cell. * Availability: Tk 8.6""" return self.identify("region", x, y) def identify_element(self, x, y): """Returns the element at position x, y. * Availability: Tk 8.6""" return self.identify("element", x, y) def index(self, item): """Returns the integer index of item within its parent's list of children.""" return self.tk.call(self._w, "index", item) def insert(self, parent, index, iid=None, **kw): """Creates a new item and return the item identifier of the newly created item. parent is the item ID of the parent item, or the empty string to create a new top-level item. index is an integer, or the value end, specifying where in the list of parent's children to insert the new item. If index is less than or equal to zero, the new node is inserted at the beginning, if index is greater than or equal to the current number of children, it is inserted at the end. If iid is specified, it is used as the item identifier, iid must not already exist in the tree. 
Otherwise, a new unique identifier is generated.""" opts = _format_optdict(kw) if iid: res = self.tk.call(self._w, "insert", parent, index, "-id", iid, *opts) else: res = self.tk.call(self._w, "insert", parent, index, *opts) return res def item(self, item, option=None, **kw): """Query or modify the options for the specified item. If no options are given, a dict with options/values for the item is returned. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values as given by kw.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "item", item) def move(self, item, parent, index): """Moves item to position index in parent's list of children. It is illegal to move an item under one of its descendants. If index is less than or equal to zero, item is moved to the beginning, if greater than or equal to the number of children, it is moved to the end. If item was detached it is reattached.""" self.tk.call(self._w, "move", item, parent, index) reattach = move # A sensible method name for reattaching detached items def next(self, item): """Returns the identifier of item's next sibling, or '' if item is the last child of its parent.""" return self.tk.call(self._w, "next", item) def parent(self, item): """Returns the ID of the parent of item, or '' if item is at the top level of the hierarchy.""" return self.tk.call(self._w, "parent", item) def prev(self, item): """Returns the identifier of item's previous sibling, or '' if item is the first child of its parent.""" return self.tk.call(self._w, "prev", item) def see(self, item): """Ensure that item is visible. 
Sets all of item's ancestors open option to True, and scrolls the widget if necessary so that item is within the visible portion of the tree.""" self.tk.call(self._w, "see", item) def selection(self, selop=None, items=None): """If selop is not specified, returns selected items.""" return self.tk.call(self._w, "selection", selop, items) def selection_set(self, items): """items becomes the new selection.""" self.selection("set", items) def selection_add(self, items): """Add items to the selection.""" self.selection("add", items) def selection_remove(self, items): """Remove items from the selection.""" self.selection("remove", items) def selection_toggle(self, items): """Toggle the selection state of each item in items.""" self.selection("toggle", items) def set(self, item, column=None, value=None): """With one argument, returns a dictionary of column/value pairs for the specified item. With two arguments, returns the current value of the specified column. With three arguments, sets the value of given column in given item to the specified value.""" res = self.tk.call(self._w, "set", item, column, value) if column is None and value is None: return _dict_from_tcltuple(res, False) else: return res def tag_bind(self, tagname, sequence=None, callback=None): """Bind a callback for the given event sequence to the tag tagname. When an event is delivered to an item, the callbacks for each of the item's tags option are called.""" self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0) def tag_configure(self, tagname, option=None, **kw): """Query or modify the options for the specified tagname. If kw is not given, returns a dict of the option settings for tagname. If option is specified, returns the value for that option for the specified tagname. 
Otherwise, sets the options to the corresponding values for the given tagname.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "tag", "configure", tagname) def tag_has(self, tagname, item=None): """If item is specified, returns 1 or 0 depending on whether the specified item has the given tagname. Otherwise, returns a list of all items which have the specified tag. * Availability: Tk 8.6""" return self.tk.call(self._w, "tag", "has", tagname, item) # Extensions class LabeledScale(Frame): """A Ttk Scale widget with a Ttk Label widget indicating its current value. The Ttk Scale can be accessed through instance.scale, and Ttk Label can be accessed through instance.label""" def __init__(self, master=None, variable=None, from_=0, to=10, **kw): """Construct an horizontal LabeledScale with parent master, a variable to be associated with the Ttk Scale widget and its range. If variable is not specified, a tkinter.IntVar is created. WIDGET-SPECIFIC OPTIONS compound: 'top' or 'bottom' Specifies how to display the label relative to the scale. Defaults to 'top'. 
""" self._label_top = kw.pop('compound', 'top') == 'top' Frame.__init__(self, master, **kw) self._variable = variable or tkinter.IntVar(master) self._variable.set(from_) self._last_valid = from_ self.label = Label(self) self.scale = Scale(self, variable=self._variable, from_=from_, to=to) self.scale.bind('<<RangeChanged>>', self._adjust) # position scale and label according to the compound option scale_side = 'bottom' if self._label_top else 'top' label_side = 'top' if scale_side == 'bottom' else 'bottom' self.scale.pack(side=scale_side, fill='x') tmp = Label(self).pack(side=label_side) # place holder self.label.place(anchor='n' if label_side == 'top' else 's') # update the label as scale or variable changes self.__tracecb = self._variable.trace_variable('w', self._adjust) self.bind('<Configure>', self._adjust) self.bind('<Map>', self._adjust) def destroy(self): """Destroy this widget and possibly its associated variable.""" try: self._variable.trace_vdelete('w', self.__tracecb) except AttributeError: # widget has been destroyed already pass else: del self._variable Frame.destroy(self) def _adjust(self, *args): """Adjust the label position according to the scale.""" def adjust_label(): self.update_idletasks() # "force" scale redraw x, y = self.scale.coords() if self._label_top: y = self.scale.winfo_y() - self.label.winfo_reqheight() else: y = self.scale.winfo_reqheight() + self.label.winfo_reqheight() self.label.place_configure(x=x, y=y) from_, to = self.scale['from'], self.scale['to'] if to < from_: from_, to = to, from_ newval = self._variable.get() if not from_ <= newval <= to: # value outside range, set value back to the last valid one self.value = self._last_valid return self._last_valid = newval self.label['text'] = newval self.after_idle(adjust_label) def _get_value(self): """Return current scale value.""" return self._variable.get() def _set_value(self, val): """Set new scale value.""" self._variable.set(val) value = property(_get_value, _set_value) class 
OptionMenu(Menubutton): """Themed OptionMenu, based after tkinter's OptionMenu, which allows the user to select a value from a menu.""" def __init__(self, master, variable, default=None, *values, **kwargs): """Construct a themed OptionMenu widget with master as the parent, the resource textvariable set to variable, the initially selected value specified by the default parameter, the menu values given by *values and additional keywords. WIDGET-SPECIFIC OPTIONS style: stylename Menubutton style. direction: 'above', 'below', 'left', 'right', or 'flush' Menubutton direction. command: callback A callback that will be invoked after selecting an item. """ kw = {'textvariable': variable, 'style': kwargs.pop('style', None), 'direction': kwargs.pop('direction', None)} Menubutton.__init__(self, master, **kw) self['menu'] = tkinter.Menu(self, tearoff=False) self._variable = variable self._callback = kwargs.pop('command', None) if kwargs: raise tkinter.TclError('unknown option -%s' % ( next(iter(kwargs.keys())))) self.set_menu(default, *values) def __getitem__(self, item): if item == 'menu': return self.nametowidget(Menubutton.__getitem__(self, item)) return Menubutton.__getitem__(self, item) def set_menu(self, default=None, *values): """Build a new menu of radiobuttons with *values and optionally a default value.""" menu = self['menu'] menu.delete(0, 'end') for val in values: menu.add_radiobutton(label=val, command=tkinter._setit(self._variable, val, self._callback)) if default: self._variable.set(default) def destroy(self): """Destroy this widget and its associated variable.""" del self._variable Menubutton.destroy(self)
apache-2.0
liikGit/MissionPlanner
Lib/htmllib.py
67
13360
"""HTML 2.0 parser. See the HTML 2.0 specification: http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html """ from warnings import warnpy3k warnpy3k("the htmllib module has been removed in Python 3.0", stacklevel=2) del warnpy3k import sgmllib from formatter import AS_IS __all__ = ["HTMLParser", "HTMLParseError"] class HTMLParseError(sgmllib.SGMLParseError): """Error raised when an HTML document can't be parsed.""" class HTMLParser(sgmllib.SGMLParser): """This is the basic HTML parser class. It supports all entity names required by the XHTML 1.0 Recommendation. It also defines handlers for all HTML 2.0 and many HTML 3.0 and 3.2 elements. """ from htmlentitydefs import entitydefs def __init__(self, formatter, verbose=0): """Creates an instance of the HTMLParser class. The formatter parameter is the formatter instance associated with the parser. """ sgmllib.SGMLParser.__init__(self, verbose) self.formatter = formatter def error(self, message): raise HTMLParseError(message) def reset(self): sgmllib.SGMLParser.reset(self) self.savedata = None self.isindex = 0 self.title = None self.base = None self.anchor = None self.anchorlist = [] self.nofill = 0 self.list_stack = [] # ------ Methods used internally; some may be overridden # --- Formatter interface, taking care of 'savedata' mode; # shouldn't need to be overridden def handle_data(self, data): if self.savedata is not None: self.savedata = self.savedata + data else: if self.nofill: self.formatter.add_literal_data(data) else: self.formatter.add_flowing_data(data) # --- Hooks to save data; shouldn't need to be overridden def save_bgn(self): """Begins saving character data in a buffer instead of sending it to the formatter object. Retrieve the stored data via the save_end() method. Use of the save_bgn() / save_end() pair may not be nested. """ self.savedata = '' def save_end(self): """Ends buffering character data and returns all data saved since the preceding call to the save_bgn() method. 
If the nofill flag is false, whitespace is collapsed to single spaces. A call to this method without a preceding call to the save_bgn() method will raise a TypeError exception. """ data = self.savedata self.savedata = None if not self.nofill: data = ' '.join(data.split()) return data # --- Hooks for anchors; should probably be overridden def anchor_bgn(self, href, name, type): """This method is called at the start of an anchor region. The arguments correspond to the attributes of the <A> tag with the same names. The default implementation maintains a list of hyperlinks (defined by the HREF attribute for <A> tags) within the document. The list of hyperlinks is available as the data attribute anchorlist. """ self.anchor = href if self.anchor: self.anchorlist.append(href) def anchor_end(self): """This method is called at the end of an anchor region. The default implementation adds a textual footnote marker using an index into the list of hyperlinks created by the anchor_bgn()method. """ if self.anchor: self.handle_data("[%d]" % len(self.anchorlist)) self.anchor = None # --- Hook for images; should probably be overridden def handle_image(self, src, alt, *args): """This method is called to handle images. The default implementation simply passes the alt value to the handle_data() method. 
""" self.handle_data(alt) # --------- Top level elememts def start_html(self, attrs): pass def end_html(self): pass def start_head(self, attrs): pass def end_head(self): pass def start_body(self, attrs): pass def end_body(self): pass # ------ Head elements def start_title(self, attrs): self.save_bgn() def end_title(self): self.title = self.save_end() def do_base(self, attrs): for a, v in attrs: if a == 'href': self.base = v def do_isindex(self, attrs): self.isindex = 1 def do_link(self, attrs): pass def do_meta(self, attrs): pass def do_nextid(self, attrs): # Deprecated pass # ------ Body elements # --- Headings def start_h1(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h1', 0, 1, 0)) def end_h1(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h2(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h2', 0, 1, 0)) def end_h2(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h3(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h3', 0, 1, 0)) def end_h3(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h4(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h4', 0, 1, 0)) def end_h4(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h5(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h5', 0, 1, 0)) def end_h5(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h6(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h6', 0, 1, 0)) def end_h6(self): self.formatter.end_paragraph(1) self.formatter.pop_font() # --- Block Structuring Elements def do_p(self, attrs): self.formatter.end_paragraph(1) def start_pre(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1)) self.nofill = self.nofill + 1 def end_pre(self): self.formatter.end_paragraph(1) self.formatter.pop_font() self.nofill 
= max(0, self.nofill - 1) def start_xmp(self, attrs): self.start_pre(attrs) self.setliteral('xmp') # Tell SGML parser def end_xmp(self): self.end_pre() def start_listing(self, attrs): self.start_pre(attrs) self.setliteral('listing') # Tell SGML parser def end_listing(self): self.end_pre() def start_address(self, attrs): self.formatter.end_paragraph(0) self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS)) def end_address(self): self.formatter.end_paragraph(0) self.formatter.pop_font() def start_blockquote(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_margin('blockquote') def end_blockquote(self): self.formatter.end_paragraph(1) self.formatter.pop_margin() # --- List Elements def start_ul(self, attrs): self.formatter.end_paragraph(not self.list_stack) self.formatter.push_margin('ul') self.list_stack.append(['ul', '*', 0]) def end_ul(self): if self.list_stack: del self.list_stack[-1] self.formatter.end_paragraph(not self.list_stack) self.formatter.pop_margin() def do_li(self, attrs): self.formatter.end_paragraph(0) if self.list_stack: [dummy, label, counter] = top = self.list_stack[-1] top[2] = counter = counter+1 else: label, counter = '*', 0 self.formatter.add_label_data(label, counter) def start_ol(self, attrs): self.formatter.end_paragraph(not self.list_stack) self.formatter.push_margin('ol') label = '1.' for a, v in attrs: if a == 'type': if len(v) == 1: v = v + '.' 
label = v self.list_stack.append(['ol', label, 0]) def end_ol(self): if self.list_stack: del self.list_stack[-1] self.formatter.end_paragraph(not self.list_stack) self.formatter.pop_margin() def start_menu(self, attrs): self.start_ul(attrs) def end_menu(self): self.end_ul() def start_dir(self, attrs): self.start_ul(attrs) def end_dir(self): self.end_ul() def start_dl(self, attrs): self.formatter.end_paragraph(1) self.list_stack.append(['dl', '', 0]) def end_dl(self): self.ddpop(1) if self.list_stack: del self.list_stack[-1] def do_dt(self, attrs): self.ddpop() def do_dd(self, attrs): self.ddpop() self.formatter.push_margin('dd') self.list_stack.append(['dd', '', 0]) def ddpop(self, bl=0): self.formatter.end_paragraph(bl) if self.list_stack: if self.list_stack[-1][0] == 'dd': del self.list_stack[-1] self.formatter.pop_margin() # --- Phrase Markup # Idiomatic Elements def start_cite(self, attrs): self.start_i(attrs) def end_cite(self): self.end_i() def start_code(self, attrs): self.start_tt(attrs) def end_code(self): self.end_tt() def start_em(self, attrs): self.start_i(attrs) def end_em(self): self.end_i() def start_kbd(self, attrs): self.start_tt(attrs) def end_kbd(self): self.end_tt() def start_samp(self, attrs): self.start_tt(attrs) def end_samp(self): self.end_tt() def start_strong(self, attrs): self.start_b(attrs) def end_strong(self): self.end_b() def start_var(self, attrs): self.start_i(attrs) def end_var(self): self.end_i() # Typographic Elements def start_i(self, attrs): self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS)) def end_i(self): self.formatter.pop_font() def start_b(self, attrs): self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS)) def end_b(self): self.formatter.pop_font() def start_tt(self, attrs): self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1)) def end_tt(self): self.formatter.pop_font() def start_a(self, attrs): href = '' name = '' type = '' for attrname, value in attrs: value = value.strip() if attrname == 'href': href = value if attrname == 
'name': name = value if attrname == 'type': type = value.lower() self.anchor_bgn(href, name, type) def end_a(self): self.anchor_end() # --- Line Break def do_br(self, attrs): self.formatter.add_line_break() # --- Horizontal Rule def do_hr(self, attrs): self.formatter.add_hor_rule() # --- Image def do_img(self, attrs): align = '' alt = '(image)' ismap = '' src = '' width = 0 height = 0 for attrname, value in attrs: if attrname == 'align': align = value if attrname == 'alt': alt = value if attrname == 'ismap': ismap = value if attrname == 'src': src = value if attrname == 'width': try: width = int(value) except ValueError: pass if attrname == 'height': try: height = int(value) except ValueError: pass self.handle_image(src, alt, ismap, align, width, height) # --- Really Old Unofficial Deprecated Stuff def do_plaintext(self, attrs): self.start_pre(attrs) self.setnomoretags() # Tell SGML parser # --- Unhandled tags def unknown_starttag(self, tag, attrs): pass def unknown_endtag(self, tag): pass def test(args = None): import sys, formatter if not args: args = sys.argv[1:] silent = args and args[0] == '-s' if silent: del args[0] if args: file = args[0] else: file = 'test.html' if file == '-': f = sys.stdin else: try: f = open(file, 'r') except IOError, msg: print file, ":", msg sys.exit(1) data = f.read() if f is not sys.stdin: f.close() if silent: f = formatter.NullFormatter() else: f = formatter.AbstractFormatter(formatter.DumbWriter()) p = HTMLParser(f) p.feed(data) p.close() if __name__ == '__main__': test()
gpl-3.0
becxer/pytrain
pytrain/LinearRegression/LinearRegression.py
1
2702
#
# Linear Regression
#
# @ author becxer
# @ e-mail becxer87@gmail.com
#
import numpy as np
from pytrain.lib import convert
import math
import time
import random
import sys


class LinearRegression:
    """Multi-output linear regression trained by mini-batch gradient descent.

    One weight row (plus one bias) is kept per output dimension; each is
    updated independently with a plain least-squares gradient step.
    """

    def __init__(self, mat_data, label_data):
        # Convert training samples and labels to float ndarrays.
        self.mat_data = convert.list2npfloat(mat_data)
        self.label_data = convert.list2npfloat(label_data)
        # Number of output dimensions (width of one label row).
        self.out_bit = len(label_data[0])
        n_features = len(mat_data[0])
        # Tiny positive random weights; epsilon keeps them strictly nonzero.
        self.mat_w = np.array(
            [[random.random() * 0.00001 + sys.float_info.epsilon
              for _ in range(n_features)]
             for _ in range(self.out_bit)],
            dtype=np.float64)
        self.mat_w0 = np.array(
            [random.random() * 0.00001 + sys.float_info.epsilon
             for _ in range(self.out_bit)],
            dtype=np.float64)

    #
    # Gradient derivation (least squares):
    #
    #   k    = w0 + w1 x1 + ... + wn xn          (model output per sample)
    #   J(k) = sum over samples of (k - y)^2
    #
    #   dJ/dwi averages (k - y) * xi over the batch,
    #   dJ/dw0 averages (k - y) over the batch,
    # and each parameter moves against its gradient scaled by the
    # learning rate.
    #
    def batch_update_w(self, out_bit_index, data, label):
        """Apply one gradient step for a single output dimension.

        out_bit_index -- which output dimension's weights to update
        data          -- batch of input rows (2-D ndarray)
        label         -- batch of label rows (2-D ndarray)
        """
        weights = self.mat_w[out_bit_index]
        bias = self.mat_w0[out_bit_index]
        # Model prediction for every row of the batch.
        predicted = (weights * data).sum(axis=1) + bias
        residual = predicted - label.T[out_bit_index]
        batch_len = len(data)
        # Batch-averaged gradients for weights and bias.
        grad_w = (residual * data.T).sum(axis=1) / batch_len
        grad_w0 = residual.sum(axis=0) / batch_len
        self.mat_w[out_bit_index] = weights - grad_w * self.lr
        self.mat_w0[out_bit_index] = bias - grad_w0 * self.lr

    def fit(self, lr, epoch, batch_size):
        """Train for `epoch` passes over the data in batches of `batch_size`.

        lr -- learning-rate multiplier applied to every gradient step
        """
        self.lr = lr
        self.epoch = epoch
        datalen = len(self.mat_data)
        for _ in range(epoch):
            # Walk the data in consecutive batches; the final batch may
            # be short (slicing past the end is harmless).
            for start in range(0, datalen, batch_size):
                stop = start + batch_size
                batch = self.mat_data[start:stop]
                batch_labels = self.label_data[start:stop]
                for dim in range(self.out_bit):
                    self.batch_update_w(dim, batch, batch_labels)

    def predict(self, array_input):
        """Return the model output (one value per output dimension)."""
        array_input = convert.list2npfloat(array_input)
        return (array_input * self.mat_w).sum(axis=1) + self.mat_w0
mit
esi-mineset/spark
examples/src/main/python/mllib/stratified_sampling_example.py
128
1368
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function from pyspark import SparkContext if __name__ == "__main__": sc = SparkContext(appName="StratifiedSamplingExample") # SparkContext # $example on$ # an RDD of any key value pairs data = sc.parallelize([(1, 'a'), (1, 'b'), (2, 'c'), (2, 'd'), (2, 'e'), (3, 'f')]) # specify the exact fraction desired from each key as a dictionary fractions = {1: 0.1, 2: 0.6, 3: 0.3} approxSample = data.sampleByKey(False, fractions) # $example off$ for each in approxSample.collect(): print(each) sc.stop()
apache-2.0
xiaolonginfo/decode-Django
Django-1.5.1/django/contrib/gis/geoip/prototypes.py
200
3730
from ctypes import c_char_p, c_float, c_int, string_at, Structure, POINTER from django.contrib.gis.geoip.libgeoip import lgeoip, free #### GeoIP C Structure definitions #### class GeoIPRecord(Structure): _fields_ = [('country_code', c_char_p), ('country_code3', c_char_p), ('country_name', c_char_p), ('region', c_char_p), ('city', c_char_p), ('postal_code', c_char_p), ('latitude', c_float), ('longitude', c_float), # TODO: In 1.4.6 this changed from `int dma_code;` to # `union {int metro_code; int dma_code;};`. Change # to a `ctypes.Union` in to accomodate in future when # pre-1.4.6 versions are no longer distributed. ('dma_code', c_int), ('area_code', c_int), ('charset', c_int), ('continent_code', c_char_p), ] geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p] geoip_encodings = { 0: 'iso-8859-1', 1: 'utf8', } class GeoIPTag(Structure): pass RECTYPE = POINTER(GeoIPRecord) DBTYPE = POINTER(GeoIPTag) #### ctypes function prototypes #### # GeoIP_lib_version appeared in version 1.4.7. if hasattr(lgeoip, 'GeoIP_lib_version'): GeoIP_lib_version = lgeoip.GeoIP_lib_version GeoIP_lib_version.argtypes = None GeoIP_lib_version.restype = c_char_p else: GeoIP_lib_version = None # For freeing memory allocated within a record GeoIPRecord_delete = lgeoip.GeoIPRecord_delete GeoIPRecord_delete.argtypes = [RECTYPE] GeoIPRecord_delete.restype = None # For retrieving records by name or address. def check_record(result, func, cargs): if bool(result): # Checking the pointer to the C structure, if valid pull out elements # into a dicionary. rec = result.contents record = dict((fld, getattr(rec, fld)) for fld, ctype in rec._fields_) # Now converting the strings to unicode using the proper encoding. encoding = geoip_encodings[record['charset']] for char_field in geoip_char_fields: if record[char_field]: record[char_field] = record[char_field].decode(encoding) # Free the memory allocated for the struct & return. 
GeoIPRecord_delete(result) return record else: return None def record_output(func): func.argtypes = [DBTYPE, c_char_p] func.restype = RECTYPE func.errcheck = check_record return func GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr) GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name) # For opening & closing GeoIP database files. GeoIP_open = lgeoip.GeoIP_open GeoIP_open.restype = DBTYPE GeoIP_delete = lgeoip.GeoIP_delete GeoIP_delete.argtypes = [DBTYPE] GeoIP_delete.restype = None # This is so the string pointer can be freed within Python. class geoip_char_p(c_char_p): pass def check_string(result, func, cargs): if result: s = string_at(result) free(result) else: s = '' return s GeoIP_database_info = lgeoip.GeoIP_database_info GeoIP_database_info.restype = geoip_char_p GeoIP_database_info.errcheck = check_string # String output routines. def string_output(func): func.restype = c_char_p return func GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr) GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name) GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr) GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
gpl-2.0
stefanseefeld/numba
numba/numba_entry.py
6
1980
from __future__ import print_function, division, absolute_import import sys import argparse import os import subprocess def make_parser(): parser = argparse.ArgumentParser() parser.add_argument('--annotate', help='Annotate source', action='store_true') parser.add_argument('--dump-llvm', action="store_true", help='Print generated llvm assembly') parser.add_argument('--dump-optimized', action='store_true', help='Dump the optimized llvm assembly') parser.add_argument('--dump-assembly', action='store_true', help='Dump the LLVM generated assembly') parser.add_argument('--dump-cfg', action="store_true", help='[Deprecated] Dump the control flow graph') parser.add_argument('--dump-ast', action="store_true", help='[Deprecated] Dump the AST') parser.add_argument('--annotate-html', nargs=1, help='Output source annotation as html') parser.add_argument('filename', help='Python source filename') return parser def main(): parser = make_parser() args = parser.parse_args() if args.dump_cfg: print("CFG dump is removed.") sys.exit(1) if args.dump_ast: print("AST dump is removed. Numba no longer depends on AST.") sys.exit(1) os.environ['NUMBA_DUMP_ANNOTATION'] = str(int(args.annotate)) if args.annotate_html is not None: try: from jinja2 import Template except ImportError: raise ImportError("Please install the 'jinja2' package") os.environ['NUMBA_DUMP_HTML'] = str(args.annotate_html[0]) os.environ['NUMBA_DUMP_LLVM'] = str(int(args.dump_llvm)) os.environ['NUMBA_DUMP_OPTIMIZED'] = str(int(args.dump_optimized)) os.environ['NUMBA_DUMP_ASSEMBLY'] = str(int(args.dump_assembly)) cmd = [sys.executable, args.filename] subprocess.call(cmd)
bsd-2-clause
jakubtuchol/epi
src/stacks_queues.py
1
6608
""" Chapter 9: Stacks and Queues """ from collections import deque class MaxStack(object): """ Question 9.1 """ def __init__(self): self.contents = [] self.max_at = [0] def push(self, elt): cur_max = self.max_at[-1] if cur_max >= elt: self.max_at.append(cur_max) else: self.max_at.append(elt) self.contents.append(elt) def pop(self): self.max_at.pop() return self.contents.pop() def get_max(self): return self.max_at[-1] def evaluate_rpn(ls): """ Question 9.2 """ tokens = [] for token in ls: if token in '+-/*': tk_1 = tokens.pop() tk_2 = tokens.pop() if token == '+': tokens.append(tk_1 + tk_2) elif token == '-': tokens.append(tk_1 - tk_2) elif token == '*': tokens.append(tk_1 * tk_2) elif token == '/': tokens.append(tk_1 / tk_2) else: tokens.append(int(token)) return tokens.pop() def balanced_parentheses(elts): """ Question 9.3: Check whether a string consisting of '{,},[,],(,)' is properly balanced """ stack = [] open_elts = {'{', '[', '('} matches = { '}': '{', ']': '[', ')': '(', } for elt in elts: if elt in open_elts: stack.append(elt) if elt in matches: if not stack: return False if matches[elt] != stack.pop(): return False if len(stack): return False return True def shortest_equivalent_path(path): """ Question 9.4: Normalize relative pathnames """ elts = path.split('/') realpath = [] for elt in elts: if not len(elt) or elt == '.': continue elif elt == '..': realpath.pop() else: realpath.append(elt) full_path = '/'.join(realpath) begin_slash = '/' if path[0] == '/' else '' end_slash = '/' if path[-1] == '/' else '' return '{}{}{}'.format(begin_slash, full_path, end_slash) def bst_sorted_order(root): """ Question 9.5: Given a bst node, compute all keys at that node, and its descendants in sorted order """ stack = [] cur_node = root result = [] while stack or cur_node: if cur_node: stack.append(cur_node) cur_node = cur_node.left else: cur_node = stack.pop() result.append(cur_node.val) cur_node = cur_node.right return result class PostingListNode(object): def 
__init__(self, val): self.val = val self.next = None self.jump = None self.order = -1 def set_jump_order(head): """ Question 9.6: Set the order of every node when processed in jump first order """ nodes = [head] order = 1 while nodes: cur_node = nodes.pop() cur_node.order = order order += 1 if cur_node != cur_node.jump: nodes.append(cur_node.jump) def get_buildings_with_sunset_view(buildings): """ Question 9.7: Design an algorithm that processes buildings in east-to-west order and returns the set of buildings which view the sunset. Each building is specified by its height. """ building_stack = [] # when we get new building, we pop buildings # that are shorter than this building off # the stack for building in buildings: while len(building_stack) and building_stack[-1] <= building: building_stack.pop() building_stack.append(building) return building_stack def depth_order(root): """ Question 9.9: return depth order representation of a binary tree """ traversal = [] queue = [] if root: queue.insert(0, (root, 0)) while queue: (elt, depth) = queue.pop() if len(traversal) == depth: traversal.append([elt.val]) else: traversal[depth].append(elt.val) if elt.left: queue.insert(0, (elt.left, depth + 1)) if elt.right: queue.insert(0, (elt.right, depth + 1)) return traversal class CircularQueue(object): """ Question 9.10: Implement a circular queue """ def __init__(self, capacity): self.capacity = capacity self.size = 0 self.head = 0 self.tail = 0 self.contents = [None] * capacity def enqueue(self, value): if self.size == self.capacity: raise Exception('queue is currently at capacity') self.contents[self.tail] = value self.tail = (self.tail + 1) % self.capacity self.size += 1 def dequeue(self): if not self.size: return None elt = self.contents[self.head] self.size -= 1 self.head = (self.head + 1) % self.capacity return elt class QueueUsingStacks(object): """ Question 9.11: Implement a queue using stacks """ def __init__(self): self.to_enqueue = [] self.to_dequeue = [] def 
enqueue(self, val): self.to_enqueue.append(val) def dequeue(self): if not len(self.to_dequeue): # transfer elements in dequeue to enqueue while len(self.to_enqueue): self.to_dequeue.append(self.to_enqueue.pop()) if not len(self.to_dequeue): raise Exception('No more elements in queue') return self.to_dequeue.pop() class MaxQueue(object): """ Question 9.12: Implement a queue with a max API """ def __init__(self): self._entries = [] self._candidates_for_max = deque() def enqueue(self, x): self._entries.append(x) # eliminate dominated entries in _candidates_for_max while len(self._candidates_for_max): if self._candidates_for_max[-1] >= x: break self._candidates_for_max.pop() self._candidates_for_max.append(x) def deque(self): if len(self._candidates_for_max): result = self._entries[0] if result == self._candidates_for_max[0]: self._candidates_for_max.popleft() self._entries.pop(0) return result raise Exception('empty queue') def max(self): if len(self._candidates_for_max): return self._candidates_for_max[0] raise Exception('empty queue')
mit
sudhamisha/vmw-kube
third_party/htpasswd/htpasswd.py
897
5219
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2013 Edgewall Software
# Copyright (C) 2008 Eli Carter
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.

"""Replacement for htpasswd"""

import os
import sys
import time
import random
from optparse import OptionParser

# We need a crypt module, but Windows doesn't have one by default. Try to find
# one, and tell the user if we can't.
try:
    import crypt
except ImportError:
    try:
        import fcrypt as crypt
    except ImportError:
        sys.stderr.write("Cannot find a crypt module. "
                         "Possibly http://carey.geek.nz/code/python-fcrypt/\n")
        sys.exit(1)


def wait_for_file_mtime_change(filename):
    """Wait until the modification time of `filename` changes.

    This function is typically called before a file save operation,
    waiting if necessary for the file modification time to change. The
    purpose is to avoid successive file updates going undetected by the
    caching mechanism that depends on a change in the file modification
    time to know when the file should be reparsed.

    A missing file is silently ignored.
    """
    try:
        mtime = os.stat(filename).st_mtime
        os.utime(filename, None)
        while mtime == os.stat(filename).st_mtime:
            # Fix: ``time`` was not imported before, so entering this
            # loop raised NameError instead of sleeping.
            time.sleep(1e-3)
            os.utime(filename, None)
    except OSError:
        pass  # file doesn't exist (yet)


def salt():
    """Return a string of 2 random characters usable as a crypt() salt."""
    letters = 'abcdefghijklmnopqrstuvwxyz' \
              'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
              '0123456789/.'
    return random.choice(letters) + random.choice(letters)


class HtpasswdFile:
    """A class for manipulating htpasswd files.

    Entries are kept in memory as ``[username, pwhash]`` pairs in file
    order.
    """

    def __init__(self, filename, create=False):
        """Load `filename` unless `create` is true.

        Raises an Exception when the file does not exist and `create`
        is false.
        """
        self.entries = []
        self.filename = filename
        if not create:
            if os.path.exists(self.filename):
                self.load()
            else:
                raise Exception("%s does not exist" % self.filename)

    def load(self):
        """Read the htpasswd file into memory."""
        # Use a context manager so the handle is closed deterministically
        # (the previous version leaked it until garbage collection).
        with open(self.filename, 'r') as fileobj:
            lines = fileobj.readlines()
        self.entries = []
        for line in lines:
            username, pwhash = line.split(':')
            entry = [username, pwhash.rstrip()]
            self.entries.append(entry)

    def save(self):
        """Write the htpasswd file to disk"""
        wait_for_file_mtime_change(self.filename)
        with open(self.filename, 'w') as fileobj:
            fileobj.writelines(["%s:%s\n" % (entry[0], entry[1])
                                for entry in self.entries])

    def update(self, username, password):
        """Replace the entry for the given user, or add it if new."""
        pwhash = crypt.crypt(password, salt())
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = pwhash
        else:
            self.entries.append([username, pwhash])

    def delete(self, username):
        """Remove the entry for the given user."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]


def main():
    """
    %prog -b[c] filename username password
    %prog -D filename username"""
    # For now, we only care about the use cases that affect tests/functional.py
    parser = OptionParser(usage=main.__doc__)
    parser.add_option('-b', action='store_true', dest='batch', default=False,
                      help='Batch mode; password is passed on the command line IN THE CLEAR.')
    parser.add_option('-c', action='store_true', dest='create', default=False,
                      help='Create a new htpasswd file, overwriting any existing file.')
    parser.add_option('-D', action='store_true', dest='delete_user',
                      default=False,
                      help='Remove the given user from the password file.')
    options, args = parser.parse_args()

    def syntax_error(msg):
        """Utility function for displaying fatal error messages with usage
        help.
        """
        sys.stderr.write("Syntax error: " + msg)
        sys.stderr.write(parser.get_usage())
        sys.exit(1)

    if not (options.batch or options.delete_user):
        syntax_error("Only batch and delete modes are supported\n")

    # Non-option arguments
    if len(args) < 2:
        syntax_error("Insufficient number of arguments.\n")
    filename, username = args[:2]
    if options.delete_user:
        if len(args) != 2:
            syntax_error("Incorrect number of arguments.\n")
        password = None
    else:
        if len(args) != 3:
            syntax_error("Incorrect number of arguments.\n")
        password = args[2]

    passwdfile = HtpasswdFile(filename, create=options.create)

    if options.delete_user:
        passwdfile.delete(username)
    else:
        passwdfile.update(username, password)

    passwdfile.save()


if __name__ == '__main__':
    main()
apache-2.0
balloob/home-assistant
tests/components/blebox/conftest.py
5
3473
"""PyTest fixtures and test helpers.""" from unittest import mock import blebox_uniapi import pytest from homeassistant.components.blebox.const import DOMAIN from homeassistant.const import CONF_HOST, CONF_PORT from homeassistant.setup import async_setup_component from tests.async_mock import AsyncMock, PropertyMock, patch from tests.common import MockConfigEntry from tests.components.light.conftest import mock_light_profiles # noqa def patch_product_identify(path=None, **kwargs): """Patch the blebox_uniapi Products class.""" if path is None: path = "homeassistant.components.blebox.Products" patcher = patch(path, mock.DEFAULT, blebox_uniapi.products.Products, True, True) products_class = patcher.start() products_class.async_from_host = AsyncMock(**kwargs) return products_class def setup_product_mock(category, feature_mocks, path=None): """Mock a product returning the given features.""" product_mock = mock.create_autospec( blebox_uniapi.box.Box, True, True, features=None ) type(product_mock).features = PropertyMock(return_value={category: feature_mocks}) for feature in feature_mocks: type(feature).product = PropertyMock(return_value=product_mock) patch_product_identify(path, return_value=product_mock) return product_mock def mock_only_feature(spec, **kwargs): """Mock just the feature, without the product setup.""" return mock.create_autospec(spec, True, True, **kwargs) def mock_feature(category, spec, **kwargs): """Mock a feature along with whole product setup.""" feature_mock = mock_only_feature(spec, **kwargs) feature_mock.async_update = AsyncMock() product = setup_product_mock(category, [feature_mock]) type(feature_mock.product).name = PropertyMock(return_value="Some name") type(feature_mock.product).type = PropertyMock(return_value="some type") type(feature_mock.product).model = PropertyMock(return_value="some model") type(feature_mock.product).brand = PropertyMock(return_value="BleBox") type(feature_mock.product).firmware_version = 
PropertyMock(return_value="1.23") type(feature_mock.product).unique_id = PropertyMock(return_value="abcd0123ef5678") type(feature_mock).product = PropertyMock(return_value=product) return feature_mock def mock_config(ip_address="172.100.123.4"): """Return a Mock of the HA entity config.""" return MockConfigEntry(domain=DOMAIN, data={CONF_HOST: ip_address, CONF_PORT: 80}) @pytest.fixture(name="config") def config_fixture(): """Create hass config fixture.""" return {DOMAIN: {CONF_HOST: "172.100.123.4", CONF_PORT: 80}} @pytest.fixture(name="feature") def feature_fixture(request): """Return an entity wrapper from given fixture name.""" return request.getfixturevalue(request.param) async def async_setup_entities(hass, config, entity_ids): """Return configured entries with the given entity ids.""" config_entry = mock_config() config_entry.add_to_hass(hass) assert await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() entity_registry = await hass.helpers.entity_registry.async_get_registry() return [entity_registry.async_get(entity_id) for entity_id in entity_ids] async def async_setup_entity(hass, config, entity_id): """Return a configured entry with the given entity_id.""" return (await async_setup_entities(hass, config, [entity_id]))[0]
apache-2.0
liorvh/infernal-twin
build/pip/build/lib.linux-i686-2.7/pip/_vendor/colorama/ansi.py
442
2304
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. ''' This module generates ANSI character codes to printing colors to terminals. See: http://en.wikipedia.org/wiki/ANSI_escape_code ''' CSI = '\033[' OSC = '\033]' BEL = '\007' def code_to_chars(code): return CSI + str(code) + 'm' class AnsiCodes(object): def __init__(self, codes): for name in dir(codes): if not name.startswith('_'): value = getattr(codes, name) setattr(self, name, code_to_chars(value)) class AnsiCursor(object): def UP(self, n=1): return CSI + str(n) + "A" def DOWN(self, n=1): return CSI + str(n) + "B" def FORWARD(self, n=1): return CSI + str(n) + "C" def BACK(self, n=1): return CSI + str(n) + "D" def POS(self, x=1, y=1): return CSI + str(y) + ";" + str(x) + "H" def set_title(title): return OSC + "2;" + title + BEL def clear_screen(mode=2): return CSI + str(mode) + "J" def clear_line(mode=2): return CSI + str(mode) + "K" class AnsiFore: BLACK = 30 RED = 31 GREEN = 32 YELLOW = 33 BLUE = 34 MAGENTA = 35 CYAN = 36 WHITE = 37 RESET = 39 # These are fairly well supported, but not part of the standard. LIGHTBLACK_EX = 90 LIGHTRED_EX = 91 LIGHTGREEN_EX = 92 LIGHTYELLOW_EX = 93 LIGHTBLUE_EX = 94 LIGHTMAGENTA_EX = 95 LIGHTCYAN_EX = 96 LIGHTWHITE_EX = 97 class AnsiBack: BLACK = 40 RED = 41 GREEN = 42 YELLOW = 43 BLUE = 44 MAGENTA = 45 CYAN = 46 WHITE = 47 RESET = 49 # These are fairly well supported, but not part of the standard. LIGHTBLACK_EX = 100 LIGHTRED_EX = 101 LIGHTGREEN_EX = 102 LIGHTYELLOW_EX = 103 LIGHTBLUE_EX = 104 LIGHTMAGENTA_EX = 105 LIGHTCYAN_EX = 106 LIGHTWHITE_EX = 107 class AnsiStyle: BRIGHT = 1 DIM = 2 NORMAL = 22 RESET_ALL = 0 Fore = AnsiCodes( AnsiFore ) Back = AnsiCodes( AnsiBack ) Style = AnsiCodes( AnsiStyle ) Cursor = AnsiCursor()
gpl-3.0
odejesush/tensorflow
tensorflow/compiler/tests/function_test.py
27
4129
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test cases for Tensorflow functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests.xla_test import XLATestCase from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.ops import array_ops from tensorflow.python.platform import googletest class FunctionTest(XLATestCase): def testFunction(self): """Executes a simple TensorFlow function.""" def APlus2B(a, b): return a + b * 2 aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32) bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32) expected = APlus2B(aval, bval) with self.test_session() as sess: @function.Defun(dtypes.float32, dtypes.float32) def Foo(a, b): return APlus2B(a, b) a = constant_op.constant(aval, name="a") b = constant_op.constant(bval, name="b") with self.test_scope(): call_f = Foo(a, b) result = sess.run(call_f) self.assertAllClose(result, expected, rtol=1e-3) def testNestedFunctions(self): """Executes two nested TensorFlow functions.""" def TimesTwo(x): return x * 2 def APlus2B(a, b): return a + TimesTwo(b) aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32) bval = 
np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32) expected = APlus2B(aval, bval) with self.test_session() as sess: @function.Defun(dtypes.float32, dtypes.float32) def Foo(a, b): return APlus2B(a, b) a = constant_op.constant(aval, name="a") b = constant_op.constant(bval, name="b") with self.test_scope(): call_g = Foo(a, b) result = sess.run(call_g) self.assertAllClose(result, expected, rtol=1e-3) def testFunctionMultipleRetvals(self): """Executes a function with multiple return values.""" # This function will run on the XLA device def Func(a, b): return a + b, a - b aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32) bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32) expected = Func(aval, bval) with self.test_session() as sess: @function.Defun(dtypes.float32, dtypes.float32) def Foo(a, b): return Func(a, b) a = constant_op.constant(aval, name="a") b = constant_op.constant(bval, name="b") with self.test_scope(): call_f = Foo(a, b) result = sess.run(call_f) self.assertAllClose(result, expected, rtol=1e-3) def testFunctionsNoInline(self): @function.Defun(dtypes.float32, noinline=True) def TimesTwo(x): return x * 2 @function.Defun(dtypes.float32, dtypes.float32) def APlus2B(a, b): return a + TimesTwo(b) aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32) bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32) expected = aval + bval * 2 with self.test_session() as sess: with self.test_scope(): a = array_ops.placeholder(dtypes.float32, name="a") b = array_ops.placeholder(dtypes.float32, name="b") call = APlus2B(a, b) result = sess.run(call, {a: aval, b: bval}) self.assertAllClose(result, expected, rtol=1e-3) if __name__ == "__main__": googletest.main()
apache-2.0
kuri65536/python-for-android
python3-alpha/python-libs/atom/service.py
47
28950
#!/usr/bin/python # # Copyright (C) 2006, 2007, 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """AtomService provides CRUD ops. in line with the Atom Publishing Protocol. AtomService: Encapsulates the ability to perform insert, update and delete operations with the Atom Publishing Protocol on which GData is based. An instance can perform query, insertion, deletion, and update. HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request to the specified end point. An AtomService object or a subclass can be used to specify information about the request. """ __author__ = 'api.jscudder (Jeff Scudder)' import atom.http_interface import atom.url import atom.http import atom.token_store import os import http.client import urllib.request, urllib.parse, urllib.error import re import base64 import socket import warnings try: from xml.etree import cElementTree as ElementTree except ImportError: try: import cElementTree as ElementTree except ImportError: try: from xml.etree import ElementTree except ImportError: from elementtree import ElementTree import atom class AtomService(object): """Performs Atom Publishing Protocol CRUD operations. The AtomService contains methods to perform HTTP CRUD operations. """ # Default values for members port = 80 ssl = False # Set the current_token to force the AtomService to use this token # instead of searching for an appropriate token in the token_store. 
current_token = None auto_store_tokens = True auto_set_current_token = True def _get_override_token(self): return self.current_token def _set_override_token(self, token): self.current_token = token override_token = property(_get_override_token, _set_override_token) #@atom.v1_deprecated('Please use atom.client.AtomPubClient instead.') def __init__(self, server=None, additional_headers=None, application_name='', http_client=None, token_store=None): """Creates a new AtomService client. Args: server: string (optional) The start of a URL for the server to which all operations should be directed. Example: 'www.google.com' additional_headers: dict (optional) Any additional HTTP headers which should be included with CRUD operations. http_client: An object responsible for making HTTP requests using a request method. If none is provided, a new instance of atom.http.ProxiedHttpClient will be used. token_store: Keeps a collection of authorization tokens which can be applied to requests for a specific URLs. Critical methods are find_token based on a URL (atom.url.Url or a string), add_token, and remove_token. 
""" self.http_client = http_client or atom.http.ProxiedHttpClient() self.token_store = token_store or atom.token_store.TokenStore() self.server = server self.additional_headers = additional_headers or {} self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % ( application_name,) # If debug is True, the HTTPConnection will display debug information self._set_debug(False) __init__ = atom.v1_deprecated( 'Please use atom.client.AtomPubClient instead.')( __init__) def _get_debug(self): return self.http_client.debug def _set_debug(self, value): self.http_client.debug = value debug = property(_get_debug, _set_debug, doc='If True, HTTP debug information is printed.') def use_basic_auth(self, username, password, scopes=None): if username is not None and password is not None: if scopes is None: scopes = [atom.token_store.SCOPE_ALL] base_64_string = base64.encodestring('%s:%s' % (username, password)) token = BasicAuthToken('Basic %s' % base_64_string.strip(), scopes=[atom.token_store.SCOPE_ALL]) if self.auto_set_current_token: self.current_token = token if self.auto_store_tokens: return self.token_store.add_token(token) return True return False def UseBasicAuth(self, username, password, for_proxy=False): """Sets an Authenticaiton: Basic HTTP header containing plaintext. Deprecated, use use_basic_auth instead. The username and password are base64 encoded and added to an HTTP header which will be included in each request. Note that your username and password are sent in plaintext. Args: username: str password: str """ self.use_basic_auth(username, password) #@atom.v1_deprecated('Please use atom.client.AtomPubClient for requests.') def request(self, operation, url, data=None, headers=None, url_params=None): if isinstance(url, str): if url.startswith('http:') and self.ssl: # Force all requests to be https if self.ssl is True. 
url = atom.url.parse_url('https:' + url[5:]) elif not url.startswith('http') and self.ssl: url = atom.url.parse_url('https://%s%s' % (self.server, url)) elif not url.startswith('http'): url = atom.url.parse_url('http://%s%s' % (self.server, url)) else: url = atom.url.parse_url(url) if url_params: for name, value in url_params.items(): url.params[name] = value all_headers = self.additional_headers.copy() if headers: all_headers.update(headers) # If the list of headers does not include a Content-Length, attempt to # calculate it based on the data object. if data and 'Content-Length' not in all_headers: content_length = CalculateDataLength(data) if content_length: all_headers['Content-Length'] = str(content_length) # Find an Authorization token for this URL if one is available. if self.override_token: auth_token = self.override_token else: auth_token = self.token_store.find_token(url) return auth_token.perform_request(self.http_client, operation, url, data=data, headers=all_headers) request = atom.v1_deprecated( 'Please use atom.client.AtomPubClient for requests.')( request) # CRUD operations def Get(self, uri, extra_headers=None, url_params=None, escape_params=True): """Query the APP server with the given URI The uri is the portion of the URI after the server value (server example: 'www.google.com'). Example use: To perform a query against Google Base, set the server to 'base.google.com' and set the uri to '/base/feeds/...', where ... is your query. For example, to find snippets for all digital cameras uri should be set to: '/base/feeds/snippets?bq=digital+camera' Args: uri: string The query in the form of a URI. Example: '/base/feeds/snippets?bq=digital+camera'. extra_headers: dicty (optional) Extra HTTP headers to be included in the GET request. These headers are in addition to those stored in the client's additional_headers property. The client automatically sets the Content-Type and Authorization headers. 
url_params: dict (optional) Additional URL parameters to be included in the query. These are translated into query arguments in the form '&dict_key=value&...'. Example: {'max-results': '250'} becomes &max-results=250 escape_params: boolean (optional) If false, the calling code has already ensured that the query will form a valid URL (all reserved characters have been escaped). If true, this method will escape the query and any URL parameters provided. Returns: httplib.HTTPResponse The server's response to the GET request. """ return self.request('GET', uri, data=None, headers=extra_headers, url_params=url_params) def Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml'): """Insert data into an APP server at the given URI. Args: data: string, ElementTree._Element, or something with a __str__ method The XML to be sent to the uri. uri: string The location (feed) to which the data should be inserted. Example: '/base/feeds/items'. extra_headers: dict (optional) HTTP headers which are to be included. The client automatically sets the Content-Type, Authorization, and Content-Length headers. url_params: dict (optional) Additional URL parameters to be included in the URI. These are translated into query arguments in the form '&dict_key=value&...'. Example: {'max-results': '250'} becomes &max-results=250 escape_params: boolean (optional) If false, the calling code has already ensured that the query will form a valid URL (all reserved characters have been escaped). If true, this method will escape the query and any URL parameters provided. Returns: httplib.HTTPResponse Server's response to the POST request. 
""" if extra_headers is None: extra_headers = {} if content_type: extra_headers['Content-Type'] = content_type return self.request('POST', uri, data=data, headers=extra_headers, url_params=url_params) def Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml'): """Updates an entry at the given URI. Args: data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The XML containing the updated data. uri: string A URI indicating entry to which the update will be applied. Example: '/base/feeds/items/ITEM-ID' extra_headers: dict (optional) HTTP headers which are to be included. The client automatically sets the Content-Type, Authorization, and Content-Length headers. url_params: dict (optional) Additional URL parameters to be included in the URI. These are translated into query arguments in the form '&dict_key=value&...'. Example: {'max-results': '250'} becomes &max-results=250 escape_params: boolean (optional) If false, the calling code has already ensured that the query will form a valid URL (all reserved characters have been escaped). If true, this method will escape the query and any URL parameters provided. Returns: httplib.HTTPResponse Server's response to the PUT request. """ if extra_headers is None: extra_headers = {} if content_type: extra_headers['Content-Type'] = content_type return self.request('PUT', uri, data=data, headers=extra_headers, url_params=url_params) def Delete(self, uri, extra_headers=None, url_params=None, escape_params=True): """Deletes the entry at the given URI. Args: uri: string The URI of the entry to be deleted. Example: '/base/feeds/items/ITEM-ID' extra_headers: dict (optional) HTTP headers which are to be included. The client automatically sets the Content-Type and Authorization headers. url_params: dict (optional) Additional URL parameters to be included in the URI. These are translated into query arguments in the form '&dict_key=value&...'. 
Example: {'max-results': '250'} becomes &max-results=250 escape_params: boolean (optional) If false, the calling code has already ensured that the query will form a valid URL (all reserved characters have been escaped). If true, this method will escape the query and any URL parameters provided. Returns: httplib.HTTPResponse Server's response to the DELETE request. """ return self.request('DELETE', uri, data=None, headers=extra_headers, url_params=url_params) class BasicAuthToken(atom.http_interface.GenericToken): def __init__(self, auth_header, scopes=None): """Creates a token used to add Basic Auth headers to HTTP requests. Args: auth_header: str The value for the Authorization header. scopes: list of str or atom.url.Url specifying the beginnings of URLs for which this token can be used. For example, if scopes contains 'http://example.com/foo', then this token can be used for a request to 'http://example.com/foo/bar' but it cannot be used for a request to 'http://example.com/baz' """ self.auth_header = auth_header self.scopes = scopes or [] def perform_request(self, http_client, operation, url, data=None, headers=None): """Sets the Authorization header to the basic auth string.""" if headers is None: headers = {'Authorization':self.auth_header} else: headers['Authorization'] = self.auth_header return http_client.request(operation, url, data=data, headers=headers) def __str__(self): return self.auth_header def valid_for_scope(self, url): """Tells the caller if the token authorizes access to the desired URL. """ if isinstance(url, str): url = atom.url.parse_url(url) for scope in self.scopes: if scope == atom.token_store.SCOPE_ALL: return True if isinstance(scope, str): scope = atom.url.parse_url(scope) if scope == url: return True # Check the host and the path, but ignore the port and protocol. 
elif scope.host == url.host and not scope.path: return True elif scope.host == url.host and scope.path and not url.path: continue elif scope.host == url.host and url.path.startswith(scope.path): return True return False def PrepareConnection(service, full_uri): """Opens a connection to the server based on the full URI. This method is deprecated, instead use atom.http.HttpClient.request. Examines the target URI and the proxy settings, which are set as environment variables, to open a connection with the server. This connection is used to make an HTTP request. Args: service: atom.AtomService or a subclass. It must have a server string which represents the server host to which the request should be made. It may also have a dictionary of additional_headers to send in the HTTP request. full_uri: str Which is the target relative (lacks protocol and host) or absolute URL to be opened. Example: 'https://www.google.com/accounts/ClientLogin' or 'base/feeds/snippets' where the server is set to www.google.com. Returns: A tuple containing the httplib.HTTPConnection and the full_uri for the request. 
""" deprecation('calling deprecated function PrepareConnection') (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri) if ssl: # destination is https proxy = os.environ.get('https_proxy') if proxy: (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True) proxy_username = os.environ.get('proxy-username') if not proxy_username: proxy_username = os.environ.get('proxy_username') proxy_password = os.environ.get('proxy-password') if not proxy_password: proxy_password = os.environ.get('proxy_password') if proxy_username: user_auth = base64.encodestring('%s:%s' % (proxy_username, proxy_password)) proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % ( user_auth.strip())) else: proxy_authorization = '' proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port) user_agent = 'User-Agent: %s\r\n' % ( service.additional_headers['User-Agent']) proxy_pieces = (proxy_connect + proxy_authorization + user_agent + '\r\n') #now connect, very simple recv and error checking p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) p_sock.connect((p_server,p_port)) p_sock.sendall(proxy_pieces) response = '' # Wait for the full response. while response.find("\r\n\r\n") == -1: response += p_sock.recv(8192) p_status=response.split()[1] if p_status!=str(200): raise atom.http.ProxyError('Error status=%s' % p_status) # Trivial setup for ssl socket. ssl = socket.ssl(p_sock, None, None) fake_sock = http.client.FakeSocket(p_sock, ssl) # Initalize httplib and replace with the proxy socket. 
connection = http.client.HTTPConnection(server) connection.sock=fake_sock full_uri = partial_uri else: connection = http.client.HTTPSConnection(server, port) full_uri = partial_uri else: # destination is http proxy = os.environ.get('http_proxy') if proxy: (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True) proxy_username = os.environ.get('proxy-username') if not proxy_username: proxy_username = os.environ.get('proxy_username') proxy_password = os.environ.get('proxy-password') if not proxy_password: proxy_password = os.environ.get('proxy_password') if proxy_username: UseBasicAuth(service, proxy_username, proxy_password, True) connection = http.client.HTTPConnection(p_server, p_port) if not full_uri.startswith("http://"): if full_uri.startswith("/"): full_uri = "http://%s%s" % (service.server, full_uri) else: full_uri = "http://%s/%s" % (service.server, full_uri) else: connection = http.client.HTTPConnection(server, port) full_uri = partial_uri return (connection, full_uri) def UseBasicAuth(service, username, password, for_proxy=False): """Sets an Authenticaiton: Basic HTTP header containing plaintext. Deprecated, use AtomService.use_basic_auth insread. The username and password are base64 encoded and added to an HTTP header which will be included in each request. Note that your username and password are sent in plaintext. The auth header is added to the additional_headers dictionary in the service object. Args: service: atom.AtomService or a subclass which has an additional_headers dict as a member. username: str password: str """ deprecation('calling deprecated function UseBasicAuth') base_64_string = base64.encodestring('%s:%s' % (username, password)) base_64_string = base_64_string.strip() if for_proxy: header_name = 'Proxy-Authorization' else: header_name = 'Authorization' service.additional_headers[header_name] = 'Basic %s' % (base_64_string,) def ProcessUrl(service, url, for_proxy=False): """Processes a passed URL. 
If the URL does not begin with https?, then the default value for server is used This method is deprecated, use atom.url.parse_url instead. """ if not isinstance(url, atom.url.Url): url = atom.url.parse_url(url) server = url.host ssl = False port = 80 if not server: if hasattr(service, 'server'): server = service.server else: server = service if not url.protocol and hasattr(service, 'ssl'): ssl = service.ssl if hasattr(service, 'port'): port = service.port else: if url.protocol == 'https': ssl = True elif url.protocol == 'http': ssl = False if url.port: port = int(url.port) elif port == 80 and ssl: port = 443 return (server, port, ssl, url.get_request_uri()) def DictionaryToParamList(url_parameters, escape_params=True): """Convert a dictionary of URL arguments into a URL parameter string. This function is deprcated, use atom.url.Url instead. Args: url_parameters: The dictionaty of key-value pairs which will be converted into URL parameters. For example, {'dry-run': 'true', 'foo': 'bar'} will become ['dry-run=true', 'foo=bar']. Returns: A list which contains a string for each key-value pair. The strings are ready to be incorporated into a URL by using '&'.join([] + parameter_list) """ # Choose which function to use when modifying the query and parameters. # Use quote_plus when escape_params is true. transform_op = [str, urllib.parse.quote_plus][bool(escape_params)] # Create a list of tuples containing the escaped version of the # parameter-value pairs. parameter_tuples = [(transform_op(param), transform_op(value)) for param, value in list((url_parameters or {}).items())] # Turn parameter-value tuples into a list of strings in the form # 'PARAMETER=VALUE'. return ['='.join(x) for x in parameter_tuples] def BuildUri(uri, url_params=None, escape_params=True): """Converts a uri string and a collection of parameters into a URI. This function is deprcated, use atom.url.Url instead. 
Args: uri: string url_params: dict (optional) escape_params: boolean (optional) uri: string The start of the desired URI. This string can alrady contain URL parameters. Examples: '/base/feeds/snippets', '/base/feeds/snippets?bq=digital+camera' url_parameters: dict (optional) Additional URL parameters to be included in the query. These are translated into query arguments in the form '&dict_key=value&...'. Example: {'max-results': '250'} becomes &max-results=250 escape_params: boolean (optional) If false, the calling code has already ensured that the query will form a valid URL (all reserved characters have been escaped). If true, this method will escape the query and any URL parameters provided. Returns: string The URI consisting of the escaped URL parameters appended to the initial uri string. """ # Prepare URL parameters for inclusion into the GET request. parameter_list = DictionaryToParamList(url_params, escape_params) # Append the URL parameters to the URL. if parameter_list: if uri.find('?') != -1: # If there are already URL parameters in the uri string, add the # parameters after a new & character. full_uri = '&'.join([uri] + parameter_list) else: # The uri string did not have any URL parameters (no ? character) # so put a ? between the uri and URL parameters. full_uri = '%s%s' % (uri, '?%s' % ('&'.join([] + parameter_list))) else: full_uri = uri return full_uri def HttpRequest(service, operation, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml'): """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE. This method is deprecated, use atom.http.HttpClient.request instead. 
Usage example, perform and HTTP GET on http://www.google.com/: import atom.service client = atom.service.AtomService() http_response = client.Get('http://www.google.com/') or you could set the client.server to 'www.google.com' and use the following: client.server = 'www.google.com' http_response = client.Get('/') Args: service: atom.AtomService object which contains some of the parameters needed to make the request. The following members are used to construct the HTTP call: server (str), additional_headers (dict), port (int), and ssl (bool). operation: str The HTTP operation to be performed. This is usually one of 'GET', 'POST', 'PUT', or 'DELETE' data: ElementTree, filestream, list of parts, or other object which can be converted to a string. Should be set to None when performing a GET or PUT. If data is a file-like object which can be read, this method will read a chunk of 100K bytes at a time and send them. If the data is a list of parts to be sent, each part will be evaluated and sent. uri: The beginning of the URL to which the request should be sent. Examples: '/', '/base/feeds/snippets', '/m8/feeds/contacts/default/base' extra_headers: dict of strings. HTTP headers which should be sent in the request. These headers are in addition to those stored in service.additional_headers. url_params: dict of strings. Key value pairs to be added to the URL as URL parameters. For example {'foo':'bar', 'test':'param'} will become ?foo=bar&test=param. escape_params: bool default True. If true, the keys and values in url_params will be URL escaped when the form is constructed (Special characters converted to %XX form.) content_type: str The MIME type for the data being sent. Defaults to 'application/atom+xml', this is only used if data is set. 
""" deprecation('call to deprecated function HttpRequest') full_uri = BuildUri(uri, url_params, escape_params) (connection, full_uri) = PrepareConnection(service, full_uri) if extra_headers is None: extra_headers = {} # Turn on debug mode if the debug member is set. if service.debug: connection.debuglevel = 1 connection.putrequest(operation, full_uri) # If the list of headers does not include a Content-Length, attempt to # calculate it based on the data object. if (data and 'Content-Length' not in service.additional_headers and 'Content-Length' not in extra_headers): content_length = CalculateDataLength(data) if content_length: extra_headers['Content-Length'] = str(content_length) if content_type: extra_headers['Content-Type'] = content_type # Send the HTTP headers. if isinstance(service.additional_headers, dict): for header in service.additional_headers: connection.putheader(header, service.additional_headers[header]) if isinstance(extra_headers, dict): for header in extra_headers: connection.putheader(header, extra_headers[header]) connection.endheaders() # If there is data, send it in the request. if data: if isinstance(data, list): for data_part in data: __SendDataPart(data_part, connection) else: __SendDataPart(data, connection) # Return the HTTP Response from the server. return connection.getresponse() def __SendDataPart(data, connection): """This method is deprecated, use atom.http._send_data_part""" deprecated('call to deprecated function __SendDataPart') if isinstance(data, str): #TODO add handling for unicode. connection.send(data) return elif ElementTree.iselement(data): connection.send(ElementTree.tostring(data)) return # Check to see if data is a file-like object that has a read method. elif hasattr(data, 'read'): # Read the file and send it a chunk at a time. while 1: binarydata = data.read(100000) if binarydata == '': break connection.send(binarydata) return else: # The data object was not a file. # Try to convert to a string and send the data. 
connection.send(str(data)) return def CalculateDataLength(data): """Attempts to determine the length of the data to send. This method will respond with a length only if the data is a string or and ElementTree element. Args: data: object If this is not a string or ElementTree element this funtion will return None. """ if isinstance(data, str): return len(data) elif isinstance(data, list): return None elif ElementTree.iselement(data): return len(ElementTree.tostring(data)) elif hasattr(data, 'read'): # If this is a file-like object, don't try to guess the length. return None else: return len(str(data)) def deprecation(message): warnings.warn(message, DeprecationWarning, stacklevel=2)
apache-2.0
podemos-info/odoo
addons/base_report_designer/plugin/openerp_report_designer/bin/script/ModifyExistingReport.py
6
9335
########################################################################## # # Portions of this file are under the following copyright and license: # # # Copyright (c) 2003-2004 Danny Brewer # d29583@groovegarden.com # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # See: http://www.gnu.org/licenses/lgpl.html # # # and other portions are under the following copyright and license: # # # OpenERP, Open Source Management Solution>.. # Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
# # ############################################################################## import uno import string import unohelper import xmlrpclib import base64, tempfile from com.sun.star.task import XJobExecutor import os import sys if __name__<>'package': from lib.gui import * from lib.error import * from LoginTest import * from lib.logreport import * from lib.rpc import * database="test" uid = 3 # class ModifyExistingReport(unohelper.Base, XJobExecutor): def __init__(self,ctx): self.ctx = ctx self.module = "openerp_report" self.version = "0.1" LoginTest() if not loginstatus and __name__=="package": exit(1) self.win = DBModalDialog(60, 50, 180, 120, "Modify Existing Report") self.win.addFixedText("lblReport", 2, 3, 60, 15, "Report Selection") self.win.addComboListBox("lstReport", -1,15,178,80 , False ) self.lstReport = self.win.getControl( "lstReport" ) desktop=getDesktop() doc = desktop.getCurrentComponent() docinfo=doc.getDocumentInfo() self.logobj=Logger() self.hostname = docinfo.getUserFieldValue(0) global passwd self.password = passwd global url self.sock=RPCSession(url) # Open a new connexion to the server ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_designer'),('state', '=', 'installed')]) if not len(ids): ErrorDialog("Please Install base_report_designer module", "", "Module Uninstalled Error") exit(1) ids = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'search', [('report_xsl', '=', False),('report_xml', '=', False)]) fields=['id', 'name','report_name','model'] self.reports = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'read', ids, fields) self.report_with_id = [] for report in self.reports: if report['name']<>"": model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=', report['model'])]) model_res_other =self.sock.execute(database, uid, self.password, 'ir.model', 'read', model_ids, [ 'name', 'model' 
] ) if model_res_other <> []: name = model_res_other[0]['name'] + " - " + report['name'] else: name = report['name'] + " - " + report['model'] self.report_with_id.append( (report['id'], name, report['model'] ) ) self.report_with_id.sort( lambda x, y: cmp( x[1], y[1] ) ) for id, report_name, model_name in self.report_with_id: self.lstReport.addItem( report_name, self.lstReport.getItemCount() ) self.win.addButton('btnSave',10,-5,50,15,'Open Report' ,actionListenerProc = self.btnOk_clicked ) self.win.addButton('btnCancel',-10 ,-5,50,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked ) self.win.addButton('btnDelete',15 -80 ,-5,50,15,'Delete Report',actionListenerProc = self.btnDelete_clicked) self.win.doModalDialog("lstReport",self.report_with_id[0][1] ) def btnOk_clicked(self, oActionEvent): try: desktop=getDesktop() doc = desktop.getCurrentComponent() docinfo=doc.getDocumentInfo() selectedItemPos = self.win.getListBoxSelectedItemPos( "lstReport" ) id = self.report_with_id[ selectedItemPos ][0] res = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'report_get', id) if res['file_type'] in ['sxw','odt'] : file_type = res['file_type'] else : file_type = 'sxw' fp_name = tempfile.mktemp('.'+file_type) fp_name1="r"+fp_name fp_path=os.path.join(fp_name1).replace("\\","/") fp_win=fp_path[1:] filename = ( os.name == 'nt' and fp_win or fp_name ) if res['report_sxw_content']: write_data_to_file( filename, base64.decodestring(res['report_sxw_content'])) url = "file:///%s" % filename arr=Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),) oDoc2 = desktop.loadComponentFromURL(url, "openerp", 55, arr) docinfo2=oDoc2.getDocumentInfo() docinfo2.setUserFieldValue(0, self.hostname) docinfo2.setUserFieldValue(1,self.password) docinfo2.setUserFieldValue(2,id) docinfo2.setUserFieldValue(3,self.report_with_id[selectedItemPos][2]) oParEnum = oDoc2.getTextFields().createEnumeration() while oParEnum.hasMoreElements(): oPar = 
oParEnum.nextElement() if oPar.supportsService("com.sun.star.text.TextField.DropDown"): oPar.SelectedItem = oPar.Items[0] oPar.update() if oDoc2.isModified(): if oDoc2.hasLocation() and not oDoc2.isReadonly(): oDoc2.store() ErrorDialog("Download is Completed","Your file has been placed here :\n"+ fp_name,"Download Message") obj=Logger() obj.log_write('Modify Existing Report',LOG_INFO, ':successful download report %s using database %s' % (self.report_with_id[selectedItemPos][2], database)) except Exception, e: ErrorDialog("Report has not been downloaded", "Report: %s\nDetails: %s" % ( fp_name, str(e) ),"Download Message") import traceback,sys info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) self.logobj.log_write('ModifyExistingReport', LOG_ERROR, info) self.win.endExecute() def btnCancel_clicked( self, oActionEvent ): self.win.endExecute() def btnDelete_clicked( self, oActionEvent ): desktop=getDesktop() doc = desktop.getCurrentComponent() docinfo=doc.getDocumentInfo() selectedItemPos = self.win.getListBoxSelectedItemPos( "lstReport" ) name=self.win.getListBoxSelectedItem ("lstReport") id = self.report_with_id[ selectedItemPos ][0] temp = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'unlink', id,) str_value='ir.actions.report.xml,'+str(id) ids = self.sock.execute(database, uid, self.password, 'ir.values' , 'search',[('value','=',str_value)]) if ids: rec = self.sock.execute(database, uid, self.password, 'ir.values', 'unlink', ids,) else : pass if temp: ErrorDialog("Report","Report has been Delete:\n "+name,"Message") self.logobj.log_write('Delete Report',LOG_INFO, ':successful delete report %s using database %s' % (name, database)) else: ErrorDialog("Report","Report has not Delete:\n"+name," Message") self.win.endExecute() if __name__<>"package" and __name__=="__main__": ModifyExistingReport(None) elif __name__=="package": g_ImplementationHelper.addImplementation( 
ModifyExistingReport, "org.openoffice.openerp.report.modifyreport", ("com.sun.star.task.Job",),) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Nikoli/youtube-dl
youtube_dl/extractor/internetvideoarchive.py
146
3475
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urlparse, compat_urllib_parse, ) from ..utils import ( xpath_with_ns, ) class InternetVideoArchiveIE(InfoExtractor): _VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?' _TEST = { 'url': 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247', 'info_dict': { 'id': '452693', 'ext': 'mp4', 'title': 'SKYFALL', 'description': 'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.', 'duration': 152, }, } @staticmethod def _build_url(query): return 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?' + query @staticmethod def _clean_query(query): NEEDED_ARGS = ['publishedid', 'customerid'] query_dic = compat_urlparse.parse_qs(query) cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS) # Other player ids return m3u8 urls cleaned_dic['playerid'] = '247' cleaned_dic['videokbrate'] = '100000' return compat_urllib_parse.urlencode(cleaned_dic) def _real_extract(self, url): query = compat_urlparse.urlparse(url).query query_dic = compat_urlparse.parse_qs(query) video_id = query_dic['publishedid'][0] url = self._build_url(query) flashconfiguration = self._download_xml(url, video_id, 'Downloading flash configuration') file_url = flashconfiguration.find('file').text file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx') # Replace some of the parameters in the query to get the best quality # and http links (no m3u8 manifests) file_url = re.sub(r'(?<=\?)(.+)$', lambda m: self._clean_query(m.group()), file_url) info = self._download_xml(file_url, video_id, 'Downloading video info') item = info.find('channel/item') def _bp(p): return xpath_with_ns( p, { 'media': 
'http://search.yahoo.com/mrss/', 'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats', } ) formats = [] for content in item.findall(_bp('media:group/media:content')): attr = content.attrib f_url = attr['url'] width = int(attr['width']) bitrate = int(attr['bitrate']) format_id = '%d-%dk' % (width, bitrate) formats.append({ 'format_id': format_id, 'url': f_url, 'width': width, 'tbr': bitrate, }) self._sort_formats(formats) return { 'id': video_id, 'title': item.find('title').text, 'formats': formats, 'thumbnail': item.find(_bp('media:thumbnail')).attrib['url'], 'description': item.find('description').text, 'duration': int(attr['duration']), }
unlicense
ChenJunor/hue
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/tests/test_io.py
105
4181
from __future__ import unicode_literals import binascii import unittest from django.contrib.gis import memoryview from django.utils.unittest import skipUnless from ..import HAS_GEOS if HAS_GEOS: from .. import GEOSGeometry, WKTReader, WKTWriter, WKBReader, WKBWriter, geos_version_info @skipUnless(HAS_GEOS, "Geos is required.") class GEOSIOTest(unittest.TestCase): def test01_wktreader(self): # Creating a WKTReader instance wkt_r = WKTReader() wkt = 'POINT (5 23)' # read() should return a GEOSGeometry ref = GEOSGeometry(wkt) g1 = wkt_r.read(wkt.encode()) g2 = wkt_r.read(wkt) for geom in (g1, g2): self.assertEqual(ref, geom) # Should only accept six.string_types objects. self.assertRaises(TypeError, wkt_r.read, 1) self.assertRaises(TypeError, wkt_r.read, memoryview(b'foo')) def test02_wktwriter(self): # Creating a WKTWriter instance, testing its ptr property. wkt_w = WKTWriter() self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type()) ref = GEOSGeometry('POINT (5 23)') ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)' self.assertEqual(ref_wkt, wkt_w.write(ref).decode()) def test03_wkbreader(self): # Creating a WKBReader instance wkb_r = WKBReader() hex = b'000000000140140000000000004037000000000000' wkb = memoryview(binascii.a2b_hex(hex)) ref = GEOSGeometry(hex) # read() should return a GEOSGeometry on either a hex string or # a WKB buffer. g1 = wkb_r.read(wkb) g2 = wkb_r.read(hex) for geom in (g1, g2): self.assertEqual(ref, geom) bad_input = (1, 5.23, None, False) for bad_wkb in bad_input: self.assertRaises(TypeError, wkb_r.read, bad_wkb) def test04_wkbwriter(self): wkb_w = WKBWriter() # Representations of 'POINT (5 23)' in hex -- one normal and # the other with the byte order changed. 
g = GEOSGeometry('POINT (5 23)') hex1 = b'010100000000000000000014400000000000003740' wkb1 = memoryview(binascii.a2b_hex(hex1)) hex2 = b'000000000140140000000000004037000000000000' wkb2 = memoryview(binascii.a2b_hex(hex2)) self.assertEqual(hex1, wkb_w.write_hex(g)) self.assertEqual(wkb1, wkb_w.write(g)) # Ensuring bad byteorders are not accepted. for bad_byteorder in (-1, 2, 523, 'foo', None): # Equivalent of `wkb_w.byteorder = bad_byteorder` self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder) # Setting the byteorder to 0 (for Big Endian) wkb_w.byteorder = 0 self.assertEqual(hex2, wkb_w.write_hex(g)) self.assertEqual(wkb2, wkb_w.write(g)) # Back to Little Endian wkb_w.byteorder = 1 # Now, trying out the 3D and SRID flags. g = GEOSGeometry('POINT (5 23 17)') g.srid = 4326 hex3d = b'0101000080000000000000144000000000000037400000000000003140' wkb3d = memoryview(binascii.a2b_hex(hex3d)) hex3d_srid = b'01010000A0E6100000000000000000144000000000000037400000000000003140' wkb3d_srid = memoryview(binascii.a2b_hex(hex3d_srid)) # Ensuring bad output dimensions are not accepted for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None): # Equivalent of `wkb_w.outdim = bad_outdim` self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim) # These tests will fail on 3.0.0 because of a bug that was fixed in 3.1: # http://trac.osgeo.org/geos/ticket/216 if not geos_version_info()['version'].startswith('3.0.'): # Now setting the output dimensions to be 3 wkb_w.outdim = 3 self.assertEqual(hex3d, wkb_w.write_hex(g)) self.assertEqual(wkb3d, wkb_w.write(g)) # Telling the WKBWriter to include the srid in the representation. wkb_w.srid = True self.assertEqual(hex3d_srid, wkb_w.write_hex(g)) self.assertEqual(wkb3d_srid, wkb_w.write(g))
apache-2.0
FHannes/intellij-community
python/lib/Lib/site-packages/django/views/generic/base.py
73
5447
from django import http from django.core.exceptions import ImproperlyConfigured from django.template import RequestContext, loader from django.template.response import TemplateResponse from django.utils.functional import update_wrapper from django.utils.log import getLogger from django.utils.decorators import classonlymethod logger = getLogger('django.request') class View(object): """ Intentionally simple parent class for all views. Only implements dispatch-by-method and simple sanity checking. """ http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace'] def __init__(self, **kwargs): """ Constructor. Called in the URLconf; can contain helpful extra keyword arguments, and other things. """ # Go through keyword arguments, and either save their values to our # instance, or raise an error. for key, value in kwargs.iteritems(): setattr(self, key, value) @classonlymethod def as_view(cls, **initkwargs): """ Main entry point for a request-response process. """ # sanitize keyword arguments for key in initkwargs: if key in cls.http_method_names: raise TypeError(u"You tried to pass in the %s method name as a " u"keyword argument to %s(). Don't do that." % (key, cls.__name__)) if not hasattr(cls, key): raise TypeError(u"%s() received an invalid keyword %r" % ( cls.__name__, key)) def view(request, *args, **kwargs): self = cls(**initkwargs) return self.dispatch(request, *args, **kwargs) # take name and docstring from class update_wrapper(view, cls, updated=()) # and possible attributes set by decorators # like csrf_exempt from dispatch update_wrapper(view, cls.dispatch, assigned=()) return view def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. 
if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed self.request = request self.args = args self.kwargs = kwargs return handler(request, *args, **kwargs) def http_method_not_allowed(self, request, *args, **kwargs): allowed_methods = [m for m in self.http_method_names if hasattr(self, m)] logger.warning('Method Not Allowed (%s): %s' % (request.method, request.path), extra={ 'status_code': 405, 'request': self.request } ) return http.HttpResponseNotAllowed(allowed_methods) class TemplateResponseMixin(object): """ A mixin that can be used to render a template. """ template_name = None response_class = TemplateResponse def render_to_response(self, context, **response_kwargs): """ Returns a response with a template rendered with the given context. """ return self.response_class( request = self.request, template = self.get_template_names(), context = context, **response_kwargs ) def get_template_names(self): """ Returns a list of template names to be used for the request. Must return a list. May not be called if render_to_response is overridden. """ if self.template_name is None: return [] else: return [self.template_name] class TemplateView(TemplateResponseMixin, View): """ A view that renders a template. """ def get_context_data(self, **kwargs): return { 'params': kwargs } def get(self, request, *args, **kwargs): context = self.get_context_data(**kwargs) return self.render_to_response(context) class RedirectView(View): """ A view that provides a redirect on any GET request. """ permanent = True url = None query_string = False def get_redirect_url(self, **kwargs): """ Return the URL redirect to. Keyword arguments from the URL pattern match generating the redirect request are provided as kwargs to this method. 
""" if self.url: args = self.request.META["QUERY_STRING"] if args and self.query_string: url = "%s?%s" % (self.url, args) else: url = self.url return url % kwargs else: return None def get(self, request, *args, **kwargs): url = self.get_redirect_url(**kwargs) if url: if self.permanent: return http.HttpResponsePermanentRedirect(url) else: return http.HttpResponseRedirect(url) else: logger.warning('Gone: %s' % self.request.path, extra={ 'status_code': 410, 'request': self.request }) return http.HttpResponseGone()
apache-2.0
robdennis/sideboard
tests/plugins/not_installed/foo/env/lib/python2.7/site-packages/distribute-0.6.34-py2.7.egg/setuptools/command/easy_install.py
27
72759
#!python """\ Easy Install ------------ A tool for doing automatic download/extract/build of distutils-based Python packages. For detailed documentation, see the accompanying EasyInstall.txt file, or visit the `EasyInstall home page`__. __ http://packages.python.org/distribute/easy_install.html """ import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random from glob import glob from setuptools import Command, _dont_write_bytecode from setuptools.sandbox import run_setup from distutils import log, dir_util from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.sysconfig import get_python_lib, get_config_vars from distutils.errors import DistutilsArgError, DistutilsOptionError, \ DistutilsError, DistutilsPlatformError from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import PackageIndex from setuptools.package_index import URL_SCHEME from setuptools.command import bdist_egg, egg_info from pkg_resources import yield_lines, normalize_path, resource_string, \ ensure_directory, get_distribution, find_distributions, \ Environment, Requirement, Distribution, \ PathMetadata, EggMetadata, WorkingSet, \ DistributionNotFound, VersionConflict, \ DEVELOP_DIST sys_executable = os.path.normpath(sys.executable) __all__ = [ 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', 'main', 'get_exe_prefixes', ] import site HAS_USER_SITE = not sys.version < "2.6" and site.ENABLE_USER_SITE import struct def is_64bit(): return struct.calcsize("P") == 8 def samefile(p1,p2): if hasattr(os.path,'samefile') and ( os.path.exists(p1) and os.path.exists(p2) ): return os.path.samefile(p1,p2) return ( os.path.normpath(os.path.normcase(p1)) == os.path.normpath(os.path.normcase(p2)) ) if sys.version_info <= (3,): def _to_ascii(s): return s 
def isascii(s): try: unicode(s, 'ascii') return True except UnicodeError: return False else: def _to_ascii(s): return s.encode('ascii') def isascii(s): try: s.encode('ascii') return True except UnicodeError: return False class easy_install(Command): """Manage a download/build/install process""" description = "Find/get/install Python packages" command_consumes_arguments = True user_options = [ ('prefix=', None, "installation prefix"), ("zip-ok", "z", "install package as a zipfile"), ("multi-version", "m", "make apps have to require() a version"), ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"), ("install-dir=", "d", "install package to DIR"), ("script-dir=", "s", "install scripts to DIR"), ("exclude-scripts", "x", "Don't install scripts"), ("always-copy", "a", "Copy all needed packages to install dir"), ("index-url=", "i", "base URL of Python Package Index"), ("find-links=", "f", "additional URL(s) to search for packages"), ("delete-conflicting", "D", "no longer needed; don't use this"), ("ignore-conflicts-at-my-risk", None, "no longer needed; don't use this"), ("build-directory=", "b", "download/extract/build in DIR; keep the results"), ('optimize=', 'O', "also compile with optimization: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('record=', None, "filename in which to record list of installed files"), ('always-unzip', 'Z', "don't install as a zipfile, no matter what"), ('site-dirs=','S',"list of directories where .pth files work"), ('editable', 'e', "Install specified packages in editable form"), ('no-deps', 'N', "don't install dependencies"), ('allow-hosts=', 'H', "pattern(s) that hostnames must match"), ('local-snapshots-ok', 'l', "allow building eggs from local checkouts"), ('version', None, "print version information and exit"), ('no-find-links', None, "Don't load find-links defined in packages being installed") ] boolean_options = [ 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 
'always-copy', 'delete-conflicting', 'ignore-conflicts-at-my-risk', 'editable', 'no-deps', 'local-snapshots-ok', 'version' ] if HAS_USER_SITE: user_options.append(('user', None, "install in user site-package '%s'" % site.USER_SITE)) boolean_options.append('user') negative_opt = {'always-unzip': 'zip-ok'} create_index = PackageIndex def initialize_options(self): if HAS_USER_SITE: whereami = os.path.abspath(__file__) self.user = whereami.startswith(site.USER_SITE) else: self.user = 0 self.zip_ok = self.local_snapshots_ok = None self.install_dir = self.script_dir = self.exclude_scripts = None self.index_url = None self.find_links = None self.build_directory = None self.args = None self.optimize = self.record = None self.upgrade = self.always_copy = self.multi_version = None self.editable = self.no_deps = self.allow_hosts = None self.root = self.prefix = self.no_report = None self.version = None self.install_purelib = None # for pure module distributions self.install_platlib = None # non-pure (dists w/ extensions) self.install_headers = None # for C/C++ headers self.install_lib = None # set to either purelib or platlib self.install_scripts = None self.install_data = None self.install_base = None self.install_platbase = None if HAS_USER_SITE: self.install_userbase = site.USER_BASE self.install_usersite = site.USER_SITE else: self.install_userbase = None self.install_usersite = None self.no_find_links = None # Options not specifiable via command line self.package_index = None self.pth_file = self.always_copy_from = None self.delete_conflicting = None self.ignore_conflicts_at_my_risk = None self.site_dirs = None self.installed_projects = {} self.sitepy_installed = False # Always read easy_install options, even if we are subclassed, or have # an independent instance created. This ensures that defaults will # always come from the standard configuration file(s)' "easy_install" # section, even if this is a "develop" or "install" command, or some # other embedding. 
self._dry_run = None self.verbose = self.distribution.verbose self.distribution._set_command_options( self, self.distribution.get_option_dict('easy_install') ) def delete_blockers(self, blockers): for filename in blockers: if os.path.exists(filename) or os.path.islink(filename): log.info("Deleting %s", filename) if not self.dry_run: if os.path.isdir(filename) and not os.path.islink(filename): rmtree(filename) else: os.unlink(filename) def finalize_options(self): if self.version: print 'distribute %s' % get_distribution('distribute').version sys.exit() py_version = sys.version.split()[0] prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix') self.config_vars = {'dist_name': self.distribution.get_name(), 'dist_version': self.distribution.get_version(), 'dist_fullname': self.distribution.get_fullname(), 'py_version': py_version, 'py_version_short': py_version[0:3], 'py_version_nodot': py_version[0] + py_version[2], 'sys_prefix': prefix, 'prefix': prefix, 'sys_exec_prefix': exec_prefix, 'exec_prefix': exec_prefix, # Only python 3.2+ has abiflags 'abiflags': getattr(sys, 'abiflags', ''), } if HAS_USER_SITE: self.config_vars['userbase'] = self.install_userbase self.config_vars['usersite'] = self.install_usersite # fix the install_dir if "--user" was used #XXX: duplicate of the code in the setup command if self.user and HAS_USER_SITE: self.create_home_path() if self.install_userbase is None: raise DistutilsPlatformError( "User base directory is not specified") self.install_base = self.install_platbase = self.install_userbase if os.name == 'posix': self.select_scheme("unix_user") else: self.select_scheme(os.name + "_user") self.expand_basedirs() self.expand_dirs() self._expand('install_dir','script_dir','build_directory','site_dirs') # If a non-default installation directory was specified, default the # script directory to match it. 
if self.script_dir is None: self.script_dir = self.install_dir if self.no_find_links is None: self.no_find_links = False # Let install_dir get set by install_lib command, which in turn # gets its info from the install command, and takes into account # --prefix and --home and all that other crud. self.set_undefined_options('install_lib', ('install_dir','install_dir') ) # Likewise, set default script_dir from 'install_scripts.install_dir' self.set_undefined_options('install_scripts', ('install_dir', 'script_dir') ) if self.user and self.install_purelib: self.install_dir = self.install_purelib self.script_dir = self.install_scripts # default --record from the install command self.set_undefined_options('install', ('record', 'record')) normpath = map(normalize_path, sys.path) self.all_site_dirs = get_site_dirs() if self.site_dirs is not None: site_dirs = [ os.path.expanduser(s.strip()) for s in self.site_dirs.split(',') ] for d in site_dirs: if not os.path.isdir(d): log.warn("%s (in --site-dirs) does not exist", d) elif normalize_path(d) not in normpath: raise DistutilsOptionError( d+" (in --site-dirs) is not on sys.path" ) else: self.all_site_dirs.append(normalize_path(d)) if not self.editable: self.check_site_dir() self.index_url = self.index_url or "http://pypi.python.org/simple" self.shadow_path = self.all_site_dirs[:] for path_item in self.install_dir, normalize_path(self.script_dir): if path_item not in self.shadow_path: self.shadow_path.insert(0, path_item) if self.allow_hosts is not None: hosts = [s.strip() for s in self.allow_hosts.split(',')] else: hosts = ['*'] if self.package_index is None: self.package_index = self.create_index( self.index_url, search_path = self.shadow_path, hosts=hosts, ) self.local_index = Environment(self.shadow_path+sys.path) if self.find_links is not None: if isinstance(self.find_links, basestring): self.find_links = self.find_links.split() else: self.find_links = [] if self.local_snapshots_ok: 
self.package_index.scan_egg_links(self.shadow_path+sys.path) if not self.no_find_links: self.package_index.add_find_links(self.find_links) self.set_undefined_options('install_lib', ('optimize','optimize')) if not isinstance(self.optimize,int): try: self.optimize = int(self.optimize) if not (0 <= self.optimize <= 2): raise ValueError except ValueError: raise DistutilsOptionError("--optimize must be 0, 1, or 2") if self.delete_conflicting and self.ignore_conflicts_at_my_risk: raise DistutilsOptionError( "Can't use both --delete-conflicting and " "--ignore-conflicts-at-my-risk at the same time" ) if self.editable and not self.build_directory: raise DistutilsArgError( "Must specify a build directory (-b) when using --editable" ) if not self.args: raise DistutilsArgError( "No urls, filenames, or requirements specified (see --help)") self.outputs = [] def _expand_attrs(self, attrs): for attr in attrs: val = getattr(self, attr) if val is not None: if os.name == 'posix' or os.name == 'nt': val = os.path.expanduser(val) val = subst_vars(val, self.config_vars) setattr(self, attr, val) def expand_basedirs(self): """Calls `os.path.expanduser` on install_base, install_platbase and root.""" self._expand_attrs(['install_base', 'install_platbase', 'root']) def expand_dirs(self): """Calls `os.path.expanduser` on install dirs.""" self._expand_attrs(['install_purelib', 'install_platlib', 'install_lib', 'install_headers', 'install_scripts', 'install_data',]) def run(self): if self.verbose != self.distribution.verbose: log.set_verbosity(self.verbose) try: for spec in self.args: self.easy_install(spec, not self.no_deps) if self.record: outputs = self.outputs if self.root: # strip any package prefix root_len = len(self.root) for counter in xrange(len(outputs)): outputs[counter] = outputs[counter][root_len:] from distutils import file_util self.execute( file_util.write_file, (self.record, outputs), "writing list of installed files to '%s'" % self.record ) self.warn_deprecated_options() 
finally: log.set_verbosity(self.distribution.verbose) def pseudo_tempname(self): """Return a pseudo-tempname base in the install directory. This code is intentionally naive; if a malicious party can write to the target directory you're already in deep doodoo. """ try: pid = os.getpid() except: pid = random.randint(0,sys.maxint) return os.path.join(self.install_dir, "test-easy-install-%s" % pid) def warn_deprecated_options(self): if self.delete_conflicting or self.ignore_conflicts_at_my_risk: log.warn( "Note: The -D, --delete-conflicting and" " --ignore-conflicts-at-my-risk no longer have any purpose" " and should not be used." ) def check_site_dir(self): """Verify that self.install_dir is .pth-capable dir, if needed""" instdir = normalize_path(self.install_dir) pth_file = os.path.join(instdir,'easy-install.pth') # Is it a configured, PYTHONPATH, implicit, or explicit site dir? is_site_dir = instdir in self.all_site_dirs if not is_site_dir: # No? Then directly test whether it does .pth file processing is_site_dir = self.check_pth_processing() else: # make sure we can write to target dir testfile = self.pseudo_tempname()+'.write-test' test_exists = os.path.exists(testfile) try: if test_exists: os.unlink(testfile) open(testfile,'w').close() os.unlink(testfile) except (OSError,IOError): self.cant_write_to_target() if not is_site_dir and not self.multi_version: # Can't install non-multi to non-site dir raise DistutilsError(self.no_default_version_msg()) if is_site_dir: if self.pth_file is None: self.pth_file = PthDistributions(pth_file, self.all_site_dirs) else: self.pth_file = None PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep) if instdir not in map(normalize_path, filter(None,PYTHONPATH)): # only PYTHONPATH dirs need a site.py, so pretend it's there self.sitepy_installed = True elif self.multi_version and not os.path.exists(pth_file): self.sitepy_installed = True # don't need site.py in this case self.pth_file = None # and don't create a .pth file 
self.install_dir = instdir def cant_write_to_target(self): msg = """can't create or remove files in install directory The following error occurred while trying to add or remove files in the installation directory: %s The installation directory you specified (via --install-dir, --prefix, or the distutils default setting) was: %s """ % (sys.exc_info()[1], self.install_dir,) if not os.path.exists(self.install_dir): msg += """ This directory does not currently exist. Please create it and try again, or choose a different installation directory (using the -d or --install-dir option). """ else: msg += """ Perhaps your account does not have write access to this directory? If the installation directory is a system-owned directory, you may need to sign in as the administrator or "root" account. If you do not have administrative access to this machine, you may wish to choose a different installation directory, preferably one that is listed in your PYTHONPATH environment variable. For information on other options, you may wish to consult the documentation at: http://packages.python.org/distribute/easy_install.html Please make the appropriate changes for your system and try again. """ raise DistutilsError(msg) def check_pth_processing(self): """Empirically verify whether .pth files are supported in inst. 
dir""" instdir = self.install_dir log.info("Checking .pth file support in %s", instdir) pth_file = self.pseudo_tempname()+".pth" ok_file = pth_file+'.ok' ok_exists = os.path.exists(ok_file) try: if ok_exists: os.unlink(ok_file) dirname = os.path.dirname(ok_file) if not os.path.exists(dirname): os.makedirs(dirname) f = open(pth_file,'w') except (OSError,IOError): self.cant_write_to_target() else: try: f.write("import os; f = open(%r, 'w'); f.write('OK'); f.close()\n" % (ok_file,)) f.close(); f=None executable = sys.executable if os.name=='nt': dirname,basename = os.path.split(executable) alt = os.path.join(dirname,'pythonw.exe') if basename.lower()=='python.exe' and os.path.exists(alt): # use pythonw.exe to avoid opening a console window executable = alt from distutils.spawn import spawn spawn([executable,'-E','-c','pass'],0) if os.path.exists(ok_file): log.info( "TEST PASSED: %s appears to support .pth files", instdir ) return True finally: if f: f.close() if os.path.exists(ok_file): os.unlink(ok_file) if os.path.exists(pth_file): os.unlink(pth_file) if not self.multi_version: log.warn("TEST FAILED: %s does NOT support .pth files", instdir) return False def install_egg_scripts(self, dist): """Write all the scripts for `dist`, unless scripts are excluded""" if not self.exclude_scripts and dist.metadata_isdir('scripts'): for script_name in dist.metadata_listdir('scripts'): self.install_script( dist, script_name, dist.get_metadata('scripts/'+script_name) ) self.install_wrapper_scripts(dist) def add_output(self, path): if os.path.isdir(path): for base, dirs, files in os.walk(path): for filename in files: self.outputs.append(os.path.join(base,filename)) else: self.outputs.append(path) def not_editable(self, spec): if self.editable: raise DistutilsArgError( "Invalid argument %r: you can't use filenames or URLs " "with --editable (except via the --find-links option)." 
% (spec,) ) def check_editable(self,spec): if not self.editable: return if os.path.exists(os.path.join(self.build_directory, spec.key)): raise DistutilsArgError( "%r already exists in %s; can't do a checkout there" % (spec.key, self.build_directory) ) def easy_install(self, spec, deps=False): tmpdir = tempfile.mkdtemp(prefix="easy_install-") download = None if not self.editable: self.install_site_py() try: if not isinstance(spec,Requirement): if URL_SCHEME(spec): # It's a url, download it to tmpdir and process self.not_editable(spec) download = self.package_index.download(spec, tmpdir) return self.install_item(None, download, tmpdir, deps, True) elif os.path.exists(spec): # Existing file or directory, just process it directly self.not_editable(spec) return self.install_item(None, spec, tmpdir, deps, True) else: spec = parse_requirement_arg(spec) self.check_editable(spec) dist = self.package_index.fetch_distribution( spec, tmpdir, self.upgrade, self.editable, not self.always_copy, self.local_index ) if dist is None: msg = "Could not find suitable distribution for %r" % spec if self.always_copy: msg+=" (--always-copy skips system and development eggs)" raise DistutilsError(msg) elif dist.precedence==DEVELOP_DIST: # .egg-info dists don't need installing, just process deps self.process_distribution(spec, dist, deps, "Using") return dist else: return self.install_item(spec, dist.location, tmpdir, deps) finally: if os.path.exists(tmpdir): rmtree(tmpdir) def install_item(self, spec, download, tmpdir, deps, install_needed=False): # Installation is also needed if file in tmpdir or is not an egg install_needed = install_needed or self.always_copy install_needed = install_needed or os.path.dirname(download) == tmpdir install_needed = install_needed or not download.endswith('.egg') install_needed = install_needed or ( self.always_copy_from is not None and os.path.dirname(normalize_path(download)) == normalize_path(self.always_copy_from) ) if spec and not install_needed: # at 
this point, we know it's a local .egg, we just don't know if # it's already installed. for dist in self.local_index[spec.project_name]: if dist.location==download: break else: install_needed = True # it's not in the local index log.info("Processing %s", os.path.basename(download)) if install_needed: dists = self.install_eggs(spec, download, tmpdir) for dist in dists: self.process_distribution(spec, dist, deps) else: dists = [self.check_conflicts(self.egg_distribution(download))] self.process_distribution(spec, dists[0], deps, "Using") if spec is not None: for dist in dists: if dist in spec: return dist def select_scheme(self, name): """Sets the install directories by applying the install schemes.""" # it's the caller's problem if they supply a bad name! scheme = INSTALL_SCHEMES[name] for key in SCHEME_KEYS: attrname = 'install_' + key if getattr(self, attrname) is None: setattr(self, attrname, scheme[key]) def process_distribution(self, requirement, dist, deps=True, *info): self.update_pth(dist) self.package_index.add(dist) self.local_index.add(dist) if not self.editable: self.install_egg_scripts(dist) self.installed_projects[dist.key] = dist log.info(self.installation_report(requirement, dist, *info)) if (dist.has_metadata('dependency_links.txt') and not self.no_find_links): self.package_index.add_find_links( dist.get_metadata_lines('dependency_links.txt') ) if not deps and not self.always_copy: return elif requirement is not None and dist.key != requirement.key: log.warn("Skipping dependencies for %s", dist) return # XXX this is not the distribution we were looking for elif requirement is None or dist not in requirement: # if we wound up with a different version, resolve what we've got distreq = dist.as_requirement() requirement = requirement or distreq requirement = Requirement( distreq.project_name, distreq.specs, requirement.extras ) log.info("Processing dependencies for %s", requirement) try: distros = WorkingSet([]).resolve( [requirement], self.local_index, 
                self.easy_install
            )
        except DistributionNotFound, e:
            raise DistutilsError(
                "Could not find required distribution %s" % e.args
            )
        except VersionConflict, e:
            raise DistutilsError(
                "Installed distribution %s conflicts with requirement %s"
                % e.args
            )
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)

    def should_unzip(self, dist):
        # Decide whether an egg should be installed unzipped (a directory).
        # An explicit --zip-ok / --always-unzip (self.zip_ok) wins outright.
        if self.zip_ok is not None:
            return not self.zip_ok
        if dist.has_metadata('not-zip-safe'):
            return True
        if not dist.has_metadata('zip-safe'):
            return True
        # NOTE(review): this final `return True` makes the two metadata
        # checks above dead code -- absent an explicit zip_ok setting, the
        # answer is always True.  Presumably deliberate (unzip by default);
        # confirm before simplifying.
        return True

    def maybe_move(self, spec, dist_filename, setup_base):
        # With --build-directory, relocate the unpacked source of `spec`
        # from the temporary `setup_base` into the build directory so it
        # survives after installation.  Returns the directory to build in.
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            log.warn(
                "%r already exists in %s; build directory %s will not be kept",
                spec.key, self.build_directory, setup_base
            )
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename)==setup_base:
                os.unlink(dist_filename)   # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents)==1:
                dist_filename = os.path.join(setup_base,contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst); shutil.move(setup_base, dst)
        return dst

    def install_wrapper_scripts(self, dist):
        # Write the entry-point wrapper scripts for `dist`, unless
        # --exclude-scripts was given.
        if not self.exclude_scripts:
            for args in get_script_args(dist):
                self.write_script(*args)

    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)

        def get_template(filename):
            """
            There are a couple of template scripts in the package. This
            function loads one of them and prepares it for use.
These templates use triple-quotes to escape variable substitutions so the scripts get the 2to3 treatment when build on Python 3. The templates cannot use triple-quotes naturally. """ raw_bytes = resource_string('setuptools', template_name) template_str = raw_bytes.decode('utf-8') clean_template = template_str.replace('"""', '') return clean_template if is_script: template_name = 'script template.py' if dev_path: template_name = template_name.replace('.py', ' (dev).py') script_text = (get_script_header(script_text) + get_template(template_name) % locals()) self.write_script(script_name, _to_ascii(script_text), 'b') def write_script(self, script_name, contents, mode="t", blockers=()): """Write an executable file to the scripts directory""" self.delete_blockers( # clean up old .py/.pyw w/o a script [os.path.join(self.script_dir,x) for x in blockers]) log.info("Installing %s script to %s", script_name, self.script_dir) target = os.path.join(self.script_dir, script_name) self.add_output(target) mask = current_umask() if not self.dry_run: ensure_directory(target) f = open(target,"w"+mode) f.write(contents) f.close() chmod(target, 0777-mask) def install_eggs(self, spec, dist_filename, tmpdir): # .egg dirs or files are already built, so just return them if dist_filename.lower().endswith('.egg'): return [self.install_egg(dist_filename, tmpdir)] elif dist_filename.lower().endswith('.exe'): return [self.install_exe(dist_filename, tmpdir)] # Anything else, try to extract and build setup_base = tmpdir if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'): unpack_archive(dist_filename, tmpdir, self.unpack_progress) elif os.path.isdir(dist_filename): setup_base = os.path.abspath(dist_filename) if (setup_base.startswith(tmpdir) # something we downloaded and self.build_directory and spec is not None ): setup_base = self.maybe_move(spec, dist_filename, setup_base) # Find the setup.py file setup_script = os.path.join(setup_base, 'setup.py') if not 
os.path.exists(setup_script): setups = glob(os.path.join(setup_base, '*', 'setup.py')) if not setups: raise DistutilsError( "Couldn't find a setup script in %s" % os.path.abspath(dist_filename) ) if len(setups)>1: raise DistutilsError( "Multiple setup scripts in %s" % os.path.abspath(dist_filename) ) setup_script = setups[0] # Now run it, and return the result if self.editable: log.info(self.report_editable(spec, setup_script)) return [] else: return self.build_and_install(setup_script, setup_base) def egg_distribution(self, egg_path): if os.path.isdir(egg_path): metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO')) else: metadata = EggMetadata(zipimport.zipimporter(egg_path)) return Distribution.from_filename(egg_path,metadata=metadata) def install_egg(self, egg_path, tmpdir): destination = os.path.join(self.install_dir,os.path.basename(egg_path)) destination = os.path.abspath(destination) if not self.dry_run: ensure_directory(destination) dist = self.egg_distribution(egg_path) self.check_conflicts(dist) if not samefile(egg_path, destination): if os.path.isdir(destination) and not os.path.islink(destination): dir_util.remove_tree(destination, dry_run=self.dry_run) elif os.path.exists(destination): self.execute(os.unlink,(destination,),"Removing "+destination) uncache_zipdir(destination) if os.path.isdir(egg_path): if egg_path.startswith(tmpdir): f,m = shutil.move, "Moving" else: f,m = shutil.copytree, "Copying" elif self.should_unzip(dist): self.mkpath(destination) f,m = self.unpack_and_compile, "Extracting" elif egg_path.startswith(tmpdir): f,m = shutil.move, "Moving" else: f,m = shutil.copy2, "Copying" self.execute(f, (egg_path, destination), (m+" %s to %s") % (os.path.basename(egg_path),os.path.dirname(destination))) self.add_output(destination) return self.egg_distribution(destination) def install_exe(self, dist_filename, tmpdir): # See if it's valid, get data cfg = extract_wininst_cfg(dist_filename) if cfg is None: raise DistutilsError( "%s is 
not a valid distutils Windows .exe" % dist_filename ) # Create a dummy distribution object until we build the real distro dist = Distribution(None, project_name=cfg.get('metadata','name'), version=cfg.get('metadata','version'), platform=get_platform() ) # Convert the .exe to an unpacked egg egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg') egg_tmp = egg_path+'.tmp' egg_info = os.path.join(egg_tmp, 'EGG-INFO') pkg_inf = os.path.join(egg_info, 'PKG-INFO') ensure_directory(pkg_inf) # make sure EGG-INFO dir exists dist._provider = PathMetadata(egg_tmp, egg_info) # XXX self.exe_to_egg(dist_filename, egg_tmp) # Write EGG-INFO/PKG-INFO if not os.path.exists(pkg_inf): f = open(pkg_inf,'w') f.write('Metadata-Version: 1.0\n') for k,v in cfg.items('metadata'): if k<>'target_version': f.write('%s: %s\n' % (k.replace('_','-').title(), v)) f.close() script_dir = os.path.join(egg_info,'scripts') self.delete_blockers( # delete entry-point scripts to avoid duping [os.path.join(script_dir,args[0]) for args in get_script_args(dist)] ) # Build .egg file from tmpdir bdist_egg.make_zipfile( egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run ) # install the .egg return self.install_egg(egg_path, tmpdir) def exe_to_egg(self, dist_filename, egg_tmp): """Extract a bdist_wininst to the directories an egg would use""" # Check for .pth file and set up prefix translations prefixes = get_exe_prefixes(dist_filename) to_compile = [] native_libs = [] top_level = {} def process(src,dst): s = src.lower() for old,new in prefixes: if s.startswith(old): src = new+src[len(old):] parts = src.split('/') dst = os.path.join(egg_tmp, *parts) dl = dst.lower() if dl.endswith('.pyd') or dl.endswith('.dll'): parts[-1] = bdist_egg.strip_module(parts[-1]) top_level[os.path.splitext(parts[0])[0]] = 1 native_libs.append(src) elif dl.endswith('.py') and old!='SCRIPTS/': top_level[os.path.splitext(parts[0])[0]] = 1 to_compile.append(dst) return dst if not src.endswith('.pth'): 
log.warn("WARNING: can't process %s", src) return None # extract, tracking .pyd/.dll->native_libs and .py -> to_compile unpack_archive(dist_filename, egg_tmp, process) stubs = [] for res in native_libs: if res.lower().endswith('.pyd'): # create stubs for .pyd's parts = res.split('/') resource = parts[-1] parts[-1] = bdist_egg.strip_module(parts[-1])+'.py' pyfile = os.path.join(egg_tmp, *parts) to_compile.append(pyfile); stubs.append(pyfile) bdist_egg.write_stub(resource, pyfile) self.byte_compile(to_compile) # compile .py's bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'), bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag for name in 'top_level','native_libs': if locals()[name]: txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt') if not os.path.exists(txt): f = open(txt,'w') f.write('\n'.join(locals()[name])+'\n') f.close() def check_conflicts(self, dist): """Verify that there are no conflicting "old-style" packages""" return dist # XXX temporarily disable until new strategy is stable from imp import find_module, get_suffixes from glob import glob blockers = [] names = dict.fromkeys(dist._get_metadata('top_level.txt')) # XXX private attr exts = {'.pyc':1, '.pyo':1} # get_suffixes() might leave one out for ext,mode,typ in get_suffixes(): exts[ext] = 1 for path,files in expand_paths([self.install_dir]+self.all_site_dirs): for filename in files: base,ext = os.path.splitext(filename) if base in names: if not ext: # no extension, check for package try: f, filename, descr = find_module(base, [path]) except ImportError: continue else: if f: f.close() if filename not in blockers: blockers.append(filename) elif ext in exts and base!='site': # XXX ugh blockers.append(os.path.join(path,filename)) if blockers: self.found_conflicts(dist, blockers) return dist def found_conflicts(self, dist, blockers): if self.delete_conflicting: log.warn("Attempting to delete conflicting packages:") return self.delete_blockers(blockers) msg = """\ 
------------------------------------------------------------------------- CONFLICT WARNING: The following modules or packages have the same names as modules or packages being installed, and will be *before* the installed packages in Python's search path. You MUST remove all of the relevant files and directories before you will be able to use the package(s) you are installing: %s """ % '\n '.join(blockers) if self.ignore_conflicts_at_my_risk: msg += """\ (Note: you can run EasyInstall on '%s' with the --delete-conflicting option to attempt deletion of the above files and/or directories.) """ % dist.project_name else: msg += """\ Note: you can attempt this installation again with EasyInstall, and use either the --delete-conflicting (-D) option or the --ignore-conflicts-at-my-risk option, to either delete the above files and directories, or to ignore the conflicts, respectively. Note that if you ignore the conflicts, the installed package(s) may not work. """ msg += """\ ------------------------------------------------------------------------- """ sys.stderr.write(msg) sys.stderr.flush() if not self.ignore_conflicts_at_my_risk: raise DistutilsError("Installation aborted due to conflicts") def installation_report(self, req, dist, what="Installed"): """Helpful installation message for display to package users""" msg = "\n%(what)s %(eggloc)s%(extras)s" if self.multi_version and not self.no_report: msg += """ Because this distribution was installed --multi-version, before you can import modules from this package in an application, you will need to 'import pkg_resources' and then use a 'require()' call similar to one of these examples, in order to select the desired version: pkg_resources.require("%(name)s") # latest installed version pkg_resources.require("%(name)s==%(version)s") # this exact version pkg_resources.require("%(name)s>=%(version)s") # this version or higher """ if self.install_dir not in map(normalize_path,sys.path): msg += """ Note also that the 
installation directory must be on sys.path at runtime for this to work. (e.g. by being the application's script directory, by being on PYTHONPATH, or by being added to sys.path by your code.) """ eggloc = dist.location name = dist.project_name version = dist.version extras = '' # TODO: self.report_extras(req, dist) return msg % locals() def report_editable(self, spec, setup_script): dirname = os.path.dirname(setup_script) python = sys.executable return """\nExtracted editable version of %(spec)s to %(dirname)s If it uses setuptools in its setup script, you can activate it in "development" mode by going to that directory and running:: %(python)s setup.py develop See the setuptools documentation for the "develop" command for more info. """ % locals() def run_setup(self, setup_script, setup_base, args): sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg) sys.modules.setdefault('distutils.command.egg_info', egg_info) args = list(args) if self.verbose>2: v = 'v' * (self.verbose - 1) args.insert(0,'-'+v) elif self.verbose<2: args.insert(0,'-q') if self.dry_run: args.insert(0,'-n') log.info( "Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args) ) try: run_setup(setup_script, args) except SystemExit, v: raise DistutilsError("Setup script exited with %s" % (v.args[0],)) def build_and_install(self, setup_script, setup_base): args = ['bdist_egg', '--dist-dir'] dist_dir = tempfile.mkdtemp( prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script) ) try: self._set_fetcher_options(os.path.dirname(setup_script)) args.append(dist_dir) self.run_setup(setup_script, setup_base, args) all_eggs = Environment([dist_dir]) eggs = [] for key in all_eggs: for dist in all_eggs[key]: eggs.append(self.install_egg(dist.location, setup_base)) if not eggs and not self.dry_run: log.warn("No eggs found in %s (setup script problem?)", dist_dir) return eggs finally: rmtree(dist_dir) log.set_verbosity(self.verbose) # restore our log verbosity def _set_fetcher_options(self, 
base): """ When easy_install is about to run bdist_egg on a source dist, that source dist might have 'setup_requires' directives, requiring additional fetching. Ensure the fetcher options given to easy_install are available to that command as well. """ # find the fetch options from easy_install and write them out # to the setup.cfg file. ei_opts = self.distribution.get_option_dict('easy_install').copy() fetch_directives = ( 'find_links', 'site_dirs', 'index_url', 'optimize', 'site_dirs', 'allow_hosts', ) fetch_options = {} for key, val in ei_opts.iteritems(): if key not in fetch_directives: continue fetch_options[key.replace('_', '-')] = val[1] # create a settings dictionary suitable for `edit_config` settings = dict(easy_install=fetch_options) cfg_filename = os.path.join(base, 'setup.cfg') setopt.edit_config(cfg_filename, settings) def update_pth(self,dist): if self.pth_file is None: return for d in self.pth_file[dist.key]: # drop old entries if self.multi_version or d.location != dist.location: log.info("Removing %s from easy-install.pth file", d) self.pth_file.remove(d) if d.location in self.shadow_path: self.shadow_path.remove(d.location) if not self.multi_version: if dist.location in self.pth_file.paths: log.info( "%s is already the active version in easy-install.pth", dist ) else: log.info("Adding %s to easy-install.pth file", dist) self.pth_file.add(dist) # add new entry if dist.location not in self.shadow_path: self.shadow_path.append(dist.location) if not self.dry_run: self.pth_file.save() if dist.key=='distribute': # Ensure that setuptools itself never becomes unavailable! # XXX should this check for latest version? 
filename = os.path.join(self.install_dir,'setuptools.pth') if os.path.islink(filename): os.unlink(filename) f = open(filename, 'wt') f.write(self.pth_file.make_relative(dist.location)+'\n') f.close() def unpack_progress(self, src, dst): # Progress filter for unpacking log.debug("Unpacking %s to %s", src, dst) return dst # only unpack-and-compile skips files for dry run def unpack_and_compile(self, egg_path, destination): to_compile = []; to_chmod = [] def pf(src,dst): if dst.endswith('.py') and not src.startswith('EGG-INFO/'): to_compile.append(dst) to_chmod.append(dst) elif dst.endswith('.dll') or dst.endswith('.so'): to_chmod.append(dst) self.unpack_progress(src,dst) return not self.dry_run and dst or None unpack_archive(egg_path, destination, pf) self.byte_compile(to_compile) if not self.dry_run: for f in to_chmod: mode = ((os.stat(f)[stat.ST_MODE]) | 0555) & 07755 chmod(f, mode) def byte_compile(self, to_compile): if _dont_write_bytecode: self.warn('byte-compiling is disabled, skipping.') return from distutils.util import byte_compile try: # try to make the byte compile messages quieter log.set_verbosity(self.verbose - 1) byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run) if self.optimize: byte_compile( to_compile, optimize=self.optimize, force=1, dry_run=self.dry_run ) finally: log.set_verbosity(self.verbose) # restore original verbosity def no_default_version_msg(self): return """bad install directory or PYTHONPATH You are attempting to install a package to a directory that is not on PYTHONPATH and which Python does not read ".pth" files from. 
The installation directory you specified (via --install-dir, --prefix, or the distutils default setting) was: %s and your PYTHONPATH environment variable currently contains: %r Here are some of your options for correcting the problem: * You can choose a different installation directory, i.e., one that is on PYTHONPATH or supports .pth files * You can add the installation directory to the PYTHONPATH environment variable. (It must then also be on PYTHONPATH whenever you run Python and want to use the package(s) you are installing.) * You can set up the installation directory to support ".pth" files by using one of the approaches described here: http://packages.python.org/distribute/easy_install.html#custom-installation-locations Please make the appropriate changes for your system and try again.""" % ( self.install_dir, os.environ.get('PYTHONPATH','') ) def install_site_py(self): """Make sure there's a site.py in the target dir, if needed""" if self.sitepy_installed: return # already did it, or don't need to sitepy = os.path.join(self.install_dir, "site.py") source = resource_string(Requirement.parse("distribute"), "site.py") current = "" if os.path.exists(sitepy): log.debug("Checking existing site.py in %s", self.install_dir) f = open(sitepy,'rb') current = f.read() # we want str, not bytes if sys.version_info >= (3,): current = current.decode() f.close() if not current.startswith('def __boot():'): raise DistutilsError( "%s is not a setuptools-generated site.py; please" " remove it." 
% sitepy ) if current != source: log.info("Creating %s", sitepy) if not self.dry_run: ensure_directory(sitepy) f = open(sitepy,'wb') f.write(source) f.close() self.byte_compile([sitepy]) self.sitepy_installed = True def create_home_path(self): """Create directories under ~.""" if not self.user: return home = convert_path(os.path.expanduser("~")) for name, path in self.config_vars.iteritems(): if path.startswith(home) and not os.path.isdir(path): self.debug_print("os.makedirs('%s', 0700)" % path) os.makedirs(path, 0700) INSTALL_SCHEMES = dict( posix = dict( install_dir = '$base/lib/python$py_version_short/site-packages', script_dir = '$base/bin', ), ) DEFAULT_SCHEME = dict( install_dir = '$base/Lib/site-packages', script_dir = '$base/Scripts', ) def _expand(self, *attrs): config_vars = self.get_finalized_command('install').config_vars if self.prefix: # Set default install_dir/scripts from --prefix config_vars = config_vars.copy() config_vars['base'] = self.prefix scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME) for attr,val in scheme.items(): if getattr(self,attr,None) is None: setattr(self,attr,val) from distutils.util import subst_vars for attr in attrs: val = getattr(self, attr) if val is not None: val = subst_vars(val, config_vars) if os.name == 'posix': val = os.path.expanduser(val) setattr(self, attr, val) def get_site_dirs(): # return a list of 'site' dirs sitedirs = filter(None,os.environ.get('PYTHONPATH','').split(os.pathsep)) prefixes = [sys.prefix] if sys.exec_prefix != sys.prefix: prefixes.append(sys.exec_prefix) for prefix in prefixes: if prefix: if sys.platform in ('os2emx', 'riscos'): sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) elif os.sep == '/': sitedirs.extend([os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages"), os.path.join(prefix, "lib", "site-python")]) else: sitedirs.extend( [prefix, os.path.join(prefix, "lib", "site-packages")] ) if sys.platform == 'darwin': # for framework builds *only* we 
add the standard Apple # locations. Currently only per-user, but /Library and # /Network/Library could be added too if 'Python.framework' in prefix: home = os.environ.get('HOME') if home: sitedirs.append( os.path.join(home, 'Library', 'Python', sys.version[:3], 'site-packages')) for plat_specific in (0,1): site_lib = get_python_lib(plat_specific) if site_lib not in sitedirs: sitedirs.append(site_lib) if HAS_USER_SITE: sitedirs.append(site.USER_SITE) sitedirs = map(normalize_path, sitedirs) return sitedirs def expand_paths(inputs): """Yield sys.path directories that might contain "old-style" packages""" seen = {} for dirname in inputs: dirname = normalize_path(dirname) if dirname in seen: continue seen[dirname] = 1 if not os.path.isdir(dirname): continue files = os.listdir(dirname) yield dirname, files for name in files: if not name.endswith('.pth'): # We only care about the .pth files continue if name in ('easy-install.pth','setuptools.pth'): # Ignore .pth files that we control continue # Read the .pth file f = open(os.path.join(dirname,name)) lines = list(yield_lines(f)) f.close() # Yield existing non-dupe, non-import directory lines from it for line in lines: if not line.startswith("import"): line = normalize_path(line.rstrip()) if line not in seen: seen[line] = 1 if not os.path.isdir(line): continue yield line, os.listdir(line) def extract_wininst_cfg(dist_filename): """Extract configuration data from a bdist_wininst .exe Returns a ConfigParser.RawConfigParser, or None """ f = open(dist_filename,'rb') try: endrec = zipfile._EndRecData(f) if endrec is None: return None prepended = (endrec[9] - endrec[5]) - endrec[6] if prepended < 12: # no wininst data here return None f.seek(prepended-12) import struct, StringIO, ConfigParser tag, cfglen, bmlen = struct.unpack("<iii",f.read(12)) if tag not in (0x1234567A, 0x1234567B): return None # not a valid tag f.seek(prepended-(12+cfglen)) cfg = ConfigParser.RawConfigParser({'version':'','target_version':''}) try: part = 
f.read(cfglen) # part is in bytes, but we need to read up to the first null # byte. if sys.version_info >= (2,6): null_byte = bytes([0]) else: null_byte = chr(0) config = part.split(null_byte, 1)[0] # Now the config is in bytes, but on Python 3, it must be # unicode for the RawConfigParser, so decode it. Is this the # right encoding? config = config.decode('ascii') cfg.readfp(StringIO.StringIO(config)) except ConfigParser.Error: return None if not cfg.has_section('metadata') or not cfg.has_section('Setup'): return None return cfg finally: f.close() def get_exe_prefixes(exe_filename): """Get exe->egg path translations for a given .exe file""" prefixes = [ ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''), ('PLATLIB/', ''), ('SCRIPTS/', 'EGG-INFO/scripts/'), ('DATA/LIB/site-packages', ''), ] z = zipfile.ZipFile(exe_filename) try: for info in z.infolist(): name = info.filename parts = name.split('/') if len(parts)==3 and parts[2]=='PKG-INFO': if parts[1].endswith('.egg-info'): prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/')) break if len(parts)<>2 or not name.endswith('.pth'): continue if name.endswith('-nspkg.pth'): continue if parts[0].upper() in ('PURELIB','PLATLIB'): contents = z.read(name) if sys.version_info >= (3,): contents = contents.decode() for pth in yield_lines(contents): pth = pth.strip().replace('\\','/') if not pth.startswith('import'): prefixes.append((('%s/%s/' % (parts[0],pth)), '')) finally: z.close() prefixes = [(x.lower(),y) for x, y in prefixes] prefixes.sort(); prefixes.reverse() return prefixes def parse_requirement_arg(spec): try: return Requirement.parse(spec) except ValueError: raise DistutilsError( "Not a URL, existing file, or requirement spec: %r" % (spec,) ) class PthDistributions(Environment): """A .pth file with Distribution paths in it""" dirty = False def __init__(self, filename, sitedirs=()): self.filename = filename; self.sitedirs=map(normalize_path, sitedirs) self.basedir = normalize_path(os.path.dirname(self.filename)) 
self._load(); Environment.__init__(self, [], None, None) for path in yield_lines(self.paths): map(self.add, find_distributions(path, True)) def _load(self): self.paths = [] saw_import = False seen = dict.fromkeys(self.sitedirs) if os.path.isfile(self.filename): f = open(self.filename,'rt') for line in f: if line.startswith('import'): saw_import = True continue path = line.rstrip() self.paths.append(path) if not path.strip() or path.strip().startswith('#'): continue # skip non-existent paths, in case somebody deleted a package # manually, and duplicate paths as well path = self.paths[-1] = normalize_path( os.path.join(self.basedir,path) ) if not os.path.exists(path) or path in seen: self.paths.pop() # skip it self.dirty = True # we cleaned up, so we're dirty now :) continue seen[path] = 1 f.close() if self.paths and not saw_import: self.dirty = True # ensure anything we touch has import wrappers while self.paths and not self.paths[-1].strip(): self.paths.pop() def save(self): """Write changed .pth file back to disk""" if not self.dirty: return data = '\n'.join(map(self.make_relative,self.paths)) if data: log.debug("Saving %s", self.filename) data = ( "import sys; sys.__plen = len(sys.path)\n" "%s\n" "import sys; new=sys.path[sys.__plen:];" " del sys.path[sys.__plen:];" " p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;" " sys.__egginsert = p+len(new)\n" ) % data if os.path.islink(self.filename): os.unlink(self.filename) f = open(self.filename,'wt') f.write(data); f.close() elif os.path.exists(self.filename): log.debug("Deleting empty %s", self.filename) os.unlink(self.filename) self.dirty = False def add(self,dist): """Add `dist` to the distribution map""" if (dist.location not in self.paths and ( dist.location not in self.sitedirs or dist.location == os.getcwd() #account for '.' 
being in PYTHONPATH )): self.paths.append(dist.location) self.dirty = True Environment.add(self,dist) def remove(self,dist): """Remove `dist` from the distribution map""" while dist.location in self.paths: self.paths.remove(dist.location); self.dirty = True Environment.remove(self,dist) def make_relative(self,path): npath, last = os.path.split(normalize_path(path)) baselen = len(self.basedir) parts = [last] sep = os.altsep=='/' and '/' or os.sep while len(npath)>=baselen: if npath==self.basedir: parts.append(os.curdir) parts.reverse() return sep.join(parts) npath, last = os.path.split(npath) parts.append(last) else: return path def get_script_header(script_text, executable=sys_executable, wininst=False): """Create a #! line, getting options (if any) from script_text""" from distutils.command.build_scripts import first_line_re # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern. if not isinstance(first_line_re.pattern, str): first_line_re = re.compile(first_line_re.pattern.decode()) first = (script_text+'\n').splitlines()[0] match = first_line_re.match(first) options = '' if match: options = match.group(1) or '' if options: options = ' '+options if wininst: executable = "python.exe" else: executable = nt_quote_arg(executable) hdr = "#!%(executable)s%(options)s\n" % locals() if not isascii(hdr): # Non-ascii path to sys.executable, use -x to prevent warnings if options: if options.strip().startswith('-'): options = ' -x'+options.strip()[1:] # else: punt, we can't do it, let the warning happen anyway else: options = ' -x' executable = fix_jython_executable(executable, options) hdr = "#!%(executable)s%(options)s\n" % locals() return hdr def auto_chmod(func, arg, exc): if func is os.remove and os.name=='nt': chmod(arg, stat.S_IWRITE) return func(arg) exc = sys.exc_info() raise exc[0], (exc[1][0], exc[1][1] + (" %s %s" % (func,arg))) def uncache_zipdir(path): """Ensure that the importer caches dont have stale info for `path`""" from zipimport import 
_zip_directory_cache as zdc _uncache(path, zdc) _uncache(path, sys.path_importer_cache) def _uncache(path, cache): if path in cache: del cache[path] else: path = normalize_path(path) for p in cache: if normalize_path(p)==path: del cache[p] return def is_python(text, filename='<string>'): "Is this string a valid Python script?" try: compile(text, filename, 'exec') except (SyntaxError, TypeError): return False else: return True def is_sh(executable): """Determine if the specified executable is a .sh (contains a #! line)""" try: fp = open(executable) magic = fp.read(2) fp.close() except (OSError,IOError): return executable return magic == '#!' def nt_quote_arg(arg): """Quote a command line argument according to Windows parsing rules""" result = [] needquote = False nb = 0 needquote = (" " in arg) or ("\t" in arg) if needquote: result.append('"') for c in arg: if c == '\\': nb += 1 elif c == '"': # double preceding backslashes, then add a \" result.append('\\' * (nb*2) + '\\"') nb = 0 else: if nb: result.append('\\' * nb) nb = 0 result.append(c) if nb: result.append('\\' * nb) if needquote: result.append('\\' * nb) # double the trailing backslashes result.append('"') return ''.join(result) def is_python_script(script_text, filename): """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. """ if filename.endswith('.py') or filename.endswith('.pyw'): return True # extension says it's Python if is_python(script_text, filename): return True # it's syntactically valid Python if script_text.startswith('#!'): # It begins with a '#!' 
line, so check if 'python' is in it somewhere return 'python' in script_text.splitlines()[0].lower() return False # Not any Python I can recognize try: from os import chmod as _chmod except ImportError: # Jython compatibility def _chmod(*args): pass def chmod(path, mode): log.debug("changing mode of %s to %o", path, mode) try: _chmod(path, mode) except os.error, e: log.debug("chmod failed: %s", e) def fix_jython_executable(executable, options): if sys.platform.startswith('java') and is_sh(executable): # Workaround for Jython is not needed on Linux systems. import java if java.lang.System.getProperty("os.name") == "Linux": return executable # Workaround Jython's sys.executable being a .sh (an invalid # shebang line interpreter) if options: # Can't apply the workaround, leave it broken log.warn("WARNING: Unable to adapt shebang line for Jython," " the following script is NOT executable\n" " see http://bugs.jython.org/issue1112 for" " more information.") else: return '/usr/bin/env %s' % executable return executable def get_script_args(dist, executable=sys_executable, wininst=False): """Yield write_script() argument tuples for a distribution's entrypoints""" spec = str(dist.as_requirement()) header = get_script_header("", executable, wininst) for group in 'console_scripts', 'gui_scripts': for name, ep in dist.get_entry_map(group).items(): script_text = ( "# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n" "__requires__ = %(spec)r\n" "import sys\n" "from pkg_resources import load_entry_point\n" "\n" "if __name__ == '__main__':" "\n" " sys.exit(\n" " load_entry_point(%(spec)r, %(group)r, %(name)r)()\n" " )\n" ) % locals() if sys.platform=='win32' or wininst: # On Windows/wininst, add a .py extension and an .exe launcher if group=='gui_scripts': ext, launcher = '-script.pyw', 'gui.exe' old = ['.pyw'] new_header = re.sub('(?i)python.exe','pythonw.exe',header) else: ext, launcher = '-script.py', 'cli.exe' old = ['.py','.pyc','.pyo'] new_header = 
re.sub('(?i)pythonw.exe','python.exe',header) if is_64bit(): launcher = launcher.replace(".", "-64.") else: launcher = launcher.replace(".", "-32.") if os.path.exists(new_header[2:-1]) or sys.platform!='win32': hdr = new_header else: hdr = header yield (name+ext, hdr+script_text, 't', [name+x for x in old]) yield ( name+'.exe', resource_string('setuptools', launcher), 'b' # write in binary mode ) else: # On other platforms, we assume the right thing to do is to # just write the stub with no extension. yield (name, header+script_text) def rmtree(path, ignore_errors=False, onerror=auto_chmod): """Recursively delete a directory tree. This code is taken from the Python 2.4 version of 'shutil', because the 2.3 version doesn't really work right. """ if ignore_errors: def onerror(*args): pass elif onerror is None: def onerror(*args): raise names = [] try: names = os.listdir(path) except os.error, err: onerror(os.listdir, path, sys.exc_info()) for name in names: fullname = os.path.join(path, name) try: mode = os.lstat(fullname).st_mode except os.error: mode = 0 if stat.S_ISDIR(mode): rmtree(fullname, ignore_errors, onerror) else: try: os.remove(fullname) except os.error, err: onerror(os.remove, fullname, sys.exc_info()) try: os.rmdir(path) except os.error: onerror(os.rmdir, path, sys.exc_info()) def current_umask(): tmp = os.umask(022) os.umask(tmp) return tmp def bootstrap(): # This function is called when setuptools*.egg is run using /bin/sh import setuptools; argv0 = os.path.dirname(setuptools.__path__[0]) sys.argv[0] = argv0; sys.argv.append(argv0); main() def main(argv=None, **kw): from setuptools import setup from setuptools.dist import Distribution import distutils.core USAGE = """\ usage: %(script)s [options] requirement_or_url ... 
or: %(script)s --help """ def gen_usage (script_name): script = os.path.basename(script_name) return USAGE % vars() def with_ei_usage(f): old_gen_usage = distutils.core.gen_usage try: distutils.core.gen_usage = gen_usage return f() finally: distutils.core.gen_usage = old_gen_usage class DistributionWithoutHelpCommands(Distribution): common_usage = "" def _show_help(self,*args,**kw): with_ei_usage(lambda: Distribution._show_help(self,*args,**kw)) def find_config_files(self): files = Distribution.find_config_files(self) if 'setup.cfg' in files: files.remove('setup.cfg') return files if argv is None: argv = sys.argv[1:] with_ei_usage(lambda: setup( script_args = ['-q','easy_install', '-v']+argv, script_name = sys.argv[0] or 'easy_install', distclass=DistributionWithoutHelpCommands, **kw ) )
bsd-3-clause
blurrymoi/bashlex
bashlex/parser.py
1
24729
# bashlex/parser.py -- PLY (yacc) grammar for parsing bash input into ast.node
# trees.
#
# NOTE: PLY derives the grammar from the docstring of every p_* function, so
# those docstrings are part of the program's behavior and must not be edited.
import os, copy

from bashlex import yacc, tokenizer, state, ast, subst, flags, errors, heredoc

def _partsspan(parts):
    # span covering a list of nodes: start of the first, end of the last
    return parts[0].pos[0], parts[-1].pos[1]

# PLY token list, taken directly from the tokenizer's token type enum
tokens = [e.name for e in tokenizer.tokentype]

precedence = (
    ('left', 'AMPERSAND', 'SEMICOLON', 'NEWLINE', 'EOF'),
    ('left', 'AND_AND', 'OR_OR'),
    ('right', 'BAR', 'BAR_AND')
)

def p_inputunit(p):
    '''inputunit : simple_list simple_list_terminator
                 | NEWLINE
                 | error NEWLINE
                 | EOF'''
    # XXX
    if p.lexer._parserstate & flags.parser.CMDSUBST:
        p.lexer._parserstate.add(flags.parser.EOFTOKEN)

    if isinstance(p[1], ast.node):
        p[0] = p[1]
        # accept right here in case the input contains more lines that are
        # not part of the current command
        p.accept()

def p_word_list(p):
    '''word_list : WORD
                 | word_list WORD'''
    # accumulate expanded word nodes into a plain python list
    parserobj = p.context
    if len(p) == 2:
        p[0] = [_expandword(parserobj, p.slice[1])]
    else:
        p[0] = p[1]
        p[0].append(_expandword(parserobj, p.slice[2]))

def p_redirection_heredoc(p):
    '''redirection : LESS_LESS WORD
                   | NUMBER LESS_LESS WORD
                   | REDIR_WORD LESS_LESS WORD
                   | LESS_LESS_MINUS WORD
                   | NUMBER LESS_LESS_MINUS WORD
                   | REDIR_WORD LESS_LESS_MINUS WORD'''
    # heredoc redirections; the heredoc body itself is attached later by the
    # heredoc module via the redirstack entries pushed below
    parserobj = p.context
    assert isinstance(parserobj, _parser)

    output = ast.node(kind='word', word=p[len(p)-1], parts=[],
                      pos=p.lexspan(len(p)-1))
    if len(p) == 3:
        p[0] = ast.node(kind='redirect', input=None, type=p[1], heredoc=None,
                        output=output, pos=(p.lexpos(1), p.endlexpos(2)))
    else:
        p[0] = ast.node(kind='redirect', input=p[1], type=p[2], heredoc=None,
                        output=output, pos=(p.lexpos(1), p.endlexpos(3)))

    # the bool records whether the heredoc strips leading tabs (<<- form)
    if p.slice[len(p)-2].ttype == tokenizer.tokentype.LESS_LESS:
        parserobj.redirstack.append((p[0], False))
    else:
        parserobj.redirstack.append((p[0], True))

def p_redirection(p):
    '''redirection : GREATER WORD
                   | LESS WORD
                   | NUMBER GREATER WORD
                   | NUMBER LESS WORD
                   | REDIR_WORD GREATER WORD
                   | REDIR_WORD LESS WORD
                   | GREATER_GREATER WORD
                   | NUMBER GREATER_GREATER WORD
                   | REDIR_WORD GREATER_GREATER WORD
                   | GREATER_BAR WORD
                   | NUMBER GREATER_BAR WORD
                   | REDIR_WORD GREATER_BAR WORD
                   | LESS_GREATER WORD
                   | NUMBER LESS_GREATER WORD
                   | REDIR_WORD LESS_GREATER WORD
                   | LESS_LESS_LESS WORD
                   | NUMBER LESS_LESS_LESS WORD
                   | REDIR_WORD LESS_LESS_LESS WORD
                   | LESS_AND NUMBER
                   | NUMBER LESS_AND NUMBER
                   | REDIR_WORD LESS_AND NUMBER
                   | GREATER_AND NUMBER
                   | NUMBER GREATER_AND NUMBER
                   | REDIR_WORD GREATER_AND NUMBER
                   | LESS_AND WORD
                   | NUMBER LESS_AND WORD
                   | REDIR_WORD LESS_AND WORD
                   | GREATER_AND WORD
                   | NUMBER GREATER_AND WORD
                   | REDIR_WORD GREATER_AND WORD
                   | GREATER_AND DASH
                   | NUMBER GREATER_AND DASH
                   | REDIR_WORD GREATER_AND DASH
                   | LESS_AND DASH
                   | NUMBER LESS_AND DASH
                   | REDIR_WORD LESS_AND DASH
                   | AND_GREATER WORD
                   | AND_GREATER_GREATER WORD'''
    # every non-heredoc redirection form; the two-symbol forms have no
    # explicit fd (input=None), the three-symbol forms carry one
    parserobj = p.context
    if len(p) == 3:
        output = p[2]
        if p.slice[2].ttype == tokenizer.tokentype.WORD:
            output = _expandword(parserobj, p.slice[2])
        p[0] = ast.node(kind='redirect', input=None, type=p[1],
                        heredoc=None, output=output,
                        pos=(p.lexpos(1), p.endlexpos(2)))
    else:
        output = p[3]
        if p.slice[3].ttype == tokenizer.tokentype.WORD:
            output = _expandword(parserobj, p.slice[3])
        p[0] = ast.node(kind='redirect', input=p[1], type=p[2],
                        heredoc=None, output=output,
                        pos=(p.lexpos(1), p.endlexpos(3)))

def _expandword(parser, tokenword):
    '''turn a WORD token into a word ast.node, running word expansion on it
    (which may discover embedded substitutions) unless the parser's
    expansion limit says otherwise'''
    if parser._expansionlimit == -1:
        # we enter this branch in the following conditions:
        # - currently parsing a substitution as a result of an expansion
        # - the previous expansion had limit == 0
        #
        # this means that this node is a descendant of a substitution in an
        # unexpanded word and will be filtered in the limit == 0 condition below
        #
        # (the reason we even expand when limit == 0 is to get quote removal)
        node = ast.node(kind='word', word=tokenword,
                        pos=(tokenword.lexpos, tokenword.endlexpos), parts=[])
        return node
    else:
        quoted = bool(tokenword.flags & flags.word.QUOTED)
        doublequoted = quoted and tokenword.value[0] == '"'

        # TODO set qheredocument
        parts, expandedword = subst._expandwordinternal(parser,
                                                        tokenword, 0,
                                                        doublequoted, 0, 0)

        # limit reached, don't include substitutions (still expanded to get
        # quote removal though)
        if parser._expansionlimit == 0:
            parts = [node for node in parts if 'substitution' not in node.kind]

        node = ast.node(kind='word', word=expandedword,
                        pos=(tokenword.lexpos, tokenword.endlexpos),
                        parts=parts)
        return node

def p_simple_command_element(p):
    '''simple_command_element : WORD
                              | ASSIGNMENT_WORD
                              | redirection'''
    # always produces a one-element list so p_simple_command can extend it
    if isinstance(p[1], ast.node):
        p[0] = [p[1]]
        return

    parserobj = p.context
    p[0] = [_expandword(parserobj, p.slice[1])]

    # change the word node to an assignment if necessary
    if p.slice[1].ttype == tokenizer.tokentype.ASSIGNMENT_WORD:
        p[0][0].kind = 'assignment'

def p_redirection_list(p):
    '''redirection_list : redirection
                        | redirection_list redirection'''
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1]
        p[0].append(p[2])

def p_simple_command(p):
    '''simple_command : simple_command_element
                      | simple_command simple_command_element'''
    p[0] = p[1]
    if len(p) == 3:
        p[0].extend(p[2])

def p_command(p):
    '''command : simple_command
               | shell_command
               | shell_command redirection_list
               | function_def
               | coproc'''
    # shell_command/function_def already arrive as nodes; a simple_command is
    # still a list of parts and gets wrapped in a 'command' node here
    if isinstance(p[1], ast.node):
        p[0] = p[1]
        if len(p) == 3:
            assert p[0].kind == 'compound'
            p[0].redirects.extend(p[2])
            assert p[0].pos[0] < p[0].redirects[-1].pos[1]
            p[0].pos = (p[0].pos[0], p[0].redirects[-1].pos[1])
    else:
        p[0] = ast.node(kind='command', parts=p[1], pos=_partsspan(p[1]))

def p_shell_command(p):
    '''shell_command : for_command
                     | case_command
                     | WHILE compound_list DO compound_list DONE
                     | UNTIL compound_list DO compound_list DONE
                     | select_command
                     | if_command
                     | subshell
                     | group_command
                     | arith_command
                     | cond_command
                     | arith_for_command'''
    if len(p) == 2:
        p[0] = p[1]
    else:
        # while or until
        assert p[2].kind == 'list'

        parts = _makeparts(p)
        kind = parts[0].word
        assert kind in ('while', 'until')
        p[0] = ast.node(kind='compound',
                        redirects=[],
                        list=[ast.node(kind=kind, parts=parts,
                                       pos=_partsspan(parts))],
                        pos=_partsspan(parts))

    assert p[0].kind == 'compound'

def _makeparts(p):
    '''flatten a production's symbols into a list of ast nodes: nodes pass
    through, lists are spliced in, WORD tokens get expanded and any other
    token becomes a reservedword node'''
    parts = []
    for i in range(1, len(p)):
        if isinstance(p[i], ast.node):
            parts.append(p[i])
        elif isinstance(p[i], list):
            parts.extend(p[i])
        elif isinstance(p.slice[i], tokenizer.token):
            if p.slice[i].ttype == tokenizer.tokentype.WORD:
                parserobj = p.context
                parts.append(_expandword(parserobj, p.slice[i]))
            else:
                parts.append(ast.node(kind='reservedword', word=p[i],
                                      pos=p.lexspan(i)))
        else:
            pass

    return parts

def p_for_command(p):
    '''for_command : FOR WORD newline_list DO compound_list DONE
                   | FOR WORD newline_list LEFT_CURLY compound_list RIGHT_CURLY
                   | FOR WORD SEMICOLON newline_list DO compound_list DONE
                   | FOR WORD SEMICOLON newline_list LEFT_CURLY compound_list RIGHT_CURLY
                   | FOR WORD newline_list IN word_list list_terminator newline_list DO compound_list DONE
                   | FOR WORD newline_list IN word_list list_terminator newline_list LEFT_CURLY compound_list RIGHT_CURLY
                   | FOR WORD newline_list IN list_terminator newline_list DO compound_list DONE
                   | FOR WORD newline_list IN list_terminator newline_list LEFT_CURLY compound_list RIGHT_CURLY'''
    parts = _makeparts(p)

    # find the operatornode that we might have there due to
    # list_terminator/newline_list and convert it to a reservedword so its
    # considered as part of the for loop
    for i, part in enumerate(parts):
        if part.kind == 'operator' and part.op == ';':
            parts[i] = ast.node(kind='reservedword', word=';', pos=part.pos)
            break # there could be only one in there...

    p[0] = ast.node(kind='compound',
                    redirects=[],
                    list=[ast.node(kind='for', parts=parts,
                                   pos=_partsspan(parts))],
                    pos=_partsspan(parts))

def p_arith_for_command(p):
    '''arith_for_command : FOR ARITH_FOR_EXPRS list_terminator newline_list DO compound_list DONE
                         | FOR ARITH_FOR_EXPRS list_terminator newline_list LEFT_CURLY compound_list RIGHT_CURLY
                         | FOR ARITH_FOR_EXPRS DO compound_list DONE
                         | FOR ARITH_FOR_EXPRS LEFT_CURLY compound_list RIGHT_CURLY'''
    raise NotImplementedError('arithmetic for')

def p_select_command(p):
    '''select_command : SELECT WORD newline_list DO list DONE
                      | SELECT WORD newline_list LEFT_CURLY list RIGHT_CURLY
                      | SELECT WORD SEMICOLON newline_list DO list DONE
                      | SELECT WORD SEMICOLON newline_list LEFT_CURLY list RIGHT_CURLY
                      | SELECT WORD newline_list IN word_list list_terminator newline_list DO list DONE
                      | SELECT WORD newline_list IN word_list list_terminator newline_list LEFT_CURLY list RIGHT_CURLY'''
    raise NotImplementedError('select command')

def p_case_command(p):
    '''case_command : CASE WORD newline_list IN newline_list ESAC
                    | CASE WORD newline_list IN case_clause_sequence newline_list ESAC
                    | CASE WORD newline_list IN case_clause ESAC'''
    raise NotImplementedError ('case command')

def p_function_def(p):
    '''function_def : WORD LEFT_PAREN RIGHT_PAREN newline_list function_body
                    | FUNCTION WORD LEFT_PAREN RIGHT_PAREN newline_list function_body
                    | FUNCTION WORD newline_list function_body'''
    parts = _makeparts(p)
    body = parts[-1]
    name = parts[ast.findfirstkind(parts, 'word')]

    p[0] = ast.node(kind='function', name=name, body=body, parts=parts,
                    pos=_partsspan(parts))

def p_function_body(p):
    '''function_body : shell_command
                     | shell_command redirection_list'''
    assert p[1].kind == 'compound'
    p[0] = p[1]
    if len(p) == 3:
        p[0].redirects.extend(p[2])
        assert p[0].pos[0] < p[0].redirects[-1].pos[1]
        p[0].pos = (p[0].pos[0], p[0].redirects[-1].pos[1])

def p_subshell(p):
    '''subshell : LEFT_PAREN compound_list RIGHT_PAREN'''
    # the parens are kept as reservedword nodes inside the compound
    lparen = ast.node(kind='reservedword', word=p[1], pos=p.lexspan(1))
    rparen = ast.node(kind='reservedword', word=p[3], pos=p.lexspan(3))
    parts = [lparen, p[2], rparen]
    p[0] = ast.node(kind='compound', list=parts, redirects=[],
                    pos=_partsspan(parts))

def p_coproc(p):
    '''coproc : COPROC shell_command
              | COPROC shell_command redirection_list
              | COPROC WORD shell_command
              | COPROC WORD shell_command redirection_list
              | COPROC simple_command'''
    raise NotImplementedError('coproc')

def p_if_command(p):
    '''if_command : IF compound_list THEN compound_list FI
                  | IF compound_list THEN compound_list ELSE compound_list FI
                  | IF compound_list THEN compound_list elif_clause FI'''
    # we currently don't distinguish the various lists that make up the
    # command, because it's not needed later on. if there will be a need
    # we can always add different nodes for elif/else.
    parts = _makeparts(p)
    p[0] = ast.node(kind='compound',
                    redirects=[],
                    list=[ast.node(kind='if', parts=parts,
                                   pos=_partsspan(parts))],
                    pos=_partsspan(parts))

def p_group_command(p):
    '''group_command : LEFT_CURLY compound_list RIGHT_CURLY'''
    lcurly = ast.node(kind='reservedword', word=p[1], pos=p.lexspan(1))
    rcurly = ast.node(kind='reservedword', word=p[3], pos=p.lexspan(3))
    parts = [lcurly, p[2], rcurly]
    p[0] = ast.node(kind='compound', list=parts, redirects=[],
                    pos=_partsspan(parts))

def p_arith_command(p):
    '''arith_command : ARITH_CMD'''
    raise NotImplementedError('arithmetic command')

def p_cond_command(p):
    '''cond_command : COND_START COND_CMD COND_END'''
    raise NotImplementedError('cond command')

def p_elif_clause(p):
    '''elif_clause : ELIF compound_list THEN compound_list
                   | ELIF compound_list THEN compound_list ELSE compound_list
                   | ELIF compound_list THEN compound_list elif_clause'''
    parts = []
    for i in range(1, len(p)):
        if isinstance(p[i], ast.node):
            parts.append(p[i])
        else:
            parts.append(ast.node(kind='reservedword', word=p[i],
                                  pos=p.lexspan(i)))
    p[0] = parts

def p_case_clause(p):
    '''case_clause : pattern_list
                   | case_clause_sequence pattern_list'''
    raise NotImplementedError('case clause')

def p_pattern_list(p):
    '''pattern_list : newline_list pattern RIGHT_PAREN compound_list
                    | newline_list pattern RIGHT_PAREN newline_list
                    | newline_list LEFT_PAREN pattern RIGHT_PAREN compound_list
                    | newline_list LEFT_PAREN pattern RIGHT_PAREN newline_list'''
    raise NotImplementedError('pattern list')

def p_case_clause_sequence(p):
    '''case_clause_sequence : pattern_list SEMI_SEMI
                            | case_clause_sequence pattern_list SEMI_SEMI
                            | pattern_list SEMI_AND
                            | case_clause_sequence pattern_list SEMI_AND
                            | pattern_list SEMI_SEMI_AND
                            | case_clause_sequence pattern_list SEMI_SEMI_AND'''
    raise NotImplementedError('case clause')

def p_pattern(p):
    '''pattern : WORD
               | pattern BAR WORD'''
    raise NotImplementedError('pattern')

def p_list(p):
    '''list : newline_list list0'''
    p[0] = p[2]

def p_compound_list(p):
    '''compound_list : list
                     | newline_list list1'''
    if len(p) == 2:
        p[0] = p[1]
    else:
        parts = p[2]
        if len(parts) > 1:
            p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts))
        else:
            p[0] = parts[0]

def p_list0(p):
    '''list0 : list1 NEWLINE newline_list
             | list1 AMPERSAND newline_list
             | list1 SEMICOLON newline_list'''
    # a trailing & or ; becomes an operator node; a plain newline does not
    parts = p[1]
    if len(parts) > 1 or p.slice[2].ttype != tokenizer.tokentype.NEWLINE:
        parts.append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2)))
        p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts))
    else:
        p[0] = parts[0]

def p_list1(p):
    '''list1 : list1 AND_AND newline_list list1
             | list1 OR_OR newline_list list1
             | list1 AMPERSAND newline_list list1
             | list1 SEMICOLON newline_list list1
             | list1 NEWLINE newline_list list1
             | pipeline_command'''
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1]
        # XXX newline
        p[0].append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2)))
        p[0].extend(p[len(p) - 1])

def p_simple_list_terminator(p):
    '''simple_list_terminator : NEWLINE
                              | EOF'''
    pass

def p_list_terminator(p):
    '''list_terminator : NEWLINE
                       | SEMICOLON
                       | EOF'''
    if p[1] == ';':
        p[0] = ast.node(kind='operator', op=';', pos=p.lexspan(1))

def p_newline_list(p):
    '''newline_list : empty
                    | newline_list NEWLINE'''
    pass

def p_simple_list(p):
    '''simple_list : simple_list1
                   | simple_list1 AMPERSAND
                   | simple_list1 SEMICOLON'''
    # a top level production: gather any pending heredocs before deciding
    # the final shape of the result
    tok = p.lexer
    heredoc.gatherheredocuments(tok)

    if len(p) == 3 or len(p[1]) > 1:
        parts = p[1]
        if len(p) == 3:
            parts.append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2)))
        p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts))
    else:
        assert len(p[1]) == 1
        p[0] = p[1][0]

    if (len(p) == 2 and
            p.lexer._parserstate & flags.parser.CMDSUBST and
            p.lexer._current_token.nopos() == p.lexer._shell_eof_token):
        # accept the input
        p.accept()

def p_simple_list1(p):
    '''simple_list1 : simple_list1 AND_AND newline_list simple_list1
                    | simple_list1 OR_OR newline_list simple_list1
                    | simple_list1 AMPERSAND simple_list1
                    | simple_list1 SEMICOLON simple_list1
                    | pipeline_command'''
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1]
        p[0].append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2)))
        p[0].extend(p[len(p) - 1])

def p_pipeline_command(p):
    '''pipeline_command : pipeline
                        | BANG pipeline_command
                        | timespec pipeline_command
                        | timespec list_terminator
                        | BANG list_terminator'''
    if len(p) == 2:
        if len(p[1]) == 1:
            p[0] = p[1][0]
        else:
            p[0] = ast.node(kind='pipeline', parts=p[1],
                            pos=(p[1][0].pos[0], p[1][-1].pos[1]))
    else:
        # XXX timespec
        node = ast.node(kind='reservedword', word='!', pos=p.lexspan(1))
        if p[2].kind == 'pipeline':
            p[0] = p[2]
            p[0].parts.insert(0, node)
            p[0].pos = (p[0].parts[0].pos[0], p[0].parts[-1].pos[1])
        else:
            p[0] = ast.node(kind='pipeline', parts=[node, p[2]],
                            pos=(node.pos[0], p[2].pos[1]))

def p_pipeline(p):
    '''pipeline : pipeline BAR newline_list pipeline
                | pipeline BAR_AND newline_list pipeline
                | command'''
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1]
        p[0].append(ast.node(kind='pipe', pipe=p[2], pos=p.lexspan(2)))
        p[0].extend(p[len(p) - 1])

def p_timespec(p):
    '''timespec : TIME
                | TIME TIMEOPT
                | TIME TIMEOPT TIMEIGN'''
    raise NotImplementedError('time command')

def p_empty(p):
    '''empty :'''
    pass

def p_error(p):
    # PLY error hook: always raises, so the parser never tries to recover
    assert isinstance(p, tokenizer.token)

    if p.ttype == tokenizer.tokentype.EOF:
        raise errors.ParsingError('unexpected EOF',
                                  p.lexer.source,
                                  len(p.lexer.source))
    else:
        raise errors.ParsingError('unexpected token %r' % p.value,
                                  p.lexer.source, p.lexpos)

yaccparser = yacc.yacc(tabmodule='bashlex.parsetab',
                       outputdir=os.path.dirname(__file__),
                       debug=False)

# some hack to fix yacc's reduction on command substitutions
# NOTE(review): these indices are hard-coded into the generated parse table;
# regenerating the grammar would presumably invalidate them -- verify against
# bashlex.parsetab before touching the grammar above.
yaccparser.action[45]['RIGHT_PAREN'] = -155
yaccparser.action[11]['RIGHT_PAREN'] = -148
yaccparser.action[133]['RIGHT_PAREN'] = -154
for tt in tokenizer.tokentype:
    yaccparser.action[62][tt.name] = -1
    yaccparser.action[63][tt.name] = -141

def parsesingle(s, strictmode=True, expansionlimit=None, convertpos=False):
    '''like parse, but only consumes a single top level node, e.g. parsing
    'a\nb' will only return a node for 'a', leaving b unparsed'''
    p = _parser(s, strictmode=strictmode, expansionlimit=expansionlimit)
    tree = p.parse()
    if convertpos:
        ast.posconverter(s).visit(tree)
    return tree

def parse(s, strictmode=True, expansionlimit=None, convertpos=False):
    '''parse the input string, returning a list of nodes

    top level node kinds are:

    - command - a simple command
    - pipeline - a series of simple commands
    - list - a series of one or more pipelines
    - compound - contains constructs for { list; }, (list), if, for..

    leafs are word nodes (which in turn can also contain any of the
    aforementioned nodes due to command substitutions).

    when strictmode is set to False, we will:
    - skip reading a heredoc if we're at the end of the input

    expansionlimit is used to limit the amount of recursive parsing done due to
    command substitutions found during word expansion.
    '''
    p = _parser(s, strictmode=strictmode, expansionlimit=expansionlimit)
    parts = [p.parse()]

    # NOTE(review): this local class is never used -- the code below
    # instantiates the module level _endfinder instead; dead code candidate
    class endfinder(ast.nodevisitor):
        def __init__(self):
            self.end = -1
        def visitheredoc(self, node, value):
            self.end = node.pos[1]

    # find the 'real' end incase we have a heredoc in there
    ef = _endfinder()
    ef.visit(parts[-1])
    index = max(parts[-1].pos[1], ef.end) + 1
    while index < len(s):
        part = _parser(s[index:], strictmode=strictmode).parse()

        if not isinstance(part, ast.node):
            break

        # shift the sub-parse's positions so they are relative to the whole
        # input, not to the slice we just parsed
        ast.posshifter(index).visit(part)
        parts.append(part)
        ef = _endfinder()
        ef.visit(parts[-1])
        index = max(parts[-1].pos[1], ef.end) + 1

    if convertpos:
        for tree in parts:
            ast.posconverter(s).visit(tree)

    return parts

class _parser(object):
    '''
    this class is mainly used to provide context to the productions
    when we're in the middle of parsing. as a hack, we shove it into the
    YaccProduction context attribute to make it accessible.
    '''
    def __init__(self, s, strictmode=True, expansionlimit=None,
                 tokenizerargs=None):
        assert expansionlimit is None or isinstance(expansionlimit, int)

        self.s = s
        self._strictmode = strictmode
        self._expansionlimit = expansionlimit

        if tokenizerargs is None:
            tokenizerargs = {}

        self.parserstate = tokenizerargs.pop('parserstate', state.parserstate())

        self.tok = tokenizer.tokenizer(s,
                                       parserstate=self.parserstate,
                                       strictmode=strictmode,
                                       **tokenizerargs)
        self.redirstack = self.tok.redirstack

    def parse(self):
        # yacc.yacc returns a parser object that is not reentrant, it has
        # some mutable state. we make a shallow copy of it so no
        # state spills over to the next call to parse on it
        theparser = copy.copy(yaccparser)
        tree = theparser.parse(lexer=self.tok, context=self)

        return tree

class _endfinder(ast.nodevisitor):
    '''helper class to find the "real" end pos of a node that contains
    a heredoc. this is a hack because heredoc aren't really part of any node
    since they don't always follow the end of a node and might appear on a
    different line'''
    def __init__(self):
        self.end = -1
    def visitheredoc(self, node, value):
        self.end = node.pos[1]
gpl-3.0
c2g14/2015cd_0512
static/Brython3.1.1-20150328-091302/Lib/select.py
730
9440
""" borrowed from jython https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py """ #import java.nio.channels.SelectableChannel #import java.nio.channels.SelectionKey #import java.nio.channels.Selector #from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ import errno import os import queue import socket class error(Exception): pass ALL = None _exception_map = { # (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent> #(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'), #(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'), #(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'), } def _map_exception(exc, circumstance=ALL): try: mapped_exception = _exception_map[(exc.__class__, circumstance)] mapped_exception.java_exception = exc return mapped_exception except KeyError: return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance)) POLLIN = 1 POLLOUT = 2 # The following event types are completely ignored on jython # Java does not support them, AFAICT # They are declared only to support code compatibility with cpython POLLPRI = 4 POLLERR = 8 POLLHUP = 16 POLLNVAL = 32 def _getselectable(selectable_object): try: channel = selectable_object.getchannel() except: try: channel = selectable_object.fileno().getChannel() except: raise TypeError("Object '%s' is not watchable" % selectable_object, errno.ENOTSOCK) if channel and not isinstance(channel, java.nio.channels.SelectableChannel): raise TypeError("Object '%s' is not watchable" % selectable_object, errno.ENOTSOCK) return channel class poll: def __init__(self): self.selector = java.nio.channels.Selector.open() self.chanmap = {} self.unconnected_sockets = [] def _register_channel(self, socket_object, channel, mask): jmask = 0 if mask & 
POLLIN: # Note that OP_READ is NOT a valid event on server socket channels. if channel.validOps() & OP_ACCEPT: jmask = OP_ACCEPT else: jmask = OP_READ if mask & POLLOUT: if channel.validOps() & OP_WRITE: jmask |= OP_WRITE if channel.validOps() & OP_CONNECT: jmask |= OP_CONNECT selectionkey = channel.register(self.selector, jmask) self.chanmap[channel] = (socket_object, selectionkey) def _check_unconnected_sockets(self): temp_list = [] for socket_object, mask in self.unconnected_sockets: channel = _getselectable(socket_object) if channel is not None: self._register_channel(socket_object, channel, mask) else: temp_list.append( (socket_object, mask) ) self.unconnected_sockets = temp_list def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI): try: channel = _getselectable(socket_object) if channel is None: # The socket is not yet connected, and thus has no channel # Add it to a pending list, and return self.unconnected_sockets.append( (socket_object, mask) ) return self._register_channel(socket_object, channel, mask) except BaseException: #except java.lang.Exception, jlx: raise _map_exception(jlx) def unregister(self, socket_object): try: channel = _getselectable(socket_object) self.chanmap[channel][1].cancel() del self.chanmap[channel] except BaseException: #except java.lang.Exception, jlx: raise _map_exception(jlx) def _dopoll(self, timeout): if timeout is None or timeout < 0: self.selector.select() else: try: timeout = int(timeout) if not timeout: self.selector.selectNow() else: # No multiplication required: both cpython and java use millisecond timeouts self.selector.select(timeout) except ValueError as vx: raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL) # The returned selectedKeys cannot be used from multiple threads! 
return self.selector.selectedKeys() def poll(self, timeout=None): try: self._check_unconnected_sockets() selectedkeys = self._dopoll(timeout) results = [] for k in selectedkeys.iterator(): jmask = k.readyOps() pymask = 0 if jmask & OP_READ: pymask |= POLLIN if jmask & OP_WRITE: pymask |= POLLOUT if jmask & OP_ACCEPT: pymask |= POLLIN if jmask & OP_CONNECT: pymask |= POLLOUT # Now return the original userobject, and the return event mask results.append( (self.chanmap[k.channel()][0], pymask) ) return results except BaseException: #except java.lang.Exception, jlx: raise _map_exception(jlx) def _deregister_all(self): try: for k in self.selector.keys(): k.cancel() # Keys are not actually removed from the selector until the next select operation. self.selector.selectNow() except BaseException: #except java.lang.Exception, jlx: raise _map_exception(jlx) def close(self): try: self._deregister_all() self.selector.close() except BaseException: #except java.lang.Exception, jlx: raise _map_exception(jlx) def _calcselecttimeoutvalue(value): if value is None: return None try: floatvalue = float(value) except Exception as x: raise TypeError("Select timeout value must be a number or None") if value < 0: raise error("Select timeout value cannot be negative", errno.EINVAL) if floatvalue < 0.000001: return 0 return int(floatvalue * 1000) # Convert to milliseconds # This cache for poll objects is required because of a bug in java on MS Windows # http://bugs.jython.org/issue1291 class poll_object_cache: def __init__(self): self.is_windows = os.name == 'nt' if self.is_windows: self.poll_object_queue = Queue.Queue() import atexit atexit.register(self.finalize) def get_poll_object(self): if not self.is_windows: return poll() try: return self.poll_object_queue.get(False) except Queue.Empty: return poll() def release_poll_object(self, pobj): if self.is_windows: pobj._deregister_all() self.poll_object_queue.put(pobj) else: pobj.close() def finalize(self): if self.is_windows: while True: 
try: p = self.poll_object_queue.get(False) p.close() except Queue.Empty: return _poll_object_cache = poll_object_cache() def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None): timeout = _calcselecttimeoutvalue(timeout) # First create a poll object to do the actual watching. pobj = _poll_object_cache.get_poll_object() try: registered_for_read = {} # Check the read list for fd in read_fd_list: pobj.register(fd, POLLIN) registered_for_read[fd] = 1 # And now the write list for fd in write_fd_list: if fd in registered_for_read: # registering a second time overwrites the first pobj.register(fd, POLLIN|POLLOUT) else: pobj.register(fd, POLLOUT) results = pobj.poll(timeout) # Now start preparing the results read_ready_list, write_ready_list, oob_ready_list = [], [], [] for fd, mask in results: if mask & POLLIN: read_ready_list.append(fd) if mask & POLLOUT: write_ready_list.append(fd) return read_ready_list, write_ready_list, oob_ready_list finally: _poll_object_cache.release_poll_object(pobj) select = native_select def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None): # First turn all sockets to non-blocking # keeping track of which ones have changed modified_channels = [] try: for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]: for s in socket_list: channel = _getselectable(s) if channel.isBlocking(): modified_channels.append(channel) channel.configureBlocking(0) return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout) finally: for channel in modified_channels: channel.configureBlocking(1)
agpl-3.0
vv1133/home_web
build/lib.linux-armv6l-2.7/django/contrib/syndication/views.py
113
8515
from __future__ import unicode_literals from calendar import timegm from django.conf import settings from django.contrib.sites.models import get_current_site from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist from django.http import HttpResponse, Http404 from django.template import loader, TemplateDoesNotExist, RequestContext from django.utils import feedgenerator, tzinfo from django.utils.encoding import force_text, iri_to_uri, smart_text from django.utils.html import escape from django.utils.http import http_date from django.utils import six from django.utils.timezone import is_naive def add_domain(domain, url, secure=False): protocol = 'https' if secure else 'http' if url.startswith('//'): # Support network-path reference (see #16753) - RSS requires a protocol url = '%s:%s' % (protocol, url) elif not (url.startswith('http://') or url.startswith('https://') or url.startswith('mailto:')): url = iri_to_uri('%s://%s%s' % (protocol, domain, url)) return url class FeedDoesNotExist(ObjectDoesNotExist): pass class Feed(object): feed_type = feedgenerator.DefaultFeed title_template = None description_template = None def __call__(self, request, *args, **kwargs): try: obj = self.get_object(request, *args, **kwargs) except ObjectDoesNotExist: raise Http404('Feed object does not exist.') feedgen = self.get_feed(obj, request) response = HttpResponse(content_type=feedgen.mime_type) if hasattr(self, 'item_pubdate'): # if item_pubdate is defined for the feed, set header so as # ConditionalGetMiddleware is able to send 304 NOT MODIFIED response['Last-Modified'] = http_date( timegm(feedgen.latest_post_date().utctimetuple())) feedgen.write(response, 'utf-8') return response def item_title(self, item): # Titles should be double escaped by default (see #6533) return escape(force_text(item)) def item_description(self, item): return force_text(item) def item_link(self, item): try: return item.get_absolute_url() except AttributeError: raise 
ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__) def __get_dynamic_attr(self, attname, obj, default=None): try: attr = getattr(self, attname) except AttributeError: return default if callable(attr): # Check co_argcount rather than try/excepting the function and # catching the TypeError, because something inside the function # may raise the TypeError. This technique is more accurate. try: code = six.get_function_code(attr) except AttributeError: code = six.get_function_code(attr.__call__) if code.co_argcount == 2: # one argument is 'self' return attr(obj) else: return attr() return attr def feed_extra_kwargs(self, obj): """ Returns an extra keyword arguments dictionary that is used when initializing the feed generator. """ return {} def item_extra_kwargs(self, item): """ Returns an extra keyword arguments dictionary that is used with the `add_item` call of the feed generator. """ return {} def get_object(self, request, *args, **kwargs): return None def get_context_data(self, **kwargs): """ Returns a dictionary to use as extra context if either ``self.description_template`` or ``self.item_template`` are used. Default implementation preserves the old behavior of using {'obj': item, 'site': current_site} as the context. """ return {'obj': kwargs.get('item'), 'site': kwargs.get('site')} def get_feed(self, obj, request): """ Returns a feedgenerator.DefaultFeed object, fully populated, for this feed. Raises FeedDoesNotExist for invalid parameters. 
""" current_site = get_current_site(request) link = self.__get_dynamic_attr('link', obj) link = add_domain(current_site.domain, link, request.is_secure()) feed = self.feed_type( title = self.__get_dynamic_attr('title', obj), subtitle = self.__get_dynamic_attr('subtitle', obj), link = link, description = self.__get_dynamic_attr('description', obj), language = settings.LANGUAGE_CODE, feed_url = add_domain( current_site.domain, self.__get_dynamic_attr('feed_url', obj) or request.path, request.is_secure(), ), author_name = self.__get_dynamic_attr('author_name', obj), author_link = self.__get_dynamic_attr('author_link', obj), author_email = self.__get_dynamic_attr('author_email', obj), categories = self.__get_dynamic_attr('categories', obj), feed_copyright = self.__get_dynamic_attr('feed_copyright', obj), feed_guid = self.__get_dynamic_attr('feed_guid', obj), ttl = self.__get_dynamic_attr('ttl', obj), **self.feed_extra_kwargs(obj) ) title_tmp = None if self.title_template is not None: try: title_tmp = loader.get_template(self.title_template) except TemplateDoesNotExist: pass description_tmp = None if self.description_template is not None: try: description_tmp = loader.get_template(self.description_template) except TemplateDoesNotExist: pass for item in self.__get_dynamic_attr('items', obj): context = self.get_context_data(item=item, site=current_site, obj=obj, request=request) if title_tmp is not None: title = title_tmp.render(RequestContext(request, context)) else: title = self.__get_dynamic_attr('item_title', item) if description_tmp is not None: description = description_tmp.render(RequestContext(request, context)) else: description = self.__get_dynamic_attr('item_description', item) link = add_domain( current_site.domain, self.__get_dynamic_attr('item_link', item), request.is_secure(), ) enc = None enc_url = self.__get_dynamic_attr('item_enclosure_url', item) if enc_url: enc = feedgenerator.Enclosure( url = smart_text(enc_url), length = 
smart_text(self.__get_dynamic_attr('item_enclosure_length', item)), mime_type = smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item)) ) author_name = self.__get_dynamic_attr('item_author_name', item) if author_name is not None: author_email = self.__get_dynamic_attr('item_author_email', item) author_link = self.__get_dynamic_attr('item_author_link', item) else: author_email = author_link = None pubdate = self.__get_dynamic_attr('item_pubdate', item) if pubdate and is_naive(pubdate): ltz = tzinfo.LocalTimezone(pubdate) pubdate = pubdate.replace(tzinfo=ltz) feed.add_item( title = title, link = link, description = description, unique_id = self.__get_dynamic_attr('item_guid', item, link), unique_id_is_permalink = self.__get_dynamic_attr( 'item_guid_is_permalink', item), enclosure = enc, pubdate = pubdate, author_name = author_name, author_email = author_email, author_link = author_link, categories = self.__get_dynamic_attr('item_categories', item), item_copyright = self.__get_dynamic_attr('item_copyright', item), **self.item_extra_kwargs(item) ) return feed
bsd-3-clause
StellarCN/py-stellar-base
stellar_sdk/xdr/claimant_type.py
1
1545
# This is an automatically generated file. # DO NOT EDIT or your changes may be overwritten import base64 from enum import IntEnum from xdrlib import Packer, Unpacker from ..__version__ import __issues__ from ..exceptions import ValueError __all__ = ["ClaimantType"] class ClaimantType(IntEnum): """ XDR Source Code ---------------------------------------------------------------- enum ClaimantType { CLAIMANT_TYPE_V0 = 0 }; ---------------------------------------------------------------- """ CLAIMANT_TYPE_V0 = 0 def pack(self, packer: Packer) -> None: packer.pack_int(self.value) @classmethod def unpack(cls, unpacker: Unpacker) -> "ClaimantType": value = unpacker.unpack_int() return cls(value) def to_xdr_bytes(self) -> bytes: packer = Packer() self.pack(packer) return packer.get_buffer() @classmethod def from_xdr_bytes(cls, xdr: bytes) -> "ClaimantType": unpacker = Unpacker(xdr) return cls.unpack(unpacker) def to_xdr(self) -> str: xdr_bytes = self.to_xdr_bytes() return base64.b64encode(xdr_bytes).decode() @classmethod def from_xdr(cls, xdr: str) -> "ClaimantType": xdr_bytes = base64.b64decode(xdr.encode()) return cls.from_xdr_bytes(xdr_bytes) @classmethod def _missing_(cls, value): raise ValueError( f"{value} is not a valid {cls.__name__}, please upgrade the SDK or submit an issue here: {__issues__}." )
apache-2.0
oVirt/vdsm
tests/virt/filedata_test.py
2
11202
# Copyright 2020-2021 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # Refer to the README and COPYING files for full details of the license. from collections import namedtuple from contextlib import contextmanager import os import stat import tempfile import time import pytest from vdsm.common import exception from vdsm.common import password from vdsm.supervdsm_api import virt from vdsm.virt import filedata # Core class VariableData(filedata._FileSystemData): def __init__(self): super().__init__('/does-not-exist', compress=False) self.data = None def _retrieve(self, last_modified=-1): return self.data def _store(self, data): self.data = data def test_invalid_data(): data = VariableData() with pytest.raises(exception.ExternalDataFailed): # Not base64 data.store('!@#$%^&*()') with pytest.raises(exception.ExternalDataFailed): # Mixed data.store('aaa!ccc') with pytest.raises(exception.ExternalDataFailed): # Padding character at the beginning data.store('=aaaa') def test_invalid_compression(): data = VariableData() with pytest.raises(exception.ExternalDataFailed): # Unknown format data.store('=X=aaaa') with pytest.raises(exception.ExternalDataFailed): # Content is not bzip2 data.store('=0=aaaa') def test_legacy_data(): data = VariableData() # Data with line ends data.store(''' MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEx 
MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE= ''') assert data.data == b'11111111111111111111111111111111111111111111' + \ b'111111111111111111111111111111111111111111111111111' def test_compressed(): data = VariableData() data.store('=0=QlpoOTFBWSZTWU7wmXMAAAEBADgAIAAhsQZiEji7kinChIJ3hMuY') assert data.data == b'abcabcabc' # File data FILE_DATA = 'hello' FILE_DATA_2 = 'world' ENCODED_DATA = 'aGVsbG8=' ENCODED_DATA_BZ2 = \ '=0=QlpoOTFBWSZTWRkxZT0AAACBAAJEoAAhmmgzTQczi7kinChIDJiynoA=' DIRECTORY_MODE = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IXOTH UUID = '12345678-1234-1234-1234-1234567890ab' def test_file_data_read(): with tempfile.TemporaryDirectory() as d: path = os.path.join(d, 'test') open(path, 'w').write(FILE_DATA) data = filedata.FileData(path, compress=False) assert data.retrieve() == ENCODED_DATA def test_file_data_write(): with tempfile.TemporaryDirectory() as d: path = os.path.join(d, 'test') data = filedata.FileData(path) data.store(ENCODED_DATA) assert open(path).read() == FILE_DATA def test_file_data_modified(): with tempfile.TemporaryDirectory() as d: path = os.path.join(d, 'test') open(path, 'w').write(FILE_DATA) data = filedata.FileData(path, compress=False) assert data.last_modified() == os.stat(path).st_mtime @pytest.mark.parametrize("last_modified, is_none", [ pytest.param( 0, False, id="forced read" ), pytest.param( time.time() - 0.1, # file mtime may differ from system time a bit False, id="new data" ), pytest.param( time.time() + 1000, False, id="future time" ), pytest.param( None, True, id="current data" ), ]) def test_file_data_conditional_read(last_modified, is_none): with tempfile.TemporaryDirectory() as d: path = os.path.join(d, 'test') open(path, 'w').write(FILE_DATA) data = filedata.FileData(path, compress=True) if last_modified is None: last_modified = data.last_modified() encoded = data.retrieve(last_modified=last_modified) if is_none: assert encoded is None else: assert encoded == ENCODED_DATA_BZ2 def 
test_file_data_no_data(): with tempfile.TemporaryDirectory() as d: path = os.path.join(d, 'test') # file does not exist data = filedata.FileData(path, compress=False) with pytest.raises(exception.ExternalDataFailed): data.retrieve() # file is empty open(path, 'w').write('') data = filedata.FileData(path, compress=False, allow_empty=False) with pytest.raises(exception.ExternalDataFailed): data.retrieve() data = filedata.FileData(path, compress=False, allow_empty=True) assert data.retrieve() == '' # Directory data Paths = namedtuple("Paths", ['directory', 'path', 'subdirectory', 'subpath']) @contextmanager def temporary_directory(monkeypatch=None): with tempfile.TemporaryDirectory() as d: directory = os.path.join(d, UUID) path = os.path.join(directory, 'file1') subdirectory = os.path.join(directory, 'data') subpath = os.path.join(subdirectory, 'file2') if monkeypatch is not None: monkeypatch.setattr(filedata.constants, 'P_LIBVIRT_SWTPM', os.path.dirname(directory)) yield Paths(directory=directory, path=path, subdirectory=subdirectory, subpath=subpath) @contextmanager def directory_data(monkeypatch=None): with temporary_directory(monkeypatch) as d: os.mkdir(d.directory) os.chmod(d.directory, DIRECTORY_MODE) os.mkdir(d.subdirectory) open(d.path, 'w').write(FILE_DATA) open(d.subpath, 'w').write(FILE_DATA_2) yield d def test_directory_data_read_write(): with directory_data() as d: data = filedata.DirectoryData(d.directory) encoded = data.retrieve() assert encoded is not None with temporary_directory() as d: data = filedata.DirectoryData(d.directory) data.store(encoded) assert open(d.path).read() == FILE_DATA assert open(d.subpath).read() == FILE_DATA_2 n = 0 for _root, _dirs, files in os.walk(d.directory): n += len(files) assert n == 2 permissions = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO assert os.stat(d.directory).st_mode & permissions == DIRECTORY_MODE def test_directory_data_rewrite(): with directory_data() as d: data = filedata.DirectoryData(d.directory) encoded 
= data.retrieve() with temporary_directory() as d: os.mkdir(d.directory) old_path = os.path.join(d.directory, 'old') open(old_path, 'w').write("invalid") open(d.path, 'w').write("invalid") data = filedata.DirectoryData(d.directory) data.store(encoded) assert not os.path.exists(old_path) assert open(d.path).read() == FILE_DATA assert open(d.subpath).read() == FILE_DATA_2 n = 0 for _root, _dirs, files in os.walk(d.directory): n += len(files) assert n == 2 def test_directory_data_modified(): with directory_data() as d: data = filedata.DirectoryData(d.directory) data.retrieve() assert data.last_modified() == \ max(os.stat(d.path).st_mtime, os.stat(d.subpath).st_mtime) def test_directory_data_no_data(): # no directory data = filedata.DirectoryData('/this-directory-must-not-exist') with pytest.raises(exception.ExternalDataFailed): data.retrieve() # directory empty with tempfile.TemporaryDirectory() as d: data = filedata.DirectoryData(d, allow_empty=False) with pytest.raises(exception.ExternalDataFailed): data.retrieve() data = filedata.DirectoryData(d, allow_empty=True) assert data.retrieve() is not None # Monitor def data_retriever(directory): data = filedata.DirectoryData(directory) def retriever(last_modified): encoded = data.retrieve(last_modified=last_modified) return encoded, data.last_modified() return retriever def test_monitor_read(): with directory_data() as d: monitor = filedata.Monitor(data_retriever(d.directory)) encoded = monitor.data() assert encoded is not None with temporary_directory() as d: data = filedata.DirectoryData(d.directory) data.store(encoded) assert open(d.path).read() == FILE_DATA assert open(d.subpath).read() == FILE_DATA_2 n = 0 for _root, _dirs, files in os.walk(d.directory): n += len(files) assert n == 2 def test_monitor_repeated_read(): with directory_data() as d: monitor = filedata.Monitor(data_retriever(d.directory)) data = monitor.data() hash_ = monitor.data_hash() assert data is not None assert hash_ is not None assert 
monitor.data() is None assert monitor.data_hash() == hash_ assert monitor.data(force=True) == data assert monitor.data_hash() == hash_ def test_monitor_data_change(): with directory_data() as d: monitor = filedata.Monitor(data_retriever(d.directory)) data = monitor.data() hash_ = monitor.data_hash() open(d.subpath, 'a').write('\n') new_data = monitor.data() new_hash = monitor.data_hash() assert new_data is not None assert new_data != data assert new_hash is not None assert new_hash != hash_ assert monitor.data() is None assert monitor.data_hash() == new_hash def test_monitor_no_data(): retriever = data_retriever('/this-directory-must-not-exist') monitor = filedata.Monitor(retriever) with pytest.raises(exception.ExternalDataFailed): monitor.data() # Supervdsm API def test_supervdsm_read_write(monkeypatch): with directory_data(monkeypatch): encoded, _modified = virt.read_tpm_data(UUID, -1) assert password.unprotect(encoded) with temporary_directory(monkeypatch): virt.write_tpm_data(UUID, encoded) assert encoded == virt.read_tpm_data(UUID, -1)[0] def test_supervdsm_invalid_vmid(monkeypatch): with directory_data(monkeypatch): encoded, _modified = virt.read_tpm_data(UUID, -1) with pytest.raises(exception.ExternalDataFailed): virt.write_tpm_data('../foo', encoded) def test_supervdsm_symlink(monkeypatch): with directory_data(monkeypatch) as d: os.symlink('/foo', os.path.join(d.directory, 'bar')) encoded = filedata.DirectoryData(d.directory).retrieve() with temporary_directory(monkeypatch): with pytest.raises(exception.ExternalDataFailed): virt.write_tpm_data(UUID, encoded)
gpl-2.0
MQQiang/kbengine
kbe/src/lib/python/Lib/encodings/unicode_internal.py
827
1196
""" Python 'unicode-internal' Codec Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """ import codecs ### Codec APIs class Codec(codecs.Codec): # Note: Binding these as C functions will result in the class not # converting them to methods. This is intended. encode = codecs.unicode_internal_encode decode = codecs.unicode_internal_decode class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.unicode_internal_encode(input, self.errors)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.unicode_internal_decode(input, self.errors)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='unicode-internal', encode=Codec.encode, decode=Codec.decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, )
lgpl-3.0
peraktong/AnniesLasso
AnniesLasso/__main__.py
2
19379
#!/usr/bin/env python # -*- coding: utf-8 -*- """ A basic command line interface for The Cannon. """ import argparse import logging import os from collections import OrderedDict from numpy import ceil, loadtxt, zeros, nan, diag, ones from subprocess import check_output from six.moves import cPickle as pickle from tempfile import mkstemp from time import sleep condor_code = """ executable = /opt/ioa/software/python/2.7/bin/python universe = vanilla output = {logging_dir}/condor-{prefix}-out.log error = {logging_dir}/condor-{prefix}-err.log log = {logging_dir}/condor-{prefix}.log request_cpus = {cpus} request_memory = {memory} Notification = never # maybe arguments? ## arguments = <arguments your script would typically take > arguments = {executable} train {model_filename} queue """ def _condorlock_filename(path): return "{0}/.{1}.condorlock".format( os.path.dirname(path), os.path.basename(path)) def fit(model_filename, spectrum_filenames, threads, clobber, from_filename, **kwargs): """ Fit a series of spectra. """ import AnniesLasso_2 as tc model = tc.load_model(model_filename, threads=threads) logger = logging.getLogger("AnniesLasso_2") assert model.is_trained chunk_size = kwargs.pop("parallel_chunks", 1000) if threads > 1 else 1 fluxes = [] ivars = [] output_filenames = [] failures = 0 fit_velocity = kwargs.pop("fit_velocity", False) # MAGIC HACK delete_meta_keys = ("fjac", ) # To save space... 
initial_labels = loadtxt("initial_labels.txt") if from_filename: with open(spectrum_filenames[0], "r") as fp: _ = list(map(str.strip, fp.readlines())) spectrum_filenames = _ output_suffix = kwargs.get("output_suffix", "result") N = len(spectrum_filenames) for i, filename in enumerate(spectrum_filenames): logger.info("At spectrum {0}/{1}: {2}".format(i + 1, N, filename)) basename, _ = os.path.splitext(filename) output_filename = "-".join([basename, output_suffix]) + ".pkl" if os.path.exists(output_filename) and not clobber: logger.info("Output filename {} already exists and not clobbering."\ .format(output_filename)) continue try: with open(filename, "rb") as fp: flux, ivar = pickle.load(fp) fluxes.append(flux) ivars.append(ivar) output_filenames.append(output_filename) except: logger.exception("Error occurred loading {}".format(filename)) failures += 1 else: if len(output_filenames) >= chunk_size: results, covs, metas = model.fit(fluxes, ivars, initial_labels=initial_labels, model_redshift=fit_velocity, full_output=True) for result, cov, meta, output_filename \ in zip(results, covs, metas, output_filenames): for key in delete_meta_keys: if key in meta: del meta[key] with open(output_filename, "wb") as fp: pickle.dump((result, cov, meta), fp, 2) # For legacy. logger.info("Saved output to {}".format(output_filename)) del output_filenames[0:], fluxes[0:], ivars[0:] if len(output_filenames) > 0: results, covs, metas = model.fit(fluxes, ivars, initial_labels=initial_labels, model_redshift=fit_velocity, full_output=True) for result, cov, meta, output_filename \ in zip(results, covs, metas, output_filenames): for key in delete_meta_keys: if key in meta: del meta[key] with open(output_filename, "wb") as fp: pickle.dump((result, cov, meta), fp, 2) # For legacy. 
logger.info("Saved output to {}".format(output_filename)) del output_filenames[0:], fluxes[0:], ivars[0:] logger.info("Number of failures: {}".format(failures)) logger.info("Number of successes: {}".format(N - failures)) return None def train(model_filename, threads, condor, condor_chunks, memory, save_training_data, condor_check_frequency, re_train, **kwargs): """ Train an existing model, with the option to distribute the work across many threads or condor resources. """ # It's bad practice not to have all the imports up here, # but I want the CLI to be fast if people are just checking out the help. import AnniesLasso_2 as tc # Load the model. model = tc.load_model(model_filename, threads=threads) logger = logging.getLogger("AnniesLasso_2") if model.is_trained and not re_train: logger.warn("Model loaded from {} is already trained.".format( model_filename)) logger.info("Exiting..") return model if condor: # We will need some temporary place to put logs etc... # MAGIC logging_dir = "logs" if not os.path.exists(logging_dir): logger.info("Creating Condor log directory: {}".format(logging_dir)) os.mkdir(logging_dir) condor_kwds = { # Get the path of this executable, since it may not be available to # the child resources. "executable": check_output("which tc", shell=True).strip(), "logging_dir": logging_dir, "cpus": 1, # MAGIC "memory": memory } # Split up the model into chunks based on the number of pixels. condor_job = "condor.job" # MAGIC chunk_size = int(ceil(model.dispersion.size / float(condor_chunks))) chunk_filenames = [] for i in range(condor_chunks): # Let's not assume anything about the model (e.g., it may have many # attributes from a sub-class that we do not know about). # Just make a soft copy and re-specify the data attributes, # since they should not be any different. 
si, ei = (i * chunk_size, (i + 1) * chunk_size) chunk = model.copy() chunk._dispersion = chunk._dispersion[si:ei] chunk._normalized_flux = chunk._normalized_flux[:, si:ei] chunk._normalized_ivar = chunk._normalized_ivar[:, si:ei] assert len(model._data_attributes) == 3, \ "Don't know what to do with your additional data attributes!" # Temporary filename _, chunk_filename = mkstemp(dir=os.getcwd(), prefix='tc.condorchunk.') chunk.save(chunk_filename, include_training_data=True, overwrite=True) chunk_filenames.append(chunk_filename) logger.info("Saved chunk {0} to {1}".format(i, chunk_filename)) assert os.path.exists(chunk_filename) # Now submit the jobs kwds = condor_kwds.copy() kwds.update({ "model_filename": chunk_filename, "prefix": "{0}-{1}".format(i, os.path.basename(chunk_filename)) }) with open(condor_job, "w") as fp: fp.write(condor_code.format(**kwds)) # Create a Condor lock file. condorlock_filename = _condorlock_filename(chunk_filename) os.system("touch {}".format(condorlock_filename)) # Submit the Condor job. os.system("chmod +wrx {}".format(condor_job)) os.system("condor_submit {}".format(condor_job)) logger.info("Submitted job {0} for {1}".format(i, chunk_filename)) if os.path.exists(condor_job): os.remove(condor_job) # Wait for completion of all jobs. waiting = list(map(_condorlock_filename, chunk_filenames)) logger.info("Waiting for completion of {} jobs".format(len(waiting))) while True: completed = [] for each in waiting: if not os.path.exists(each): logger.info("Finished {}".format(each)) completed.append(each) waiting = list(set(waiting).difference(completed)) logger.info("Still waiting on {0} jobs:\n{1}".format( len(waiting), "\n".join(waiting))) if len(waiting) == 0: break sleep(condor_check_frequency) # Check for Condor log files to see if they failed! logger.info("Collecting results") # Collect the results. 
model.theta = zeros((model.dispersion.size, model.design_matrix.shape[1])) model.s2 = zeros((model.dispersion.size)) for i, chunk_filename in enumerate(chunk_filenames): logger.info("Loading from chunk {0} {1}".format(i, chunk_filename)) chunk = tc.load_model(chunk_filename) si, ei = (i * chunk_size, (i + 1) * chunk_size) model.theta[si:ei] = chunk.theta[:].copy() model.s2[si:ei] = chunk.s2[:].copy() os.remove(chunk_filename) else: model.train( op_kwargs={"xtol": kwargs["xtol"], "ftol": kwargs["ftol"]}, op_bfgs_kwargs={"factr": kwargs["factr"], "pgtol": kwargs["pgtol"]}) # Save the model. logger.info("Saving model to {}".format(model_filename)) model.save(model_filename, include_training_data=save_training_data, overwrite=True) # Are we a child Condor process? condorlock_filename = _condorlock_filename(model_filename) if os.path.exists(condorlock_filename): logger.info("Removing Condor lock {}".format(condorlock_filename)) os.remove(condorlock_filename) logger.info("Done") return model def join_results(output_filename, result_filenames, model_filename=None, from_filename=False, clobber=False, errors=False, cov=False, **kwargs): """ Join the test results from multiple files into a single table file. """ import AnniesLasso_2 as tc from astropy.table import Table, TableColumns meta_keys = kwargs.pop("meta_keys", {}) meta_keys.update({ "chi_sq": nan, "r_chi_sq": nan, "snr": nan, # "redshift": nan, }) logger = logging.getLogger("AnniesLasso_2") # Does the output filename already exist? if os.path.exists(output_filename) and not clobber: logger.info("Output filename {} already exists and not clobbering."\ .format(output_filename)) return None if from_filename: with open(result_filenames[0], "r") as fp: _ = list(map(str.strip, fp.readlines())) result_filenames = _ # We might need the label names from the model. 
if model_filename is not None: model = tc.load_model(model_filename) assert model.is_trained label_names = model.vectorizer.label_names logger.warn( "Results produced from newer models do not need a model_filename "\ "to be specified when joining results.") else: with open(result_filenames[0], "rb") as fp: contents = pickle.load(fp) if "label_names" not in contents[-1]: raise ValueError( "cannot find label names; please provide the model used "\ "to produce these results") label_names = contents[-1]["label_names"] # Load results from each file. failed = [] N = len(result_filenames) # Create an ordered dictionary of lists for all the data. data_dict = OrderedDict([("FILENAME", [])]) for label_name in label_names: data_dict[label_name] = [] if errors: for label_name in label_names: data_dict["E_{}".format(label_name)] = [] if cov: data_dict["COV"] = [] for key in meta_keys: data_dict[key] = [] # Iterate over all the result filenames for i, filename in enumerate(result_filenames): logger.info("{}/{}: {}".format(i + 1, N, filename)) if not os.path.exists(filename): logger.warn("Path {} does not exist. Continuing..".format(filename)) failed.append(filename) continue with open(filename, "rb") as fp: contents = pickle.load(fp) assert len(contents) == 3, "You are using some old school version!" labels, Sigma, meta = contents if Sigma is None: Sigma = nan * ones((labels.size, labels.size)) result = [filename] + list(labels) if errors: result.extend(diag(Sigma)**0.5) if cov: result.append(Sigma) result += [meta.get(k, v) for k, v in meta_keys.items()] for key, value in zip(data_dict.keys(), result): data_dict[key].append(value) # Warn of any failures. if failed: logger.warn( "The following {} result file(s) could not be found: \n{}".format( len(failed), "\n".join(failed))) # Construct the table. 
table = Table(TableColumns(data_dict)) table.write(output_filename, overwrite=clobber) logger.info("Written to {}".format(output_filename)) def main(): """ The main command line interpreter. This is the console script entry point. """ # Create the main parser. parser = argparse.ArgumentParser( description="The Cannon", epilog="http://TheCannon.io") # Create parent parser. parent_parser = argparse.ArgumentParser(add_help=False) parent_parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose logging mode.") parent_parser.add_argument("-t", "--threads", dest="threads", type=int, default=1, help="The number of threads to use.") parent_parser.add_argument("--condor", dest="condor", action="store_true", default=False, help="Distribute action using Condor.") parent_parser.add_argument("--condor-chunks", dest="condor_chunks", type=int, default=100, help="The number of chunks to distribute across Condor. "\ "This argument is ignored if --condor is not used.") parent_parser.add_argument("--condor-memory", dest="memory", type=int, default=2000, help="The amount of memory (MB) to request for each Condor job. "\ "This argument is ignored if --condor is not used.") parent_parser.add_argument("--condor-check-frequency", dest="condor_check_frequency", type=int, default=1, help="The number of seconds to wait before checking for finished Condor jobs.") # Allow for multiple actions. subparsers = parser.add_subparsers(title="action", dest="action", description="Specify the action to perform.") # Training parsers. 
train_parser = subparsers.add_parser("train", parents=[parent_parser], help="Train an existing Cannon model.") train_parser.add_argument("--save_training_data", default=False, action="store_true", dest="save_training_data", help="Once trained, save the model using the training data.") train_parser.add_argument("--re-train", default=False, action="store_true", dest="re_train", help="Re-train the model if it is already trained.") train_parser.add_argument("model_filename", type=str, help="The path of the saved Cannon model.") train_parser.add_argument("--factr", default=10000000.0, dest="factr", help="BFGS keyword argument") train_parser.add_argument("--pgtol", default=1e-5, dest="pgtol", help="BFGS keyword argument") train_parser.add_argument("--xtol", default=1e-6, dest="xtol", help="fmin_powell keyword argument") train_parser.add_argument("--ftol", default=1e-6, dest="ftol", help="fmin_powell keyword argument") train_parser.set_defaults(func=train) # Fitting parser. fit_parser = subparsers.add_parser("fit", parents=[parent_parser], help="Fit stacked spectra using a trained model.") fit_parser.add_argument("model_filename", type=str, help="The path of a trained Cannon model.") fit_parser.add_argument("spectrum_filenames", nargs="+", type=str, help="Paths of spectra to fit.") fit_parser.add_argument("--rv", dest="fit_velocity", default=False, action="store_true", help="Fit radial velocity at test time.") fit_parser.add_argument("--parallel-chunks", dest="parallel_chunks", type=int, default=1000, help="The number of spectra to fit in a chunk.") fit_parser.add_argument("--clobber", dest="clobber", default=False, action="store_true", help="Overwrite existing output files.") fit_parser.add_argument( "--output-suffix", dest="output_suffix", type=str, help="A string suffix that will be added to the spectrum filenames "\ "when creating the result filename") fit_parser.add_argument("--from-filename", dest="from_filename", action="store_true", default=False, help="Read 
spectrum filenames from file") fit_parser.set_defaults(func=fit) # Join results parser. join_parser = subparsers.add_parser("join", parents=[parent_parser], help="Join results from individual stars into a single table.") join_parser.add_argument("output_filename", type=str, help="The path to write the output filename.") join_parser.add_argument("result_filenames", nargs="+", type=str, help="Paths of result files to include.") join_parser.add_argument("--from-filename", dest="from_filename", action="store_true", default=False, help="Read result filenames from a file.") join_parser.add_argument("--model-filename", dest="model_filename", type=str, default=None, help="The path of a Cannon model that was used to test the stars. "\ "(Note this is only required for older models.)") join_parser.add_argument( "--errors", dest="errors", default=False, action="store_true", help="Include formal errors in destination table.") join_parser.add_argument( "--cov", dest="cov", default=False, action="store_true", help="Include covariance matrix in destination table.") join_parser.add_argument( "--clobber", dest="clobber", default=False, action="store_true", help="Ovewrite an existing table file.") join_parser.set_defaults(func=join_results) # Parse the arguments and take care of any top-level arguments. args = parser.parse_args() if args.action is None: return logger = logging.getLogger("AnniesLasso_2") if args.verbose: logger.setLevel(logging.DEBUG) if args.condor_chunks != parent_parser.get_default("condor_chunks") \ and not args.condor: logger.warn("Ignoring chunks argument because Condor is not in use.") # Do things. return args.func(**args.__dict__) if __name__ == "__main__": """ Usage examples: # tc train model.pickle --condor --chunks 100 # tc train model.pickle --threads 8 # tc join model.pickle --from-filename files """ _ = main()
mit
bigswitch/nova
nova/tests/unit/virt/xenapi/image/test_utils.py
16
8120
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import tarfile import mock from nova import test from nova.virt.xenapi.image import utils @mock.patch.object(utils, 'IMAGE_API') class GlanceImageTestCase(test.NoDBTestCase): def _get_image(self): return utils.GlanceImage(mock.sentinel.context, mock.sentinel.image_ref) def test_meta(self, mocked): mocked.get.return_value = mock.sentinel.meta image = self._get_image() self.assertEqual(mock.sentinel.meta, image.meta) mocked.get.assert_called_once_with(mock.sentinel.context, mock.sentinel.image_ref) def test_download_to(self, mocked): mocked.download.return_value = None image = self._get_image() result = image.download_to(mock.sentinel.fobj) self.assertIsNone(result) mocked.download.assert_called_once_with(mock.sentinel.context, mock.sentinel.image_ref, mock.sentinel.fobj) def test_is_raw_tgz_empty_meta(self, mocked): mocked.get.return_value = {} image = self._get_image() self.assertFalse(image.is_raw_tgz()) def test_is_raw_tgz_for_raw_tgz(self, mocked): mocked.get.return_value = { 'disk_format': 'raw', 'container_format': 'tgz' } image = self._get_image() self.assertTrue(image.is_raw_tgz()) def test_data(self, mocked): mocked.download.return_value = mock.sentinel.image image = self._get_image() self.assertEqual(mock.sentinel.image, image.data()) class RawImageTestCase(test.NoDBTestCase): def test_get_size(self): glance_image = self.mox.CreateMock(utils.GlanceImage) 
glance_image.meta = {'size': '123'} raw_image = utils.RawImage(glance_image) self.mox.ReplayAll() self.assertEqual(123, raw_image.get_size()) def test_stream_to(self): glance_image = self.mox.CreateMock(utils.GlanceImage) glance_image.download_to('file').AndReturn('result') raw_image = utils.RawImage(glance_image) self.mox.ReplayAll() self.assertEqual('result', raw_image.stream_to('file')) class TestIterableBasedFile(test.NoDBTestCase): def test_constructor(self): class FakeIterable(object): def __iter__(_self): return 'iterator' the_file = utils.IterableToFileAdapter(FakeIterable()) self.assertEqual('iterator', the_file.iterator) def test_read_one_character(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) self.assertEqual('c', the_file.read(1)) def test_read_stores_remaining_characters(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) the_file.read(1) self.assertEqual('hunk1', the_file.remaining_data) def test_read_remaining_characters(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) self.assertEqual('c', the_file.read(1)) self.assertEqual('h', the_file.read(1)) def test_read_reached_end_of_file(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) self.assertEqual('chunk1', the_file.read(100)) self.assertEqual('chunk2', the_file.read(100)) self.assertEqual('', the_file.read(100)) def test_empty_chunks(self): the_file = utils.IterableToFileAdapter([ '', '', 'chunk2' ]) self.assertEqual('chunk2', the_file.read(100)) class RawTGZTestCase(test.NoDBTestCase): def test_as_tarfile(self): image = utils.RawTGZImage(None) self.mox.StubOutWithMock(image, '_as_file') self.mox.StubOutWithMock(utils.tarfile, 'open') image._as_file().AndReturn('the_file') utils.tarfile.open(mode='r|gz', fileobj='the_file').AndReturn('tf') self.mox.ReplayAll() result = image._as_tarfile() self.assertEqual('tf', result) def test_as_file(self): self.mox.StubOutWithMock(utils, 'IterableToFileAdapter') glance_image = 
self.mox.CreateMock(utils.GlanceImage) image = utils.RawTGZImage(glance_image) glance_image.data().AndReturn('iterable-data') utils.IterableToFileAdapter('iterable-data').AndReturn('data-as-file') self.mox.ReplayAll() result = image._as_file() self.assertEqual('data-as-file', result) def test_get_size(self): tar_file = self.mox.CreateMock(tarfile.TarFile) tar_info = self.mox.CreateMock(tarfile.TarInfo) image = utils.RawTGZImage(None) self.mox.StubOutWithMock(image, '_as_tarfile') image._as_tarfile().AndReturn(tar_file) tar_file.next().AndReturn(tar_info) tar_info.size = 124 self.mox.ReplayAll() result = image.get_size() self.assertEqual(124, result) self.assertEqual(image._tar_info, tar_info) self.assertEqual(image._tar_file, tar_file) def test_get_size_called_twice(self): tar_file = self.mox.CreateMock(tarfile.TarFile) tar_info = self.mox.CreateMock(tarfile.TarInfo) image = utils.RawTGZImage(None) self.mox.StubOutWithMock(image, '_as_tarfile') image._as_tarfile().AndReturn(tar_file) tar_file.next().AndReturn(tar_info) tar_info.size = 124 self.mox.ReplayAll() image.get_size() result = image.get_size() self.assertEqual(124, result) self.assertEqual(image._tar_info, tar_info) self.assertEqual(image._tar_file, tar_file) def test_stream_to_without_size_retrieved(self): source_tar = self.mox.CreateMock(tarfile.TarFile) first_tarinfo = self.mox.CreateMock(tarfile.TarInfo) target_file = self.mox.CreateMock(open) source_file = self.mox.CreateMock(open) image = utils.RawTGZImage(None) image._image_service_and_image_id = ('service', 'id') self.mox.StubOutWithMock(image, '_as_tarfile', source_tar) self.mox.StubOutWithMock(utils.shutil, 'copyfileobj') image._as_tarfile().AndReturn(source_tar) source_tar.next().AndReturn(first_tarinfo) source_tar.extractfile(first_tarinfo).AndReturn(source_file) utils.shutil.copyfileobj(source_file, target_file) source_tar.close() self.mox.ReplayAll() image.stream_to(target_file) def test_stream_to_with_size_retrieved(self): source_tar = 
self.mox.CreateMock(tarfile.TarFile) first_tarinfo = self.mox.CreateMock(tarfile.TarInfo) target_file = self.mox.CreateMock(open) source_file = self.mox.CreateMock(open) first_tarinfo.size = 124 image = utils.RawTGZImage(None) image._image_service_and_image_id = ('service', 'id') self.mox.StubOutWithMock(image, '_as_tarfile', source_tar) self.mox.StubOutWithMock(utils.shutil, 'copyfileobj') image._as_tarfile().AndReturn(source_tar) source_tar.next().AndReturn(first_tarinfo) source_tar.extractfile(first_tarinfo).AndReturn(source_file) utils.shutil.copyfileobj(source_file, target_file) source_tar.close() self.mox.ReplayAll() image.get_size() image.stream_to(target_file)
apache-2.0
40223204/w16b_test
static/Brython3.1.1-20150328-091302/Lib/contextlib.py
737
8788
"""Utilities for with-statement contexts. See PEP 343.""" import sys from collections import deque from functools import wraps __all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack"] class ContextDecorator(object): "A base class or mixin that enables context managers to work as decorators." def _recreate_cm(self): """Return a recreated instance of self. Allows an otherwise one-shot context manager like _GeneratorContextManager to support use as a decorator via implicit recreation. This is a private interface just for _GeneratorContextManager. See issue #11647 for details. """ return self def __call__(self, func): @wraps(func) def inner(*args, **kwds): with self._recreate_cm(): return func(*args, **kwds) return inner class _GeneratorContextManager(ContextDecorator): """Helper for @contextmanager decorator.""" def __init__(self, func, *args, **kwds): self.gen = func(*args, **kwds) self.func, self.args, self.kwds = func, args, kwds def _recreate_cm(self): # _GCM instances are one-shot context managers, so the # CM must be recreated each time a decorated function is # called return self.__class__(self.func, *self.args, **self.kwds) def __enter__(self): try: return next(self.gen) except StopIteration: raise RuntimeError("generator didn't yield") def __exit__(self, type, value, traceback): if type is None: try: next(self.gen) except StopIteration: return else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = type() try: self.gen.throw(type, value, traceback) raise RuntimeError("generator didn't stop after throw()") except StopIteration as exc: # Suppress the exception *unless* it's the same exception that # was passed to throw(). 
This prevents a StopIteration # raised inside the "with" statement from being suppressed return exc is not value except: # only re-raise if it's *not* the exception that was # passed to throw(), because __exit__() must not raise # an exception unless __exit__() itself failed. But throw() # has to raise the exception to signal propagation, so this # fixes the impedance mismatch between the throw() protocol # and the __exit__() protocol. # if sys.exc_info()[1] is not value: raise def contextmanager(func): """@contextmanager decorator. Typical usage: @contextmanager def some_generator(<arguments>): <setup> try: yield <value> finally: <cleanup> This makes this: with some_generator(<arguments>) as <variable>: <body> equivalent to this: <setup> try: <variable> = <value> <body> finally: <cleanup> """ @wraps(func) def helper(*args, **kwds): return _GeneratorContextManager(func, *args, **kwds) return helper class closing(object): """Context to automatically close something at the end of a block. 
Code like this: with closing(<module>.open(<arguments>)) as f: <block> is equivalent to this: f = <module>.open(<arguments>) try: <block> finally: f.close() """ def __init__(self, thing): self.thing = thing def __enter__(self): return self.thing def __exit__(self, *exc_info): self.thing.close() # Inspired by discussions on http://bugs.python.org/issue13585 class ExitStack(object): """Context manager for dynamic management of a stack of exit callbacks For example: with ExitStack() as stack: files = [stack.enter_context(open(fname)) for fname in filenames] # All opened files will automatically be closed at the end of # the with statement, even if attempts to open files later # in the list raise an exception """ def __init__(self): self._exit_callbacks = deque() def pop_all(self): """Preserve the context stack by transferring it to a new instance""" new_stack = type(self)() new_stack._exit_callbacks = self._exit_callbacks self._exit_callbacks = deque() return new_stack def _push_cm_exit(self, cm, cm_exit): """Helper to correctly register callbacks to __exit__ methods""" def _exit_wrapper(*exc_details): return cm_exit(cm, *exc_details) _exit_wrapper.__self__ = cm self.push(_exit_wrapper) def push(self, exit): """Registers a callback with the standard __exit__ method signature Can suppress exceptions the same way __exit__ methods can. Also accepts any object with an __exit__ method (registering a call to the method instead of the object itself) """ # We use an unbound method rather than a bound method to follow # the standard lookup behaviour for special methods _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: # Not a context manager, so assume its a callable self._exit_callbacks.append(exit) else: self._push_cm_exit(exit, exit_method) return exit # Allow use as a decorator def callback(self, callback, *args, **kwds): """Registers an arbitrary callback and arguments. Cannot suppress exceptions. 
""" def _exit_wrapper(exc_type, exc, tb): callback(*args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection _exit_wrapper.__wrapped__ = callback self.push(_exit_wrapper) return callback # Allow use as a decorator def enter_context(self, cm): """Enters the supplied context manager If successful, also pushes its __exit__ method as a callback and returns the result of the __enter__ method. """ # We look up the special methods on the type to match the with statement _cm_type = type(cm) _exit = _cm_type.__exit__ result = _cm_type.__enter__(cm) self._push_cm_exit(cm, _exit) return result def close(self): """Immediately unwind the context stack""" self.__exit__(None, None, None) def __enter__(self): return self def __exit__(self, *exc_details): received_exc = exc_details[0] is not None # We manipulate the exception state so it behaves as though # we were actually nesting multiple with statements frame_exc = sys.exc_info()[1] def _fix_exception_context(new_exc, old_exc): while 1: exc_context = new_exc.__context__ if exc_context in (None, frame_exc): break new_exc = exc_context new_exc.__context__ = old_exc # Callbacks are invoked in LIFO order to match the behaviour of # nested context managers suppressed_exc = False pending_raise = False while self._exit_callbacks: cb = self._exit_callbacks.pop() try: if cb(*exc_details): suppressed_exc = True pending_raise = False exc_details = (None, None, None) except: new_exc_details = sys.exc_info() # simulate the stack of exceptions by setting the context _fix_exception_context(new_exc_details[1], exc_details[1]) pending_raise = True exc_details = new_exc_details if pending_raise: try: # bare "raise exc_details[1]" replaces our carefully # set-up context fixed_ctx = exc_details[1].__context__ raise exc_details[1] except BaseException: exc_details[1].__context__ = fixed_ctx raise return received_exc and suppressed_exc
agpl-3.0
aaleotti-unimore/ComicsScraper
lib/tests/contrib/django_util/test_decorators.py
21
9648
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the django_util decorators.""" import copy from django import http import django.conf from django.contrib.auth import models as django_models import mock from six.moves import http_client from six.moves import reload_module from six.moves.urllib import parse import oauth2client.contrib.django_util from oauth2client.contrib.django_util import decorators from tests.contrib import django_util as tests_django_util class OAuth2EnabledDecoratorTest(tests_django_util.TestWithDjangoEnvironment): def setUp(self): super(OAuth2EnabledDecoratorTest, self).setUp() self.save_settings = copy.deepcopy(django.conf.settings) # OAuth2 Settings gets configured based on Django settings # at import time, so in order for us to reload the settings # we need to reload the module reload_module(oauth2client.contrib.django_util) self.user = django_models.User.objects.create_user( username='bill', email='bill@example.com', password='hunter2') def tearDown(self): super(OAuth2EnabledDecoratorTest, self).tearDown() django.conf.settings = copy.deepcopy(self.save_settings) def test_no_credentials_without_credentials(self): request = self.factory.get('/test') request.session = self.session @decorators.oauth_enabled def test_view(request): return http.HttpResponse("test") # pragma: NO COVER response = test_view(request) self.assertEqual(response.status_code, http_client.OK) 
self.assertIsNotNone(request.oauth) self.assertFalse(request.oauth.has_credentials()) self.assertIsNone(request.oauth.http) @mock.patch('oauth2client.client.OAuth2Credentials') def test_has_credentials_in_storage(self, OAuth2Credentials): request = self.factory.get('/test') request.session = mock.Mock() credentials_mock = mock.Mock( scopes=set(django.conf.settings.GOOGLE_OAUTH2_SCOPES)) credentials_mock.has_scopes.return_value = True credentials_mock.invalid = False credentials_mock.scopes = set([]) OAuth2Credentials.from_json.return_value = credentials_mock @decorators.oauth_enabled def test_view(request): return http.HttpResponse('test') response = test_view(request) self.assertEqual(response.status_code, http_client.OK) self.assertEqual(response.content, b'test') self.assertTrue(request.oauth.has_credentials()) self.assertIsNotNone(request.oauth.http) self.assertSetEqual( request.oauth.scopes, set(django.conf.settings.GOOGLE_OAUTH2_SCOPES)) @mock.patch('oauth2client.contrib.dictionary_storage.DictionaryStorage') def test_specified_scopes(self, dictionary_storage_mock): request = self.factory.get('/test') request.session = mock.Mock() credentials_mock = mock.Mock( scopes=set(django.conf.settings.GOOGLE_OAUTH2_SCOPES)) credentials_mock.has_scopes = mock.Mock(return_value=True) credentials_mock.is_valid = True dictionary_storage_mock.get.return_value = credentials_mock @decorators.oauth_enabled(scopes=['additional-scope']) def test_view(request): return http.HttpResponse('hello world') # pragma: NO COVER response = test_view(request) self.assertEqual(response.status_code, http_client.OK) self.assertIsNotNone(request.oauth) self.assertFalse(request.oauth.has_credentials()) class OAuth2RequiredDecoratorTest(tests_django_util.TestWithDjangoEnvironment): def setUp(self): super(OAuth2RequiredDecoratorTest, self).setUp() self.save_settings = copy.deepcopy(django.conf.settings) reload_module(oauth2client.contrib.django_util) self.user = 
django_models.User.objects.create_user( username='bill', email='bill@example.com', password='hunter2') def tearDown(self): super(OAuth2RequiredDecoratorTest, self).tearDown() django.conf.settings = copy.deepcopy(self.save_settings) def test_redirects_without_credentials(self): request = self.factory.get('/test') request.session = self.session @decorators.oauth_required def test_view(request): return http.HttpResponse('test') # pragma: NO COVER response = test_view(request) self.assertIsInstance(response, http.HttpResponseRedirect) self.assertEqual(parse.urlparse(response['Location']).path, '/oauth2/oauth2authorize/') self.assertIn( 'return_url=%2Ftest', parse.urlparse(response['Location']).query) self.assertEqual(response.status_code, http.HttpResponseRedirect.status_code) @mock.patch('oauth2client.contrib.django_util.UserOAuth2', autospec=True) def test_has_credentials_in_storage(self, UserOAuth2): request = self.factory.get('/test') request.session = mock.Mock() @decorators.oauth_required def test_view(request): return http.HttpResponse("test") my_user_oauth = mock.Mock() UserOAuth2.return_value = my_user_oauth my_user_oauth.has_credentials.return_value = True response = test_view(request) self.assertEqual(response.status_code, http_client.OK) self.assertEqual(response.content, b"test") @mock.patch('oauth2client.client.OAuth2Credentials') def test_has_credentials_in_storage_no_scopes( self, OAuth2Credentials): request = self.factory.get('/test') request.session = mock.Mock() credentials_mock = mock.Mock( scopes=set(django.conf.settings.GOOGLE_OAUTH2_SCOPES)) credentials_mock.has_scopes.return_value = False OAuth2Credentials.from_json.return_value = credentials_mock @decorators.oauth_required def test_view(request): return http.HttpResponse("test") # pragma: NO COVER response = test_view(request) self.assertEqual( response.status_code, django.http.HttpResponseRedirect.status_code) @mock.patch('oauth2client.client.OAuth2Credentials') def test_specified_scopes(self, 
OAuth2Credentials): request = self.factory.get('/test') request.session = mock.Mock() credentials_mock = mock.Mock( scopes=set(django.conf.settings.GOOGLE_OAUTH2_SCOPES)) credentials_mock.has_scopes = mock.Mock(return_value=False) OAuth2Credentials.from_json.return_value = credentials_mock @decorators.oauth_required(scopes=['additional-scope']) def test_view(request): return http.HttpResponse("hello world") # pragma: NO COVER response = test_view(request) self.assertEqual( response.status_code, django.http.HttpResponseRedirect.status_code) class OAuth2RequiredDecoratorStorageModelTest( tests_django_util.TestWithDjangoEnvironment): def setUp(self): super(OAuth2RequiredDecoratorStorageModelTest, self).setUp() self.save_settings = copy.deepcopy(django.conf.settings) STORAGE_MODEL = { 'model': 'tests.contrib.django_util.models.CredentialsModel', 'user_property': 'user_id', 'credentials_property': 'credentials' } django.conf.settings.GOOGLE_OAUTH2_STORAGE_MODEL = STORAGE_MODEL reload_module(oauth2client.contrib.django_util) self.user = django_models.User.objects.create_user( username='bill', email='bill@example.com', password='hunter2') def tearDown(self): super(OAuth2RequiredDecoratorStorageModelTest, self).tearDown() django.conf.settings = copy.deepcopy(self.save_settings) def test_redirects_anonymous_to_login(self): request = self.factory.get('/test') request.session = self.session request.user = django_models.AnonymousUser() @decorators.oauth_required def test_view(request): return http.HttpResponse("test") # pragma: NO COVER response = test_view(request) self.assertIsInstance(response, http.HttpResponseRedirect) self.assertEqual(parse.urlparse(response['Location']).path, django.conf.settings.LOGIN_URL) def test_redirects_user_to_oauth_authorize(self): request = self.factory.get('/test') request.session = self.session request.user = django_models.User.objects.create_user( username='bill3', email='bill@example.com', password='hunter2') @decorators.oauth_required def 
test_view(request): return http.HttpResponse("test") # pragma: NO COVER response = test_view(request) self.assertIsInstance(response, http.HttpResponseRedirect) self.assertEqual(parse.urlparse(response['Location']).path, '/oauth2/oauth2authorize/')
apache-2.0
pforai/easybuild-framework
easybuild/tools/utilities.py
4
5398
# # # Copyright 2012-2015 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
# # """ Module with various utility functions @author: Kenneth Hoste (Ghent University) """ import glob import os import string import sys from vsc.utils import fancylogger import easybuild.tools.environment as env from easybuild.tools.build_log import EasyBuildError _log = fancylogger.getLogger('tools.utilities') # a list of all ascii characters ASCII_CHARS = string.maketrans('', '') # a list of all unwanted ascii characters (we only want to keep digits, letters and _) UNWANTED_CHARS = ASCII_CHARS.translate(ASCII_CHARS, string.digits + string.ascii_letters + "_") def read_environment(env_vars, strict=False): """NO LONGER SUPPORTED: use read_environment from easybuild.tools.environment instead""" _log.nosupport("read_environment has been moved to easybuild.tools.environment", '2.0') def flatten(lst): """Flatten a list of lists.""" res = [] for x in lst: res.extend(x) return res def quote_str(val, escape_newline=False, prefer_single_quotes=False): """ Obtain a new value to be used in string replacement context. For non-string values, it just returns the exact same value. For string values, it tries to escape the string in quotes, e.g., foo becomes 'foo', foo'bar becomes "foo'bar", foo'bar"baz becomes \"\"\"foo'bar"baz\"\"\", etc. 
@param escape_newline: wrap strings that include a newline in triple quotes """ if isinstance(val, basestring): # forced triple double quotes if ("'" in val and '"' in val) or (escape_newline and '\n' in val): return '"""%s"""' % val # single quotes to escape double quote used in strings elif '"' in val: return "'%s'" % val # if single quotes are preferred, use single quotes; # unless a space or a single quote are in the string elif prefer_single_quotes and "'" not in val and ' ' not in val: return "'%s'" % val # fallback on double quotes (required in tcl syntax) else: return '"%s"' % val else: return val def quote_py_str(val): """Version of quote_str specific for generating use in Python context (e.g., easyconfig parameters).""" return quote_str(val, escape_newline=True, prefer_single_quotes=True) def remove_unwanted_chars(inputstring): """Remove unwanted characters from the given string and return a copy All non-letter and non-numeral characters are considered unwanted except for underscore ('_'), see UNWANTED_CHARS. """ return inputstring.translate(ASCII_CHARS, UNWANTED_CHARS) def import_available_modules(namespace): """ Import all available module in the specified namespace. @param namespace: The namespace to import modules from. 
""" modules = [] for path in sys.path: for module in sorted(glob.glob(os.path.sep.join([path] + namespace.split('.') + ['*.py']))): if not module.endswith('__init__.py'): mod_name = module.split(os.path.sep)[-1].split('.')[0] modpath = '.'.join([namespace, mod_name]) _log.debug("importing module %s" % modpath) try: mod = __import__(modpath, globals(), locals(), ['']) except ImportError as err: raise EasyBuildError("import_available_modules: Failed to import %s: %s", modpath, err) modules.append(mod) return modules def only_if_module_is_available(modname, pkgname=None, url=None): """Decorator to guard functions/methods against missing required module with specified name.""" if pkgname and url is None: url = 'https://pypi.python.org/pypi/%s' % pkgname def wrap(orig): """Decorated function, raises ImportError if specified module is not available.""" try: __import__(modname) return orig except ImportError as err: def error(*args, **kwargs): msg = "%s; required module '%s' is not available" % (err, modname) if pkgname: msg += " (provided by Python package %s, available from %s)" % (pkgname, url) elif url: msg += " (available from %s)" % url raise ImportError(msg) return error return wrap
gpl-2.0
SummerLW/Perf-Insight-Report
third_party/gsutil/third_party/boto/boto/elastictranscoder/__init__.py
145
1723
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. # All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.regioninfo import RegionInfo, get_regions def regions(): """ Get all available regions for the AWS Elastic Transcoder service. :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` """ from boto.elastictranscoder.layer1 import ElasticTranscoderConnection return get_regions( 'elastictranscoder', connection_cls=ElasticTranscoderConnection ) def connect_to_region(region_name, **kw_params): for region in regions(): if region.name == region_name: return region.connect(**kw_params) return None
bsd-3-clause
infobloxopen/infoblox-netmri
infoblox_netmri/api/broker/v2_6_0/vrrp_router_stat_broker.py
16
72949
from ..broker import Broker class VrrpRouterStatBroker(Broker): controller = "vrrp_router_stats" def show(self, **kwargs): """Shows the details for the specified vrrp router stat. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. :type VrrpRouterStatsID: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return vrrp_router_stat: The vrrp router stat identified by the specified VrrpRouterStatsID. :rtype vrrp_router_stat: VrrpRouterStat """ return self.api_request(self._get_method_fullname("show"), kwargs) def index(self, **kwargs): """Lists the available vrrp router stats. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient. **Inputs** | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected. 
:type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected. :type DeviceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param EndTime: The date and time the record was last modified in NetMRI. :type EndTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param EndTime: The date and time the record was last modified in NetMRI. :type EndTime: Array of DateTime | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. :type VrrpRouterStatsID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. :type VrrpRouterStatsID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` today :param starttime: The data returned will represent the vrrp router stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data. :type starttime: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` tomorrow :param endtime: The data returned will represent the vrrp router stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data. 
:type endtime: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` VrrpRouterStatsID :param sort: The data field(s) to use for sorting the output. Default is VrrpRouterStatsID. 
Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each VrrpRouterStat. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return vrrp_router_stats: An array of the VrrpRouterStat objects that match the specified input criteria. 
:rtype vrrp_router_stats: Array of VrrpRouterStat """ return self.api_list_request(self._get_method_fullname("index"), kwargs) def search(self, **kwargs): """Lists the available vrrp router stats matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below. **Inputs** | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected. :type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected. :type DeviceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param EndTime: The date and time the record was last modified in NetMRI. :type EndTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param EndTime: The date and time the record was last modified in NetMRI. :type EndTime: Array of DateTime | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry. :type InterfaceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry. 
:type InterfaceID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics. :type IprgMemberID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics. :type IprgMemberID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IprgNumber: The unique IprgNumber in the Vrrp router. :type IprgNumber: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IprgNumber: The unique IprgNumber in the Vrrp router. :type IprgNumber: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param StartTime: The date and time the record was initially created in NetMRI. :type StartTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param StartTime: The date and time the record was initially created in NetMRI. :type StartTime: Array of DateTime | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic :type VrrpAddressListErrors: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic :type VrrpAddressListErrors: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics. 
:type VrrpAdvertiseIntervalErrors: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics. :type VrrpAdvertiseIntervalErrors: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics. :type VrrpAdvertiseRcvd: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics. :type VrrpAdvertiseRcvd: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics. :type VrrpAuthFailures: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics. :type VrrpAuthFailures: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpAuthTypeMismatch: The mismatch authentication type. :type VrrpAuthTypeMismatch: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpAuthTypeMismatch: The mismatch authentication type. :type VrrpAuthTypeMismatch: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpBecomeMaster: The master of the Vrrp Router Statistics. :type VrrpBecomeMaster: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpBecomeMaster: The master of the Vrrp Router Statistics. 
:type VrrpBecomeMaster: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics. :type VrrpInvalidAuthType: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics. :type VrrpInvalidAuthType: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpInvalidTypePktsRcvd: The packet received with Invalid Type. :type VrrpInvalidTypePktsRcvd: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpInvalidTypePktsRcvd: The packet received with Invalid Type. :type VrrpInvalidTypePktsRcvd: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpIpTtlErrors: The total number of IP address errors occurred in the Vrrp Router Statistics. :type VrrpIpTtlErrors: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpIpTtlErrors: The total number of IP address errors occurred in the Vrrp Router Statistics. :type VrrpIpTtlErrors: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics. :type VrrpPacketLengthErrors: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics. 
:type VrrpPacketLengthErrors: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpPriorityZeroPktsRcvd: The packet received with priority zero. :type VrrpPriorityZeroPktsRcvd: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpPriorityZeroPktsRcvd: The packet received with priority zero. :type VrrpPriorityZeroPktsRcvd: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpPriorityZeroPktsSent: The packet sent with priority zero. :type VrrpPriorityZeroPktsSent: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpPriorityZeroPktsSent: The packet sent with priority zero. :type VrrpPriorityZeroPktsSent: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. :type VrrpRouterStatsID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. :type VrrpRouterStatsID: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry. :type ifIndex: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry. :type ifIndex: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. 
:type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` today :param starttime: The data returned will represent the vrrp router stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data. :type starttime: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` tomorrow :param endtime: The data returned will represent the vrrp router stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data. :type endtime: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. 
So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` VrrpRouterStatsID :param sort: The data field(s) to use for sorting the output. Default is VrrpRouterStatsID. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each VrrpRouterStat. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. 
:type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param query: This value will be matched against vrrp router stats, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DeviceID, EndTime, InterfaceID, IprgMemberID, IprgNumber, StartTime, VrrpAddressListErrors, VrrpAdvertiseIntervalErrors, VrrpAdvertiseRcvd, VrrpAuthFailures, VrrpAuthTypeMismatch, VrrpBecomeMaster, VrrpInvalidAuthType, VrrpInvalidTypePktsRcvd, VrrpIpTtlErrors, VrrpPacketLengthErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpRouterStatsID, ifIndex. :type query: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return vrrp_router_stats: An array of the VrrpRouterStat objects that match the specified input criteria. :rtype vrrp_router_stats: Array of VrrpRouterStat """ return self.api_list_request(self._get_method_fullname("search"), kwargs) def find(self, **kwargs): """Lists the available vrrp router stats matching the input specification. 
This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DeviceID, EndTime, InterfaceID, IprgMemberID, IprgNumber, StartTime, VrrpAddressListErrors, VrrpAdvertiseIntervalErrors, VrrpAdvertiseRcvd, VrrpAuthFailures, VrrpAuthTypeMismatch, VrrpBecomeMaster, VrrpInvalidAuthType, VrrpInvalidTypePktsRcvd, VrrpIpTtlErrors, VrrpPacketLengthErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpRouterStatsID, ifIndex. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified. :type val_f_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. 
The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified. :type val_c_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_EndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified. :type val_f_EndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified. :type val_c_EndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry. 
For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_InterfaceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified. :type val_f_InterfaceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified. :type val_c_InterfaceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_IprgMemberID: The operator to apply to the field IprgMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_IprgMemberID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_IprgMemberID: If op_IprgMemberID is specified, the field named in this input will be compared to the value in IprgMemberID using the specified operator. 
That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberID must be specified if op_IprgMemberID is specified. :type val_f_IprgMemberID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_IprgMemberID: If op_IprgMemberID is specified, this value will be compared to the value in IprgMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberID must be specified if op_IprgMemberID is specified. :type val_c_IprgMemberID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_IprgNumber: The operator to apply to the field IprgNumber. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgNumber: The unique IprgNumber in the Vrrp router. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_IprgNumber: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_IprgNumber: If op_IprgNumber is specified, the field named in this input will be compared to the value in IprgNumber using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgNumber must be specified if op_IprgNumber is specified. :type val_f_IprgNumber: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_IprgNumber: If op_IprgNumber is specified, this value will be compared to the value in IprgNumber using the specified operator. The value in this input will be treated as an explicit constant value. 
Either this field or val_f_IprgNumber must be specified if op_IprgNumber is specified. :type val_c_IprgNumber: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_StartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified. :type val_f_StartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified. :type val_c_StartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpAddressListErrors: The operator to apply to the field VrrpAddressListErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. 
VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpAddressListErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpAddressListErrors: If op_VrrpAddressListErrors is specified, the field named in this input will be compared to the value in VrrpAddressListErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAddressListErrors must be specified if op_VrrpAddressListErrors is specified. :type val_f_VrrpAddressListErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpAddressListErrors: If op_VrrpAddressListErrors is specified, this value will be compared to the value in VrrpAddressListErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAddressListErrors must be specified if op_VrrpAddressListErrors is specified. :type val_c_VrrpAddressListErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpAdvertiseIntervalErrors: The operator to apply to the field VrrpAdvertiseIntervalErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_VrrpAdvertiseIntervalErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpAdvertiseIntervalErrors: If op_VrrpAdvertiseIntervalErrors is specified, the field named in this input will be compared to the value in VrrpAdvertiseIntervalErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAdvertiseIntervalErrors must be specified if op_VrrpAdvertiseIntervalErrors is specified. :type val_f_VrrpAdvertiseIntervalErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpAdvertiseIntervalErrors: If op_VrrpAdvertiseIntervalErrors is specified, this value will be compared to the value in VrrpAdvertiseIntervalErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAdvertiseIntervalErrors must be specified if op_VrrpAdvertiseIntervalErrors is specified. :type val_c_VrrpAdvertiseIntervalErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpAdvertiseRcvd: The operator to apply to the field VrrpAdvertiseRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpAdvertiseRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpAdvertiseRcvd: If op_VrrpAdvertiseRcvd is specified, the field named in this input will be compared to the value in VrrpAdvertiseRcvd using the specified operator. 
That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAdvertiseRcvd must be specified if op_VrrpAdvertiseRcvd is specified. :type val_f_VrrpAdvertiseRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpAdvertiseRcvd: If op_VrrpAdvertiseRcvd is specified, this value will be compared to the value in VrrpAdvertiseRcvd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAdvertiseRcvd must be specified if op_VrrpAdvertiseRcvd is specified. :type val_c_VrrpAdvertiseRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpAuthFailures: The operator to apply to the field VrrpAuthFailures. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpAuthFailures: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpAuthFailures: If op_VrrpAuthFailures is specified, the field named in this input will be compared to the value in VrrpAuthFailures using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAuthFailures must be specified if op_VrrpAuthFailures is specified. 
:type val_f_VrrpAuthFailures: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpAuthFailures: If op_VrrpAuthFailures is specified, this value will be compared to the value in VrrpAuthFailures using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAuthFailures must be specified if op_VrrpAuthFailures is specified. :type val_c_VrrpAuthFailures: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpAuthTypeMismatch: The operator to apply to the field VrrpAuthTypeMismatch. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAuthTypeMismatch: The mismatch authentication type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpAuthTypeMismatch: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpAuthTypeMismatch: If op_VrrpAuthTypeMismatch is specified, the field named in this input will be compared to the value in VrrpAuthTypeMismatch using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAuthTypeMismatch must be specified if op_VrrpAuthTypeMismatch is specified. :type val_f_VrrpAuthTypeMismatch: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpAuthTypeMismatch: If op_VrrpAuthTypeMismatch is specified, this value will be compared to the value in VrrpAuthTypeMismatch using the specified operator. The value in this input will be treated as an explicit constant value. 
Either this field or val_f_VrrpAuthTypeMismatch must be specified if op_VrrpAuthTypeMismatch is specified. :type val_c_VrrpAuthTypeMismatch: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpBecomeMaster: The operator to apply to the field VrrpBecomeMaster. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpBecomeMaster: The master of the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpBecomeMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpBecomeMaster: If op_VrrpBecomeMaster is specified, the field named in this input will be compared to the value in VrrpBecomeMaster using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpBecomeMaster must be specified if op_VrrpBecomeMaster is specified. :type val_f_VrrpBecomeMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpBecomeMaster: If op_VrrpBecomeMaster is specified, this value will be compared to the value in VrrpBecomeMaster using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpBecomeMaster must be specified if op_VrrpBecomeMaster is specified. :type val_c_VrrpBecomeMaster: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpInvalidAuthType: The operator to apply to the field VrrpInvalidAuthType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. 
VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpInvalidAuthType: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpInvalidAuthType: If op_VrrpInvalidAuthType is specified, the field named in this input will be compared to the value in VrrpInvalidAuthType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpInvalidAuthType must be specified if op_VrrpInvalidAuthType is specified. :type val_f_VrrpInvalidAuthType: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpInvalidAuthType: If op_VrrpInvalidAuthType is specified, this value will be compared to the value in VrrpInvalidAuthType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpInvalidAuthType must be specified if op_VrrpInvalidAuthType is specified. :type val_c_VrrpInvalidAuthType: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpInvalidTypePktsRcvd: The operator to apply to the field VrrpInvalidTypePktsRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpInvalidTypePktsRcvd: The packet received with Invalid Type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_VrrpInvalidTypePktsRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpInvalidTypePktsRcvd: If op_VrrpInvalidTypePktsRcvd is specified, the field named in this input will be compared to the value in VrrpInvalidTypePktsRcvd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpInvalidTypePktsRcvd must be specified if op_VrrpInvalidTypePktsRcvd is specified. :type val_f_VrrpInvalidTypePktsRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpInvalidTypePktsRcvd: If op_VrrpInvalidTypePktsRcvd is specified, this value will be compared to the value in VrrpInvalidTypePktsRcvd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpInvalidTypePktsRcvd must be specified if op_VrrpInvalidTypePktsRcvd is specified. :type val_c_VrrpInvalidTypePktsRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpIpTtlErrors: The operator to apply to the field VrrpIpTtlErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpIpTtlErrors: The total number of IP address errors occurred in the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpIpTtlErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpIpTtlErrors: If op_VrrpIpTtlErrors is specified, the field named in this input will be compared to the value in VrrpIpTtlErrors using the specified operator. 
That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpIpTtlErrors must be specified if op_VrrpIpTtlErrors is specified. :type val_f_VrrpIpTtlErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpIpTtlErrors: If op_VrrpIpTtlErrors is specified, this value will be compared to the value in VrrpIpTtlErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpIpTtlErrors must be specified if op_VrrpIpTtlErrors is specified. :type val_c_VrrpIpTtlErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpPacketLengthErrors: The operator to apply to the field VrrpPacketLengthErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpPacketLengthErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpPacketLengthErrors: If op_VrrpPacketLengthErrors is specified, the field named in this input will be compared to the value in VrrpPacketLengthErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpPacketLengthErrors must be specified if op_VrrpPacketLengthErrors is specified. 
:type val_f_VrrpPacketLengthErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpPacketLengthErrors: If op_VrrpPacketLengthErrors is specified, this value will be compared to the value in VrrpPacketLengthErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpPacketLengthErrors must be specified if op_VrrpPacketLengthErrors is specified. :type val_c_VrrpPacketLengthErrors: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpPriorityZeroPktsRcvd: The operator to apply to the field VrrpPriorityZeroPktsRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpPriorityZeroPktsRcvd: The packet received with priority zero. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpPriorityZeroPktsRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpPriorityZeroPktsRcvd: If op_VrrpPriorityZeroPktsRcvd is specified, the field named in this input will be compared to the value in VrrpPriorityZeroPktsRcvd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpPriorityZeroPktsRcvd must be specified if op_VrrpPriorityZeroPktsRcvd is specified. :type val_f_VrrpPriorityZeroPktsRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpPriorityZeroPktsRcvd: If op_VrrpPriorityZeroPktsRcvd is specified, this value will be compared to the value in VrrpPriorityZeroPktsRcvd using the specified operator. 
The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpPriorityZeroPktsRcvd must be specified if op_VrrpPriorityZeroPktsRcvd is specified. :type val_c_VrrpPriorityZeroPktsRcvd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpPriorityZeroPktsSent: The operator to apply to the field VrrpPriorityZeroPktsSent. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpPriorityZeroPktsSent: The packet sent with priority zero. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpPriorityZeroPktsSent: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpPriorityZeroPktsSent: If op_VrrpPriorityZeroPktsSent is specified, the field named in this input will be compared to the value in VrrpPriorityZeroPktsSent using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpPriorityZeroPktsSent must be specified if op_VrrpPriorityZeroPktsSent is specified. :type val_f_VrrpPriorityZeroPktsSent: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpPriorityZeroPktsSent: If op_VrrpPriorityZeroPktsSent is specified, this value will be compared to the value in VrrpPriorityZeroPktsSent using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpPriorityZeroPktsSent must be specified if op_VrrpPriorityZeroPktsSent is specified. 
:type val_c_VrrpPriorityZeroPktsSent: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VrrpRouterStatsID: The operator to apply to the field VrrpRouterStatsID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VrrpRouterStatsID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VrrpRouterStatsID: If op_VrrpRouterStatsID is specified, the field named in this input will be compared to the value in VrrpRouterStatsID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpRouterStatsID must be specified if op_VrrpRouterStatsID is specified. :type val_f_VrrpRouterStatsID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VrrpRouterStatsID: If op_VrrpRouterStatsID is specified, this value will be compared to the value in VrrpRouterStatsID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpRouterStatsID must be specified if op_VrrpRouterStatsID is specified. :type val_c_VrrpRouterStatsID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_ifIndex: The operator to apply to the field ifIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry. 
For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_ifIndex: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_ifIndex: If op_ifIndex is specified, the field named in this input will be compared to the value in ifIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifIndex must be specified if op_ifIndex is specified. :type val_f_ifIndex: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_ifIndex: If op_ifIndex is specified, this value will be compared to the value in ifIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifIndex must be specified if op_ifIndex is specified. :type val_c_ifIndex: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` today :param starttime: The data returned will represent the vrrp router stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data. :type starttime: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` tomorrow :param endtime: The data returned will represent the vrrp router stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data. 
:type endtime: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` VrrpRouterStatsID :param sort: The data field(s) to use for sorting the output. Default is VrrpRouterStatsID. 
Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each VrrpRouterStat. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. 
The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return vrrp_router_stats: An array of the VrrpRouterStat objects that match the specified input criteria. :rtype vrrp_router_stats: Array of VrrpRouterStat """ return self.api_list_request(self._get_method_fullname("find"), kwargs) def device(self, **kwargs): """The device from which this data was collected. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. :type VrrpRouterStatsID: Integer **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return : The device from which this data was collected. :rtype : Device """ return self.api_request(self._get_method_fullname("device"), kwargs) def infradevice(self, **kwargs): """The device from which this data was collected. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. :type VrrpRouterStatsID: Integer **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return : The device from which this data was collected. :rtype : InfraDevice """ return self.api_request(self._get_method_fullname("infradevice"), kwargs)
apache-2.0
koobonil/Boss2D
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/integrate/__init__.py
100
1081
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Integration and ODE solvers. See the @{$python/contrib.integrate} guide. @@odeint """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.contrib.integrate.python.ops.odes import * from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)
mit
dhermes/gcloud-python
automl/google/cloud/automl_v1beta1/__init__.py
2
1240
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from google.cloud.automl_v1beta1 import types from google.cloud.automl_v1beta1.gapic import auto_ml_client from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic import prediction_service_client class PredictionServiceClient(prediction_service_client.PredictionServiceClient): __doc__ = prediction_service_client.PredictionServiceClient.__doc__ enums = enums class AutoMlClient(auto_ml_client.AutoMlClient): __doc__ = auto_ml_client.AutoMlClient.__doc__ enums = enums __all__ = ("enums", "types", "PredictionServiceClient", "AutoMlClient")
apache-2.0
arnaud-morvan/QGIS
python/plugins/processing/algs/grass7/ext/v_what_vect.py
12
1535
# -*- coding: utf-8 -*- """ *************************************************************************** v_what_vect.py -------------- Date : March 2016 Copyright : (C) 2016 by Médéric Ribreux Email : medspx at medspx dot fr *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Médéric Ribreux' __date__ = 'March 2016' __copyright__ = '(C) 2016, Médéric Ribreux' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' def processCommand(alg, parameters, context, feedback): # Exclude outputs from commands alg.processCommand(parameters, context, feedback, True) def processOutputs(alg, parameters, context, feedback): # We need to add the initial vector layer to outputs: fileName = alg.parameterAsOutputLayer(parameters, 'output', context) grassName = alg.exportedLayers['map'] alg.exportVectorLayer(grassName, fileName)
gpl-2.0
panjia1983/hd_trajopt
trajoptpy/kin_utils.py
8
4501
""" Kinematics helpers for openrave """ import numpy as np def shortest_paths(ncost_nk,ecost_nkk): """ Find minimum cost paths through graph (one path for each end point) where all nodes in nth row are connected to all nodes in (n+1)st row ncost_nk: node costs ecost_nkk: edge costs returns: (paths, costs) where paths is a K x N array of integers. Each row gives the minimum-cost path to its final node. costs has length K and gives the cost of each path """ N = len(ncost_nk) assert len(ecost_nkk) == N-1 cost_nk = [None for _ in xrange(N)] prev_nk = [None for _ in xrange(N-1)] cost_nk[0] = ncost_nk[0] for n in xrange(1,N): cost_kk = ecost_nkk[n-1] + cost_nk[n-1][:,None] + ncost_nk[n][None,:] cost_nk[n] = cost_kk.min(axis=0) prev_nk[n-1] = cost_kk.argmin(axis=0) path_costs = cost_nk[N-1] paths = [None for _ in xrange(N)] paths[N-1] = np.arange(len(ncost_nk[-1])) for n in xrange(N-1,0,-1): paths[n-1] = prev_nk[n-1][paths[n]] return np.array(paths).T, path_costs def pairwise_squared_dist(x,y, timestep=None): "pairwise squared distance between rows of matrices x and y" return (x**2).sum(axis=1)[:,None]+(y**2).sum(axis=1)[None,:]-2*x.dot(y.T) def traj_cart2joint(hmats, ikfunc, start_joints = None, nodecost=None, edgecost = None): """ hmats: poses at times t = 0,1,2,...T-1 ikfunc: a function f: R^(4x4) -> [R^k] i.e., map from 4x4 matrix to a list of joint states start_joints: None, starting joint state, or list of starting joint states if start_joints is supplied, the first element of hmats is ignored nodecost (optional): the cost of a joint state function f : R^k, timestep -> R edgecost (optional): the cost of transitions function f : R^(n x k), R^(m x k), timestep -> R^(n x m) defaults to squared euclidean distance returns: (trajectories, costs, timesteps) where trajectories is a list of M 2d arrays, each of which has length S <= T costs is a list of M floats, each of which gives the cost of the corresponding trajectory and timesteps is a list of length S--the timesteps 
when an IK solution exists """ iksolns = [] timesteps = [] for (i,hmat) in enumerate(hmats): if i==0 and start_joints is not None: solns = np.atleast_2d(start_joints) else: solns = ikfunc(hmat) if len(solns) > 0: iksolns.append(solns) timesteps.append(i) if edgecost is None: edgecost = pairwise_squared_dist N = len(iksolns) ncost_nk = [] ecost_nkk = [] for i in xrange(0,len(iksolns)): solns0 = iksolns[i] if nodecost is None: ncost_nk.append(np.zeros(len(solns0))) else: ncost_nk.append(np.array([nodecost(soln, timesteps[i]) for soln in solns0])) if i>0: solnsprev = iksolns[i-1] ecost_nkk.append(pairwise_squared_dist(solnsprev, solns0, i-1)) paths, path_costs = shortest_paths(ncost_nk, ecost_nkk) return [np.array([iksolns[t][i] for (t,i) in enumerate(path)]) for path in paths], path_costs, timesteps def ik_for_link(T_w_link, manip, link_name, filter_options = 18, return_all_solns = False): """ Perform IK for an arbitrary link attached to the manipulator e.g. you might want ik for pr2 "r_gripper_tool_frame" instead of the openrave EE frame T_w_link: 4x4 matrix. "world frame from link frame" manip: openrave Manipulator link_name: (you know) filter_options: see openravepy.IkFilterOptions return_all_solns: if True, returns a list. if false, returns a single solution (openrave's default) if a solution exists, otherwise return None """ robot = manip.GetRobot() link = robot.GetLink(link_name) if not robot.DoesAffect(manip.GetArmJoints()[-1], link.GetIndex()): raise Exception("link %s is not attached to end effector of manipulator %s"%(link_name, manip.GetName())) Tcur_w_link = link.GetTransform() Tcur_w_ee = manip.GetEndEffectorTransform() Tf_link_ee = np.linalg.solve(Tcur_w_link, Tcur_w_ee) T_w_ee = T_w_link.dot(Tf_link_ee) if return_all_solns: return manip.FindIKSolutions(T_w_ee, filter_options) else: return manip.FindIKSolution(T_w_ee, filter_options)
bsd-2-clause
jenisys/behave
tests/unit/tag_expression/test_model_ext.py
3
2200
# -*- coding: UTF-8 -*-
# pylint: disable=bad-whitespace
"""Unit tests for the tag-expression model class extensions."""

from __future__ import absolute_import
from behave.tag_expression.model import Expression, Literal
from behave.tag_expression.model_ext import Matcher
import pytest


# -----------------------------------------------------------------------------
# TEST SUITE
# -----------------------------------------------------------------------------
class TestExpression(object):
    """Tests for the :meth:`Expression.check` convenience API."""

    def test_check__can_be_used(self):
        literal = Literal("foo")
        assert literal.check(["foo"]) is True
        assert literal.check(["other"]) is False


class TestMatcher(object):
    """Tests for wildcard tag matching via :class:`Matcher`."""

    @pytest.mark.parametrize("expected, tag, case", [
        (True,  "foo.bar", "startswith_1"),
        (True,  "foo.bax", "startswith_2"),
        (True,  "foo.",    "exact_match"),
        (False, "something.foo.bar", "not_starts_with"),
        (False, "foo_bar", "similar"),
    ])
    def test_evaluate_with_startswith_pattern(self, expected, tag, case):
        # Trailing wildcard: the tag must begin with "foo."
        matcher = Matcher("foo.*")
        assert matcher.evaluate([tag]) == expected

    @pytest.mark.parametrize("expected, tag, case", [
        (True,  "bar.foo", "endswith_1"),
        (True,  "bax.foo", "endswith_2"),
        (True,  ".foo",    "exact_match"),
        (False, "something.foo.bar", "not_endswith"),
        (False, "bar_foo", "similar"),
    ])
    def test_evaluate_with_endswith_pattern(self, expected, tag, case):
        # Leading wildcard: the tag must end with ".foo"
        matcher = Matcher("*.foo")
        assert matcher.evaluate([tag]) == expected

    @pytest.mark.parametrize("expected, tag, case", [
        (False, "bar.foo",     "startwith_1"),
        (False, "foo.bax",     "endswith_2"),
        (True,  "bar.foo.bax", "contains"),
        (True,  ".foo.",       "exact_match"),
        (False, "bar_foo.bax", "similar"),
    ])
    def test_evaluate_with_contains_pattern(self, expected, tag, case):
        # Wildcards on both sides: the tag must contain ".foo."
        matcher = Matcher("*.foo.*")
        assert matcher.evaluate([tag]) == expected
bsd-2-clause
maropu/spark
python/pyspark/ml/param/__init__.py
15
18531
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import array
from abc import ABCMeta
import copy
import numpy as np

from py4j.java_gateway import JavaObject

from pyspark.ml.linalg import DenseVector, Vector, Matrix
from pyspark.ml.util import Identifiable


__all__ = ['Param', 'Params', 'TypeConverters']


class Param(object):
    """
    A param with self-contained documentation.

    A Param is identified by its parent's uid plus its name; ``doc``
    carries the human-readable description and ``typeConverter`` is
    applied to every value assigned to this param.

    .. versionadded:: 1.3.0
    """

    def __init__(self, parent, name, doc, typeConverter=None):
        if not isinstance(parent, Identifiable):
            raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
        self.parent = parent.uid
        self.name = str(name)
        self.doc = str(doc)
        # Fall back to the identity converter so typeConverter is always callable.
        self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter

    def _copy_new_parent(self, parent):
        """Copy the current param to a new parent, must be a dummy param."""
        # Only params created via Params._dummy() (parent == "undefined") may
        # be re-parented; copying an owned param would break ownership checks.
        if self.parent == "undefined":
            param = copy.copy(self)
            param.parent = parent.uid
            return param
        else:
            raise ValueError("Cannot copy from non-dummy parent %s." % parent)

    def __str__(self):
        return str(self.parent) + "__" + self.name

    def __repr__(self):
        return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)

    def __hash__(self):
        # Hash on the same (parent, name) identity that __eq__ compares,
        # via the string form "parent__name".
        return hash(str(self))

    def __eq__(self, other):
        if isinstance(other, Param):
            return self.parent == other.parent and self.name == other.name
        else:
            return False


class TypeConverters(object):
    """
    Factory methods for common type conversion functions for `Param.typeConverter`.

    Each ``toX`` converter either returns a value of the target type or
    raises ``TypeError``.

    .. versionadded:: 2.0.0
    """

    @staticmethod
    def _is_numeric(value):
        vtype = type(value)
        # The name check also accepts any type literally named 'long'
        # (e.g. Python 2's long) without importing it.
        return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'

    @staticmethod
    def _is_integer(value):
        return TypeConverters._is_numeric(value) and float(value).is_integer()

    @staticmethod
    def _can_convert_to_list(value):
        vtype = type(value)
        return vtype in [list, np.ndarray, tuple, range, array.array] or isinstance(value, Vector)

    @staticmethod
    def _can_convert_to_string(value):
        vtype = type(value)
        return isinstance(value, str) or vtype in [np.unicode_, np.string_, np.str_]

    @staticmethod
    def identity(value):
        """
        Dummy converter that just returns value.
        """
        return value

    @staticmethod
    def toList(value):
        """
        Convert a value to a list, if possible.
        """
        if type(value) == list:
            return value
        elif type(value) in [np.ndarray, tuple, range, array.array]:
            return list(value)
        elif isinstance(value, Vector):
            return list(value.toArray())
        else:
            raise TypeError("Could not convert %s to list" % value)

    @staticmethod
    def toListFloat(value):
        """
        Convert a value to list of floats, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return [float(v) for v in value]
        raise TypeError("Could not convert %s to list of floats" % value)

    @staticmethod
    def toListListFloat(value):
        """
        Convert a value to list of list of floats, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            # Each inner element is converted (or rejected) by toListFloat.
            return [TypeConverters.toListFloat(v) for v in value]
        raise TypeError("Could not convert %s to list of list of floats" % value)

    @staticmethod
    def toListInt(value):
        """
        Convert a value to list of ints, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_integer(v), value)):
                return [int(v) for v in value]
        raise TypeError("Could not convert %s to list of ints" % value)

    @staticmethod
    def toListString(value):
        """
        Convert a value to list of strings, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
                return [TypeConverters.toString(v) for v in value]
        raise TypeError("Could not convert %s to list of strings" % value)

    @staticmethod
    def toVector(value):
        """
        Convert a value to a MLlib Vector, if possible.
        """
        if isinstance(value, Vector):
            return value
        elif TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return DenseVector(value)
        raise TypeError("Could not convert %s to vector" % value)

    @staticmethod
    def toMatrix(value):
        """
        Convert a value to a MLlib Matrix, if possible.
        """
        if isinstance(value, Matrix):
            return value
        raise TypeError("Could not convert %s to matrix" % value)

    @staticmethod
    def toFloat(value):
        """
        Convert a value to a float, if possible.
        """
        if TypeConverters._is_numeric(value):
            return float(value)
        else:
            raise TypeError("Could not convert %s to float" % value)

    @staticmethod
    def toInt(value):
        """
        Convert a value to an int, if possible.
        """
        if TypeConverters._is_integer(value):
            return int(value)
        else:
            raise TypeError("Could not convert %s to int" % value)

    @staticmethod
    def toString(value):
        """
        Convert a value to a string, if possible.
        """
        if isinstance(value, str):
            return value
        elif type(value) in [np.string_, np.str_, np.unicode_]:
            return str(value)
        else:
            raise TypeError("Could not convert %s to string type" % type(value))

    @staticmethod
    def toBoolean(value):
        """
        Convert a value to a boolean, if possible.
        """
        # Intentionally strict: truthy non-bool values are rejected rather
        # than coerced.
        if type(value) == bool:
            return value
        else:
            raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))


class Params(Identifiable, metaclass=ABCMeta):
    """
    Components that take parameters. This also provides an internal
    param map to store parameter values attached to the instance.

    .. versionadded:: 1.3.0
    """

    def __init__(self):
        super(Params, self).__init__()
        #: internal param map for user-supplied values
        self._paramMap = {}

        #: internal param map for default values
        self._defaultParamMap = {}

        #: cached value returned by :py:func:`params`
        self._params = None

        # Copy the params from the class to the object
        self._copy_params()

    def _copy_params(self):
        """
        Copy all params defined on the class to current object.
        """
        # Class-level params are shared dummies; each instance gets its own
        # copies re-parented to this instance's uid.
        cls = type(self)
        src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
        src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
        for name, param in src_params:
            setattr(self, name, param._copy_new_parent(self))

    @property
    def params(self):
        """
        Returns all params ordered by name. The default implementation
        uses :py:func:`dir` to get all attributes of type
        :py:class:`Param`.
        """
        if self._params is None:
            # Skip the "params" attribute itself (would recurse) and any class
            # properties, so that reflection does not trigger their getters.
            self._params = list(filter(lambda attr: isinstance(attr, Param),
                                       [getattr(self, x) for x in dir(self)
                                        if x != "params" and not isinstance(
                                            getattr(type(self), x, None), property)]))
        return self._params

    def explainParam(self, param):
        """
        Explains a single param and returns its name, doc, and optional
        default value and user-supplied value in a string.
        """
        param = self._resolveParam(param)
        values = []
        if self.isDefined(param):
            if param in self._defaultParamMap:
                values.append("default: %s" % self._defaultParamMap[param])
            if param in self._paramMap:
                values.append("current: %s" % self._paramMap[param])
        else:
            values.append("undefined")
        valueStr = "(" + ", ".join(values) + ")"
        return "%s: %s %s" % (param.name, param.doc, valueStr)

    def explainParams(self):
        """
        Returns the documentation of all params with their optionally
        default values and user-supplied values.
        """
        return "\n".join([self.explainParam(param) for param in self.params])

    def getParam(self, paramName):
        """
        Gets a param by its name.
        """
        param = getattr(self, paramName)
        if isinstance(param, Param):
            return param
        else:
            raise ValueError("Cannot find param with name %s." % paramName)

    def isSet(self, param):
        """
        Checks whether a param is explicitly set by user.
        """
        param = self._resolveParam(param)
        return param in self._paramMap

    def hasDefault(self, param):
        """
        Checks whether a param has a default value.
        """
        param = self._resolveParam(param)
        return param in self._defaultParamMap

    def isDefined(self, param):
        """
        Checks whether a param is explicitly set by user or has
        a default value.
        """
        return self.isSet(param) or self.hasDefault(param)

    def hasParam(self, paramName):
        """
        Tests whether this instance contains a param with a given
        (string) name.
        """
        if isinstance(paramName, str):
            p = getattr(self, paramName, None)
            return isinstance(p, Param)
        else:
            raise TypeError("hasParam(): paramName must be a string")

    def getOrDefault(self, param):
        """
        Gets the value of a param in the user-supplied param map or its
        default value. Raises an error if neither is set.
        """
        param = self._resolveParam(param)
        if param in self._paramMap:
            return self._paramMap[param]
        else:
            return self._defaultParamMap[param]

    def extractParamMap(self, extra=None):
        """
        Extracts the embedded default param values and user-supplied
        values, and then merges them with extra values from input into
        a flat param map, where the latter value is used if there exist
        conflicts, i.e., with ordering: default param values <
        user-supplied values < extra.

        Parameters
        ----------
        extra : dict, optional
            extra param values

        Returns
        -------
        dict
            merged param map
        """
        if extra is None:
            extra = dict()
        paramMap = self._defaultParamMap.copy()
        paramMap.update(self._paramMap)
        paramMap.update(extra)
        return paramMap

    def copy(self, extra=None):
        """
        Creates a copy of this instance with the same uid and some
        extra params. The default implementation creates a
        shallow copy using :py:func:`copy.copy`, and then copies the
        embedded and extra parameters over and returns the copy.
        Subclasses should override this method if the default approach
        is not sufficient.

        Parameters
        ----------
        extra : dict, optional
            Extra parameters to copy to the new instance

        Returns
        -------
        :py:class:`Params`
            Copy of this instance
        """
        if extra is None:
            extra = dict()
        that = copy.copy(self)
        # Fresh maps so the copy does not share mutable state with self.
        that._paramMap = {}
        that._defaultParamMap = {}
        return self._copyValues(that, extra)

    def set(self, param, value):
        """
        Sets a parameter in the embedded param map.
        """
        self._shouldOwn(param)
        try:
            value = param.typeConverter(value)
        except ValueError as e:
            raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e))
        self._paramMap[param] = value

    def _shouldOwn(self, param):
        """
        Validates that the input param belongs to this Params instance.
        """
        if not (self.uid == param.parent and self.hasParam(param.name)):
            raise ValueError("Param %r does not belong to %r." % (param, self))

    def _resolveParam(self, param):
        """
        Resolves a param and validates the ownership.

        Parameters
        ----------
        param : str or :py:class:`Param`
            param name or the param instance, which must
            belong to this Params instance

        Returns
        -------
        :py:class:`Param`
            resolved param instance
        """
        if isinstance(param, Param):
            self._shouldOwn(param)
            return param
        elif isinstance(param, str):
            return self.getParam(param)
        else:
            raise TypeError("Cannot resolve %r as a param." % param)

    def _testOwnParam(self, param_parent, param_name):
        """
        Test the ownership. Return True or False
        """
        return self.uid == param_parent and self.hasParam(param_name)

    @staticmethod
    def _dummy():
        """
        Returns a dummy Params instance used as a placeholder to
        generate docs.
        """
        dummy = Params()
        dummy.uid = "undefined"
        return dummy

    def _set(self, **kwargs):
        """
        Sets user-supplied params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            # None is treated as "leave unset" rather than stored.
            if value is not None:
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
            self._paramMap[p] = value
        return self

    def clear(self, param):
        """
        Clears a param from the param map if it has been explicitly set.
        """
        if self.isSet(param):
            del self._paramMap[param]

    def _setDefault(self, **kwargs):
        """
        Sets default params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            # JavaObject values are stored without conversion.
            if value is not None and not isinstance(value, JavaObject):
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid default param value given for param "%s". %s'
                                    % (p.name, e))
            self._defaultParamMap[p] = value
        return self

    def _copyValues(self, to, extra=None):
        """
        Copies param values from this instance to another instance for
        params shared by them.

        Parameters
        ----------
        to : :py:class:`Params`
            the target instance
        extra : dict, optional
            extra params to be copied

        Returns
        -------
        :py:class:`Params`
            the target instance with param values copied
        """
        paramMap = self._paramMap.copy()
        if isinstance(extra, dict):
            for param, value in extra.items():
                if isinstance(param, Param):
                    paramMap[param] = value
                else:
                    raise TypeError("Expecting a valid instance of Param, but received: {}"
                                    .format(param))
        elif extra is not None:
            raise TypeError("Expecting a dict, but received an object of type {}."
                            .format(type(extra)))
        for param in self.params:
            # copy default params
            if param in self._defaultParamMap and to.hasParam(param.name):
                to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
            # copy explicitly set params
            if param in paramMap and to.hasParam(param.name):
                to._set(**{param.name: paramMap[param]})
        return to

    def _resetUid(self, newUid):
        """
        Changes the uid of this instance. This updates both
        the stored uid and the parent uid of params and param maps.
        This is used by persistence (loading).

        Parameters
        ----------
        newUid
            new uid to use, which is converted to unicode

        Returns
        -------
        :py:class:`Params`
            same instance, but with the uid and Param.parent values
            updated, including within param maps
        """
        newUid = str(newUid)
        self.uid = newUid
        # Rebuild both maps with re-parented copies of each param, because
        # Param hashing/equality depends on the parent uid.
        newDefaultParamMap = dict()
        newParamMap = dict()
        for param in self.params:
            newParam = copy.copy(param)
            newParam.parent = newUid
            if param in self._defaultParamMap:
                newDefaultParamMap[newParam] = self._defaultParamMap[param]
            if param in self._paramMap:
                newParamMap[newParam] = self._paramMap[param]
            param.parent = newUid
        self._defaultParamMap = newDefaultParamMap
        self._paramMap = newParamMap
        return self
apache-2.0
ryfeus/lambda-packs
Pyrestest_wrk/source/pyresttest/yaml/parser.py
409
25542
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream            ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
#                       ALIAS
#                       | properties (block_content | indentless_block_sequence)?
#                       | block_content
#                       | indentless_block_sequence
# block_node        ::= ALIAS
#                       | properties block_content?
#                       | block_content
# flow_node         ::= ALIAS
#                       | properties flow_content?
#                       | flow_content
# properties        ::= TAG ANCHOR? | ANCHOR TAG?
# block_content     ::= block_collection | flow_collection | SCALAR
# flow_content      ::= flow_collection | SCALAR
# block_collection  ::= block_sequence | block_mapping
# flow_collection   ::= flow_sequence | flow_mapping
# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
# block_mapping     ::= BLOCK-MAPPING_START
#                       ((KEY block_node_or_indentless_sequence?)?
#                       (VALUE block_node_or_indentless_sequence?)?)*
#                       BLOCK-END
# flow_sequence     ::= FLOW-SEQUENCE-START
#                       (flow_sequence_entry FLOW-ENTRY)*
#                       flow_sequence_entry?
#                       FLOW-SEQUENCE-END
# flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping      ::= FLOW-MAPPING-START
#                       (flow_mapping_entry FLOW-ENTRY)*
#                       flow_mapping_entry?
#                       FLOW-MAPPING-END
# flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }

__all__ = ['Parser', 'ParserError']

from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *


class ParserError(MarkedYAMLError):
    # Raised for any grammar violation; carries context/problem marks
    # inherited from MarkedYAMLError.
    pass


class Parser(object):
    # Since writing a recursive-descendant parser is a straightforward task, we
    # do not give many comments here.
    #
    # The parser is implemented as a state machine: ``self.state`` holds the
    # bound method that produces the next event, ``self.states`` is the stack
    # of states to return to, and ``self.marks`` tracks the start marks of
    # open collections for error reporting.

    DEFAULT_TAGS = {
        u'!':   u'!',
        u'!!':  u'tag:yaml.org,2002:',
    }

    def __init__(self):
        self.current_event = None
        self.yaml_version = None
        self.tag_handles = {}
        self.states = []
        self.marks = []
        self.state = self.parse_stream_start

    def dispose(self):
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None

    def check_event(self, *choices):
        # Check the type of the next event.
        # With no arguments, any pending event matches.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        if self.current_event is not None:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.current_event, choice):
                    return True
        return False

    def peek_event(self):
        # Get the next event.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        return self.current_event

    def get_event(self):
        # Get the next event and proceed further.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        value = self.current_event
        self.current_event = None
        return value

    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
    # implicit_document ::= block_node DOCUMENT-END*
    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*

    def parse_stream_start(self):

        # Parse the stream start.
        token = self.get_token()
        event = StreamStartEvent(token.start_mark, token.end_mark,
                encoding=token.encoding)

        # Prepare the next state.
        self.state = self.parse_implicit_document_start

        return event

    def parse_implicit_document_start(self):

        # Parse an implicit document.
        if not self.check_token(DirectiveToken, DocumentStartToken,
                StreamEndToken):
            self.tag_handles = self.DEFAULT_TAGS
            token = self.peek_token()
            start_mark = end_mark = token.start_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=False)

            # Prepare the next state.
            self.states.append(self.parse_document_end)
            self.state = self.parse_block_node

            return event

        else:
            return self.parse_document_start()

    def parse_document_start(self):

        # Parse any extra document end indicators.
        while self.check_token(DocumentEndToken):
            self.get_token()

        # Parse an explicit document.
        if not self.check_token(StreamEndToken):
            token = self.peek_token()
            start_mark = token.start_mark
            version, tags = self.process_directives()
            if not self.check_token(DocumentStartToken):
                raise ParserError(None, None,
                        "expected '<document start>', but found %r"
                        % self.peek_token().id,
                        self.peek_token().start_mark)
            token = self.get_token()
            end_mark = token.end_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=True, version=version, tags=tags)
            self.states.append(self.parse_document_end)
            self.state = self.parse_document_content
        else:
            # Parse the end of the stream.
            token = self.get_token()
            event = StreamEndEvent(token.start_mark, token.end_mark)
            # A well-formed stream leaves nothing pending on either stack.
            assert not self.states
            assert not self.marks
            self.state = None
        return event

    def parse_document_end(self):

        # Parse the document end.
        token = self.peek_token()
        start_mark = end_mark = token.start_mark
        explicit = False
        if self.check_token(DocumentEndToken):
            token = self.get_token()
            end_mark = token.end_mark
            explicit = True
        event = DocumentEndEvent(start_mark, end_mark,
                explicit=explicit)

        # Prepare the next state.
        self.state = self.parse_document_start

        return event

    def parse_document_content(self):
        # An empty document body yields an empty scalar event.
        if self.check_token(DirectiveToken,
                DocumentStartToken, DocumentEndToken, StreamEndToken):
            event = self.process_empty_scalar(self.peek_token().start_mark)
            self.state = self.states.pop()
            return event
        else:
            return self.parse_block_node()

    def process_directives(self):
        # Consume %YAML/%TAG directives, validating version compatibility and
        # rejecting duplicates; returns (version, tags) for DocumentStartEvent.
        self.yaml_version = None
        self.tag_handles = {}
        while self.check_token(DirectiveToken):
            token = self.get_token()
            if token.name == u'YAML':
                if self.yaml_version is not None:
                    raise ParserError(None, None,
                            "found duplicate YAML directive", token.start_mark)
                major, minor = token.value
                if major != 1:
                    raise ParserError(None, None,
                            "found incompatible YAML document (version 1.* is required)",
                            token.start_mark)
                self.yaml_version = token.value
            elif token.name == u'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(None, None,
                            "duplicate tag handle %r" % handle.encode('utf-8'),
                            token.start_mark)
                self.tag_handles[handle] = prefix
        if self.tag_handles:
            value = self.yaml_version, self.tag_handles.copy()
        else:
            value = self.yaml_version, None
        # The default handles ('!' and '!!') apply unless overridden.
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value

    # block_node_or_indentless_sequence ::= ALIAS
    #               | properties (block_content | indentless_block_sequence)?
    #               | block_content
    #               | indentless_block_sequence
    # block_node    ::= ALIAS
    #                   | properties block_content?
    #                   | block_content
    # flow_node     ::= ALIAS
    #                   | properties flow_content?
    #                   | flow_content
    # properties    ::= TAG ANCHOR? | ANCHOR TAG?
    # block_content     ::= block_collection | flow_collection | SCALAR
    # flow_content      ::= flow_collection | SCALAR
    # block_collection  ::= block_sequence | block_mapping
    # flow_collection   ::= flow_sequence | flow_mapping

    def parse_block_node(self):
        return self.parse_node(block=True)

    def parse_flow_node(self):
        return self.parse_node()

    def parse_block_node_or_indentless_sequence(self):
        return self.parse_node(block=True, indentless_sequence=True)

    def parse_node(self, block=False, indentless_sequence=False):
        if self.check_token(AliasToken):
            token = self.get_token()
            event = AliasEvent(token.value, token.start_mark, token.end_mark)
            self.state = self.states.pop()
        else:
            anchor = None
            tag = None
            start_mark = end_mark = tag_mark = None
            # Properties may appear in either order: ANCHOR TAG? or TAG ANCHOR?
            if self.check_token(AnchorToken):
                token = self.get_token()
                start_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
                if self.check_token(TagToken):
                    token = self.get_token()
                    tag_mark = token.start_mark
                    end_mark = token.end_mark
                    tag = token.value
            elif self.check_token(TagToken):
                token = self.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
                if self.check_token(AnchorToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    anchor = token.value
            if tag is not None:
                handle, suffix = tag
                if handle is not None:
                    if handle not in self.tag_handles:
                        raise ParserError("while parsing a node", start_mark,
                                "found undefined tag handle %r" % handle.encode('utf-8'),
                                tag_mark)
                    tag = self.tag_handles[handle]+suffix
                else:
                    tag = suffix
            #if tag == u'!':
            #    raise ParserError("while parsing a node", start_mark,
            #            "found non-specific tag '!'", tag_mark,
            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
            if start_mark is None:
                start_mark = end_mark = self.peek_token().start_mark
            event = None
            implicit = (tag is None or tag == u'!')
            if indentless_sequence and self.check_token(BlockEntryToken):
                end_mark = self.peek_token().end_mark
                event = SequenceStartEvent(anchor, tag, implicit,
                        start_mark, end_mark)
                self.state = self.parse_indentless_sequence_entry
            else:
                if self.check_token(ScalarToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    if (token.plain and tag is None) or tag == u'!':
                        implicit = (True, False)
                    elif tag is None:
                        implicit = (False, True)
                    else:
                        implicit = (False, False)
                    event = ScalarEvent(anchor, tag, implicit, token.value,
                            start_mark, end_mark, style=token.style)
                    self.state = self.states.pop()
                elif self.check_token(FlowSequenceStartToken):
                    end_mark = self.peek_token().end_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_sequence_first_entry
                elif self.check_token(FlowMappingStartToken):
                    end_mark = self.peek_token().end_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_mapping_first_key
                elif block and self.check_token(BlockSequenceStartToken):
                    end_mark = self.peek_token().start_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_sequence_first_entry
                elif block and self.check_token(BlockMappingStartToken):
                    end_mark = self.peek_token().start_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_mapping_first_key
                elif anchor is not None or tag is not None:
                    # Empty scalars are allowed even if a tag or an anchor is
                    # specified.
                    event = ScalarEvent(anchor, tag, (implicit, False), u'',
                            start_mark, end_mark)
                    self.state = self.states.pop()
                else:
                    if block:
                        node = 'block'
                    else:
                        node = 'flow'
                    token = self.peek_token()
                    raise ParserError("while parsing a %s node" % node, start_mark,
                            "expected the node content, but found %r" % token.id,
                            token.start_mark)
        return event

    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END

    def parse_block_sequence_first_entry(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_sequence_entry()

    def parse_block_sequence_entry(self):
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                # "- " followed by nothing: emit an empty scalar entry.
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block collection", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+

    def parse_indentless_sequence_entry(self):
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken,
                    KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_indentless_sequence_entry)
                return self.parse_block_node()
            else:
                self.state = self.parse_indentless_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        token = self.peek_token()
        event = SequenceEndEvent(token.start_mark, token.start_mark)
        self.state = self.states.pop()
        return event

    # block_mapping     ::= BLOCK-MAPPING_START
    #                       ((KEY block_node_or_indentless_sequence?)?
    #                       (VALUE block_node_or_indentless_sequence?)?)*
    #                       BLOCK-END

    def parse_block_mapping_first_key(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_mapping_key()

    def parse_block_mapping_key(self):
        if self.check_token(KeyToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_value)
                return self.parse_block_node_or_indentless_sequence()
            else:
                self.state = self.parse_block_mapping_value
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block mapping", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_block_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_key)
                return self.parse_block_node_or_indentless_sequence()
            else:
                self.state = self.parse_block_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            # Key with no value indicator: the value is an empty scalar.
            self.state = self.parse_block_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)

    # flow_sequence     ::= FLOW-SEQUENCE-START
    #                       (flow_sequence_entry FLOW-ENTRY)*
    #                       flow_sequence_entry?
    #                       FLOW-SEQUENCE-END
    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
    #
    # Note that while production rules for both flow_sequence_entry and
    # flow_mapping_entry are equal, their interpretations are different.
    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generate an inline mapping (set syntax).

    def parse_flow_sequence_first_entry(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_sequence_entry(first=True)

    def parse_flow_sequence_entry(self, first=False):
        if not self.check_token(FlowSequenceEndToken):
            if not first:
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow sequence", self.marks[-1],
                            "expected ',' or ']', but got %r" % token.id, token.start_mark)

            if self.check_token(KeyToken):
                token = self.peek_token()
                event = MappingStartEvent(None, None, True,
                        token.start_mark, token.end_mark,
                        flow_style=True)
                self.state = self.parse_flow_sequence_entry_mapping_key
                return event
            elif not self.check_token(FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_flow_node()
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_flow_sequence_entry_mapping_key(self):
        token = self.get_token()
        if not self.check_token(ValueToken,
                FlowEntryToken, FlowSequenceEndToken):
            self.states.append(self.parse_flow_sequence_entry_mapping_value)
            return self.parse_flow_node()
        else:
            self.state = self.parse_flow_sequence_entry_mapping_value
            return self.process_empty_scalar(token.end_mark)

    def parse_flow_sequence_entry_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry_mapping_end)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_sequence_entry_mapping_end
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_sequence_entry_mapping_end
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)

    def parse_flow_sequence_entry_mapping_end(self):
        self.state = self.parse_flow_sequence_entry
        token = self.peek_token()
        return MappingEndEvent(token.start_mark, token.start_mark)

    # flow_mapping  ::= FLOW-MAPPING-START
    #                   (flow_mapping_entry FLOW-ENTRY)*
    #                   flow_mapping_entry?
    #                   FLOW-MAPPING-END
    # flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?

    def parse_flow_mapping_first_key(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_mapping_key(first=True)

    def parse_flow_mapping_key(self, first=False):
        if not self.check_token(FlowMappingEndToken):
            if not first:
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow mapping", self.marks[-1],
                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                token = self.get_token()
                if not self.check_token(ValueToken,
                        FlowEntryToken, FlowMappingEndToken):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif not self.check_token(FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_flow_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)

    def parse_flow_mapping_empty_value(self):
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(self.peek_token().start_mark)

    def process_empty_scalar(self, mark):
        # Zero-width plain scalar used wherever the grammar allows an
        # omitted node.
        return ScalarEvent(None, None, (True, False), u'', mark, mark)
mit
zorojean/scikit-learn
sklearn/linear_model/base.py
23
16013
""" Generalized Linear models. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Vincent Michel <vincent.michel@inria.fr> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # # License: BSD 3 clause from __future__ import division from abc import ABCMeta, abstractmethod import numbers import warnings import numpy as np import scipy.sparse as sp from scipy import linalg from scipy import sparse from ..externals import six from ..externals.joblib import Parallel, delayed from ..base import BaseEstimator, ClassifierMixin, RegressorMixin from ..utils import as_float_array, check_array, check_X_y, deprecated, column_or_1d from ..utils.extmath import safe_sparse_dot from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale from ..utils.fixes import sparse_lsqr from ..utils.validation import NotFittedError, check_is_fitted ### ### TODO: intercept for all models ### We should define a common function to center data instead of ### repeating the same code inside each fit method. ### TODO: bayesian_ridge_regression and bayesian_regression_ard ### should be squashed into its respective objects. def sparse_center_data(X, y, fit_intercept, normalize=False): """ Compute information needed to center data to have mean zero along axis 0. Be aware that X will not be centered since it would break the sparsity, but will be normalized if asked so. """ if fit_intercept: # we might require not to change the csr matrix sometimes # store a copy if normalize is True. # Change dtype to float64 since mean_variance_axis accepts # it that way. 
if sp.isspmatrix(X) and X.getformat() == 'csr': X = sp.csr_matrix(X, copy=normalize, dtype=np.float64) else: X = sp.csc_matrix(X, copy=normalize, dtype=np.float64) X_mean, X_var = mean_variance_axis(X, axis=0) if normalize: # transform variance to std in-place # XXX: currently scaled to variance=n_samples to match center_data X_var *= X.shape[0] X_std = np.sqrt(X_var, X_var) del X_var X_std[X_std == 0] = 1 inplace_column_scale(X, 1. / X_std) else: X_std = np.ones(X.shape[1]) y_mean = y.mean(axis=0) y = y - y_mean else: X_mean = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_mean, y_mean, X_std def center_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None): """ Centers data to have mean zero along axis 0. This is here because nearly all linear models will want their data to be centered. If sample_weight is not None, then the weighted mean of X and y is zero, and not the mean itself """ X = as_float_array(X, copy) if fit_intercept: if isinstance(sample_weight, numbers.Number): sample_weight = None if sp.issparse(X): X_mean = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) else: X_mean = np.average(X, axis=0, weights=sample_weight) X -= X_mean if normalize: # XXX: currently scaled to variance=n_samples X_std = np.sqrt(np.sum(X ** 2, axis=0)) X_std[X_std == 0] = 1 X /= X_std else: X_std = np.ones(X.shape[1]) y_mean = np.average(y, axis=0, weights=sample_weight) y = y - y_mean else: X_mean = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) y_mean = 0. 
if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_mean, y_mean, X_std def _rescale_data(X, y, sample_weight): """Rescale data so as to support sample_weight""" n_samples = X.shape[0] sample_weight = sample_weight * np.ones(n_samples) sample_weight = np.sqrt(sample_weight) sw_matrix = sparse.dia_matrix((sample_weight, 0), shape=(n_samples, n_samples)) X = safe_sparse_dot(sw_matrix, X) y = safe_sparse_dot(sw_matrix, y) return X, y class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)): """Base class for Linear Models""" @abstractmethod def fit(self, X, y): """Fit model.""" @deprecated(" and will be removed in 0.19.") def decision_function(self, X): """Decision function of the linear model. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns ------- C : array, shape = (n_samples,) Returns predicted values. """ return self._decision_function(X) def _decision_function(self, X): check_is_fitted(self, "coef_") X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ def predict(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns ------- C : array, shape = (n_samples,) Returns predicted values. """ return self._decision_function(X) _center_data = staticmethod(center_data) def _set_intercept(self, X_mean, y_mean, X_std): """Set the intercept_ """ if self.fit_intercept: self.coef_ = self.coef_ / X_std self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T) else: self.intercept_ = 0. # XXX Should this derive from LinearModel? It should be a mixin, not an ABC. # Maybe the n_features checking can be moved to LinearModel. class LinearClassifierMixin(ClassifierMixin): """Mixin for linear classifiers. Handles prediction for sparse and dense X. """ def decision_function(self, X): """Predict confidence scores for samples. 
The confidence score for a sample is the signed distance of that sample to the hyperplane. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns ------- array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes) Confidence scores per (sample, class) combination. In the binary case, confidence score for self.classes_[1] where >0 means this class would be predicted. """ if not hasattr(self, 'coef_') or self.coef_ is None: raise NotFittedError("This %(name)s instance is not fitted " "yet" % {'name': type(self).__name__}) X = check_array(X, accept_sparse='csr') n_features = self.coef_.shape[1] if X.shape[1] != n_features: raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features)) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() if scores.shape[1] == 1 else scores def predict(self, X): """Predict class labels for samples in X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Samples. Returns ------- C : array, shape = [n_samples] Predicted class label per sample. """ scores = self.decision_function(X) if len(scores.shape) == 1: indices = (scores > 0).astype(np.int) else: indices = scores.argmax(axis=1) return self.classes_[indices] def _predict_proba_lr(self, X): """Probability estimation for OvR logistic regression. Positive class probabilities are computed as 1. / (1. + np.exp(-self.decision_function(X))); multiclass is handled by normalizing that over all classes. """ prob = self.decision_function(X) prob *= -1 np.exp(prob, prob) prob += 1 np.reciprocal(prob, prob) if prob.ndim == 1: return np.vstack([1 - prob, prob]).T else: # OvR normalization, like LibLinear's predict_probability prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) return prob class SparseCoefMixin(object): """Mixin for converting coef_ to and from CSR format. L1-regularizing estimators should inherit this. 
""" def densify(self): """Convert coefficient matrix to dense array format. Converts the ``coef_`` member (back) to a numpy.ndarray. This is the default format of ``coef_`` and is required for fitting, so calling this method is only required on models that have previously been sparsified; otherwise, it is a no-op. Returns ------- self: estimator """ msg = "Estimator, %(name)s, must be fitted before densifying." check_is_fitted(self, "coef_", msg=msg) if sp.issparse(self.coef_): self.coef_ = self.coef_.toarray() return self def sparsify(self): """Convert coefficient matrix to sparse format. Converts the ``coef_`` member to a scipy.sparse matrix, which for L1-regularized models can be much more memory- and storage-efficient than the usual numpy.ndarray representation. The ``intercept_`` member is not converted. Notes ----- For non-sparse models, i.e. when there are not many zeros in ``coef_``, this may actually *increase* memory usage, so use this method with care. A rule of thumb is that the number of zero elements, which can be computed with ``(coef_ == 0).sum()``, must be more than 50% for this to provide significant benefits. After calling this method, further fitting with the partial_fit method (if any) will not work until you call densify. Returns ------- self: estimator """ msg = "Estimator, %(name)s, must be fitted before sparsifying." check_is_fitted(self, "coef_", msg=msg) self.coef_ = sp.csr_matrix(self.coef_) return self class LinearRegression(LinearModel, RegressorMixin): """ Ordinary least squares Linear Regression. Parameters ---------- fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. 
n_jobs : int, optional, default 1 The number of jobs to use for the computation. If -1 all CPUs are used. This will only provide speedup for n_targets > 1 and sufficient large problems. Attributes ---------- coef_ : array, shape (n_features, ) or (n_targets, n_features) Estimated coefficients for the linear regression problem. If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features), while if only one target is passed, this is a 1D array of length n_features. intercept_ : array Independent term in the linear model. Notes ----- From the implementation point of view, this is just plain Ordinary Least Squares (scipy.linalg.lstsq) wrapped as a predictor object. """ def __init__(self, fit_intercept=True, normalize=False, copy_X=True, n_jobs=1): self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.n_jobs = n_jobs def fit(self, X, y, sample_weight=None): """ Fit linear model. Parameters ---------- X : numpy array or sparse matrix of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples, n_targets] Target values sample_weight : numpy array of shape [n_samples] Individual weights for each sample Returns ------- self : returns an instance of self. """ n_jobs_ = self.n_jobs X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], y_numeric=True, multi_output=True) if ((sample_weight is not None) and np.atleast_1d(sample_weight).ndim > 1): sample_weight = column_or_1d(sample_weight, warn=True) X, y, X_mean, y_mean, X_std = self._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) if sample_weight is not None: # Sample weight can be implemented via a simple rescaling. 
X, y = _rescale_data(X, y, sample_weight) if sp.issparse(X): if y.ndim < 2: out = sparse_lsqr(X, y) self.coef_ = out[0] self.residues_ = out[3] else: # sparse_lstsq cannot handle y with shape (M, K) outs = Parallel(n_jobs=n_jobs_)( delayed(sparse_lsqr)(X, y[:, j].ravel()) for j in range(y.shape[1])) self.coef_ = np.vstack(out[0] for out in outs) self.residues_ = np.vstack(out[3] for out in outs) else: self.coef_, self.residues_, self.rank_, self.singular_ = \ linalg.lstsq(X, y) self.coef_ = self.coef_.T if y.ndim == 1: self.coef_ = np.ravel(self.coef_) self._set_intercept(X_mean, y_mean, X_std) return self def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy, Xy_precompute_order=None): """Aux function used at beginning of fit in linear models""" n_samples, n_features = X.shape if sparse.isspmatrix(X): precompute = False X, y, X_mean, y_mean, X_std = sparse_center_data( X, y, fit_intercept, normalize) else: # copy was done in fit if necessary X, y, X_mean, y_mean, X_std = center_data( X, y, fit_intercept, normalize, copy=copy) if hasattr(precompute, '__array__') and ( fit_intercept and not np.allclose(X_mean, np.zeros(n_features)) or normalize and not np.allclose(X_std, np.ones(n_features))): warnings.warn("Gram matrix was provided but X was centered" " to fit intercept, " "or X was normalized : recomputing Gram matrix.", UserWarning) # recompute Gram precompute = 'auto' Xy = None # precompute if n_samples > n_features if precompute == 'auto': precompute = (n_samples > n_features) if precompute is True: precompute = np.dot(X.T, X) if Xy_precompute_order == 'F': precompute = np.dot(X.T, X).T if not hasattr(precompute, '__array__'): Xy = None # cannot use Xy if precompute is not Gram if hasattr(precompute, '__array__') and Xy is None: if Xy_precompute_order == 'F': Xy = np.dot(y.T, X).T else: Xy = np.dot(X.T, y) return X, y, X_mean, y_mean, X_std, precompute, Xy
bsd-3-clause
pigshell/nhnick
src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mock.py
148
9681
# mock.py # Test tools for mocking and patching. # Copyright (C) 2007-2009 Michael Foord # E-mail: fuzzyman AT voidspace DOT org DOT uk # mock 0.6.0 # http://www.voidspace.org.uk/python/mock/ # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # 2009-11-25: Licence downloaded from above URL. # BEGIN DOWNLOADED LICENSE # # Copyright (c) 2003-2009, Michael Foord # All rights reserved. # E-mail : fuzzyman AT voidspace DOT org DOT uk # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of Michael Foord nor the name of Voidspace # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # END DOWNLOADED LICENSE # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # Comments, suggestions and bug reports welcome. __all__ = ( 'Mock', 'patch', 'patch_object', 'sentinel', 'DEFAULT' ) __version__ = '0.6.0' class SentinelObject(object): def __init__(self, name): self.name = name def __repr__(self): return '<SentinelObject "%s">' % self.name class Sentinel(object): def __init__(self): self._sentinels = {} def __getattr__(self, name): return self._sentinels.setdefault(name, SentinelObject(name)) sentinel = Sentinel() DEFAULT = sentinel.DEFAULT class OldStyleClass: pass ClassType = type(OldStyleClass) def _is_magic(name): return '__%s__' % name[2:-2] == name def _copy(value): if type(value) in (dict, list, tuple, set): return type(value)(value) return value class Mock(object): def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, name=None, parent=None, wraps=None): self._parent = parent self._name = name if spec is not None and not isinstance(spec, list): spec = [member for member in dir(spec) if not _is_magic(member)] self._methods = spec self._children = {} self._return_value = return_value self.side_effect = side_effect self._wraps = wraps self.reset_mock() def reset_mock(self): self.called = False self.call_args = None self.call_count = 0 self.call_args_list = [] self.method_calls = [] for child in self._children.itervalues(): child.reset_mock() if isinstance(self._return_value, Mock): self._return_value.reset_mock() def __get_return_value(self): if self._return_value is DEFAULT: self._return_value = Mock() return self._return_value def __set_return_value(self, value): self._return_value = value return_value = property(__get_return_value, __set_return_value) def __call__(self, *args, **kwargs): self.called = True self.call_count += 1 self.call_args = (args, kwargs) self.call_args_list.append((args, kwargs)) parent = self._parent name = self._name while parent is not None: parent.method_calls.append((name, args, 
kwargs)) if parent._parent is None: break name = parent._name + '.' + name parent = parent._parent ret_val = DEFAULT if self.side_effect is not None: if (isinstance(self.side_effect, Exception) or isinstance(self.side_effect, (type, ClassType)) and issubclass(self.side_effect, Exception)): raise self.side_effect ret_val = self.side_effect(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value if self._wraps is not None and self._return_value is DEFAULT: return self._wraps(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value return ret_val def __getattr__(self, name): if self._methods is not None: if name not in self._methods: raise AttributeError("Mock object has no attribute '%s'" % name) elif _is_magic(name): raise AttributeError(name) if name not in self._children: wraps = None if self._wraps is not None: wraps = getattr(self._wraps, name) self._children[name] = Mock(parent=self, name=name, wraps=wraps) return self._children[name] def assert_called_with(self, *args, **kwargs): assert self.call_args == (args, kwargs), 'Expected: %s\nCalled with: %s' % ((args, kwargs), self.call_args) def _dot_lookup(thing, comp, import_path): try: return getattr(thing, comp) except AttributeError: __import__(import_path) return getattr(thing, comp) def _importer(target): components = target.split('.') import_path = components.pop(0) thing = __import__(import_path) for comp in components: import_path += ".%s" % comp thing = _dot_lookup(thing, comp, import_path) return thing class _patch(object): def __init__(self, target, attribute, new, spec, create): self.target = target self.attribute = attribute self.new = new self.spec = spec self.create = create self.has_local = False def __call__(self, func): if hasattr(func, 'patchings'): func.patchings.append(self) return func def patched(*args, **keywargs): # don't use a with here (backwards compatability with 2.5) extra_args = [] for patching in patched.patchings: arg = patching.__enter__() if patching.new is 
DEFAULT: extra_args.append(arg) args += tuple(extra_args) try: return func(*args, **keywargs) finally: for patching in getattr(patched, 'patchings', []): patching.__exit__() patched.patchings = [self] patched.__name__ = func.__name__ patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno", func.func_code.co_firstlineno) return patched def get_original(self): target = self.target name = self.attribute create = self.create original = DEFAULT if _has_local_attr(target, name): try: original = target.__dict__[name] except AttributeError: # for instances of classes with slots, they have no __dict__ original = getattr(target, name) elif not create and not hasattr(target, name): raise AttributeError("%s does not have the attribute %r" % (target, name)) return original def __enter__(self): new, spec, = self.new, self.spec original = self.get_original() if new is DEFAULT: # XXXX what if original is DEFAULT - shouldn't use it as a spec inherit = False if spec == True: # set spec to the object we are replacing spec = original if isinstance(spec, (type, ClassType)): inherit = True new = Mock(spec=spec) if inherit: new.return_value = Mock(spec=spec) self.temp_original = original setattr(self.target, self.attribute, new) return new def __exit__(self, *_): if self.temp_original is not DEFAULT: setattr(self.target, self.attribute, self.temp_original) else: delattr(self.target, self.attribute) del self.temp_original def patch_object(target, attribute, new=DEFAULT, spec=None, create=False): return _patch(target, attribute, new, spec, create) def patch(target, new=DEFAULT, spec=None, create=False): try: target, attribute = target.rsplit('.', 1) except (TypeError, ValueError): raise TypeError("Need a valid target to patch. You supplied: %r" % (target,)) target = _importer(target) return _patch(target, attribute, new, spec, create) def _has_local_attr(obj, name): try: return name in vars(obj) except TypeError: # objects without a __dict__ return hasattr(obj, name)
bsd-3-clause
yuanagain/seniorthesis
venv/lib/python3.5/site-packages/pip/utils/deprecation.py
148
2239
""" A module that implments tooling to enable easy warnings about deprecations. """ from __future__ import absolute_import import logging import warnings class PipDeprecationWarning(Warning): pass class Pending(object): pass class RemovedInPip9Warning(PipDeprecationWarning): pass class RemovedInPip10Warning(PipDeprecationWarning, Pending): pass class Python26DeprecationWarning(PipDeprecationWarning, Pending): pass # Warnings <-> Logging Integration _warnings_showwarning = None def _showwarning(message, category, filename, lineno, file=None, line=None): if file is not None: if _warnings_showwarning is not None: _warnings_showwarning( message, category, filename, lineno, file, line, ) else: if issubclass(category, PipDeprecationWarning): # We use a specially named logger which will handle all of the # deprecation messages for pip. logger = logging.getLogger("pip.deprecations") # This is purposely using the % formatter here instead of letting # the logging module handle the interpolation. This is because we # want it to appear as if someone typed this entire message out. log_message = "DEPRECATION: %s" % message # PipDeprecationWarnings that are Pending still have at least 2 # versions to go until they are removed so they can just be # warnings. Otherwise, they will be removed in the very next # version of pip. We want these to be more obvious so we use the # ERROR logging level. if issubclass(category, Pending): logger.warning(log_message) else: logger.error(log_message) else: _warnings_showwarning( message, category, filename, lineno, file, line, ) def install_warning_logger(): # Enable our Deprecation Warnings warnings.simplefilter("default", PipDeprecationWarning, append=True) global _warnings_showwarning if _warnings_showwarning is None: _warnings_showwarning = warnings.showwarning warnings.showwarning = _showwarning
mit
languitar/android_kernel_lge_hammerhead
tools/perf/scripts/python/sctop.py
11180
1924
# system call top # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Periodically displays system-wide system call totals, broken down by # syscall. If a [comm] arg is specified, only syscalls called by # [comm] are displayed. If an [interval] arg is specified, the display # will be refreshed every [interval] seconds. The default interval is # 3 seconds. import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 interval = default_interval if len(sys.argv) > 3: sys.exit(usage) if len(sys.argv) > 2: for_comm = sys.argv[1] interval = int(sys.argv[2]) elif len(sys.argv) > 1: try: interval = int(sys.argv[1]) except ValueError: for_comm = sys.argv[1] interval = default_interval syscalls = autodict() def trace_begin(): thread.start_new_thread(print_syscall_totals, (interval,)) pass def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(interval): while 1: clear_term() if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() time.sleep(interval)
gpl-2.0
wunderlist/night-shift
tests/attempt_log_sizes.py
1
1969
from __future__ import print_function import os import sys import glob import argparse import datetime # datetime.date def valid_date(s): try: return datetime.datetime.strptime(s, "%Y-%m-%d").date() except ValueError: msg = "Not a valid date: `{}`.".format(s) raise argparse.ArgumentTypeError(msg) # float def median(lst): if not lst: return None elif len(lst) % 2 == 1: return sorted(lst)[((len(lst)+1)/2)-1] else: return float(sum(sorted(lst)[(len(lst)/2)-1:(len(lst)/2)+1]))/2.0 # list<int> def get_log_size_for_date(date): return sum(map(os.path.getsize, map(os.path.abspath, glob.glob("logs/{}/attempt-*.log".format(str(date)))))) # list<int> def get_log_size_for_last_week(date): return filter(lambda v: v, [ get_log_size_for_date(date - datetime.timedelta(days=i)) for i in range(7) ]) # tuple<int,int,int> def get_median_thresholds(lst): median_value = median(lst) return int(median_value * 0.9), int(median_value), int(median_value * 1.15) if __name__ == '__main__': parser = argparse.ArgumentParser(prog='test_log_sizes') parser.add_argument('-d', '--date', help="current date", type=valid_date, default=str(datetime.date.today())) args = parser.parse_args() log_sizes_lst = list(get_log_size_for_last_week(args.date)) if not log_sizes_lst: print('[!] No log files were found!') sys.exit(0) if len(log_sizes_lst) < 5: print('[!] Not enough log files are available!') sys.exit(0) min_thr, _, max_thr = get_median_thresholds(log_sizes_lst) today_log_size = get_log_size_for_date(args.date) if today_log_size > max_thr or today_log_size < min_thr: print('[!] Log size is below or above threshold:') print('Expected min: {min_thr} < actual: {actual} < max: {max_thr}' \ .format(min_thr=min_thr, max_thr=max_thr, actual=today_log_size)) sys.exit(1) sys.exit(0)
mit
portfoliome/foil
tests/test_order.py
1
1396
import unittest from collections import namedtuple from operator import attrgetter, itemgetter from foil.order import partition_ordered, partition MockTuple = namedtuple('MockTuple', ('a', 'b')) def is_even(x): return True if x % 2 == 0 else False class TestPartitionOrdered(unittest.TestCase): def test_partition_by_attribute(self): data = [{'a': 5, 'b': 8}, {'a': 5, 'b': 7}, {'a': 4, 'b': 4}] tups = [MockTuple(**d) for d in data] expected = [(5, [MockTuple(a=5, b=8), MockTuple(a=5, b=7)]), (4, [MockTuple(a=4, b=4)])] result = list(partition_ordered(tups, key=attrgetter('a'))) self.assertSequenceEqual(expected, result) def test_partition_by_item(self): data = ['123', '234', '221', '210', '780', '822'] expected = [('1', ['123']), ('2', ['234', '221', '210']), ('7', ['780']), ('8', ['822'])] result = list(partition_ordered(data, key=itemgetter(0))) self.assertEqual(expected, result) class TestPartition(unittest.TestCase): def test_partition(self): expected_true = [0, 2] expected_false = [1, 3] result_false, result_true = partition(is_even, range(0, 4)) self.assertEqual(expected_true, list(result_true)) self.assertEqual(expected_false, list(result_false))
mit
dmigo/incubator-superset
superset/migrations/versions/80a67c5192fa_single_pie_chart_metric.py
2
1641
"""single pie chart metric Revision ID: 80a67c5192fa Revises: afb7730f6a9c Create Date: 2018-06-14 14:31:06.624370 """ # revision identifiers, used by Alembic. revision = '80a67c5192fa' down_revision = 'afb7730f6a9c' import json from alembic import op from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, Text from superset import db Base = declarative_base() class Slice(Base): __tablename__ = 'slices' id = Column(Integer, primary_key=True) params = Column(Text) viz_type = Column(String(250)) def upgrade(): bind = op.get_bind() session = db.Session(bind=bind) for slc in session.query(Slice).filter(Slice.viz_type == 'pie').all(): try: params = json.loads(slc.params) if 'metrics' in params: if params['metrics']: params['metric'] = params['metrics'][0] del params['metrics'] slc.params = json.dumps(params, sort_keys=True) except Exception: pass session.commit() session.close() def downgrade(): bind = op.get_bind() session = db.Session(bind=bind) for slc in session.query(Slice).filter(Slice.viz_type == 'pie').all(): try: params = json.loads(slc.params) if 'metric' in params: if params['metric']: params['metrics'] = [params['metric']] del params['metric'] slc.params = json.dumps(params, sort_keys=True) except Exception: pass session.commit() session.close()
apache-2.0
goldeneye-source/ges-python
lib/xml/dom/minidom.py
13
66803
"""Simple implementation of the Level 1 DOM. Namespaces and other minor Level 2 features are also supported. parse("foo.xml") parseString("<foo><bar/></foo>") Todo: ===== * convenience methods for getting elements and text. * more testing * bring some of the writer and linearizer code into conformance with this interface * SAX 2 namespaces """ import io import xml.dom from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg from xml.dom.minicompat import * from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS # This is used by the ID-cache invalidation checks; the list isn't # actually complete, since the nodes being checked will never be the # DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is # the node being added or removed, not the node being modified.) # _nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE, xml.dom.Node.ENTITY_REFERENCE_NODE) class Node(xml.dom.Node): namespaceURI = None # this is non-null only for elements and attributes parentNode = None ownerDocument = None nextSibling = None previousSibling = None prefix = EMPTY_PREFIX # non-null only for NS elements and attributes def __bool__(self): return True def toxml(self, encoding=None): return self.toprettyxml("", "", encoding) def toprettyxml(self, indent="\t", newl="\n", encoding=None): if encoding is None: writer = io.StringIO() else: writer = io.TextIOWrapper(io.BytesIO(), encoding=encoding, errors="xmlcharrefreplace", newline='\n') if self.nodeType == Node.DOCUMENT_NODE: # Can pass encoding only to document, to put it into XML header self.writexml(writer, "", indent, newl, encoding) else: self.writexml(writer, "", indent, newl) if encoding is None: return writer.getvalue() else: return writer.detach().getvalue() def hasChildNodes(self): return bool(self.childNodes) def _get_childNodes(self): return self.childNodes def _get_firstChild(self): if self.childNodes: return self.childNodes[0] def _get_lastChild(self): if self.childNodes: return 
self.childNodes[-1] def insertBefore(self, newChild, refChild): if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE: for c in tuple(newChild.childNodes): self.insertBefore(c, refChild) ### The DOM does not clearly specify what to return in this case return newChild if newChild.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(newChild), repr(self))) if newChild.parentNode is not None: newChild.parentNode.removeChild(newChild) if refChild is None: self.appendChild(newChild) else: try: index = self.childNodes.index(refChild) except ValueError: raise xml.dom.NotFoundErr() if newChild.nodeType in _nodeTypes_with_children: _clear_id_cache(self) self.childNodes.insert(index, newChild) newChild.nextSibling = refChild refChild.previousSibling = newChild if index: node = self.childNodes[index-1] node.nextSibling = newChild newChild.previousSibling = node else: newChild.previousSibling = None newChild.parentNode = self return newChild def appendChild(self, node): if node.nodeType == self.DOCUMENT_FRAGMENT_NODE: for c in tuple(node.childNodes): self.appendChild(c) ### The DOM does not clearly specify what to return in this case return node if node.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(node), repr(self))) elif node.nodeType in _nodeTypes_with_children: _clear_id_cache(self) if node.parentNode is not None: node.parentNode.removeChild(node) _append_child(self, node) node.nextSibling = None return node def replaceChild(self, newChild, oldChild): if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE: refChild = oldChild.nextSibling self.removeChild(oldChild) return self.insertBefore(newChild, refChild) if newChild.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(newChild), repr(self))) if newChild is oldChild: return if newChild.parentNode is not None: newChild.parentNode.removeChild(newChild) 
try: index = self.childNodes.index(oldChild) except ValueError: raise xml.dom.NotFoundErr() self.childNodes[index] = newChild newChild.parentNode = self oldChild.parentNode = None if (newChild.nodeType in _nodeTypes_with_children or oldChild.nodeType in _nodeTypes_with_children): _clear_id_cache(self) newChild.nextSibling = oldChild.nextSibling newChild.previousSibling = oldChild.previousSibling oldChild.nextSibling = None oldChild.previousSibling = None if newChild.previousSibling: newChild.previousSibling.nextSibling = newChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild return oldChild def removeChild(self, oldChild): try: self.childNodes.remove(oldChild) except ValueError: raise xml.dom.NotFoundErr() if oldChild.nextSibling is not None: oldChild.nextSibling.previousSibling = oldChild.previousSibling if oldChild.previousSibling is not None: oldChild.previousSibling.nextSibling = oldChild.nextSibling oldChild.nextSibling = oldChild.previousSibling = None if oldChild.nodeType in _nodeTypes_with_children: _clear_id_cache(self) oldChild.parentNode = None return oldChild def normalize(self): L = [] for child in self.childNodes: if child.nodeType == Node.TEXT_NODE: if not child.data: # empty text node; discard if L: L[-1].nextSibling = child.nextSibling if child.nextSibling: child.nextSibling.previousSibling = child.previousSibling child.unlink() elif L and L[-1].nodeType == child.nodeType: # collapse text node node = L[-1] node.data = node.data + child.data node.nextSibling = child.nextSibling if child.nextSibling: child.nextSibling.previousSibling = node child.unlink() else: L.append(child) else: L.append(child) if child.nodeType == Node.ELEMENT_NODE: child.normalize() self.childNodes[:] = L def cloneNode(self, deep): return _clone_node(self, deep, self.ownerDocument or self) def isSupported(self, feature, version): return self.ownerDocument.implementation.hasFeature(feature, version) def _get_localName(self): # Overridden in Element and 
Attr where localName can be Non-Null return None # Node interfaces from Level 3 (WD 9 April 2002) def isSameNode(self, other): return self is other def getInterface(self, feature): if self.isSupported(feature, None): return self else: return None # The "user data" functions use a dictionary that is only present # if some user data has been set, so be careful not to assume it # exists. def getUserData(self, key): try: return self._user_data[key][0] except (AttributeError, KeyError): return None def setUserData(self, key, data, handler): old = None try: d = self._user_data except AttributeError: d = {} self._user_data = d if key in d: old = d[key][0] if data is None: # ignore handlers passed for None handler = None if old is not None: del d[key] else: d[key] = (data, handler) return old def _call_user_data_handler(self, operation, src, dst): if hasattr(self, "_user_data"): for key, (data, handler) in list(self._user_data.items()): if handler is not None: handler.handle(operation, key, data, src, dst) # minidom-specific API: def unlink(self): self.parentNode = self.ownerDocument = None if self.childNodes: for child in self.childNodes: child.unlink() self.childNodes = NodeList() self.previousSibling = None self.nextSibling = None # A Node is its own context manager, to ensure that an unlink() call occurs. # This is similar to how a file object works. 
def __enter__(self): return self def __exit__(self, et, ev, tb): self.unlink() defproperty(Node, "firstChild", doc="First child node, or None.") defproperty(Node, "lastChild", doc="Last child node, or None.") defproperty(Node, "localName", doc="Namespace-local name of this node.") def _append_child(self, node): # fast path with less checks; usable by DOM builders if careful childNodes = self.childNodes if childNodes: last = childNodes[-1] node.previousSibling = last last.nextSibling = node childNodes.append(node) node.parentNode = self def _in_document(node): # return True iff node is part of a document tree while node is not None: if node.nodeType == Node.DOCUMENT_NODE: return True node = node.parentNode return False def _write_data(writer, data): "Writes datachars to writer." if data: data = data.replace("&", "&amp;").replace("<", "&lt;"). \ replace("\"", "&quot;").replace(">", "&gt;") writer.write(data) def _get_elements_by_tagName_helper(parent, name, rc): for node in parent.childNodes: if node.nodeType == Node.ELEMENT_NODE and \ (name == "*" or node.tagName == name): rc.append(node) _get_elements_by_tagName_helper(node, name, rc) return rc def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc): for node in parent.childNodes: if node.nodeType == Node.ELEMENT_NODE: if ((localName == "*" or node.localName == localName) and (nsURI == "*" or node.namespaceURI == nsURI)): rc.append(node) _get_elements_by_tagName_ns_helper(node, nsURI, localName, rc) return rc class DocumentFragment(Node): nodeType = Node.DOCUMENT_FRAGMENT_NODE nodeName = "#document-fragment" nodeValue = None attributes = None parentNode = None _child_node_types = (Node.ELEMENT_NODE, Node.TEXT_NODE, Node.CDATA_SECTION_NODE, Node.ENTITY_REFERENCE_NODE, Node.PROCESSING_INSTRUCTION_NODE, Node.COMMENT_NODE, Node.NOTATION_NODE) def __init__(self): self.childNodes = NodeList() class Attr(Node): __slots__=('_name', '_value', 'namespaceURI', '_prefix', 'childNodes', '_localName', 
'ownerDocument', 'ownerElement') nodeType = Node.ATTRIBUTE_NODE attributes = None specified = False _is_id = False _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE) def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None, prefix=None): self.ownerElement = None self._name = qName self.namespaceURI = namespaceURI self._prefix = prefix self.childNodes = NodeList() # Add the single child node that represents the value of the attr self.childNodes.append(Text()) # nodeValue and value are set elsewhere def _get_localName(self): try: return self._localName except AttributeError: return self.nodeName.split(":", 1)[-1] def _get_specified(self): return self.specified def _get_name(self): return self._name def _set_name(self, value): self._name = value if self.ownerElement is not None: _clear_id_cache(self.ownerElement) nodeName = name = property(_get_name, _set_name) def _get_value(self): return self._value def _set_value(self, value): self._value = value self.childNodes[0].data = value if self.ownerElement is not None: _clear_id_cache(self.ownerElement) self.childNodes[0].data = value nodeValue = value = property(_get_value, _set_value) def _get_prefix(self): return self._prefix def _set_prefix(self, prefix): nsuri = self.namespaceURI if prefix == "xmlns": if nsuri and nsuri != XMLNS_NAMESPACE: raise xml.dom.NamespaceErr( "illegal use of 'xmlns' prefix for the wrong namespace") self._prefix = prefix if prefix is None: newName = self.localName else: newName = "%s:%s" % (prefix, self.localName) if self.ownerElement: _clear_id_cache(self.ownerElement) self.name = newName prefix = property(_get_prefix, _set_prefix) def unlink(self): # This implementation does not call the base implementation # since most of that is not needed, and the expense of the # method call is not warranted. We duplicate the removal of # children, but that's all we needed from the base class. 
elem = self.ownerElement if elem is not None: del elem._attrs[self.nodeName] del elem._attrsNS[(self.namespaceURI, self.localName)] if self._is_id: self._is_id = False elem._magic_id_nodes -= 1 self.ownerDocument._magic_id_count -= 1 for child in self.childNodes: child.unlink() del self.childNodes[:] def _get_isId(self): if self._is_id: return True doc = self.ownerDocument elem = self.ownerElement if doc is None or elem is None: return False info = doc._get_elem_info(elem) if info is None: return False if self.namespaceURI: return info.isIdNS(self.namespaceURI, self.localName) else: return info.isId(self.nodeName) def _get_schemaType(self): doc = self.ownerDocument elem = self.ownerElement if doc is None or elem is None: return _no_type info = doc._get_elem_info(elem) if info is None: return _no_type if self.namespaceURI: return info.getAttributeTypeNS(self.namespaceURI, self.localName) else: return info.getAttributeType(self.nodeName) defproperty(Attr, "isId", doc="True if this attribute is an ID.") defproperty(Attr, "localName", doc="Namespace-local name of this attribute.") defproperty(Attr, "schemaType", doc="Schema type for this attribute.") class NamedNodeMap(object): """The attribute list is a transient interface to the underlying dictionaries. Mutations here will change the underlying element's dictionary. Ordering is imposed artificially and does not reflect the order of attributes as found in an input document. 
""" __slots__ = ('_attrs', '_attrsNS', '_ownerElement') def __init__(self, attrs, attrsNS, ownerElement): self._attrs = attrs self._attrsNS = attrsNS self._ownerElement = ownerElement def _get_length(self): return len(self._attrs) def item(self, index): try: return self[list(self._attrs.keys())[index]] except IndexError: return None def items(self): L = [] for node in self._attrs.values(): L.append((node.nodeName, node.value)) return L def itemsNS(self): L = [] for node in self._attrs.values(): L.append(((node.namespaceURI, node.localName), node.value)) return L def __contains__(self, key): if isinstance(key, str): return key in self._attrs else: return key in self._attrsNS def keys(self): return self._attrs.keys() def keysNS(self): return self._attrsNS.keys() def values(self): return self._attrs.values() def get(self, name, value=None): return self._attrs.get(name, value) __len__ = _get_length def _cmp(self, other): if self._attrs is getattr(other, "_attrs", None): return 0 else: return (id(self) > id(other)) - (id(self) < id(other)) def __eq__(self, other): return self._cmp(other) == 0 def __ge__(self, other): return self._cmp(other) >= 0 def __gt__(self, other): return self._cmp(other) > 0 def __le__(self, other): return self._cmp(other) <= 0 def __lt__(self, other): return self._cmp(other) < 0 def __ne__(self, other): return self._cmp(other) != 0 def __getitem__(self, attname_or_tuple): if isinstance(attname_or_tuple, tuple): return self._attrsNS[attname_or_tuple] else: return self._attrs[attname_or_tuple] # same as set def __setitem__(self, attname, value): if isinstance(value, str): try: node = self._attrs[attname] except KeyError: node = Attr(attname) node.ownerDocument = self._ownerElement.ownerDocument self.setNamedItem(node) node.value = value else: if not isinstance(value, Attr): raise TypeError("value must be a string or Attr object") node = value self.setNamedItem(node) def getNamedItem(self, name): try: return self._attrs[name] except KeyError: return 
None def getNamedItemNS(self, namespaceURI, localName): try: return self._attrsNS[(namespaceURI, localName)] except KeyError: return None def removeNamedItem(self, name): n = self.getNamedItem(name) if n is not None: _clear_id_cache(self._ownerElement) del self._attrs[n.nodeName] del self._attrsNS[(n.namespaceURI, n.localName)] if hasattr(n, 'ownerElement'): n.ownerElement = None return n else: raise xml.dom.NotFoundErr() def removeNamedItemNS(self, namespaceURI, localName): n = self.getNamedItemNS(namespaceURI, localName) if n is not None: _clear_id_cache(self._ownerElement) del self._attrsNS[(n.namespaceURI, n.localName)] del self._attrs[n.nodeName] if hasattr(n, 'ownerElement'): n.ownerElement = None return n else: raise xml.dom.NotFoundErr() def setNamedItem(self, node): if not isinstance(node, Attr): raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(node), repr(self))) old = self._attrs.get(node.name) if old: old.unlink() self._attrs[node.name] = node self._attrsNS[(node.namespaceURI, node.localName)] = node node.ownerElement = self._ownerElement _clear_id_cache(node.ownerElement) return old def setNamedItemNS(self, node): return self.setNamedItem(node) def __delitem__(self, attname_or_tuple): node = self[attname_or_tuple] _clear_id_cache(node.ownerElement) node.unlink() def __getstate__(self): return self._attrs, self._attrsNS, self._ownerElement def __setstate__(self, state): self._attrs, self._attrsNS, self._ownerElement = state defproperty(NamedNodeMap, "length", doc="Number of nodes in the NamedNodeMap.") AttributeList = NamedNodeMap class TypeInfo(object): __slots__ = 'namespace', 'name' def __init__(self, namespace, name): self.namespace = namespace self.name = name def __repr__(self): if self.namespace: return "<TypeInfo %r (from %r)>" % (self.name, self.namespace) else: return "<TypeInfo %r>" % self.name def _get_name(self): return self.name def _get_namespace(self): return self.namespace _no_type = TypeInfo(None, None) class 
Element(Node): __slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix', 'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS', 'nextSibling', 'previousSibling') nodeType = Node.ELEMENT_NODE nodeValue = None schemaType = _no_type _magic_id_nodes = 0 _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE, Node.COMMENT_NODE, Node.TEXT_NODE, Node.CDATA_SECTION_NODE, Node.ENTITY_REFERENCE_NODE) def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None, localName=None): self.parentNode = None self.tagName = self.nodeName = tagName self.prefix = prefix self.namespaceURI = namespaceURI self.childNodes = NodeList() self.nextSibling = self.previousSibling = None # Attribute dictionaries are lazily created # attributes are double-indexed: # tagName -> Attribute # URI,localName -> Attribute # in the future: consider lazy generation # of attribute objects this is too tricky # for now because of headaches with # namespaces. self._attrs = None self._attrsNS = None def _ensure_attributes(self): if self._attrs is None: self._attrs = {} self._attrsNS = {} def _get_localName(self): try: return self._localName except AttributeError: return self.tagName.split(":", 1)[-1] def _get_tagName(self): return self.tagName def unlink(self): if self._attrs is not None: for attr in list(self._attrs.values()): attr.unlink() self._attrs = None self._attrsNS = None Node.unlink(self) def getAttribute(self, attname): if self._attrs is None: return "" try: return self._attrs[attname].value except KeyError: return "" def getAttributeNS(self, namespaceURI, localName): if self._attrsNS is None: return "" try: return self._attrsNS[(namespaceURI, localName)].value except KeyError: return "" def setAttribute(self, attname, value): attr = self.getAttributeNode(attname) if attr is None: attr = Attr(attname) attr.value = value # also sets nodeValue attr.ownerDocument = self.ownerDocument self.setAttributeNode(attr) elif value != attr.value: attr.value = 
value if attr.isId: _clear_id_cache(self) def setAttributeNS(self, namespaceURI, qualifiedName, value): prefix, localname = _nssplit(qualifiedName) attr = self.getAttributeNodeNS(namespaceURI, localname) if attr is None: attr = Attr(qualifiedName, namespaceURI, localname, prefix) attr.value = value attr.ownerDocument = self.ownerDocument self.setAttributeNode(attr) else: if value != attr.value: attr.value = value if attr.isId: _clear_id_cache(self) if attr.prefix != prefix: attr.prefix = prefix attr.nodeName = qualifiedName def getAttributeNode(self, attrname): if self._attrs is None: return None return self._attrs.get(attrname) def getAttributeNodeNS(self, namespaceURI, localName): if self._attrsNS is None: return None return self._attrsNS.get((namespaceURI, localName)) def setAttributeNode(self, attr): if attr.ownerElement not in (None, self): raise xml.dom.InuseAttributeErr("attribute node already owned") self._ensure_attributes() old1 = self._attrs.get(attr.name, None) if old1 is not None: self.removeAttributeNode(old1) old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None) if old2 is not None and old2 is not old1: self.removeAttributeNode(old2) _set_attribute_node(self, attr) if old1 is not attr: # It might have already been part of this node, in which case # it doesn't represent a change, and should not be returned. 
return old1 if old2 is not attr: return old2 setAttributeNodeNS = setAttributeNode def removeAttribute(self, name): if self._attrsNS is None: raise xml.dom.NotFoundErr() try: attr = self._attrs[name] except KeyError: raise xml.dom.NotFoundErr() self.removeAttributeNode(attr) def removeAttributeNS(self, namespaceURI, localName): if self._attrsNS is None: raise xml.dom.NotFoundErr() try: attr = self._attrsNS[(namespaceURI, localName)] except KeyError: raise xml.dom.NotFoundErr() self.removeAttributeNode(attr) def removeAttributeNode(self, node): if node is None: raise xml.dom.NotFoundErr() try: self._attrs[node.name] except KeyError: raise xml.dom.NotFoundErr() _clear_id_cache(self) node.unlink() # Restore this since the node is still useful and otherwise # unlinked node.ownerDocument = self.ownerDocument removeAttributeNodeNS = removeAttributeNode def hasAttribute(self, name): if self._attrs is None: return False return name in self._attrs def hasAttributeNS(self, namespaceURI, localName): if self._attrsNS is None: return False return (namespaceURI, localName) in self._attrsNS def getElementsByTagName(self, name): return _get_elements_by_tagName_helper(self, name, NodeList()) def getElementsByTagNameNS(self, namespaceURI, localName): return _get_elements_by_tagName_ns_helper( self, namespaceURI, localName, NodeList()) def __repr__(self): return "<DOM Element: %s at %#x>" % (self.tagName, id(self)) def writexml(self, writer, indent="", addindent="", newl=""): # indent = current indentation # addindent = indentation to add to higher levels # newl = newline string writer.write(indent+"<" + self.tagName) attrs = self._get_attributes() a_names = sorted(attrs.keys()) for a_name in a_names: writer.write(" %s=\"" % a_name) _write_data(writer, attrs[a_name].value) writer.write("\"") if self.childNodes: writer.write(">") if (len(self.childNodes) == 1 and self.childNodes[0].nodeType == Node.TEXT_NODE): self.childNodes[0].writexml(writer, '', '', '') else: writer.write(newl) 
for node in self.childNodes: node.writexml(writer, indent+addindent, addindent, newl) writer.write(indent) writer.write("</%s>%s" % (self.tagName, newl)) else: writer.write("/>%s"%(newl)) def _get_attributes(self): self._ensure_attributes() return NamedNodeMap(self._attrs, self._attrsNS, self) def hasAttributes(self): if self._attrs: return True else: return False # DOM Level 3 attributes, based on the 22 Oct 2002 draft def setIdAttribute(self, name): idAttr = self.getAttributeNode(name) self.setIdAttributeNode(idAttr) def setIdAttributeNS(self, namespaceURI, localName): idAttr = self.getAttributeNodeNS(namespaceURI, localName) self.setIdAttributeNode(idAttr) def setIdAttributeNode(self, idAttr): if idAttr is None or not self.isSameNode(idAttr.ownerElement): raise xml.dom.NotFoundErr() if _get_containing_entref(self) is not None: raise xml.dom.NoModificationAllowedErr() if not idAttr._is_id: idAttr._is_id = True self._magic_id_nodes += 1 self.ownerDocument._magic_id_count += 1 _clear_id_cache(self) defproperty(Element, "attributes", doc="NamedNodeMap of attributes on the element.") defproperty(Element, "localName", doc="Namespace-local name of this element.") def _set_attribute_node(element, attr): _clear_id_cache(element) element._ensure_attributes() element._attrs[attr.name] = attr element._attrsNS[(attr.namespaceURI, attr.localName)] = attr # This creates a circular reference, but Element.unlink() # breaks the cycle since the references to the attribute # dictionaries are tossed. attr.ownerElement = element class Childless: """Mixin that makes childless-ness easy to implement and avoids the complexity of the Node methods that deal with children. 
""" __slots__ = () attributes = None childNodes = EmptyNodeList() firstChild = None lastChild = None def _get_firstChild(self): return None def _get_lastChild(self): return None def appendChild(self, node): raise xml.dom.HierarchyRequestErr( self.nodeName + " nodes cannot have children") def hasChildNodes(self): return False def insertBefore(self, newChild, refChild): raise xml.dom.HierarchyRequestErr( self.nodeName + " nodes do not have children") def removeChild(self, oldChild): raise xml.dom.NotFoundErr( self.nodeName + " nodes do not have children") def normalize(self): # For childless nodes, normalize() has nothing to do. pass def replaceChild(self, newChild, oldChild): raise xml.dom.HierarchyRequestErr( self.nodeName + " nodes do not have children") class ProcessingInstruction(Childless, Node): nodeType = Node.PROCESSING_INSTRUCTION_NODE __slots__ = ('target', 'data') def __init__(self, target, data): self.target = target self.data = data # nodeValue is an alias for data def _get_nodeValue(self): return self.data def _set_nodeValue(self, value): self.data = data nodeValue = property(_get_nodeValue, _set_nodeValue) # nodeName is an alias for target def _get_nodeName(self): return self.target def _set_nodeName(self, value): self.target = value nodeName = property(_get_nodeName, _set_nodeName) def writexml(self, writer, indent="", addindent="", newl=""): writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl)) class CharacterData(Childless, Node): __slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling') def __init__(self): self.ownerDocument = self.parentNode = None self.previousSibling = self.nextSibling = None self._data = '' Node.__init__(self) def _get_length(self): return len(self.data) __len__ = _get_length def _get_data(self): return self._data def _set_data(self, data): self._data = data data = nodeValue = property(_get_data, _set_data) def __repr__(self): data = self.data if len(data) > 10: dotdotdot = "..." 
else: dotdotdot = "" return '<DOM %s node "%r%s">' % ( self.__class__.__name__, data[0:10], dotdotdot) def substringData(self, offset, count): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if count < 0: raise xml.dom.IndexSizeErr("count cannot be negative") return self.data[offset:offset+count] def appendData(self, arg): self.data = self.data + arg def insertData(self, offset, arg): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if arg: self.data = "%s%s%s" % ( self.data[:offset], arg, self.data[offset:]) def deleteData(self, offset, count): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if count < 0: raise xml.dom.IndexSizeErr("count cannot be negative") if count: self.data = self.data[:offset] + self.data[offset+count:] def replaceData(self, offset, count, arg): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if count < 0: raise xml.dom.IndexSizeErr("count cannot be negative") if count: self.data = "%s%s%s" % ( self.data[:offset], arg, self.data[offset+count:]) defproperty(CharacterData, "length", doc="Length of the string data.") class Text(CharacterData): __slots__ = () nodeType = Node.TEXT_NODE nodeName = "#text" attributes = None def splitText(self, offset): if offset < 0 or offset > len(self.data): raise xml.dom.IndexSizeErr("illegal offset value") newText = self.__class__() newText.data = self.data[offset:] newText.ownerDocument = self.ownerDocument next = self.nextSibling if self.parentNode and self in self.parentNode.childNodes: if next is None: self.parentNode.appendChild(newText) else: 
self.parentNode.insertBefore(newText, next) self.data = self.data[:offset] return newText def writexml(self, writer, indent="", addindent="", newl=""): _write_data(writer, "%s%s%s" % (indent, self.data, newl)) # DOM Level 3 (WD 9 April 2002) def _get_wholeText(self): L = [self.data] n = self.previousSibling while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): L.insert(0, n.data) n = n.previousSibling else: break n = self.nextSibling while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): L.append(n.data) n = n.nextSibling else: break return ''.join(L) def replaceWholeText(self, content): # XXX This needs to be seriously changed if minidom ever # supports EntityReference nodes. parent = self.parentNode n = self.previousSibling while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): next = n.previousSibling parent.removeChild(n) n = next else: break n = self.nextSibling if not content: parent.removeChild(self) while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): next = n.nextSibling parent.removeChild(n) n = next else: break if content: self.data = content return self else: return None def _get_isWhitespaceInElementContent(self): if self.data.strip(): return False elem = _get_containing_element(self) if elem is None: return False info = self.ownerDocument._get_elem_info(elem) if info is None: return False else: return info.isElementContent() defproperty(Text, "isWhitespaceInElementContent", doc="True iff this text node contains only whitespace" " and is in element content.") defproperty(Text, "wholeText", doc="The text of all logically-adjacent text nodes.") def _get_containing_element(node): c = node.parentNode while c is not None: if c.nodeType == Node.ELEMENT_NODE: return c c = c.parentNode return None def _get_containing_entref(node): c = node.parentNode while c is not None: if c.nodeType == Node.ENTITY_REFERENCE_NODE: return c c = c.parentNode return None 
class Comment(CharacterData): nodeType = Node.COMMENT_NODE nodeName = "#comment" def __init__(self, data): CharacterData.__init__(self) self._data = data def writexml(self, writer, indent="", addindent="", newl=""): if "--" in self.data: raise ValueError("'--' is not allowed in a comment node") writer.write("%s<!--%s-->%s" % (indent, self.data, newl)) class CDATASection(Text): __slots__ = () nodeType = Node.CDATA_SECTION_NODE nodeName = "#cdata-section" def writexml(self, writer, indent="", addindent="", newl=""): if self.data.find("]]>") >= 0: raise ValueError("']]>' not allowed in a CDATA section") writer.write("<![CDATA[%s]]>" % self.data) class ReadOnlySequentialNamedNodeMap(object): __slots__ = '_seq', def __init__(self, seq=()): # seq should be a list or tuple self._seq = seq def __len__(self): return len(self._seq) def _get_length(self): return len(self._seq) def getNamedItem(self, name): for n in self._seq: if n.nodeName == name: return n def getNamedItemNS(self, namespaceURI, localName): for n in self._seq: if n.namespaceURI == namespaceURI and n.localName == localName: return n def __getitem__(self, name_or_tuple): if isinstance(name_or_tuple, tuple): node = self.getNamedItemNS(*name_or_tuple) else: node = self.getNamedItem(name_or_tuple) if node is None: raise KeyError(name_or_tuple) return node def item(self, index): if index < 0: return None try: return self._seq[index] except IndexError: return None def removeNamedItem(self, name): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def removeNamedItemNS(self, namespaceURI, localName): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def setNamedItem(self, node): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def setNamedItemNS(self, node): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def __getstate__(self): return [self._seq] def __setstate__(self, state): self._seq = state[0] 
defproperty(ReadOnlySequentialNamedNodeMap, "length", doc="Number of entries in the NamedNodeMap.") class Identified: """Mix-in class that supports the publicId and systemId attributes.""" __slots__ = 'publicId', 'systemId' def _identified_mixin_init(self, publicId, systemId): self.publicId = publicId self.systemId = systemId def _get_publicId(self): return self.publicId def _get_systemId(self): return self.systemId class DocumentType(Identified, Childless, Node): nodeType = Node.DOCUMENT_TYPE_NODE nodeValue = None name = None publicId = None systemId = None internalSubset = None def __init__(self, qualifiedName): self.entities = ReadOnlySequentialNamedNodeMap() self.notations = ReadOnlySequentialNamedNodeMap() if qualifiedName: prefix, localname = _nssplit(qualifiedName) self.name = localname self.nodeName = self.name def _get_internalSubset(self): return self.internalSubset def cloneNode(self, deep): if self.ownerDocument is None: # it's ok clone = DocumentType(None) clone.name = self.name clone.nodeName = self.name operation = xml.dom.UserDataHandler.NODE_CLONED if deep: clone.entities._seq = [] clone.notations._seq = [] for n in self.notations._seq: notation = Notation(n.nodeName, n.publicId, n.systemId) clone.notations._seq.append(notation) n._call_user_data_handler(operation, n, notation) for e in self.entities._seq: entity = Entity(e.nodeName, e.publicId, e.systemId, e.notationName) entity.actualEncoding = e.actualEncoding entity.encoding = e.encoding entity.version = e.version clone.entities._seq.append(entity) e._call_user_data_handler(operation, n, entity) self._call_user_data_handler(operation, self, clone) return clone else: return None def writexml(self, writer, indent="", addindent="", newl=""): writer.write("<!DOCTYPE ") writer.write(self.name) if self.publicId: writer.write("%s PUBLIC '%s'%s '%s'" % (newl, self.publicId, newl, self.systemId)) elif self.systemId: writer.write("%s SYSTEM '%s'" % (newl, self.systemId)) if self.internalSubset is not 
None: writer.write(" [") writer.write(self.internalSubset) writer.write("]") writer.write(">"+newl) class Entity(Identified, Node): attributes = None nodeType = Node.ENTITY_NODE nodeValue = None actualEncoding = None encoding = None version = None def __init__(self, name, publicId, systemId, notation): self.nodeName = name self.notationName = notation self.childNodes = NodeList() self._identified_mixin_init(publicId, systemId) def _get_actualEncoding(self): return self.actualEncoding def _get_encoding(self): return self.encoding def _get_version(self): return self.version def appendChild(self, newChild): raise xml.dom.HierarchyRequestErr( "cannot append children to an entity node") def insertBefore(self, newChild, refChild): raise xml.dom.HierarchyRequestErr( "cannot insert children below an entity node") def removeChild(self, oldChild): raise xml.dom.HierarchyRequestErr( "cannot remove children from an entity node") def replaceChild(self, newChild, oldChild): raise xml.dom.HierarchyRequestErr( "cannot replace children of an entity node") class Notation(Identified, Childless, Node): nodeType = Node.NOTATION_NODE nodeValue = None def __init__(self, name, publicId, systemId): self.nodeName = name self._identified_mixin_init(publicId, systemId) class DOMImplementation(DOMImplementationLS): _features = [("core", "1.0"), ("core", "2.0"), ("core", None), ("xml", "1.0"), ("xml", "2.0"), ("xml", None), ("ls-load", "3.0"), ("ls-load", None), ] def hasFeature(self, feature, version): if version == "": version = None return (feature.lower(), version) in self._features def createDocument(self, namespaceURI, qualifiedName, doctype): if doctype and doctype.parentNode is not None: raise xml.dom.WrongDocumentErr( "doctype object owned by another DOM tree") doc = self._create_document() add_root_element = not (namespaceURI is None and qualifiedName is None and doctype is None) if not qualifiedName and add_root_element: # The spec is unclear what to raise here; SyntaxErr # would be 
the other obvious candidate. Since Xerces raises # InvalidCharacterErr, and since SyntaxErr is not listed # for createDocument, that seems to be the better choice. # XXX: need to check for illegal characters here and in # createElement. # DOM Level III clears this up when talking about the return value # of this function. If namespaceURI, qName and DocType are # Null the document is returned without a document element # Otherwise if doctype or namespaceURI are not None # Then we go back to the above problem raise xml.dom.InvalidCharacterErr("Element with no name") if add_root_element: prefix, localname = _nssplit(qualifiedName) if prefix == "xml" \ and namespaceURI != "http://www.w3.org/XML/1998/namespace": raise xml.dom.NamespaceErr("illegal use of 'xml' prefix") if prefix and not namespaceURI: raise xml.dom.NamespaceErr( "illegal use of prefix without namespaces") element = doc.createElementNS(namespaceURI, qualifiedName) if doctype: doc.appendChild(doctype) doc.appendChild(element) if doctype: doctype.parentNode = doctype.ownerDocument = doc doc.doctype = doctype doc.implementation = self return doc def createDocumentType(self, qualifiedName, publicId, systemId): doctype = DocumentType(qualifiedName) doctype.publicId = publicId doctype.systemId = systemId return doctype # DOM Level 3 (WD 9 April 2002) def getInterface(self, feature): if self.hasFeature(feature, None): return self else: return None # internal def _create_document(self): return Document() class ElementInfo(object): """Object that represents content-model information for an element. This implementation is not expected to be used in practice; DOM builders should provide implementations which do the right thing using information available to it. 
""" __slots__ = 'tagName', def __init__(self, name): self.tagName = name def getAttributeType(self, aname): return _no_type def getAttributeTypeNS(self, namespaceURI, localName): return _no_type def isElementContent(self): return False def isEmpty(self): """Returns true iff this element is declared to have an EMPTY content model.""" return False def isId(self, aname): """Returns true iff the named attribute is a DTD-style ID.""" return False def isIdNS(self, namespaceURI, localName): """Returns true iff the identified attribute is a DTD-style ID.""" return False def __getstate__(self): return self.tagName def __setstate__(self, state): self.tagName = state def _clear_id_cache(node): if node.nodeType == Node.DOCUMENT_NODE: node._id_cache.clear() node._id_search_stack = None elif _in_document(node): node.ownerDocument._id_cache.clear() node.ownerDocument._id_search_stack= None class Document(Node, DocumentLS): __slots__ = ('_elem_info', 'doctype', '_id_search_stack', 'childNodes', '_id_cache') _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE, Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE) implementation = DOMImplementation() nodeType = Node.DOCUMENT_NODE nodeName = "#document" nodeValue = None attributes = None parentNode = None previousSibling = nextSibling = None # Document attributes from Level 3 (WD 9 April 2002) actualEncoding = None encoding = None standalone = None version = None strictErrorChecking = False errorHandler = None documentURI = None _magic_id_count = 0 def __init__(self): self.doctype = None self.childNodes = NodeList() # mapping of (namespaceURI, localName) -> ElementInfo # and tagName -> ElementInfo self._elem_info = {} self._id_cache = {} self._id_search_stack = None def _get_elem_info(self, element): if element.namespaceURI: key = element.namespaceURI, element.localName else: key = element.tagName return self._elem_info.get(key) def _get_actualEncoding(self): return self.actualEncoding def _get_doctype(self): return 
self.doctype def _get_documentURI(self): return self.documentURI def _get_encoding(self): return self.encoding def _get_errorHandler(self): return self.errorHandler def _get_standalone(self): return self.standalone def _get_strictErrorChecking(self): return self.strictErrorChecking def _get_version(self): return self.version def appendChild(self, node): if node.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(node), repr(self))) if node.parentNode is not None: # This needs to be done before the next test since this # may *be* the document element, in which case it should # end up re-ordered to the end. node.parentNode.removeChild(node) if node.nodeType == Node.ELEMENT_NODE \ and self._get_documentElement(): raise xml.dom.HierarchyRequestErr( "two document elements disallowed") return Node.appendChild(self, node) def removeChild(self, oldChild): try: self.childNodes.remove(oldChild) except ValueError: raise xml.dom.NotFoundErr() oldChild.nextSibling = oldChild.previousSibling = None oldChild.parentNode = None if self.documentElement is oldChild: self.documentElement = None return oldChild def _get_documentElement(self): for node in self.childNodes: if node.nodeType == Node.ELEMENT_NODE: return node def unlink(self): if self.doctype is not None: self.doctype.unlink() self.doctype = None Node.unlink(self) def cloneNode(self, deep): if not deep: return None clone = self.implementation.createDocument(None, None, None) clone.encoding = self.encoding clone.standalone = self.standalone clone.version = self.version for n in self.childNodes: childclone = _clone_node(n, deep, clone) assert childclone.ownerDocument.isSameNode(clone) clone.childNodes.append(childclone) if childclone.nodeType == Node.DOCUMENT_NODE: assert clone.documentElement is None elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE: assert clone.doctype is None clone.doctype = childclone childclone.parentNode = clone 
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED, self, clone) return clone def createDocumentFragment(self): d = DocumentFragment() d.ownerDocument = self return d def createElement(self, tagName): e = Element(tagName) e.ownerDocument = self return e def createTextNode(self, data): if not isinstance(data, str): raise TypeError("node contents must be a string") t = Text() t.data = data t.ownerDocument = self return t def createCDATASection(self, data): if not isinstance(data, str): raise TypeError("node contents must be a string") c = CDATASection() c.data = data c.ownerDocument = self return c def createComment(self, data): c = Comment(data) c.ownerDocument = self return c def createProcessingInstruction(self, target, data): p = ProcessingInstruction(target, data) p.ownerDocument = self return p def createAttribute(self, qName): a = Attr(qName) a.ownerDocument = self a.value = "" return a def createElementNS(self, namespaceURI, qualifiedName): prefix, localName = _nssplit(qualifiedName) e = Element(qualifiedName, namespaceURI, prefix) e.ownerDocument = self return e def createAttributeNS(self, namespaceURI, qualifiedName): prefix, localName = _nssplit(qualifiedName) a = Attr(qualifiedName, namespaceURI, localName, prefix) a.ownerDocument = self a.value = "" return a # A couple of implementation-specific helpers to create node types # not supported by the W3C DOM specs: def _create_entity(self, name, publicId, systemId, notationName): e = Entity(name, publicId, systemId, notationName) e.ownerDocument = self return e def _create_notation(self, name, publicId, systemId): n = Notation(name, publicId, systemId) n.ownerDocument = self return n def getElementById(self, id): if id in self._id_cache: return self._id_cache[id] if not (self._elem_info or self._magic_id_count): return None stack = self._id_search_stack if stack is None: # we never searched before, or the cache has been cleared stack = [self.documentElement] self._id_search_stack = stack elif 
not stack: # Previous search was completed and cache is still valid; # no matching node. return None result = None while stack: node = stack.pop() # add child elements to stack for continued searching stack.extend([child for child in node.childNodes if child.nodeType in _nodeTypes_with_children]) # check this node info = self._get_elem_info(node) if info: # We have to process all ID attributes before # returning in order to get all the attributes set to # be IDs using Element.setIdAttribute*(). for attr in node.attributes.values(): if attr.namespaceURI: if info.isIdNS(attr.namespaceURI, attr.localName): self._id_cache[attr.value] = node if attr.value == id: result = node elif not node._magic_id_nodes: break elif info.isId(attr.name): self._id_cache[attr.value] = node if attr.value == id: result = node elif not node._magic_id_nodes: break elif attr._is_id: self._id_cache[attr.value] = node if attr.value == id: result = node elif node._magic_id_nodes == 1: break elif node._magic_id_nodes: for attr in node.attributes.values(): if attr._is_id: self._id_cache[attr.value] = node if attr.value == id: result = node if result is not None: break return result def getElementsByTagName(self, name): return _get_elements_by_tagName_helper(self, name, NodeList()) def getElementsByTagNameNS(self, namespaceURI, localName): return _get_elements_by_tagName_ns_helper( self, namespaceURI, localName, NodeList()) def isSupported(self, feature, version): return self.implementation.hasFeature(feature, version) def importNode(self, node, deep): if node.nodeType == Node.DOCUMENT_NODE: raise xml.dom.NotSupportedErr("cannot import document nodes") elif node.nodeType == Node.DOCUMENT_TYPE_NODE: raise xml.dom.NotSupportedErr("cannot import document type nodes") return _clone_node(node, deep, self) def writexml(self, writer, indent="", addindent="", newl="", encoding=None): if encoding is None: writer.write('<?xml version="1.0" ?>'+newl) else: writer.write('<?xml version="1.0" encoding="%s"?>%s' 
% ( encoding, newl)) for node in self.childNodes: node.writexml(writer, indent, addindent, newl) # DOM Level 3 (WD 9 April 2002) def renameNode(self, n, namespaceURI, name): if n.ownerDocument is not self: raise xml.dom.WrongDocumentErr( "cannot rename nodes from other documents;\n" "expected %s,\nfound %s" % (self, n.ownerDocument)) if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE): raise xml.dom.NotSupportedErr( "renameNode() only applies to element and attribute nodes") if namespaceURI != EMPTY_NAMESPACE: if ':' in name: prefix, localName = name.split(':', 1) if ( prefix == "xmlns" and namespaceURI != xml.dom.XMLNS_NAMESPACE): raise xml.dom.NamespaceErr( "illegal use of 'xmlns' prefix") else: if ( name == "xmlns" and namespaceURI != xml.dom.XMLNS_NAMESPACE and n.nodeType == Node.ATTRIBUTE_NODE): raise xml.dom.NamespaceErr( "illegal use of the 'xmlns' attribute") prefix = None localName = name else: prefix = None localName = None if n.nodeType == Node.ATTRIBUTE_NODE: element = n.ownerElement if element is not None: is_id = n._is_id element.removeAttributeNode(n) else: element = None n.prefix = prefix n._localName = localName n.namespaceURI = namespaceURI n.nodeName = name if n.nodeType == Node.ELEMENT_NODE: n.tagName = name else: # attribute node n.name = name if element is not None: element.setAttributeNode(n) if is_id: element.setIdAttributeNode(n) # It's not clear from a semantic perspective whether we should # call the user data handlers for the NODE_RENAMED event since # we're re-using the existing node. The draft spec has been # interpreted as meaning "no, don't call the handler unless a # new node is created." return n defproperty(Document, "documentElement", doc="Top-level element of this document.") def _clone_node(node, deep, newOwnerDocument): """ Clone a node and give it the new owner document. 
Called by Node.cloneNode and Document.importNode """ if node.ownerDocument.isSameNode(newOwnerDocument): operation = xml.dom.UserDataHandler.NODE_CLONED else: operation = xml.dom.UserDataHandler.NODE_IMPORTED if node.nodeType == Node.ELEMENT_NODE: clone = newOwnerDocument.createElementNS(node.namespaceURI, node.nodeName) for attr in node.attributes.values(): clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value) a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName) a.specified = attr.specified if deep: for child in node.childNodes: c = _clone_node(child, deep, newOwnerDocument) clone.appendChild(c) elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE: clone = newOwnerDocument.createDocumentFragment() if deep: for child in node.childNodes: c = _clone_node(child, deep, newOwnerDocument) clone.appendChild(c) elif node.nodeType == Node.TEXT_NODE: clone = newOwnerDocument.createTextNode(node.data) elif node.nodeType == Node.CDATA_SECTION_NODE: clone = newOwnerDocument.createCDATASection(node.data) elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE: clone = newOwnerDocument.createProcessingInstruction(node.target, node.data) elif node.nodeType == Node.COMMENT_NODE: clone = newOwnerDocument.createComment(node.data) elif node.nodeType == Node.ATTRIBUTE_NODE: clone = newOwnerDocument.createAttributeNS(node.namespaceURI, node.nodeName) clone.specified = True clone.value = node.value elif node.nodeType == Node.DOCUMENT_TYPE_NODE: assert node.ownerDocument is not newOwnerDocument operation = xml.dom.UserDataHandler.NODE_IMPORTED clone = newOwnerDocument.implementation.createDocumentType( node.name, node.publicId, node.systemId) clone.ownerDocument = newOwnerDocument if deep: clone.entities._seq = [] clone.notations._seq = [] for n in node.notations._seq: notation = Notation(n.nodeName, n.publicId, n.systemId) notation.ownerDocument = newOwnerDocument clone.notations._seq.append(notation) if hasattr(n, '_call_user_data_handler'): 
n._call_user_data_handler(operation, n, notation) for e in node.entities._seq: entity = Entity(e.nodeName, e.publicId, e.systemId, e.notationName) entity.actualEncoding = e.actualEncoding entity.encoding = e.encoding entity.version = e.version entity.ownerDocument = newOwnerDocument clone.entities._seq.append(entity) if hasattr(e, '_call_user_data_handler'): e._call_user_data_handler(operation, n, entity) else: # Note the cloning of Document and DocumentType nodes is # implementation specific. minidom handles those cases # directly in the cloneNode() methods. raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node)) # Check for _call_user_data_handler() since this could conceivably # used with other DOM implementations (one of the FourThought # DOMs, perhaps?). if hasattr(node, '_call_user_data_handler'): node._call_user_data_handler(operation, node, clone) return clone def _nssplit(qualifiedName): fields = qualifiedName.split(':', 1) if len(fields) == 2: return fields else: return (None, fields[0]) def _do_pulldom_parse(func, args, kwargs): events = func(*args, **kwargs) toktype, rootNode = events.getEvent() events.expandNode(rootNode) events.clear() return rootNode def parse(file, parser=None, bufsize=None): """Parse a file into a DOM by filename or file object.""" if parser is None and not bufsize: from xml.dom import expatbuilder return expatbuilder.parse(file) else: from xml.dom import pulldom return _do_pulldom_parse(pulldom.parse, (file,), {'parser': parser, 'bufsize': bufsize}) def parseString(string, parser=None): """Parse a file into a DOM from a string.""" if parser is None: from xml.dom import expatbuilder return expatbuilder.parseString(string) else: from xml.dom import pulldom return _do_pulldom_parse(pulldom.parseString, (string,), {'parser': parser}) def getDOMImplementation(features=None): if features: if isinstance(features, str): features = domreg._parse_feature_string(features) for f, v in features: if not 
Document.implementation.hasFeature(f, v): return None return Document.implementation
gpl-3.0
brainelectronics/towerdefense
examples/pyglet/image/codecs/dds.py
22
7735
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- '''DDS texture loader. 
Reference: http://msdn2.microsoft.com/en-us/library/bb172993.aspx ''' from __future__ import division __docformat__ = 'restructuredtext' __version__ = '$Id$' from ctypes import * import struct from pyglet.gl import * from pyglet.image import CompressedImageData from pyglet.image import codecs from pyglet.image.codecs import s3tc from pyglet.compat import izip_longest as compat_izip_longest class DDSException(codecs.ImageDecodeException): exception_priority = 0 # dwFlags of DDSURFACEDESC2 DDSD_CAPS = 0x00000001 DDSD_HEIGHT = 0x00000002 DDSD_WIDTH = 0x00000004 DDSD_PITCH = 0x00000008 DDSD_PIXELFORMAT = 0x00001000 DDSD_MIPMAPCOUNT = 0x00020000 DDSD_LINEARSIZE = 0x00080000 DDSD_DEPTH = 0x00800000 # ddpfPixelFormat of DDSURFACEDESC2 DDPF_ALPHAPIXELS = 0x00000001 DDPF_FOURCC = 0x00000004 DDPF_RGB = 0x00000040 # dwCaps1 of DDSCAPS2 DDSCAPS_COMPLEX = 0x00000008 DDSCAPS_TEXTURE = 0x00001000 DDSCAPS_MIPMAP = 0x00400000 # dwCaps2 of DDSCAPS2 DDSCAPS2_CUBEMAP = 0x00000200 DDSCAPS2_CUBEMAP_POSITIVEX = 0x00000400 DDSCAPS2_CUBEMAP_NEGATIVEX = 0x00000800 DDSCAPS2_CUBEMAP_POSITIVEY = 0x00001000 DDSCAPS2_CUBEMAP_NEGATIVEY = 0x00002000 DDSCAPS2_CUBEMAP_POSITIVEZ = 0x00004000 DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x00008000 DDSCAPS2_VOLUME = 0x00200000 class _filestruct(object): def __init__(self, data): if len(data) < self.get_size(): raise DDSException('Not a DDS file') items = struct.unpack(self.get_format(), data) for field, value in compat_izip_longest(self._fields, items, fillvalue=None): setattr(self, field[0], value) def __repr__(self): name = self.__class__.__name__ return '%s(%s)' % \ (name, (', \n%s' % (' ' * (len(name) + 1))).join( \ ['%s = %s' % (field[0], repr(getattr(self, field[0]))) \ for field in self._fields])) @classmethod def get_format(cls): return '<' + ''.join([f[1] for f in cls._fields]) @classmethod def get_size(cls): return struct.calcsize(cls.get_format()) class DDSURFACEDESC2(_filestruct): _fields = [ ('dwMagic', '4s'), ('dwSize', 'I'), ('dwFlags', 'I'), 
('dwHeight', 'I'), ('dwWidth', 'I'), ('dwPitchOrLinearSize', 'I'), ('dwDepth', 'I'), ('dwMipMapCount', 'I'), ('dwReserved1', '44s'), ('ddpfPixelFormat', '32s'), ('dwCaps1', 'I'), ('dwCaps2', 'I'), ('dwCapsReserved', '8s'), ('dwReserved2', 'I') ] def __init__(self, data): super(DDSURFACEDESC2, self).__init__(data) self.ddpfPixelFormat = DDPIXELFORMAT(self.ddpfPixelFormat) class DDPIXELFORMAT(_filestruct): _fields = [ ('dwSize', 'I'), ('dwFlags', 'I'), ('dwFourCC', '4s'), ('dwRGBBitCount', 'I'), ('dwRBitMask', 'I'), ('dwGBitMask', 'I'), ('dwBBitMask', 'I'), ('dwRGBAlphaBitMask', 'I') ] _compression_formats = { (b'DXT1', False): (GL_COMPRESSED_RGB_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgb), (b'DXT1', True): (GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgba), (b'DXT3', False): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3), (b'DXT3', True): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3), (b'DXT5', False): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5), (b'DXT5', True): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5), } def _check_error(): e = glGetError() if e != 0: print 'GL error %d' % e class DDSImageDecoder(codecs.ImageDecoder): def get_file_extensions(self): return ['.dds'] def decode(self, file, filename): header = file.read(DDSURFACEDESC2.get_size()) desc = DDSURFACEDESC2(header) if desc.dwMagic != b'DDS ' or desc.dwSize != 124: raise DDSException('Invalid DDS file (incorrect header).') width = desc.dwWidth height = desc.dwHeight mipmaps = 1 if desc.dwFlags & DDSD_DEPTH: raise DDSException('Volume DDS files unsupported') if desc.dwFlags & DDSD_MIPMAPCOUNT: mipmaps = desc.dwMipMapCount if desc.ddpfPixelFormat.dwSize != 32: raise DDSException('Invalid DDS file (incorrect pixel format).') if desc.dwCaps2 & DDSCAPS2_CUBEMAP: raise DDSException('Cubemap DDS files unsupported') if not desc.ddpfPixelFormat.dwFlags & DDPF_FOURCC: raise DDSException('Uncompressed DDS textures not supported.') has_alpha = desc.ddpfPixelFormat.dwRGBAlphaBitMask != 0 
selector = (desc.ddpfPixelFormat.dwFourCC, has_alpha) if selector not in _compression_formats: raise DDSException('Unsupported texture compression %s' % \ desc.ddpfPixelFormat.dwFourCC) dformat, decoder = _compression_formats[selector] if dformat == GL_COMPRESSED_RGB_S3TC_DXT1_EXT: block_size = 8 else: block_size = 16 datas = [] w, h = width, height for i in range(mipmaps): if not w and not h: break if not w: w = 1 if not h: h = 1 size = ((w + 3) // 4) * ((h + 3) // 4) * block_size data = file.read(size) datas.append(data) w >>= 1 h >>= 1 image = CompressedImageData(width, height, dformat, datas[0], 'GL_EXT_texture_compression_s3tc', decoder) level = 0 for data in datas[1:]: level += 1 image.set_mipmap_data(level, data) return image def get_decoders(): return [DDSImageDecoder()] def get_encoders(): return []
bsd-3-clause
bowenliu16/deepchem
examples/delaney/delaney_graph_conv.py
1
2087
""" Script that trains graph-conv models on Tox21 dataset. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals import numpy as np import tensorflow as tf import deepchem as dc from keras import backend as K from delaney_datasets import load_delaney # Only for debug! np.random.seed(123) g = tf.Graph() sess = tf.Session(graph=g) K.set_session(sess) with g.as_default(): # Load Tox21 dataset tf.set_random_seed(123) delaney_tasks, delaney_datasets, transformers = load_delaney(featurizer='GraphConv',split='index') train_dataset, valid_dataset, test_dataset = delaney_datasets # Fit models metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean) # Do setup required for tf/keras models # Number of features on conv-mols n_feat = 75 # Batch size of models batch_size = 128 graph_model = dc.nn.SequentialGraph(n_feat) graph_model.add(dc.nn.GraphConv(128, activation='relu')) graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1)) graph_model.add(dc.nn.GraphPool()) graph_model.add(dc.nn.GraphConv(128, activation='relu')) graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1)) graph_model.add(dc.nn.GraphPool()) # Gather Projection graph_model.add(dc.nn.Dense(256, activation='relu')) graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1)) graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh")) # Dense post-processing layer with tf.Session() as sess: model = dc.models.MultitaskGraphRegressor( sess, graph_model, len(delaney_tasks), batch_size=batch_size, learning_rate=1e-3, learning_rate_decay_time=1000, optimizer_type="adam", beta1=.9, beta2=.999) # Fit trained model model.fit(train_dataset, nb_epoch=20) print("Evaluating model") train_scores = model.evaluate(train_dataset, [metric], transformers) valid_scores = model.evaluate(valid_dataset, [metric], transformers) print("Train scores") print(train_scores) print("Validation scores") print(valid_scores)
gpl-3.0
salivatears/ansible
lib/ansible/inventory/vars_plugins/noop.py
317
1632
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # (c) 2014, Serge van Ginderachter <serge@vanginderachter.be> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type class VarsModule(object): """ Loads variables for groups and/or hosts """ def __init__(self, inventory): """ constructor """ self.inventory = inventory self.inventory_basedir = inventory.basedir() def run(self, host, vault_password=None): """ For backwards compatibility, when only vars per host were retrieved This method should return both host specific vars as well as vars calculated from groups it is a member of """ return {} def get_host_vars(self, host, vault_password=None): """ Get host specific variables. """ return {} def get_group_vars(self, group, vault_password=None): """ Get group specific variables. """ return {}
gpl-3.0
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/contrib/gis/geos/__init__.py
61
1176
""" The GeoDjango GEOS module. Please consult the GeoDjango documentation for more details: https://docs.djangoproject.com/en/dev/ref/contrib/gis/geos/ """ __all__ = ['HAS_GEOS'] try: from .libgeos import geos_version, geos_version_info # NOQA: flake8 detects only the last __all__ HAS_GEOS = True __all__ += ['geos_version', 'geos_version_info'] except ImportError: HAS_GEOS = False if HAS_GEOS: from .geometry import GEOSGeometry, wkt_regex, hex_regex from .point import Point from .linestring import LineString, LinearRing from .polygon import Polygon from .collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon from .error import GEOSException, GEOSIndexError from .io import WKTReader, WKTWriter, WKBReader, WKBWriter from .factory import fromfile, fromstr __all__ += [ 'GEOSGeometry', 'wkt_regex', 'hex_regex', 'Point', 'LineString', 'LinearRing', 'Polygon', 'GeometryCollection', 'MultiPoint', 'MultiLineString', 'MultiPolygon', 'GEOSException', 'GEOSIndexError', 'WKTReader', 'WKTWriter', 'WKBReader', 'WKBWriter', 'fromfile', 'fromstr', ]
mit
mattjmuw/iam-messaging
messagetools.old/dao_implementation/mock.py
3
3996
import sys import os from os.path import abspath, dirname import re import json import logging import time import socket import settings # from messagetools.mock.mock_http import MockHTTP logger = logging.getLogger(__name__) """ A centralized the mock data access """ fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() def get_mockdata_message(service_name, queue_name, event_no): """ :param service_name: possible "aws", "azure", etc. """ file_path = None success = False start_time = time.time() dir_base = dirname(__file__) app_root = abspath(dir_base) response = _load_resource_from_path(app_root, service_name, queue_name, event_no) if response: return response # If no event has been found return None return response def _load_resource_from_path(app_root, service_name, queue_name, event_no): mock_root = app_root + '/../mock' std_root = mock_root if hasattr(settings, 'MESSAGETOOLS_MOCK_ROOT'): mock_root = settings.MESSAGETOOLS_MOCK_ROOT root = mock_root fname = 'event' if hasattr(settings, 'MESSAGETOOLS_MOCK_FILENAME'): fname = settings.MESSAGETOOLS_MOCK_FILENAME fpath = '/' + service_name + '/' + queue_name + '/' + fname + '.' + str(event_no) try: file_path = convert_to_platform_safe(root + fpath) logger.info('mock file: ' + file_path) handle = open(file_path) except IOError: try: file_path = convert_to_platform_safe(std_root + fpath) logger.info('mock file: ' + file_path) handle = open(file_path) except IOError: return data = handle.read() logger.debug('data[%s]' % data) response = json.loads(data) return response def post_mockdata_url(service_name, implementation_name, url, headers, body, dir_base = dirname(__file__)): """ :param service_name: possible "sws", "pws", "book", "hfs", etc. :param implementation_name: possible values: "file", etc. 
""" #Currently this post method does not return a response body response = MockHTTP() if body is not None: if "dispatch" in url: response.status = 200 else: response.status = 201 response.headers = {"X-Data-Source": service_name + " file mock data", "Content-Type": headers['Content-Type']} else: response.status = 400 response.data = "Bad Request: no POST body" return response def put_mockdata_url(service_name, implementation_name, url, headers, body, dir_base = dirname(__file__)): """ :param service_name: possible "sws", "pws", "book", "hfs", etc. :param implementation_name: possible values: "file", etc. """ #Currently this put method does not return a response body response = MockHTTP() if body is not None: response.status = 204 response.headers = {"X-Data-Source": service_name + " file mock data", "Content-Type": headers['Content-Type']} else: response.status = 400 response.data = "Bad Request: no POST body" return response def delete_mockdata_url(service_name, implementation_name, url, headers, dir_base = dirname(__file__)): """ :param service_name: possible "sws", "pws", "book", "hfs", etc. :param implementation_name: possible values: "file", etc. """ #Http response code 204 No Content: #The server has fulfilled the request but does not need to return an entity-body response = MockHTTP() response.status = 204 return response def convert_to_platform_safe(dir_file_name): """ :param dir_file_name: a string to be processed :return: a string with all the reserved characters replaced """ return re.sub('[\?|<>=:*,;+&"@]', '_', dir_file_name)
apache-2.0
gcp/leela-zero
training/tf/quantize_weights.py
1
1387
#!/usr/bin/env python3 import sys, os, argparse def format_n(x): x = float(x) x = '{:.3g}'.format(x) x = x.replace('e-0', 'e-') if x.startswith('0.'): x = x[1:] if x.startswith('-0.'): x = '-' + x[2:] return x if __name__ == "__main__": parser = argparse.ArgumentParser( description='Quantize network file to decrease the file size.') parser.add_argument("input", help='Input file', type=str) parser.add_argument("-o", "--output", help='Output file. Defaults to input + "_quantized"', required=False, type=str, default=None) args = parser.parse_args() if args.output == None: output_name = os.path.splitext(sys.argv[1]) output_name = output_name[0] + '_quantized' + output_name[1] else: output_name = args.output output = open(output_name, 'w') calculate_error = True error = 0 with open(args.input, 'r') as f: for line in f: line = line.split(' ') lineq = list(map(format_n, line)) if calculate_error: e = sum((float(line[i]) - float(lineq[i]))**2 for i in range(len(line))) error += e/len(line) output.write(' '.join(lineq) + '\n') if calculate_error: print('Weight file difference L2-norm: {}'.format(error**0.5)) output.close()
gpl-3.0
hogarthj/ansible
lib/ansible/modules/monitoring/zabbix/zabbix_maintenance.py
50
12826
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Alexander Bulimov <lazywolf0@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: zabbix_maintenance short_description: Create Zabbix maintenance windows description: - This module will let you create Zabbix maintenance windows. version_added: "1.8" author: "Alexander Bulimov (@abulimov)" requirements: - "python >= 2.6" - zabbix-api options: state: description: - Create or remove a maintenance window. Maintenance window to remove is identified by name. default: present choices: [ "present", "absent" ] host_names: description: - Hosts to manage maintenance window for. Separate multiple hosts with commas. C(host_name) is an alias for C(host_names). B(Required) option when C(state) is I(present) and no C(host_groups) specified. aliases: [ "host_name" ] host_groups: description: - Host groups to manage maintenance window for. Separate multiple groups with commas. C(host_group) is an alias for C(host_groups). B(Required) option when C(state) is I(present) and no C(host_names) specified. aliases: [ "host_group" ] minutes: description: - Length of maintenance window in minutes. default: 10 name: description: - Unique name of maintenance window. required: true desc: description: - Short description of maintenance window. required: true default: Created by Ansible collect_data: description: - Type of maintenance. With data collection, or without. type: bool default: 'yes' extends_documentation_fragment: - zabbix notes: - Useful for setting hosts in maintenance mode before big update, and removing maintenance window after update. 
- Module creates maintenance window from now() to now() + minutes, so if Zabbix server's time and host's time are not synchronized, you will get strange results. - Install required module with 'pip install zabbix-api' command. ''' EXAMPLES = ''' - name: Create a named maintenance window for host www1 for 90 minutes zabbix_maintenance: name: Update of www1 host_name: www1.example.com state: present minutes: 90 server_url: https://monitoring.example.com login_user: ansible login_password: pAsSwOrD - name: Create a named maintenance window for host www1 and host groups Office and Dev zabbix_maintenance: name: Update of www1 host_name: www1.example.com host_groups: - Office - Dev state: present server_url: https://monitoring.example.com login_user: ansible login_password: pAsSwOrD - name: Create a named maintenance window for hosts www1 and db1, without data collection. zabbix_maintenance: name: update host_names: - www1.example.com - db1.example.com state: present collect_data: False server_url: https://monitoring.example.com login_user: ansible login_password: pAsSwOrD - name: Remove maintenance window by name zabbix_maintenance: name: Test1 state: absent server_url: https://monitoring.example.com login_user: ansible login_password: pAsSwOrD ''' import datetime import time try: from zabbix_api import ZabbixAPI HAS_ZABBIX_API = True except ImportError: HAS_ZABBIX_API = False from ansible.module_utils.basic import AnsibleModule def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc): end_time = start_time + period try: zbx.maintenance.create( { "groupids": group_ids, "hostids": host_ids, "name": name, "maintenance_type": maintenance_type, "active_since": str(start_time), "active_till": str(end_time), "description": desc, "timeperiods": [{ "timeperiod_type": "0", "start_date": str(start_time), "period": str(period), }] } ) except BaseException as e: return 1, None, str(e) return 0, None, None def update_maintenance(zbx, 
maintenance_id, group_ids, host_ids, start_time, maintenance_type, period, desc): end_time = start_time + period try: zbx.maintenance.update( { "maintenanceid": maintenance_id, "groupids": group_ids, "hostids": host_ids, "maintenance_type": maintenance_type, "active_since": str(start_time), "active_till": str(end_time), "description": desc, "timeperiods": [{ "timeperiod_type": "0", "start_date": str(start_time), "period": str(period), }] } ) except BaseException as e: return 1, None, str(e) return 0, None, None def get_maintenance(zbx, name): try: maintenances = zbx.maintenance.get( { "filter": { "name": name, }, "selectGroups": "extend", "selectHosts": "extend" } ) except BaseException as e: return 1, None, str(e) for maintenance in maintenances: maintenance["groupids"] = [group["groupid"] for group in maintenance["groups"]] if "groups" in maintenance else [] maintenance["hostids"] = [host["hostid"] for host in maintenance["hosts"]] if "hosts" in maintenance else [] return 0, maintenance, None return 0, None, None def delete_maintenance(zbx, maintenance_id): try: zbx.maintenance.delete([maintenance_id]) except BaseException as e: return 1, None, str(e) return 0, None, None def get_group_ids(zbx, host_groups): group_ids = [] for group in host_groups: try: result = zbx.hostgroup.get( { "output": "extend", "filter": { "name": group } } ) except BaseException as e: return 1, None, str(e) if not result: return 1, None, "Group id for group %s not found" % group group_ids.append(result[0]["groupid"]) return 0, group_ids, None def get_host_ids(zbx, host_names): host_ids = [] for host in host_names: try: result = zbx.host.get( { "output": "extend", "filter": { "name": host } } ) except BaseException as e: return 1, None, str(e) if not result: return 1, None, "Host id for host %s not found" % host host_ids.append(result[0]["hostid"]) return 0, host_ids, None def main(): module = AnsibleModule( argument_spec=dict( state=dict(required=False, default='present', 
choices=['present', 'absent']), server_url=dict(type='str', required=True, default=None, aliases=['url']), host_names=dict(type='list', required=False, default=None, aliases=['host_name']), minutes=dict(type='int', required=False, default=10), host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), login_user=dict(type='str', required=True), login_password=dict(type='str', required=True, no_log=True), validate_certs=dict(type='bool', required=False, default=True), http_login_user=dict(type='str', required=False, default=None), http_login_password=dict(type='str', required=False, default=None, no_log=True), name=dict(type='str', required=True), desc=dict(type='str', required=False, default="Created by Ansible"), collect_data=dict(type='bool', required=False, default=True), timeout=dict(type='int', default=10), ), supports_check_mode=True, ) if not HAS_ZABBIX_API: module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)") host_names = module.params['host_names'] host_groups = module.params['host_groups'] state = module.params['state'] login_user = module.params['login_user'] login_password = module.params['login_password'] http_login_user = module.params['http_login_user'] http_login_password = module.params['http_login_password'] validate_certs = module.params['validate_certs'] minutes = module.params['minutes'] name = module.params['name'] desc = module.params['desc'] server_url = module.params['server_url'] collect_data = module.params['collect_data'] timeout = module.params['timeout'] if collect_data: maintenance_type = 0 else: maintenance_type = 1 try: zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, validate_certs=validate_certs) zbx.login(login_user, login_password) except BaseException as e: module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) changed = False if state == "present": if not host_names and not host_groups: 
module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.") now = datetime.datetime.now().replace(second=0) start_time = time.mktime(now.timetuple()) period = 60 * int(minutes) # N * 60 seconds if host_groups: (rc, group_ids, error) = get_group_ids(zbx, host_groups) if rc != 0: module.fail_json(msg="Failed to get group_ids: %s" % error) else: group_ids = [] if host_names: (rc, host_ids, error) = get_host_ids(zbx, host_names) if rc != 0: module.fail_json(msg="Failed to get host_ids: %s" % error) else: host_ids = [] (rc, maintenance, error) = get_maintenance(zbx, name) if rc != 0: module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error)) if maintenance and ( sorted(group_ids) != sorted(maintenance["groupids"]) or sorted(host_ids) != sorted(maintenance["hostids"]) or str(maintenance_type) != maintenance["maintenance_type"] or str(int(start_time)) != maintenance["active_since"] or str(int(start_time + period)) != maintenance["active_till"] ): if module.check_mode: changed = True else: (rc, _, error) = update_maintenance(zbx, maintenance["maintenanceid"], group_ids, host_ids, start_time, maintenance_type, period, desc) if rc == 0: changed = True else: module.fail_json(msg="Failed to update maintenance: %s" % error) if not maintenance: if module.check_mode: changed = True else: (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc) if rc == 0: changed = True else: module.fail_json(msg="Failed to create maintenance: %s" % error) if state == "absent": (rc, maintenance, error) = get_maintenance(zbx, name) if rc != 0: module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error)) if maintenance: if module.check_mode: changed = True else: (rc, _, error) = delete_maintenance(zbx, maintenance["maintenanceid"]) if rc == 0: changed = True else: module.fail_json(msg="Failed to remove maintenance: %s" % error) 
module.exit_json(changed=changed) if __name__ == '__main__': main()
gpl-3.0
tashaxe/Red-DiscordBot
lib/youtube_dl/extractor/teachertube.py
36
4645
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( qualities, determine_ext, ) class TeacherTubeIE(InfoExtractor): IE_NAME = 'teachertube' IE_DESC = 'teachertube.com videos' _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997', 'md5': 'f9434ef992fd65936d72999951ee254c', 'info_dict': { 'id': '339997', 'ext': 'mp4', 'title': 'Measures of dispersion from a frequency table', 'description': 'Measures of dispersion from a frequency table', 'thumbnail': r're:http://.*\.jpg', }, }, { 'url': 'http://www.teachertube.com/viewVideo.php?video_id=340064', 'md5': '0d625ec6bc9bf50f70170942ad580676', 'info_dict': { 'id': '340064', 'ext': 'mp4', 'title': 'How to Make Paper Dolls _ Paper Art Projects', 'description': 'Learn how to make paper dolls in this simple', 'thumbnail': r're:http://.*\.jpg', }, }, { 'url': 'http://www.teachertube.com/music.php?music_id=8805', 'md5': '01e8352006c65757caf7b961f6050e21', 'info_dict': { 'id': '8805', 'ext': 'mp3', 'title': 'PER ASPERA AD ASTRA', 'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P', }, }, { 'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790', 'md5': '9c79fbb2dd7154823996fc28d4a26998', 'info_dict': { 'id': '297790', 'ext': 'mp4', 'title': 'Intro Video - Schleicher', 'description': 'Intro Video - Why to flip, how flipping will', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_meta('title', webpage, 'title', fatal=True) TITLE_SUFFIX = ' - TeacherTube' if title.endswith(TITLE_SUFFIX): title = title[:-len(TITLE_SUFFIX)].strip() description = self._html_search_meta('description', webpage, 'description') if description: description = description.strip() quality = qualities(['mp3', 
'flv', 'mp4']) media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage) media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage)) media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage)) formats = [ { 'url': media_url, 'quality': quality(determine_ext(media_url)) } for media_url in set(media_urls) ] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': self._html_search_regex(r'\'image\'\s*:\s*["\']([^"\']+)["\']', webpage, 'thumbnail'), 'formats': formats, 'description': description, } class TeacherTubeUserIE(InfoExtractor): IE_NAME = 'teachertube:user:collection' IE_DESC = 'teachertube.com user and collection videos' _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?' _MEDIA_RE = r'''(?sx) class="?sidebar_thumb_time"?>[0-9:]+</div> \s* <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)" ''' _TEST = { 'url': 'http://www.teachertube.com/user/profile/rbhagwati2', 'info_dict': { 'id': 'rbhagwati2' }, 'playlist_mincount': 179, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) user_id = mobj.group('user') urls = [] webpage = self._download_webpage(url, user_id) urls.extend(re.findall(self._MEDIA_RE, webpage)) pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1] for p in pages: more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p) webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages))) video_urls = re.findall(self._MEDIA_RE, webpage) urls.extend(video_urls) entries = [self.url_result(vurl, 'TeacherTube') for vurl in urls] return self.playlist_result(entries, user_id)
gpl-3.0
rbaumg/trac
trac/wiki/interwiki.py
1
7205
# -*- coding: utf-8 -*- # # Copyright (C) 2005-2019 Edgewall Software # Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. # # Author: Christian Boos <cboos@edgewall.org> import re from trac.cache import cached from trac.config import ConfigSection from trac.core import * from trac.util import lazy from trac.util.html import tag from trac.util.translation import _, N_ from trac.wiki.api import IWikiChangeListener, IWikiMacroProvider, WikiSystem from trac.wiki.parser import WikiParser from trac.wiki.formatter import split_url_into_path_query_fragment class InterWikiMap(Component): """InterWiki map manager.""" implements(IWikiChangeListener, IWikiMacroProvider) interwiki_section = ConfigSection('interwiki', """Every option in the `[interwiki]` section defines one InterWiki prefix. The option name defines the prefix. The option value defines the URL, optionally followed by a description separated from the URL by whitespace. Parametric URLs are supported as well. '''Example:''' {{{ [interwiki] MeatBall = http://www.usemod.com/cgi-bin/mb.pl? PEP = http://www.python.org/peps/pep-$1.html Python Enhancement Proposal $1 tsvn = tsvn: Interact with TortoiseSvn }}} """) _page_name = 'InterMapTxt' _interwiki_re = re.compile(r"(%s)[ \t]+([^ \t]+)(?:[ \t]+#(.*))?" 
% WikiParser.LINK_SCHEME, re.UNICODE) _argspec_re = re.compile(r"\$\d") # The component itself behaves as a read-only map def __contains__(self, ns): return ns.upper() in self.interwiki_map def __getitem__(self, ns): return self.interwiki_map[ns.upper()] def keys(self): return list(self.interwiki_map) # Expansion of positional arguments ($1, $2, ...) in URL and title def _expand(self, txt, args): """Replace "$1" by the first args, "$2" by the second, etc.""" def setarg(match): num = int(match.group()[1:]) return args[num - 1] if 0 < num <= len(args) else '' return re.sub(InterWikiMap._argspec_re, setarg, txt) def _expand_or_append(self, txt, args): """Like expand, but also append first arg if there's no "$".""" if not args: return txt expanded = self._expand(txt, args) return txt + args[0] if expanded == txt else expanded def url(self, ns, target): """Return `(url, title)` for the given InterWiki `ns`. Expand the colon-separated `target` arguments. """ ns, url, title = self[ns] maxargnum = max([0] + [int(a[1:]) for a in re.findall(InterWikiMap._argspec_re, url)]) target, query, fragment = split_url_into_path_query_fragment(target) if maxargnum > 0: args = target.split(':', (maxargnum - 1)) else: args = [target] url = self._expand_or_append(url, args) ntarget, nquery, nfragment = split_url_into_path_query_fragment(url) if query and nquery: nquery = '%s&%s' % (nquery, query[1:]) else: nquery = nquery or query nfragment = fragment or nfragment # user provided takes precedence expanded_url = ntarget + nquery + nfragment if not self._is_safe_url(expanded_url): expanded_url = '' expanded_title = self._expand(title, args) if expanded_title == title: expanded_title = _("%(target)s in %(name)s", target=target, name=title) return expanded_url, expanded_title # IWikiChangeListener methods def wiki_page_added(self, page): if page.name == InterWikiMap._page_name: del self.interwiki_map def wiki_page_changed(self, page, version, t, comment, author): if page.name == 
InterWikiMap._page_name: del self.interwiki_map def wiki_page_deleted(self, page): if page.name == InterWikiMap._page_name: del self.interwiki_map def wiki_page_version_deleted(self, page): if page.name == InterWikiMap._page_name: del self.interwiki_map @cached def interwiki_map(self): """Map from upper-cased namespaces to (namespace, prefix, title) values. """ from trac.wiki.model import WikiPage map = {} content = WikiPage(self.env, InterWikiMap._page_name).text in_map = False for line in content.split('\n'): if in_map: if line.startswith('----'): in_map = False else: m = re.match(InterWikiMap._interwiki_re, line) if m: prefix, url, title = m.groups() url = url.strip() title = title.strip() if title else prefix map[prefix.upper()] = (prefix, url, title) elif line.startswith('----'): in_map = True for prefix, value in self.interwiki_section.options(): value = value.split(None, 1) if value: url = value[0].strip() title = value[1].strip() if len(value) > 1 else prefix map[prefix.upper()] = (prefix, url, title) return map # IWikiMacroProvider methods def get_macros(self): yield 'InterWiki' def get_macro_description(self, name): return 'messages', \ N_("Provide a description list for the known InterWiki " "prefixes.") def expand_macro(self, formatter, name, content): interwikis = [] for k in sorted(self.keys()): prefix, url, title = self[k] interwikis.append({ 'prefix': prefix, 'url': url, 'title': title, 'rc_url': self._expand_or_append(url, ['RecentChanges']), 'description': url if title == prefix else title}) return tag.table(tag.tr(tag.th(tag.em(_("Prefix"))), tag.th(tag.em(_("Site")))), [tag.tr(tag.td(tag.a(w['prefix'], href=w['rc_url'])), tag.td(tag.a(w['description'], href=w['url']))) for w in interwikis ], class_="wiki interwiki") # Internal methods def _is_safe_url(self, url): return WikiSystem(self.env).render_unsafe_content or \ ':' not in url or \ url.split(':', 1)[0] in self._safe_schemes @lazy def _safe_schemes(self): return 
set(WikiSystem(self.env).safe_schemes)
bsd-3-clause
xodus7/tensorflow
tensorflow/contrib/mixed_precision/python/loss_scale_manager_test.py
22
6362
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for LossScaleManager classes..""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import test_util from tensorflow.python.ops import variables from tensorflow.python.platform import test def _GetExampleIter(inputs): dataset = dataset_ops.Dataset.from_tensor_slices(inputs) return dataset.make_one_shot_iterator() class FixedLossScaleManagerTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_basic(self): itr = _GetExampleIter([True] * 10 + [False] * 10) loss_scale = 1000 lsm = lsm_lib.FixedLossScaleManager(loss_scale) update_fn = lambda: lsm.update_loss_scale(itr.get_next()) self.evaluate(variables.global_variables_initializer()) if not context.executing_eagerly(): update_op = update_fn() for _ in range(10): if context.executing_eagerly(): update_fn() else: self.evaluate(update_op) self.assertEqual(loss_scale, self.evaluate(lsm.get_loss_scale())) class ExponentialUpdateLossScaleManagerTest(test.TestCase): def _test_helper(self, inputs, expected_outputs, init_loss_scale=1, 
incr_every_n_step=2, decr_every_n_nan_or_inf=2): ratio = 2 lsm = lsm_lib.ExponentialUpdateLossScaleManager( init_loss_scale=init_loss_scale, incr_every_n_steps=incr_every_n_step, decr_every_n_nan_or_inf=decr_every_n_nan_or_inf, incr_ratio=ratio, decr_ratio=1. / ratio) itr = _GetExampleIter(inputs) update_fn = lambda: lsm.update_loss_scale(itr.get_next()) self.evaluate(variables.global_variables_initializer()) actual_outputs = [] if not context.executing_eagerly(): update_op = update_fn() for _ in range(len(inputs)): if context.executing_eagerly(): update_fn() else: self.evaluate(update_op) actual_outputs.append(self.evaluate(lsm.get_loss_scale())) self.assertEqual(actual_outputs, expected_outputs) @test_util.run_in_graph_and_eager_modes def test_increase_every_n_steps(self): inputs = [True] * 6 expected_outputs = [1, 2, 2, 4, 4, 8] self._test_helper(inputs, expected_outputs) @test_util.run_in_graph_and_eager_modes def test_keep_increasing_until_capped(self): init_loss_scale = np.finfo(np.float32).max / 4 + 10 max_float = np.finfo(np.float32).max inputs = [True] * 6 # Output is capped the 2nd time it doubles. 
expected_outputs = [ init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float, max_float, max_float ] self._test_helper(inputs, expected_outputs, init_loss_scale) @test_util.run_in_graph_and_eager_modes def test_decrease_every_n_steps(self): inputs = [False] * 6 init_loss_scale = 1024 expected_outputs = [1024, 512, 512, 256, 256, 128] self._test_helper(inputs, expected_outputs, init_loss_scale) @test_util.run_in_graph_and_eager_modes def test_keep_decreasing_until_one(self): inputs = [False] * 10 init_loss_scale = 16 expected_outputs = [16, 8, 8, 4, 4, 2, 2, 1, 1, 1] self._test_helper(inputs, expected_outputs, init_loss_scale) @test_util.run_in_graph_and_eager_modes def test_incr_bad_step_clear_good_step(self): inputs = [True, True, True, False, True] expected_outputs = [1, 2, 2, 2, 2] self._test_helper(inputs, expected_outputs) @test_util.run_in_graph_and_eager_modes def test_incr_good_step_does_not_clear_bad_step(self): inputs = [True, True, True, False, True, False] expected_outputs = [1, 2, 2, 2, 2, 1] self._test_helper(inputs, expected_outputs) @test_util.run_in_graph_and_eager_modes def test_trigger_loss_scale_update_each_step(self): """Test when incr_every_n_step and decr_every_n_nan_or_inf is 1.""" init_loss_scale = 1 incr_every_n_step = 1 decr_every_n_nan_or_inf = 1 inputs = [True] * 3 + [False, True, True] expected_outputs = [2, 4, 8, 4, 8, 16] self._test_helper(inputs, expected_outputs, init_loss_scale, incr_every_n_step, decr_every_n_nan_or_inf) @test_util.run_in_graph_and_eager_modes def test_alternating_good_and_bad_gradients_trigger_each_step(self): init_loss_scale = 1 incr_every_n_step = 1 decr_every_n_nan_or_inf = 1 inputs = [True, False] * 4 + [True] expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2] self._test_helper(inputs, expected_outputs, init_loss_scale, incr_every_n_step, decr_every_n_nan_or_inf) @test_util.run_in_graph_and_eager_modes def test_alternating_good_and_bad_gradients_trigger_incr_every_2steps(self): init_loss_scale = 32 
incr_every_n_step = 2 decr_every_n_nan_or_inf = 1 inputs = [True, False] * 3 + [True] expected_outputs = [32, 16, 16, 8, 8, 4, 4] self._test_helper(inputs, expected_outputs, init_loss_scale, incr_every_n_step, decr_every_n_nan_or_inf) @test_util.run_in_graph_and_eager_modes def test_random_mix_good_and_bad_gradients(self): init_loss_scale = 4 inputs = [ False, False, True, True, True, False, True, False, True, True, True, False ] expected_outputs = [4, 2, 2, 4, 4, 4, 4, 2, 2, 4, 4, 4] self._test_helper(inputs, expected_outputs, init_loss_scale) if __name__ == "__main__": test.main()
apache-2.0
ebsaral/django-rest-framework
rest_framework/relations.py
39
18026
# coding: utf-8 from __future__ import unicode_literals from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist from django.core.urlresolvers import ( NoReverseMatch, Resolver404, get_script_prefix, resolve ) from django.db.models import Manager from django.db.models.query import QuerySet from django.utils import six from django.utils.encoding import smart_text from django.utils.six.moves.urllib import parse as urlparse from django.utils.translation import ugettext_lazy as _ from rest_framework.compat import OrderedDict from rest_framework.fields import ( Field, empty, get_attribute, is_simple_callable, iter_options ) from rest_framework.reverse import reverse from rest_framework.utils import html class Hyperlink(six.text_type): """ A string like object that additionally has an associated name. We use this for hyperlinked URLs that may render as a named link in some contexts, or render as a plain URL in others. """ def __new__(self, url, name): ret = six.text_type.__new__(self, url) ret.name = name return ret is_hyperlink = True class PKOnlyObject(object): """ This is a mock object, used for when we only need the pk of the object instance, but still want to return an object with a .pk attribute, in order to keep the same interface as a regular model instance. """ def __init__(self, pk): self.pk = pk # We assume that 'validators' are intended for the child serializer, # rather than the parent serializer. 
# Keyword arguments that are forwarded from a relation declared with
# `many=True` onto the wrapping ManyRelatedField (see RelatedField.many_init).
MANY_RELATION_KWARGS = (
    'read_only', 'write_only', 'required', 'default', 'initial', 'source',
    'label', 'help_text', 'style', 'error_messages', 'allow_empty'
)


class RelatedField(Field):
    """
    Base class for relational serializer fields.

    Subclasses either provide a `queryset` (writable relations) or set
    `read_only=True`; instantiating with `many=True` transparently returns
    a `ManyRelatedField` wrapping this field (see `__new__`/`many_init`).
    """
    queryset = None
    # Limit and message used by `iter_options` when rendering choices.
    html_cutoff = 1000
    html_cutoff_text = _('More than {count} items...')

    def __init__(self, **kwargs):
        self.queryset = kwargs.pop('queryset', self.queryset)
        self.html_cutoff = kwargs.pop('html_cutoff', self.html_cutoff)
        self.html_cutoff_text = kwargs.pop('html_cutoff_text', self.html_cutoff_text)
        # Exactly one of `queryset` / `read_only=True` must be supplied:
        # a writable relation needs a queryset to validate input against,
        # while a read-only relation must not be given one.
        assert self.queryset is not None or kwargs.get('read_only', None), (
            'Relational field must provide a `queryset` argument, '
            'or set read_only=`True`.'
        )
        assert not (self.queryset is not None and kwargs.get('read_only', None)), (
            'Relational fields should not provide a `queryset` argument, '
            'when setting read_only=`True`.'
        )
        # These kwargs are consumed by `__new__`/`ManyRelatedField`; discard
        # them here so `Field.__init__` does not see unexpected arguments.
        kwargs.pop('many', None)
        kwargs.pop('allow_empty', None)
        super(RelatedField, self).__init__(**kwargs)

    def __new__(cls, *args, **kwargs):
        # We override this method in order to automagically create
        # `ManyRelatedField` classes instead when `many=True` is set.
        if kwargs.pop('many', False):
            return cls.many_init(*args, **kwargs)
        return super(RelatedField, cls).__new__(cls, *args, **kwargs)

    @classmethod
    def many_init(cls, *args, **kwargs):
        """
        This method handles creating a parent `ManyRelatedField` instance
        when the `many=True` keyword argument is passed.

        Typically you won't need to override this method.

        Note that we're over-cautious in passing most arguments to both parent
        and child classes in order to try to cover the general case.
        If you're overriding this method you'll probably want something much
        simpler, eg:

        @classmethod
        def many_init(cls, *args, **kwargs):
            kwargs['child'] = cls()
            return CustomManyRelatedField(*args, **kwargs)
        """
        # The child relation receives *all* kwargs; the parent only receives
        # the subset whitelisted in MANY_RELATION_KWARGS.
        list_kwargs = {'child_relation': cls(*args, **kwargs)}
        for key in kwargs.keys():
            if key in MANY_RELATION_KWARGS:
                list_kwargs[key] = kwargs[key]
        return ManyRelatedField(**list_kwargs)

    def run_validation(self, data=empty):
        # We force empty strings to None values for relational fields.
        if data == '':
            data = None
        return super(RelatedField, self).run_validation(data)

    def get_queryset(self):
        queryset = self.queryset
        if isinstance(queryset, (QuerySet, Manager)):
            # Ensure queryset is re-evaluated whenever used.
            # Note that actually a `Manager` class may also be used as the
            # queryset argument. This occurs on ModelSerializer fields,
            # as it allows us to generate a more expressive 'repr' output
            # for the field.
            # Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'
            queryset = queryset.all()
        return queryset

    def use_pk_only_optimization(self):
        # Subclasses return True when only the pk of the related object is
        # needed for serialization (see PrimaryKeyRelatedField).
        return False

    def get_attribute(self, instance):
        if self.use_pk_only_optimization() and self.source_attrs:
            # Optimized case, return a mock object only containing the pk attribute.
            try:
                instance = get_attribute(instance, self.source_attrs[:-1])
                value = instance.serializable_value(self.source_attrs[-1])
                if is_simple_callable(value):
                    # Handle edge case where the relationship `source` argument
                    # points to a `get_relationship()` method on the model
                    value = value().pk
                return PKOnlyObject(pk=value)
            except AttributeError:
                pass

        # Standard case, return the object instance.
        return get_attribute(instance, self.source_attrs)

    @property
    def choices(self):
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}

        # Map each item's serialized representation to its display value.
        return OrderedDict([
            (
                six.text_type(self.to_representation(item)),
                self.display_value(item)
            )
            for item in queryset
        ])

    @property
    def grouped_choices(self):
        return self.choices

    def iter_options(self):
        return iter_options(
            self.grouped_choices,
            cutoff=self.html_cutoff,
            cutoff_text=self.html_cutoff_text
        )

    def display_value(self, instance):
        return six.text_type(instance)


class StringRelatedField(RelatedField):
    """
    A read only field that represents its targets using their
    plain string representation.
    """

    def __init__(self, **kwargs):
        kwargs['read_only'] = True
        super(StringRelatedField, self).__init__(**kwargs)

    def to_representation(self, value):
        return six.text_type(value)


class PrimaryKeyRelatedField(RelatedField):
    """
    A read-write field that represents the target of the relationship
    by its primary key.
    """
    default_error_messages = {
        'required': _('This field is required.'),
        'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
    }

    def __init__(self, **kwargs):
        # Optional field used to (de)serialize the pk value itself.
        self.pk_field = kwargs.pop('pk_field', None)
        super(PrimaryKeyRelatedField, self).__init__(**kwargs)

    def use_pk_only_optimization(self):
        return True

    def to_internal_value(self, data):
        if self.pk_field is not None:
            data = self.pk_field.to_internal_value(data)
        try:
            return self.get_queryset().get(pk=data)
        except ObjectDoesNotExist:
            self.fail('does_not_exist', pk_value=data)
        except (TypeError, ValueError):
            self.fail('incorrect_type', data_type=type(data).__name__)

    def to_representation(self, value):
        if self.pk_field is not None:
            return self.pk_field.to_representation(value.pk)
        return value.pk


class HyperlinkedRelatedField(RelatedField):
    """
    A read-write field that represents the target of the relationship
    by a hyperlink resolved through the URL conf.
    """
    lookup_field = 'pk'
    view_name = None

    default_error_messages = {
        'required': _('This field is required.'),
        'no_match': _('Invalid hyperlink - No URL match.'),
        'incorrect_match': _('Invalid hyperlink - Incorrect URL match.'),
        'does_not_exist': _('Invalid hyperlink - Object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected URL string, received {data_type}.'),
    }

    def __init__(self, view_name=None, **kwargs):
        if view_name is not None:
            self.view_name = view_name
        assert self.view_name is not None, 'The `view_name` argument is required.'
        self.lookup_field = kwargs.pop('lookup_field', self.lookup_field)
        # URL kwarg name defaults to the model lookup field name.
        self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field)
        self.format = kwargs.pop('format', None)

        # We include this simply for dependency injection in tests.
        # We can't add it as a class attributes or it would expect an
        # implicit `self` argument to be passed.
        self.reverse = reverse

        super(HyperlinkedRelatedField, self).__init__(**kwargs)

    def use_pk_only_optimization(self):
        return self.lookup_field == 'pk'

    def get_object(self, view_name, view_args, view_kwargs):
        """
        Return the object corresponding to a matched URL.

        Takes the matched URL conf arguments, and should return an
        object instance, or raise an `ObjectDoesNotExist` exception.
        """
        lookup_value = view_kwargs[self.lookup_url_kwarg]
        lookup_kwargs = {self.lookup_field: lookup_value}
        return self.get_queryset().get(**lookup_kwargs)

    def get_url(self, obj, view_name, request, format):
        """
        Given an object, return the URL that hyperlinks to the object.

        May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
        attributes are not configured to correctly match the URL conf.
        """
        # Unsaved objects will not yet have a valid URL.
        if hasattr(obj, 'pk') and obj.pk is None:
            return None

        lookup_value = getattr(obj, self.lookup_field)
        kwargs = {self.lookup_url_kwarg: lookup_value}
        return self.reverse(view_name, kwargs=kwargs, request=request, format=format)

    def get_name(self, obj):
        # Display name used when rendering the Hyperlink as a named link.
        return six.text_type(obj)

    def to_internal_value(self, data):
        request = self.context.get('request', None)
        try:
            http_prefix = data.startswith(('http:', 'https:'))
        except AttributeError:
            # Non-string input has no .startswith - report the wrong type.
            self.fail('incorrect_type', data_type=type(data).__name__)

        if http_prefix:
            # If needed convert absolute URLs to relative path
            data = urlparse.urlparse(data).path
            prefix = get_script_prefix()
            if data.startswith(prefix):
                data = '/' + data[len(prefix):]

        try:
            match = resolve(data)
        except Resolver404:
            self.fail('no_match')

        # Versioned APIs may remap the expected view name; fall back to the
        # plain view name when no versioning scheme is present on the request.
        try:
            expected_viewname = request.versioning_scheme.get_versioned_viewname(
                self.view_name, request
            )
        except AttributeError:
            expected_viewname = self.view_name

        if match.view_name != expected_viewname:
            self.fail('incorrect_match')

        try:
            return self.get_object(match.view_name, match.args, match.kwargs)
        except (ObjectDoesNotExist, TypeError, ValueError):
            self.fail('does_not_exist')

    def to_representation(self, value):
        request = self.context.get('request', None)
        format = self.context.get('format', None)

        assert request is not None, (
            "`%s` requires the request in the serializer"
            " context. Add `context={'request': request}` when instantiating "
            "the serializer." % self.__class__.__name__
        )

        # By default use whatever format is given for the current context
        # unless the target is a different type to the source.
        #
        # Eg. Consider a HyperlinkedIdentityField pointing from a json
        # representation to an html property of that representation...
        #
        # '/snippets/1/' should link to '/snippets/1/highlight/'
        # ...but...
        # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
        if format and self.format and self.format != format:
            format = self.format

        # Return the hyperlink, or error if incorrectly configured.
        try:
            url = self.get_url(value, self.view_name, request, format)
        except NoReverseMatch:
            msg = (
                'Could not resolve URL for hyperlinked relationship using '
                'view name "%s". You may have failed to include the related '
                'model in your API, or incorrectly configured the '
                '`lookup_field` attribute on this field.'
            )
            if value in ('', None):
                value_string = {'': 'the empty string', None: 'None'}[value]
                msg += (
                    " WARNING: The value of the field on the model instance "
                    "was %s, which may be why it didn't match any "
                    "entries in your URL conf." % value_string
                )
            raise ImproperlyConfigured(msg % self.view_name)

        if url is None:
            return None

        name = self.get_name(value)
        return Hyperlink(url, name)


class HyperlinkedIdentityField(HyperlinkedRelatedField):
    """
    A read-only field that represents the identity URL for an object, itself.

    This is in contrast to `HyperlinkedRelatedField` which represents the
    URL of relationships to other objects.
    """

    def __init__(self, view_name=None, **kwargs):
        assert view_name is not None, 'The `view_name` argument is required.'
        kwargs['read_only'] = True
        # source='*' passes the whole object instance to the field.
        kwargs['source'] = '*'
        super(HyperlinkedIdentityField, self).__init__(view_name, **kwargs)

    def use_pk_only_optimization(self):
        # We have the complete object instance already. We don't need
        # to run the 'only get the pk for this relationship' code.
        return False


class SlugRelatedField(RelatedField):
    """
    A read-write field the represents the target of the relationship
    by a unique 'slug' attribute.
    """
    default_error_messages = {
        'does_not_exist': _('Object with {slug_name}={value} does not exist.'),
        'invalid': _('Invalid value.'),
    }

    def __init__(self, slug_field=None, **kwargs):
        assert slug_field is not None, 'The `slug_field` argument is required.'
        self.slug_field = slug_field
        super(SlugRelatedField, self).__init__(**kwargs)

    def to_internal_value(self, data):
        # Look the related object up by its unique slug attribute.
        try:
            return self.get_queryset().get(**{self.slug_field: data})
        except ObjectDoesNotExist:
            self.fail('does_not_exist', slug_name=self.slug_field, value=smart_text(data))
        except (TypeError, ValueError):
            self.fail('invalid')

    def to_representation(self, obj):
        return getattr(obj, self.slug_field)


class ManyRelatedField(Field):
    """
    Relationships with `many=True` transparently get coerced into instead being
    a ManyRelatedField with a child relationship.

    The `ManyRelatedField` class is responsible for handling iterating through
    the values and passing each one to the child relationship.

    This class is treated as private API.
    You shouldn't generally need to be using this class directly yourself,
    and should instead simply set 'many=True' on the relationship.
    """
    initial = []
    default_empty_html = []
    default_error_messages = {
        'not_a_list': _('Expected a list of items but got type "{input_type}".'),
        'empty': _('This list may not be empty.')
    }
    # Limit and message used by `iter_options` when rendering choices.
    html_cutoff = 1000
    html_cutoff_text = _('More than {count} items...')

    def __init__(self, child_relation=None, *args, **kwargs):
        self.child_relation = child_relation
        self.allow_empty = kwargs.pop('allow_empty', True)
        self.html_cutoff = kwargs.pop('html_cutoff', self.html_cutoff)
        self.html_cutoff_text = kwargs.pop('html_cutoff_text', self.html_cutoff_text)
        assert child_relation is not None, '`child_relation` is a required argument.'
        super(ManyRelatedField, self).__init__(*args, **kwargs)
        # Bind the child relation so it can access this field as its parent.
        self.child_relation.bind(field_name='', parent=self)

    def get_value(self, dictionary):
        # We override the default field access in order to support
        # lists in HTML forms.
        if html.is_html_input(dictionary):
            # Don't return [] if the update is partial
            if self.field_name not in dictionary:
                if getattr(self.root, 'partial', False):
                    return empty
            return dictionary.getlist(self.field_name)

        return dictionary.get(self.field_name, empty)

    def to_internal_value(self, data):
        # Strings are iterable, but a string is not a valid list of items,
        # so reject them along with any non-iterable input.
        if isinstance(data, type('')) or not hasattr(data, '__iter__'):
            self.fail('not_a_list', input_type=type(data).__name__)
        if not self.allow_empty and len(data) == 0:
            self.fail('empty')

        # Delegate per-item validation to the child relation.
        return [
            self.child_relation.to_internal_value(item)
            for item in data
        ]

    def get_attribute(self, instance):
        # Can't have any relationships if not created
        if hasattr(instance, 'pk') and instance.pk is None:
            return []

        relationship = get_attribute(instance, self.source_attrs)
        # Managers/querysets expose .all(); plain iterables are used as-is.
        return relationship.all() if (hasattr(relationship, 'all')) else relationship

    def to_representation(self, iterable):
        return [
            self.child_relation.to_representation(value)
            for value in iterable
        ]

    @property
    def choices(self):
        return self.child_relation.choices

    @property
    def grouped_choices(self):
        return self.choices

    def iter_options(self):
        return iter_options(
            self.grouped_choices,
            cutoff=self.html_cutoff,
            cutoff_text=self.html_cutoff_text
        )
bsd-2-clause
beezee/GAE-Django-base-app
django/contrib/gis/geos/point.py
403
4253
from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi

# NOTE(review): this module is Python 2 era code - it uses `long`,
# `iterator.next()` and `xrange`, none of which exist on Python 3.


class Point(GEOSGeometry):
    # A Point holds 2 or 3 ordinates (X, Y and optionally Z).
    _minlength = 2
    _maxlength = 3

    def __init__(self, x, y=None, z=None, srid=None):
        """
        The Point object may be initialized with either a tuple, or individual
        parameters.

        For Example:
        >>> p = Point((5, 23)) # 2D point, passed in as a tuple
        >>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
        """
        if isinstance(x, (tuple, list)):
            # Here a tuple or list was passed in under the `x` parameter.
            ndim = len(x)
            coords = x
        elif isinstance(x, (int, float, long)) and isinstance(y, (int, float, long)):
            # Here X, Y, and (optionally) Z were passed in individually, as parameters.
            if isinstance(z, (int, float, long)):
                ndim = 3
                coords = [x, y, z]
            else:
                ndim = 2
                coords = [x, y]
        else:
            raise TypeError('Invalid parameters given for Point initialization.')

        point = self._create_point(ndim, coords)

        # Initializing using the address returned from the GEOS
        #  createPoint factory.
        super(Point, self).__init__(point, srid=srid)

    def _create_point(self, ndim, coords):
        """
        Create a coordinate sequence, set X, Y, [Z], and create point
        """
        if ndim < 2 or ndim > 3:
            raise TypeError('Invalid point dimension: %s' % str(ndim))

        # One coordinate of `ndim` dimensions in the sequence.
        cs = capi.create_cs(c_uint(1), c_uint(ndim))
        i = iter(coords)
        capi.cs_setx(cs, 0, i.next())
        capi.cs_sety(cs, 0, i.next())
        if ndim == 3:
            capi.cs_setz(cs, 0, i.next())

        return capi.create_point(cs)

    def _set_list(self, length, items):
        # Build a replacement point, then swap out the underlying GEOS
        # geometry pointer and refresh the cached coordinate sequence.
        ptr = self._create_point(length, items)
        if ptr:
            capi.destroy_geom(self.ptr)
            self._ptr = ptr
            self._set_cs()
        else:
            # can this happen?
            raise GEOSException('Geometry resulting from slice deletion was invalid.')

    def _set_single(self, index, value):
        self._cs.setOrdinate(index, 0, value)

    def __iter__(self):
        "Allows iteration over coordinates of this Point."
        for i in xrange(len(self)):
            yield self[i]

    def __len__(self):
        "Returns the number of dimensions for this Point (either 0, 2 or 3)."
        if self.empty:
            return 0
        if self.hasz:
            return 3
        else:
            return 2

    def _get_single_external(self, index):
        # Index-based ordinate access used by the sequence protocol.
        if index == 0:
            return self.x
        elif index == 1:
            return self.y
        elif index == 2:
            return self.z

    _get_single_internal = _get_single_external

    def get_x(self):
        "Returns the X component of the Point."
        return self._cs.getOrdinate(0, 0)

    def set_x(self, value):
        "Sets the X component of the Point."
        self._cs.setOrdinate(0, 0, value)

    def get_y(self):
        "Returns the Y component of the Point."
        return self._cs.getOrdinate(1, 0)

    def set_y(self, value):
        "Sets the Y component of the Point."
        self._cs.setOrdinate(1, 0, value)

    def get_z(self):
        "Returns the Z component of the Point."
        if self.hasz:
            return self._cs.getOrdinate(2, 0)
        else:
            return None

    def set_z(self, value):
        "Sets the Z component of the Point."
        if self.hasz:
            self._cs.setOrdinate(2, 0, value)
        else:
            raise GEOSException('Cannot set Z on 2D Point.')

    # X, Y, Z properties
    x = property(get_x, set_x)
    y = property(get_y, set_y)
    z = property(get_z, set_z)

    ### Tuple setting and retrieval routines. ###
    def get_coords(self):
        "Returns a tuple of the point."
        return self._cs.tuple

    def set_coords(self, tup):
        "Sets the coordinates of the point with the given tuple."
        self._cs[0] = tup

    # The tuple and coords properties
    tuple = property(get_coords, set_coords)
    coords = tuple
bsd-3-clause
PSUdaemon/trafficserver
tests/gold_tests/tls_hooks/tls_hooks9.test.py
4
3490
'''
Test one immediate cert callback
'''

#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

import os
import re

# NOTE(review): `Test`, `Condition`, `When` and `Testers` are globals
# injected by the autest framework; this script is not runnable standalone.

Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''

Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work"))
ts = Test.MakeATSProcess("ts", select_ports=False)
server = Test.MakeOriginServer("server")

request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# desired response from the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)

# Server certificate/key used for the TLS listener.
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")

ts.Variables.ssl_port = 4443
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'ssl_hook_test',
    'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
    # enable ssl port
    'proxy.config.http.server_ports': '{0}:ssl'.format(ts.Variables.ssl_port),
    'proxy.config.ssl.client.verify.server': 0,
    'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
})

ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)

# Proxy https://example.com:4443 through to the local origin server.
ts.Disk.remap_config.AddLine(
    'map https://example.com:4443 http://127.0.0.1:{0}'.format(server.Variables.Port)
)

# Build and load the hook-test plugin; '-i=1' selects the immediate cert
# callback variant under test (plugin-defined argument).
Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'ssl_hook_test.cc'), ts, '-i=1')

tr = Test.AddTestRun("Test one immediate cert hooks")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'curl -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/preaccept-1.gold"
ts.Streams.stderr = "gold/ts-cert-im-1.gold"
certstring0 = "Cert callback 0"
# Regex asserts the cert-callback log line appears exactly once in the
# whole traffic_server output (negative lookahead before and after).
ts.Streams.All = Testers.ContainsExpression(
    "\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z".format(certstring0),
    "Cert message appears only once", reflags=re.S | re.M)

tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
apache-2.0