Dataset columns:
  code        string   (3 – 1.05M chars)
  repo_name   string   (5 – 104 chars)
  path        string   (4 – 251 chars)
  language    string   (1 class)
  license     string   (15 classes)
  size        int64    (3 – 1.05M)
"""Describes Rotest's test running handler class.""" # pylint: disable=too-many-arguments from __future__ import absolute_import import sys from collections import defaultdict from future.builtins import range from rotest.common.utils import get_class_fields from rotest.core.runners.base_runner import BaseTestRunner from rotest.management.base_resource import ResourceRequest from rotest.core.runners.multiprocess.manager.runner import MultiprocessRunner from .case import TestCase from .flow import TestFlow from .block import TestBlock from .suite import TestSuite LAST_RUN_INDEX = -1 MINIMUM_TIMES_TO_RUN = 1 def get_runner(save_state=False, outputs=None, config=None, processes_number=None, run_delta=False, run_name=None, fail_fast=False, enable_debug=False, skip_init=None, stream=sys.stderr): """Return a test runner instance. Args: save_state (bool): determine if storing resources state is required. The behavior can be overridden using resource's save_state flag. outputs (list): list of the required output handlers' names. config (object): config object, will be transfered to each test. run_delta (bool): determine whether to run only tests that failed the last run (according to the results DB). processes_number (number): number of multiprocess runner's worker processes, None means that a regular runner will be used. run_name (str): name of the current run. fail_fast (bool): whether to stop the run on the first failure. enable_debug (bool): whether to enable entering ipdb debugging mode upon any exception in a test statement. skip_init (bool): True to skip resources initialize and validation. stream (file): output stream. Returns: runner. test runner instance. """ if processes_number is not None and processes_number > 0: if enable_debug: raise RuntimeError("Cannot debug in multiprocess") return MultiprocessRunner(stream=stream, config=config, outputs=outputs, run_name=run_name, failfast=fail_fast, enable_debug=False, skip_init=skip_init, run_delta=run_delta, save_state=save_state, workers_number=processes_number) return BaseTestRunner(stream=stream, config=config, outputs=outputs, run_name=run_name, failfast=fail_fast, run_delta=run_delta, skip_init=skip_init, save_state=save_state, enable_debug=enable_debug) def run(test_class, save_state=None, outputs=None, config=None, processes_number=None, delta_iterations=None, run_name=None, fail_fast=None, enable_debug=None, skip_init=None): """Return a test runner instance. Args: test_class (type): test class inheriting from :class:`rotest.core.case.TestCase` or :class:`rotest.core.suite.TestSuite` or :class:`rotest.core.flow.TestFlow` or :class:`rotest.core.block.TestBlock`. save_state (bool): determine if storing resources state is required. The behavior can be overridden using resource's save_state flag. outputs (list): list of the required output handlers' names. config (object): config object, will be transfered to each test. processes_number (number): number of multiprocess runner's worker processes, None means that a regular runner will be used. delta_iterations (number): determine whether to run only tests that failed the last run (according to the results DB), and how many times to do so. If delta_iterations = 0, the tests will run normally. If delta_iterations = 1, the 'delta-tests' will be run once. If delta_iterations > 1, the 'delta-tests' will run delta_iterations times. run_name (str): name of the current run. fail_fast (bool): whether to stop the run on the first failure. 
enable_debug (bool): whether to enable entering ipdb debugging mode upon any exception in a test statement. skip_init (bool): True to skip resources initialization and validation. Returns: list. list of RunData of the test runs. """ times_to_run = max(delta_iterations, MINIMUM_TIMES_TO_RUN) runs_data = [] test_runner = get_runner(config=config, outputs=outputs, run_name=run_name, fail_fast=fail_fast, skip_init=skip_init, save_state=save_state, enable_debug=enable_debug, run_delta=bool(delta_iterations), processes_number=processes_number) for _ in range(times_to_run): runs_data.append(test_runner.run(test_class)) return runs_data # Syntax symbol used to access the fields of Django models in querying SUBFIELD_ACCESSOR = '__' def parse_resource_identifiers(resources_str): """Update the tests' resources to ask for specific instances. Note: Requests which don't specify an instance will be handled automatically. Args: resources_str (str): string representation of the required resources. Example: input: 'resource_a=demo_res1,resource_b.ip_address=10.0.0.1' output: {'resource_a': {'name': 'demo_res1'}, 'resource_b': {'ip_address': '10.0.0.1'}} Returns: dict. the parsed resource identifiers. """ if resources_str is None or len(resources_str) == 0: return {} resource_requests = resources_str.split(',') requests_dict = defaultdict(dict) requests = (request.split('=', 1) for request in resource_requests) for resource_type, request_value in requests: request_fields = resource_type.split('.') if len(request_fields) == 1: requests_dict[resource_type]['name'] = request_value else: resource_name = request_fields[0] request_fields = request_fields[1:] resource_request = requests_dict[resource_name] resource_request[SUBFIELD_ACCESSOR.join(request_fields)] = \ request_value return requests_dict def _update_test_resources(test_element, identifiers_dict): """Update resource requests for a specific test. Args: test_element (type): target test class inheriting from :class:`rotest.core.abstract_test.AbstractTest`. identifiers_dict (dict): states the resources constraints in the form of <request name>: <resource constraints>. """ requests_found = set() for resource_request in test_element.resources: if resource_request.name in identifiers_dict: resource_request.kwargs.update( identifiers_dict[resource_request.name]) requests_found.add(resource_request.name) for (field_name, field) in get_class_fields(test_element, ResourceRequest): if field_name in identifiers_dict: field.kwargs.update(identifiers_dict[field_name]) requests_found.add(field_name) return requests_found def update_requests(test_element, identifiers_dict): """Recursively update resources requests. Update requests to use specific resources according to the identifiers. Args: test_element (type): target test class inheriting from :class:`rotest.core.case.TestCase or :class:`rotest.core.Suite.TestSuite or :class:`rotest.core.flow.TestFlow` or :class:`rotest.core.block.TestBlock`. identifiers_dict (dict): states the resources constraints in the form of <request name>: <resource constraints>. Returns: set. the 'specific' constraints that were found and fulfilled. 
""" requests_found = set() if issubclass(test_element, TestSuite): for component in test_element.components: requests_found.update( update_requests(component, identifiers_dict)) if issubclass(test_element, (TestCase, TestFlow, TestBlock)): requests_found.update( _update_test_resources(test_element, identifiers_dict)) if issubclass(test_element, TestFlow): for block_class in test_element.blocks: requests_found.update( _update_test_resources(block_class, identifiers_dict)) return requests_found def update_resource_requests(test_class, resource_identifiers): """Update the resources requests according to the parameters. Args: test_class (type): test class to update its resources, inheriting form :class:`rotest.core.case.TestCase or :class:`rotest.core.Suite.TestSuite or :class:`rotest.core.flow.TestFlow` or :class:`rotest.core.block.TestBlock`. resource_identifiers (dict): states the resources constraints in the form of <request name>: <resource constraints>. """ specifics_fulfilled = update_requests(test_class, resource_identifiers) if len(specifics_fulfilled) != len(resource_identifiers): unfound_requests = list(set(resource_identifiers.keys()) - specifics_fulfilled) raise ValueError("Tests do not contain requests of the names %s" % unfound_requests)
gregoil/rotest
src/rotest/core/runner.py
Python
mit
10,057
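A brief usage sketch for the runner module above. The parse example mirrors the docstring of parse_resource_identifiers; `MyTestCase` and the 'excel' output name are illustrative assumptions, not part of the file:

from rotest.core.runner import parse_resource_identifiers, run

# Parse CLI-style resource constraints into per-request kwargs.
identifiers = parse_resource_identifiers(
    'resource_a=demo_res1,resource_b.ip_address=10.0.0.1')
# -> {'resource_a': {'name': 'demo_res1'},
#     'resource_b': {'ip_address': '10.0.0.1'}}

# Run a hypothetical test class once with default options:
# runs_data = run(MyTestCase, outputs=['excel'], delta_iterations=0)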
from django import forms
from django.contrib.auth.forms import UserCreationForm

from .models import WhatTheUser


class RegistrationForm(UserCreationForm):
    #first_name = forms.CharField()
    #last_name = forms.CharField()
    #email = forms.EmailField()
    #password1 = forms.CharField()
    #password2 = forms.CharField()

    class Meta:
        model = WhatTheUser
        fields = ['email', 'first_name', 'last_name', ]
        #fields = [first_name, last_name, email, password1, password2]

    """def clean(self):
        cleaned_data = super(RegistrationForm, self).clean()
        if cleaned_data.get('password1') is None \
                or cleaned_data.get('password1') != cleaned_data.get('password2'):
            raise forms.ValidationError('Passwords must be provided and they must match.')
    """
mikeshultz/whatthediff
whatthediff/forms.py
Python
gpl-2.0
818
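For reference, a minimal active version of the commented-out clean() hook in the form above (a sketch only; note that UserCreationForm already enforces password matching on its own, so enabling this is optional):

    def clean(self):
        # Belongs inside RegistrationForm.
        cleaned_data = super(RegistrationForm, self).clean()
        password1 = cleaned_data.get('password1')
        password2 = cleaned_data.get('password2')
        if password1 is None or password1 != password2:
            raise forms.ValidationError(
                'Passwords must be provided and they must match.')
        return cleaned_data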
import signal

from twisted.internet import reactor, defer

from pyrake.core.engine import ExecutionEngine
from pyrake.resolver import CachingThreadedResolver
from pyrake.extension import ExtensionManager
from pyrake.signalmanager import SignalManager
from pyrake.utils.ossignal import install_shutdown_handlers, signal_names
from pyrake.utils.misc import load_object
from pyrake import log, signals


class Crawler(object):

    def __init__(self, settings):
        self.configured = False
        self.settings = settings
        self.signals = SignalManager(self)
        self.stats = load_object(settings['STATS_CLASS'])(self)
        self._start_requests = lambda: ()
        self._spider = None
        # TODO: move SpiderManager to CrawlerProcess
        spman_cls = load_object(self.settings['SPIDER_MANAGER_CLASS'])
        self.spiders = spman_cls.from_crawler(self)

    def install(self):
        # TODO: remove together with pyrake.project.crawler usage
        import pyrake.project
        assert not hasattr(pyrake.project, 'crawler'), "crawler already installed"
        pyrake.project.crawler = self

    def uninstall(self):
        # TODO: remove together with pyrake.project.crawler usage
        import pyrake.project
        assert hasattr(pyrake.project, 'crawler'), "crawler not installed"
        del pyrake.project.crawler

    def configure(self):
        if self.configured:
            return

        self.configured = True
        lf_cls = load_object(self.settings['LOG_FORMATTER'])
        self.logformatter = lf_cls.from_crawler(self)
        self.extensions = ExtensionManager.from_crawler(self)
        self.engine = ExecutionEngine(self, self._spider_closed)

    def crawl(self, spider, requests=None):
        assert self._spider is None, 'Spider already attached'
        self._spider = spider
        spider.set_crawler(self)
        if requests is None:
            self._start_requests = spider.start_requests
        else:
            self._start_requests = lambda: requests

    def _spider_closed(self, spider=None):
        if not self.engine.open_spiders:
            self.stop()

    @defer.inlineCallbacks
    def start(self):
        yield defer.maybeDeferred(self.configure)
        if self._spider:
            yield self.engine.open_spider(self._spider, self._start_requests())
        yield defer.maybeDeferred(self.engine.start)

    @defer.inlineCallbacks
    def stop(self):
        if self.configured and self.engine.running:
            yield defer.maybeDeferred(self.engine.stop)


class CrawlerProcess(object):
    """A class to run multiple pyrake crawlers in a process sequentially"""

    def __init__(self, settings):
        install_shutdown_handlers(self._signal_shutdown)
        self.settings = settings
        self.crawlers = {}
        self.stopping = False
        self._started = None

    def create_crawler(self, name=None):
        if name not in self.crawlers:
            self.crawlers[name] = Crawler(self.settings)

        return self.crawlers[name]

    def start(self):
        if self.start_crawling():
            self.start_reactor()

    @defer.inlineCallbacks
    def stop(self):
        self.stopping = True
        if self._active_crawler:
            yield self._active_crawler.stop()

    def _signal_shutdown(self, signum, _):
        install_shutdown_handlers(self._signal_kill)
        signame = signal_names[signum]
        log.msg(format="Received %(signame)s, shutting down gracefully. Send again to force",
                level=log.INFO, signame=signame)
        reactor.callFromThread(self.stop)

    def _signal_kill(self, signum, _):
        install_shutdown_handlers(signal.SIG_IGN)
        signame = signal_names[signum]
        log.msg(format='Received %(signame)s twice, forcing unclean shutdown',
                level=log.INFO, signame=signame)
        reactor.callFromThread(self._stop_reactor)

    # ----------------------------------------------------------------------#
    # The following public methods can't be considered stable and may change
    # at any moment.
    #
    # start_crawling and start_reactor are called from pyrake.commands.shell
    # They are split because the reactor is started on a different thread
    # than the IPython shell.
    #
    def start_crawling(self):
        log.pyrake_info(self.settings)
        return self._start_crawler() is not None

    def start_reactor(self):
        if self.settings.getbool('DNSCACHE_ENABLED'):
            reactor.installResolver(CachingThreadedResolver(reactor))
        reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
        reactor.run(installSignalHandlers=False)  # blocking call

    def _start_crawler(self):
        if not self.crawlers or self.stopping:
            return

        name, crawler = self.crawlers.popitem()
        self._active_crawler = crawler
        log_observer = log.start_from_crawler(crawler)
        crawler.configure()
        crawler.install()
        crawler.signals.connect(crawler.uninstall, signals.engine_stopped)
        if log_observer:
            crawler.signals.connect(log_observer.stop, signals.engine_stopped)
        crawler.signals.connect(self._check_done, signals.engine_stopped)
        crawler.start()
        return name, crawler

    def _check_done(self, **kwargs):
        if not self._start_crawler():
            self._stop_reactor()

    def _stop_reactor(self, _=None):
        try:
            reactor.stop()
        except RuntimeError:  # raised if already stopped or in shutdown stage
            pass
elkingtowa/pyrake
pyrake/crawler.py
Python
mit
5,564
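A hedged usage sketch for the CrawlerProcess/Crawler pair above. `settings` and `MySpider` are assumed to be provided by the surrounding project; this is an illustration of the call sequence, not documented pyrake API:

from pyrake.crawler import CrawlerProcess

process = CrawlerProcess(settings)
crawler = process.create_crawler()
crawler.crawl(MySpider())   # attach a spider instance to the crawler
process.start()             # runs the reactor; blocks until crawling finishes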
# -*- coding: utf-8 -*-
import pytest
import fauxfactory

from cfme import test_requirements
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.storage.volume import VolumeAllView
from cfme.utils.log import logger
from cfme.utils.update import update

pytestmark = [
    pytest.mark.tier(3),
    test_requirements.storage,
    pytest.mark.ignore_stream("upstream"),
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.provider([OpenStackProvider], scope='module'),
]

STORAGE_SIZE = 1


@pytest.fixture(scope='module')
def volume(appliance, provider):
    # create new volume
    volume_collection = appliance.collections.volumes
    manager_name = '{} Cinder Manager'.format(provider.name)
    volume = volume_collection.create(name=fauxfactory.gen_alpha(),
                                      storage_manager=manager_name,
                                      tenant=provider.data['provisioning']['cloud_tenant'],
                                      size=STORAGE_SIZE,
                                      provider=provider)
    yield volume

    try:
        if volume.exists:
            volume.delete(wait=True)
    except Exception as e:
        logger.warning("{name}:{msg} Volume deletion - skipping...".format(
            name=type(e).__name__, msg=str(e)))


def test_storage_volume_create_cancelled_validation(appliance, provider):
    """ Test storage volume create cancelled

    prerequisites:
        * Storage provider

    Steps:
        * Navigate to storage add volume page
        * Click Cancel button
        * Assert flash message
    """
    volume_collection = appliance.collections.volumes
    manager_name = '{} Cinder Manager'.format(provider.name)
    volume_collection.create(name=fauxfactory.gen_alpha(),
                             storage_manager=manager_name,
                             tenant=provider.data['provisioning']['cloud_tenant'],
                             size=STORAGE_SIZE,
                             provider=provider,
                             cancel=True)

    view = volume_collection.create_view(VolumeAllView)
    view.flash.assert_message('Add of new Cloud Volume was cancelled by the user')


@pytest.mark.tier(1)
def test_storage_volume_crud(appliance, provider):
    """ Test storage volume crud

    prerequisites:
        * Storage provider

    Steps:
        * Create new volume
        * Update volume
        * Delete volume
    """
    # create volume
    volume_collection = appliance.collections.volumes
    manager_name = '{} Cinder Manager'.format(provider.name)
    volume = volume_collection.create(name=fauxfactory.gen_alpha(),
                                      storage_manager=manager_name,
                                      tenant=provider.data['provisioning']['cloud_tenant'],
                                      size=STORAGE_SIZE,
                                      provider=provider)
    assert volume.exists

    # update volume
    old_name = volume.name
    new_name = fauxfactory.gen_alpha()
    with update(volume):
        volume.name = new_name

    with update(volume):
        volume.name = old_name

    # delete volume
    volume.delete(wait=True)
    assert not volume.exists


def test_storage_volume_edit_tag(volume):
    """ Test add and remove tag to storage volume

    prerequisites:
        * Storage Volume

    Steps:
        * Add tag and check
        * Remove tag and check
    """
    # add tag with category Department and tag communication
    added_tag = volume.add_tag()
    tag_available = volume.get_tags()
    assert tag_available[0].display_name == added_tag.display_name
    assert tag_available[0].category.display_name == added_tag.category.display_name

    # remove assigned tag
    volume.remove_tag(added_tag)
    tag_available = volume.get_tags()
    assert not tag_available
lkhomenk/integration_tests
cfme/tests/storage/test_volume.py
Python
gpl-2.0
3,854
import time
import os
import platform
import sys

if platform.system() != 'Linux':
    print 'Skipped for non Linux platform'
    sys.exit(0)

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *

# needed for the exception classes referenced in the retry loop below
import selenium.common.exceptions
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

chrome_options = Options()
testdir = os.path.dirname(os.path.abspath(__file__))
chrome_options.add_argument("nwapp=" + testdir)
node_module = os.path.join(testdir, "node_modules")

os.chdir(testdir)
install_native_modules()

driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'],
                          chrome_options=chrome_options)
driver.implicitly_wait(2)
try:
    switch_to_app(driver)
    print driver.current_url
    print 'waiting for crash'
    wait_for_element_id(driver, "iframe_a")
    driver.switch_to_frame("iframe_a")
    timeout = 10
    while timeout > 0:
        try:
            ret = driver.find_element_by_class_name('ytp-large-play-button')
            break
        except selenium.common.exceptions.NoSuchElementException:
            driver.switch_to_default_content()
            driver.switch_to_frame("iframe_a")
        except selenium.common.exceptions.WebDriverException:
            pass
        time.sleep(1)
        timeout = timeout - 1
    if timeout <= 0:
        raise Exception('Timeout when waiting for element ytp-large-play-button')
    assert(driver.find_element_by_class_name("ytp-large-play-button") is not None)
    print 'There is no crash'
finally:
    driver.quit()
nwjs/nw.js
test/sanity/issue5980-aws-sdk-embedded-youtobe-video-crash/test.py
Python
mit
1,545
from __future__ import absolute_import

import six
from functools import total_ordering

from sentry.utils.compat import implements_to_string


@implements_to_string
@total_ordering
class Problem(object):
    # Used for issues that may render the system inoperable or have effects
    # on data integrity (e.g. issues in the processing pipeline.)
    SEVERITY_CRITICAL = 'critical'

    # Used for issues that may cause the system to operate in a degraded (but
    # still operational) state, as well as configuration options that are set
    # in unexpected ways or deprecated in future versions.
    SEVERITY_WARNING = 'warning'

    # Mapping of severity level to a priority score, where the greater the
    # score, the more critical the issue. (The numeric values should only be
    # used for comparison purposes, and are subject to change as levels are
    # modified.)
    SEVERITY_LEVELS = {
        SEVERITY_CRITICAL: 2,
        SEVERITY_WARNING: 1,
    }

    def __init__(self, message, severity=SEVERITY_CRITICAL, url=None):
        assert severity in self.SEVERITY_LEVELS
        self.message = six.text_type(message)
        self.severity = severity
        self.url = url

    def __eq__(self, other):
        return self.SEVERITY_LEVELS[self.severity] == self.SEVERITY_LEVELS[other.severity]

    def __lt__(self, other):
        return self.SEVERITY_LEVELS[self.severity] < self.SEVERITY_LEVELS[other.severity]

    def __str__(self):
        return self.message

    @classmethod
    def threshold(cls, severity):
        threshold = cls.SEVERITY_LEVELS[severity]

        def predicate(problem):
            return cls.SEVERITY_LEVELS[problem.severity] >= threshold

        return predicate


class StatusCheck(object):
    def check(self):
        """
        Perform required checks and return a list of ``Problem`` instances.
        """
        raise NotImplementedError
JackDanger/sentry
src/sentry/status_checks/base.py
Python
bsd-3-clause
1,898
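A short usage sketch for the Problem class above, assuming the module is importable as shown by its path; the sample problem messages are made up:

from sentry.status_checks.base import Problem

problems = [
    Problem("Queue backlog growing", severity=Problem.SEVERITY_WARNING),
    Problem("Workers are offline", severity=Problem.SEVERITY_CRITICAL),
]

# threshold() builds a predicate matching problems at or above a severity.
is_critical = Problem.threshold(Problem.SEVERITY_CRITICAL)
critical = list(filter(is_critical, problems))

# total_ordering makes Problem instances comparable by severity score.
worst = max(problems)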
# Telnet login data
HOST = 'shpiler.net'
TELNET_PORT = 30004
PASSWORD = 'JebKerman'

# TCP params
SOCKET_CONTROL_LENGTH = 5
TCP_PORT = 42404

# workers collections
PARSERS = (
    'parsers.chat.ChatParser',
    'parsers.heartbeat.HeartBeatParser',
    # 'parsers.playerenterpf.PlayerEnterPFParser',
    # 'parsers.playerexitpf.PlayerExitPFParser',
    # 'parsers.playerconnected.PlayerConnectedParser',
    # 'parsers.playerdisconnected.PlayerDisonnectedParser',
    # 'parsers.loginask.LoginAskParser'
)

ASKERS = {
    'chat': 'core.askers.chat.ChatAsker',
    'sne': 'core.askers.saveandexit.SaveAndExitAsker',
    'universal': 'core.askers.universal.UniversalAsker'
}

SCHEDULE = (
    # {
    #     'command': 'chat test command',
    #     'name': 'test',
    #     'repeater': 'interval',
    #     'params': {
    #         'interval': 1,  # seconds
    #     }
    # },
)

# DATABASE = {
#     'type': 'mysql',  # available: sqlite, postgres
#     'name': 'egsdsm',  # database/schema name
#     'params': {  # parameters for connection
#         'host': 'localhost',
#         'user': 'egsdsm',
#         'password': '123QWErty'
#     }
# }

DATABASE = {
    'type': 'sqlite',
    'name': 'test.db',
    'params': {}
}

LOG_LEVEL_DEBUG = 3
LOG_LEVEL_WARN = 2
LOG_LEVEL_ERROR = 1

LOG_LEVELS = {
    'database': LOG_LEVEL_WARN,
    'file': False,
    'stdout': LOG_LEVEL_DEBUG,
}

LOG_FILE = 'dsm-{}.log'
Glucksistemi/EGS-DSM
core/settings.py
Python
bsd-3-clause
1,413
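The PARSERS and ASKERS entries above are dotted import paths. A minimal sketch of how such strings are commonly resolved at runtime; this is an illustration, not EGS-DSM's actual loader:

import importlib

def load_worker(dotted_path):
    """Resolve a dotted 'package.module.ClassName' string to the class object."""
    module_path, class_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# e.g. instantiate every configured parser:
# parsers = [load_worker(path)() for path in PARSERS]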
import argparse
import progressbar

from baselines.common.azure_utils import Container


def parse_args():
    parser = argparse.ArgumentParser("Download a pretrained model from Azure.")
    # Environment
    parser.add_argument("--model-dir", type=str, default=None,
                        help="save model in this directory.")
    parser.add_argument("--account-name", type=str, default="openaisciszymon",
                        help="account name for Azure Blob Storage")
    parser.add_argument("--account-key", type=str, default=None,
                        help="account key for Azure Blob Storage")
    parser.add_argument("--container", type=str, default="dqn-blogpost",
                        help="container name")
    parser.add_argument("--blob", type=str, default=None,
                        help="blob with the model")
    return parser.parse_args()


def main():
    args = parse_args()
    c = Container(account_name=args.account_name,
                  account_key=args.account_key,
                  container_name=args.container)

    if args.blob is None:
        print("Listing available models:")
        print()
        for blob in sorted(c.list(prefix="model-")):
            print(blob)
    else:
        print("Downloading {} to {}...".format(args.blob, args.model_dir))
        bar = None

        def callback(current, total):
            nonlocal bar
            if bar is None:
                bar = progressbar.ProgressBar(max_value=total)
            bar.update(current)

        assert c.exists(args.blob), "model {} does not exist".format(args.blob)
        assert args.model_dir is not None
        c.get(args.model_dir, args.blob, callback=callback)


if __name__ == '__main__':
    main()
ViktorM/baselines
baselines/deepq/experiments/atari/download_model.py
Python
mit
1,779
# Copyright 2019 Oihane Crucelaegui - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).

from odoo.tests import common


class ProductUsability(common.SavepointCase):

    @classmethod
    def setUpClass(cls):
        super(ProductUsability, cls).setUpClass()
        cls.category_model = cls.env['product.category']
        cls.parent_category = cls.category_model.create({
            'name': 'Test Parent Category',
        })
        cls.son_category = cls.category_model.create({
            'name': 'Test Son Category',
            'parent_id': cls.parent_category.id,
        })
oihane/odoo-addons
product_usability/tests/common.py
Python
agpl-3.0
615
from django import template
from django.contrib.contenttypes.models import ContentType

from molo.commenting.models import MoloComment

# NOTE: heavily inspired by
# https://github.com/santiagobasulto/django-comments-utils

register = template.Library()


def get_molo_comments(parser, token):
    """
    Get a limited set of comments for a given object.

    Defaults to a limit of 5. Setting the limit to -1 disables limiting.

    usage:

        {% get_molo_comments for object as variable_name %}
        {% get_molo_comments for object as variable_name limit amount %}
        {% get_molo_comments for object as variable_name limit amount child_limit amount %}  # noqa
    """
    keywords = token.contents.split()
    if len(keywords) != 5 and len(keywords) != 7 and len(keywords) != 9:
        raise template.TemplateSyntaxError(
            "'%s' tag takes exactly 2, 4 or 6 arguments" % (keywords[0],))
    if keywords[1] != 'for':
        raise template.TemplateSyntaxError(
            "first argument to '%s' tag must be 'for'" % (keywords[0],))
    if keywords[3] != 'as':
        raise template.TemplateSyntaxError(
            "third argument to '%s' tag must be 'as'" % (keywords[0],))
    if len(keywords) > 5 and keywords[5] != 'limit':
        raise template.TemplateSyntaxError(
            "fifth argument to '%s' tag must be 'limit'" % (keywords[0],))
    if len(keywords) == 7:
        return GetMoloCommentsNode(keywords[2], keywords[4], keywords[6])
    if len(keywords) > 7 and keywords[7] != 'child_limit':
        raise template.TemplateSyntaxError(
            "seventh argument to '%s' tag must be 'child_limit'"
            % (keywords[0],))
    if len(keywords) > 7:
        return GetMoloCommentsNode(keywords[2], keywords[4], keywords[6],
                                   keywords[8])
    return GetMoloCommentsNode(keywords[2], keywords[4])


class GetMoloCommentsNode(template.Node):

    def __init__(self, obj, variable_name, limit=5, child_limit=0):
        self.obj = obj
        self.variable_name = variable_name
        self.limit = int(limit)
        self.child_limit = int(child_limit) * (-1)

    def render(self, context):
        try:
            obj = template.Variable(self.obj).resolve(context)
        except template.VariableDoesNotExist:
            return ''
        qs = MoloComment.objects.for_model(obj.__class__).filter(
            object_pk=obj.pk, parent__isnull=True)
        if self.limit > 0:
            qs = qs[:self.limit]
        qs = [[c] + list(c.get_descendants())[self.child_limit:] for c in qs]
        context[self.variable_name] = qs
        return ''


def get_comments_content_object(parser, token):
    """
    Get the content object that a comment form refers to.

    usage:

        {% get_comments_content_object for form_object as variable_name %}
    """
    keywords = token.contents.split()
    if len(keywords) != 5:
        raise template.TemplateSyntaxError(
            "'%s' tag takes exactly 2 arguments" % (keywords[0],))
    if keywords[1] != 'for':
        raise template.TemplateSyntaxError(
            "first argument to '%s' tag must be 'for'" % (keywords[0],))
    if keywords[3] != 'as':
        raise template.TemplateSyntaxError(
            "third argument to '%s' tag must be 'as'" % (keywords[0],))
    return GetCommentsContentObject(keywords[2], keywords[4])


class GetCommentsContentObject(template.Node):

    def __init__(self, obj, variable_name):
        self.obj = obj
        self.variable_name = variable_name

    def render(self, context):
        try:
            form = template.Variable(self.obj).resolve(context)
        except template.VariableDoesNotExist:
            return ''
        app_label, model = form['content_type'].value().split('.')
        object_pk = form['object_pk'].value()
        content_type = ContentType.objects.get(app_label=app_label,
                                               model=model)
        context[self.variable_name] = content_type.get_object_for_this_type(
            pk=object_pk)
        return ''


def is_in_group(user, group_name):
    """
    Check if a user is in a group named ``group_name``.

    :param user User: The auth.User object
    :param group_name str: The name of the group
    :returns: bool
    """
    return user.groups.filter(name__exact=group_name).exists()


def is_in_admin_group(person):
    return (
        person.is_superuser or
        is_in_group(person, 'Moderator') or
        is_in_group(person, 'Comment Moderator') or
        is_in_group(person, 'Expert')
    )


def get_number_replies(comment):
    # TODO include reported/banned comments?
    return comment.get_children().count()


@register.inclusion_tag(
    'notifications/tags/notification_banner.html', takes_context=True)
def display_unread_notifications(context):
    user = context['request'].user
    number_unread_notifications = 0
    if user.is_authenticated():
        number_unread_notifications = len(user.notifications.unread())
    return {
        'user': user,
        'number_unread_notifications': number_unread_notifications,
    }


register.filter('is_in_group', is_in_group)
register.tag('get_molo_comments', get_molo_comments)
register.tag('get_comments_content_object', get_comments_content_object)
register.filter('get_number_replies', get_number_replies)
register.filter('is_in_admin_group', is_in_admin_group)
praekelt/molo.commenting
molo/commenting/templatetags/molo_commenting_tags.py
Python
bsd-2-clause
5,536
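A sketch of rendering the get_molo_comments tag above from Python, assuming a configured Django environment; `some_article` is a stand-in for any commentable model instance:

from django.template import Context, Template

tpl = Template(
    "{% load molo_commenting_tags %}"
    "{% get_molo_comments for article as comments limit 5 %}"
    "Threads: {{ comments|length }}"
)
html = tpl.render(Context({"article": some_article}))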
# -*- Mode:Python -*-

##########################################################################
#                                                                        #
# This file is part of AVANGO.                                           #
#                                                                        #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der       #
# angewandten Forschung (FhG), Munich, Germany.                          #
#                                                                        #
# AVANGO is free software: you can redistribute it and/or modify         #
# it under the terms of the GNU Lesser General Public License as         #
# published by the Free Software Foundation, version 3.                  #
#                                                                        #
# AVANGO is distributed in the hope that it will be useful,              #
# but WITHOUT ANY WARRANTY; without even the implied warranty of         #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           #
# GNU General Public License for more details.                           #
#                                                                        #
# You should have received a copy of the GNU Lesser General Public       #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
#                                                                        #
##########################################################################

import avango.display
import avango.osg  # used for make_trans_mat and Vec3 below


class AutoStereoDisplay(avango.display.Display):

    def __init__(self, inspector, options):
        super(AutoStereoDisplay, self).__init__("AutoStereoDisplay", inspector)

        window = self.make_window(0, 0, 1200, 1600, 0.33, 0.43, True)
        window.Name.value = ""
        self.add_window(window, avango.osg.make_trans_mat(0, 1.7, -0.7), 0)

        user = avango.display.nodes.User()
        user.Matrix.value = avango.osg.make_trans_mat(avango.osg.Vec3(0., 1.7, 0.))
        self.add_user(user)
jakobharlan/avango
avango-display/python/avango/display/setups/AutoStereoDisplay.py
Python
lgpl-3.0
2,055
import numpy as np
from scipy.stats import t

import manager.operations.method as method
from manager.operations.methodsteps.selectanalyte import SelectAnalyte
from manager.operations.methodsteps.selectrange import SelectRange
import manager.plotmanager as pm
import manager.models as mmodels
from manager.helpers.fithelpers import calc_normal_equation_fit
from manager.helpers.fithelpers import calc_sx0
from manager.helpers.fithelpers import significant_digit
from manager.exceptions import VoltPyFailed
from manager.operations.checks.check_analyte import check_analyte


class RegularStandardAddition(method.AnalysisMethod):
    can_be_applied = True
    _steps = [
        {
            'class': SelectAnalyte,
            'title': 'Select analyte',
            'desc': 'Select analyte for analysis.',
        },
        {
            'class': SelectRange,
            'title': 'Select range',
            'desc': 'Select range containing peak and press Forward, or press Back to change the analyte selection.',
        },
    ]
    checks = (check_analyte, )
    description = """
The standard addition method estimates the unknown concentration from the
linear regression fit
<i>peak&nbsp;current</i>&nbsp;=&nbsp;m&middot;<i>concentration</i>&nbsp;+&nbsp;b,
and the final result is given by result&nbsp;=&nbsp;b/m.
Peak current is calculated as the difference between the highest and the
lowest point in the given interval.
Because of the way sx0 (the standard deviation of x0) is calculated, the
point [0, y], i.e. the point for concentration 0, has to be included in the
dataset.
"""

    @classmethod
    def __str__(cls):
        return "Regular Standard Addition"

    def exportableData(self):
        if not self.model.completed:
            raise VoltPyFailed('Incomplete data')
        return np.matrix(self.model.custom_data['matrix']).T

    def apply(self, user, dataset):
        an = self.model.getCopy()
        an.dataset = dataset
        an.appliesModel = self.model
        an.save()
        self.model = an
        try:
            self.finalize(user)
        except Exception:
            an.deleted = True
            an.save()
            raise VoltPyFailed('Could not apply model.')
        return an.id

    def finalize(self, user):
        xvalues = []
        yvalues = []
        selRange = SelectRange.getData(self.model)
        try:
            analyte = self.model.analytes.get(
                id=int(SelectAnalyte.getData(self.model)))
        except Exception:
            raise VoltPyFailed('Wrong analyte selected.')
        self.model.custom_data['analyte'] = analyte.name
        unitsTrans = dict(mmodels.Dataset.CONC_UNITS)
        self.model.custom_data['units'] = \
            unitsTrans[self.model.dataset.analytes_conc_unit[analyte.id]]

        for cd in self.model.dataset.curves_data.all():
            startIndex = cd.xValue2Index(selRange[0])
            endIndex = cd.xValue2Index(selRange[1])
            if endIndex < startIndex:
                endIndex, startIndex = startIndex, endIndex
            yvalues.append(max(cd.yVector[startIndex:endIndex])
                           - min(cd.yVector[startIndex:endIndex]))
            xvalues.append(
                self.model.dataset.analytes_conc.get(analyte.id, {}).get(cd.id, 0))

        if 0 not in xvalues:
            raise VoltPyFailed(
                'The method requires signal value for concentration 0 %s'
                % self.model.custom_data['units'])

        data = [
            [float(b) for b in xvalues],
            [float(b) for b in yvalues]
        ]
        self.model.custom_data['matrix'] = data
        p = calc_normal_equation_fit(data[0], data[1])
        sx0, sslope, sintercept = calc_sx0(p['slope'], p['intercept'],
                                           data[0], data[1])
        if p['slope'] != 0:
            self.model.custom_data['fitEquation'] = p
            self.model.custom_data['slopeStdDev'] = sslope
            self.model.custom_data['interceptStdDev'] = sintercept
            self.model.custom_data['result'] = p['intercept'] / p['slope']
            self.model.custom_data['resultStdDev'] = sx0
            self.model.custom_data['corrCoef'] = np.corrcoef(data[0], data[1])[0, 1]
        else:
            self.model.custom_data['fitEquation'] = p
            self.model.custom_data['result'] = None
            self.model.custom_data['resultStdDev'] = None
            self.model.custom_data['corrCoef'] = None
        self.model.completed = True
        self.model.step = 0
        self.model.save()

    def getFinalContent(self, request, user):
        p = pm.PlotManager()
        data = p.analysisHelper(self.model.owner, self.model.id)
        for d in data:
            p.add(**d)
        p.plot_width = 500
        p.plot_height = 400
        p.sizing_mode = 'fixed'
        p.xlabel = 'c_({analyte}) / {units}'.format(
            analyte=self.model.custom_data['analyte'],
            units=self.model.custom_data['units']
        )
        p.ylabel = 'i / µA'
        scr, div, buttons = p.getEmbeded(request, user, 'analysis', self.model.id)
        n = len(self.model.custom_data['matrix'][0])
        talpha = t.ppf(0.975, n - 2)
        conf_interval = np.multiply(self.model.custom_data['resultStdDev'], talpha)
        sd = significant_digit(conf_interval, 2)
        slope_interval = np.multiply(self.model.custom_data['slopeStdDev'], talpha)
        slopesd = significant_digit(slope_interval, 2)
        int_interval = np.multiply(self.model.custom_data['interceptStdDev'], talpha)
        intsd = significant_digit(int_interval, 2)
        return {
            'head': scr,
            'body': ''.join([
                '<table><tr><td style="width: 500px; height: 400px">',
                div,
                """
                </td></tr><tr><td>
                Analyte: {an}<br />
                Equation: y = {slope}(&plusmn;{sci}) &middot; x + {int}(&plusmn;{ici})<br />
                r = {corrcoef}<br />
                Result: {res}&plusmn;{ci} {anu}
                </td></tr></table>
                """.format(
                    res='%.*f' % (sd, self.model.custom_data['result']),
                    ci='%.*f' % (sd, conf_interval),
                    corrcoef='%.4f' % self.model.custom_data['corrCoef'],
                    slope='%.*f' % (slopesd, self.model.custom_data['fitEquation']['slope']),
                    sci='%.*f' % (slopesd, slope_interval),
                    int='%.*f' % (intsd, self.model.custom_data['fitEquation']['intercept']),
                    ici='%.*f' % (intsd, int_interval),
                    an=self.model.custom_data['analyte'],
                    anu=self.model.custom_data['units']
                )
            ])
        }
efce/voltPy
manager/operations/methods/RegularStandardAddition.py
Python
gpl-3.0
6,619
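A self-contained numerical sketch of the standard-addition estimate computed above, with plain numpy least squares standing in for voltPy's calc_normal_equation_fit helper; the data points are made up:

import numpy as np

# Added-standard concentrations (including the required 0 point) and peak currents.
conc = np.array([0.0, 1.0, 2.0, 3.0])      # e.g. in uM
peak = np.array([0.40, 0.61, 0.79, 1.01])  # e.g. in uA

# Fit peak = m * conc + b; the unknown concentration is then b / m.
m, b = np.polyfit(conc, peak, 1)
result = b / m
print("estimated concentration: %.3f" % result)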
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet import mock import netaddr import testtools from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as plugin_utils from neutron.tests import base from oslo_log import log as logging class TestParseMappings(base.BaseTestCase): def parse(self, mapping_list, unique_values=True): return utils.parse_mappings(mapping_list, unique_values) def test_parse_mappings_fails_for_missing_separator(self): with testtools.ExpectedException(ValueError): self.parse(['key']) def test_parse_mappings_fails_for_missing_key(self): with testtools.ExpectedException(ValueError): self.parse([':val']) def test_parse_mappings_fails_for_missing_value(self): with testtools.ExpectedException(ValueError): self.parse(['key:']) def test_parse_mappings_fails_for_extra_separator(self): with testtools.ExpectedException(ValueError): self.parse(['key:val:junk']) def test_parse_mappings_fails_for_duplicate_key(self): with testtools.ExpectedException(ValueError): self.parse(['key:val1', 'key:val2']) def test_parse_mappings_fails_for_duplicate_value(self): with testtools.ExpectedException(ValueError): self.parse(['key1:val', 'key2:val']) def test_parse_mappings_succeeds_for_one_mapping(self): self.assertEqual(self.parse(['key:val']), {'key': 'val'}) def test_parse_mappings_succeeds_for_n_mappings(self): self.assertEqual(self.parse(['key1:val1', 'key2:val2']), {'key1': 'val1', 'key2': 'val2'}) def test_parse_mappings_succeeds_for_duplicate_value(self): self.assertEqual(self.parse(['key1:val', 'key2:val'], False), {'key1': 'val', 'key2': 'val'}) def test_parse_mappings_succeeds_for_no_mappings(self): self.assertEqual(self.parse(['']), {}) class TestParseTunnelRangesMixin(object): TUN_MIN = None TUN_MAX = None TYPE = None _err_prefix = "Invalid network Tunnel range: '%d:%d' - " _err_suffix = "%s is not a valid %s identifier" _err_range = "End of tunnel range is less than start of tunnel range" def _build_invalid_tunnel_range_msg(self, t_range_tuple, n): bad_id = t_range_tuple[n - 1] return (self._err_prefix % t_range_tuple) + (self._err_suffix % (bad_id, self.TYPE)) def _build_range_reversed_msg(self, t_range_tuple): return (self._err_prefix % t_range_tuple) + self._err_range def _verify_range(self, tunnel_range): return plugin_utils.verify_tunnel_range(tunnel_range, self.TYPE) def _check_range_valid_ranges(self, tunnel_range): self.assertIsNone(self._verify_range(tunnel_range)) def _check_range_invalid_ranges(self, bad_range, which): expected_msg = self._build_invalid_tunnel_range_msg(bad_range, which) err = self.assertRaises(n_exc.NetworkTunnelRangeError, self._verify_range, bad_range) self.assertEqual(expected_msg, str(err)) def _check_range_reversed(self, bad_range): err = self.assertRaises(n_exc.NetworkTunnelRangeError, self._verify_range, bad_range) expected_msg = 
self._build_range_reversed_msg(bad_range) self.assertEqual(expected_msg, str(err)) def test_range_tunnel_id_valid(self): self._check_range_valid_ranges((self.TUN_MIN, self.TUN_MAX)) def test_range_tunnel_id_invalid(self): self._check_range_invalid_ranges((-1, self.TUN_MAX), 1) self._check_range_invalid_ranges((self.TUN_MIN, self.TUN_MAX + 1), 2) self._check_range_invalid_ranges((self.TUN_MIN - 1, self.TUN_MAX + 1), 1) def test_range_tunnel_id_reversed(self): self._check_range_reversed((self.TUN_MAX, self.TUN_MIN)) class TestGreTunnelRangeVerifyValid(TestParseTunnelRangesMixin, base.BaseTestCase): TUN_MIN = p_const.MIN_GRE_ID TUN_MAX = p_const.MAX_GRE_ID TYPE = p_const.TYPE_GRE class TestVxlanTunnelRangeVerifyValid(TestParseTunnelRangesMixin, base.BaseTestCase): TUN_MIN = p_const.MIN_VXLAN_VNI TUN_MAX = p_const.MAX_VXLAN_VNI TYPE = p_const.TYPE_VXLAN class UtilTestParseVlanRanges(base.BaseTestCase): _err_prefix = "Invalid network VLAN range: '" _err_too_few = "' - 'need more than 2 values to unpack'" _err_too_many = "' - 'too many values to unpack'" _err_not_int = "' - 'invalid literal for int() with base 10: '%s''" _err_bad_vlan = "' - '%s is not a valid VLAN tag'" _err_range = "' - 'End of VLAN range is less than start of VLAN range'" def _range_too_few_err(self, nv_range): return self._err_prefix + nv_range + self._err_too_few def _range_too_many_err(self, nv_range): return self._err_prefix + nv_range + self._err_too_many def _vlan_not_int_err(self, nv_range, vlan): return self._err_prefix + nv_range + (self._err_not_int % vlan) def _nrange_invalid_vlan(self, nv_range, n): vlan = nv_range.split(':')[n] v_range = ':'.join(nv_range.split(':')[1:]) return self._err_prefix + v_range + (self._err_bad_vlan % vlan) def _vrange_invalid_vlan(self, v_range_tuple, n): vlan = v_range_tuple[n - 1] v_range_str = '%d:%d' % v_range_tuple return self._err_prefix + v_range_str + (self._err_bad_vlan % vlan) def _vrange_invalid(self, v_range_tuple): v_range_str = '%d:%d' % v_range_tuple return self._err_prefix + v_range_str + self._err_range class TestVlanNetworkNameValid(base.BaseTestCase): def parse_vlan_ranges(self, vlan_range): return plugin_utils.parse_network_vlan_ranges(vlan_range) def test_validate_provider_phynet_name_mixed(self): self.assertRaises(n_exc.PhysicalNetworkNameError, self.parse_vlan_ranges, ['', ':23:30', 'physnet1', 'tenant_net:100:200']) def test_validate_provider_phynet_name_bad(self): self.assertRaises(n_exc.PhysicalNetworkNameError, self.parse_vlan_ranges, [':1:34']) class TestVlanRangeVerifyValid(UtilTestParseVlanRanges): def verify_range(self, vlan_range): return plugin_utils.verify_vlan_range(vlan_range) def test_range_valid_ranges(self): self.assertIsNone(self.verify_range((1, 2))) self.assertIsNone(self.verify_range((1, 1999))) self.assertIsNone(self.verify_range((100, 100))) self.assertIsNone(self.verify_range((100, 200))) self.assertIsNone(self.verify_range((4001, 4094))) self.assertIsNone(self.verify_range((1, 4094))) def check_one_vlan_invalid(self, bad_range, which): expected_msg = self._vrange_invalid_vlan(bad_range, which) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.verify_range, bad_range) self.assertEqual(str(err), expected_msg) def test_range_first_vlan_invalid_negative(self): self.check_one_vlan_invalid((-1, 199), 1) def test_range_first_vlan_invalid_zero(self): self.check_one_vlan_invalid((0, 199), 1) def test_range_first_vlan_invalid_limit_plus_one(self): self.check_one_vlan_invalid((4095, 199), 1) def test_range_first_vlan_invalid_too_big(self): 
self.check_one_vlan_invalid((9999, 199), 1) def test_range_second_vlan_invalid_negative(self): self.check_one_vlan_invalid((299, -1), 2) def test_range_second_vlan_invalid_zero(self): self.check_one_vlan_invalid((299, 0), 2) def test_range_second_vlan_invalid_limit_plus_one(self): self.check_one_vlan_invalid((299, 4095), 2) def test_range_second_vlan_invalid_too_big(self): self.check_one_vlan_invalid((299, 9999), 2) def test_range_both_vlans_invalid_01(self): self.check_one_vlan_invalid((-1, 0), 1) def test_range_both_vlans_invalid_02(self): self.check_one_vlan_invalid((0, 4095), 1) def test_range_both_vlans_invalid_03(self): self.check_one_vlan_invalid((4095, 9999), 1) def test_range_both_vlans_invalid_04(self): self.check_one_vlan_invalid((9999, -1), 1) def test_range_reversed(self): bad_range = (95, 10) expected_msg = self._vrange_invalid(bad_range) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.verify_range, bad_range) self.assertEqual(str(err), expected_msg) class TestParseOneVlanRange(UtilTestParseVlanRanges): def parse_one(self, cfg_entry): return plugin_utils.parse_network_vlan_range(cfg_entry) def test_parse_one_net_no_vlan_range(self): config_str = "net1" expected_networks = ("net1", None) self.assertEqual(self.parse_one(config_str), expected_networks) def test_parse_one_net_and_vlan_range(self): config_str = "net1:100:199" expected_networks = ("net1", (100, 199)) self.assertEqual(self.parse_one(config_str), expected_networks) def test_parse_one_net_incomplete_range(self): config_str = "net1:100" expected_msg = self._range_too_few_err(config_str) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) self.assertEqual(str(err), expected_msg) def test_parse_one_net_range_too_many(self): config_str = "net1:100:150:200" expected_msg = self._range_too_many_err(config_str) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) self.assertEqual(str(err), expected_msg) def test_parse_one_net_vlan1_not_int(self): config_str = "net1:foo:199" expected_msg = self._vlan_not_int_err(config_str, 'foo') err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) self.assertEqual(str(err), expected_msg) def test_parse_one_net_vlan2_not_int(self): config_str = "net1:100:bar" expected_msg = self._vlan_not_int_err(config_str, 'bar') err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) self.assertEqual(str(err), expected_msg) def test_parse_one_net_and_max_range(self): config_str = "net1:1:4094" expected_networks = ("net1", (1, 4094)) self.assertEqual(self.parse_one(config_str), expected_networks) def test_parse_one_net_range_bad_vlan1(self): config_str = "net1:9000:150" expected_msg = self._nrange_invalid_vlan(config_str, 1) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) self.assertEqual(str(err), expected_msg) def test_parse_one_net_range_bad_vlan2(self): config_str = "net1:4000:4999" expected_msg = self._nrange_invalid_vlan(config_str, 2) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) self.assertEqual(str(err), expected_msg) class TestParseVlanRangeList(UtilTestParseVlanRanges): def parse_list(self, cfg_entries): return plugin_utils.parse_network_vlan_ranges(cfg_entries) def test_parse_list_one_net_no_vlan_range(self): config_list = ["net1"] expected_networks = {"net1": []} self.assertEqual(self.parse_list(config_list), expected_networks) def test_parse_list_one_net_vlan_range(self): config_list = ["net1:100:199"] 
expected_networks = {"net1": [(100, 199)]} self.assertEqual(self.parse_list(config_list), expected_networks) def test_parse_two_nets_no_vlan_range(self): config_list = ["net1", "net2"] expected_networks = {"net1": [], "net2": []} self.assertEqual(self.parse_list(config_list), expected_networks) def test_parse_two_nets_range_and_no_range(self): config_list = ["net1:100:199", "net2"] expected_networks = {"net1": [(100, 199)], "net2": []} self.assertEqual(self.parse_list(config_list), expected_networks) def test_parse_two_nets_no_range_and_range(self): config_list = ["net1", "net2:200:299"] expected_networks = {"net1": [], "net2": [(200, 299)]} self.assertEqual(self.parse_list(config_list), expected_networks) def test_parse_two_nets_bad_vlan_range1(self): config_list = ["net1:100", "net2:200:299"] expected_msg = self._range_too_few_err(config_list[0]) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_list, config_list) self.assertEqual(str(err), expected_msg) def test_parse_two_nets_vlan_not_int2(self): config_list = ["net1:100:199", "net2:200:0x200"] expected_msg = self._vlan_not_int_err(config_list[1], '0x200') err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_list, config_list) self.assertEqual(str(err), expected_msg) def test_parse_two_nets_and_append_1_2(self): config_list = ["net1:100:199", "net1:1000:1099", "net2:200:299"] expected_networks = {"net1": [(100, 199), (1000, 1099)], "net2": [(200, 299)]} self.assertEqual(self.parse_list(config_list), expected_networks) def test_parse_two_nets_and_append_1_3(self): config_list = ["net1:100:199", "net2:200:299", "net1:1000:1099"] expected_networks = {"net1": [(100, 199), (1000, 1099)], "net2": [(200, 299)]} self.assertEqual(self.parse_list(config_list), expected_networks) class TestDictUtils(base.BaseTestCase): def test_dict2str(self): dic = {"key1": "value1", "key2": "value2", "key3": "value3"} expected = "key1=value1,key2=value2,key3=value3" self.assertEqual(utils.dict2str(dic), expected) def test_str2dict(self): string = "key1=value1,key2=value2,key3=value3" expected = {"key1": "value1", "key2": "value2", "key3": "value3"} self.assertEqual(utils.str2dict(string), expected) def test_dict_str_conversion(self): dic = {"key1": "value1", "key2": "value2"} self.assertEqual(utils.str2dict(utils.dict2str(dic)), dic) def test_diff_list_of_dict(self): old_list = [{"key1": "value1"}, {"key2": "value2"}, {"key3": "value3"}] new_list = [{"key1": "value1"}, {"key2": "value2"}, {"key4": "value4"}] added, removed = utils.diff_list_of_dict(old_list, new_list) self.assertEqual(added, [dict(key4="value4")]) self.assertEqual(removed, [dict(key3="value3")]) class _CachingDecorator(object): def __init__(self): self.func_retval = 'bar' self._cache = mock.Mock() @utils.cache_method_results def func(self, *args, **kwargs): return self.func_retval class TestCachingDecorator(base.BaseTestCase): def setUp(self): super(TestCachingDecorator, self).setUp() self.decor = _CachingDecorator() self.func_name = '%(module)s._CachingDecorator.func' % { 'module': self.__module__ } self.not_cached = self.decor.func.func.im_self._not_cached def test_cache_miss(self): expected_key = (self.func_name, 1, 2, ('foo', 'bar')) args = (1, 2) kwargs = {'foo': 'bar'} self.decor._cache.get.return_value = self.not_cached retval = self.decor.func(*args, **kwargs) self.decor._cache.set.assert_called_once_with( expected_key, self.decor.func_retval, None) self.assertEqual(self.decor.func_retval, retval) def test_cache_hit(self): expected_key = (self.func_name, 1, 2, 
('foo', 'bar')) args = (1, 2) kwargs = {'foo': 'bar'} retval = self.decor.func(*args, **kwargs) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor._cache.get.return_value, retval) self.decor._cache.get.assert_called_once_with(expected_key, self.not_cached) def test_get_unhashable(self): expected_key = (self.func_name, [1], 2) self.decor._cache.get.side_effect = TypeError retval = self.decor.func([1], 2) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor.func_retval, retval) self.decor._cache.get.assert_called_once_with(expected_key, self.not_cached) def test_missing_cache(self): delattr(self.decor, '_cache') self.assertRaises(NotImplementedError, self.decor.func, (1, 2)) def test_no_cache(self): self.decor._cache = False retval = self.decor.func((1, 2)) self.assertEqual(self.decor.func_retval, retval) class TestDict2Tuples(base.BaseTestCase): def test_dict(self): input_dict = {'foo': 'bar', 42: 'baz', 'aaa': 'zzz'} expected = ((42, 'baz'), ('aaa', 'zzz'), ('foo', 'bar')) output_tuple = utils.dict2tuple(input_dict) self.assertEqual(expected, output_tuple) class TestExceptionLogger(base.BaseTestCase): def test_normal_call(self): result = "Result" @utils.exception_logger() def func(): return result self.assertEqual(result, func()) def test_raise(self): result = "Result" @utils.exception_logger() def func(): raise RuntimeError(result) self.assertRaises(RuntimeError, func) def test_spawn_normal(self): result = "Result" logger = mock.Mock() @utils.exception_logger(logger=logger) def func(): return result gt = eventlet.spawn(func) self.assertEqual(result, gt.wait()) self.assertFalse(logger.called) def test_spawn_raise(self): result = "Result" logger = mock.Mock() @utils.exception_logger(logger=logger) def func(): raise RuntimeError(result) gt = eventlet.spawn(func) self.assertRaises(RuntimeError, gt.wait) self.assertTrue(logger.called) def test_pool_spawn_normal(self): logger = mock.Mock() calls = mock.Mock() @utils.exception_logger(logger=logger) def func(i): calls(i) pool = eventlet.GreenPool(4) for i in range(0, 4): pool.spawn(func, i) pool.waitall() calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(2), mock.call(3)], any_order=True) self.assertFalse(logger.called) def test_pool_spawn_raise(self): logger = mock.Mock() calls = mock.Mock() @utils.exception_logger(logger=logger) def func(i): if i == 2: raise RuntimeError(2) else: calls(i) pool = eventlet.GreenPool(4) for i in range(0, 4): pool.spawn(func, i) pool.waitall() calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(3)], any_order=True) self.assertTrue(logger.called) class TestDvrServices(base.BaseTestCase): def _test_is_dvr_serviced(self, device_owner, expected): self.assertEqual(expected, utils.is_dvr_serviced(device_owner)) def test_is_dvr_serviced_with_lb_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_LOADBALANCER, True) def test_is_dvr_serviced_with_dhcp_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_DHCP, True) def test_is_dvr_serviced_with_vm_port(self): self._test_is_dvr_serviced('compute:', True) class TestIpToCidr(base.BaseTestCase): def test_ip_to_cidr_ipv4_default(self): self.assertEqual('15.1.2.3/32', utils.ip_to_cidr('15.1.2.3')) def test_ip_to_cidr_ipv4_prefix(self): self.assertEqual('15.1.2.3/24', utils.ip_to_cidr('15.1.2.3', 24)) def test_ip_to_cidr_ipv4_netaddr(self): ip_address = netaddr.IPAddress('15.1.2.3') self.assertEqual('15.1.2.3/32', utils.ip_to_cidr(ip_address)) def test_ip_to_cidr_ipv4_bad_prefix(self): 
self.assertRaises(netaddr.core.AddrFormatError, utils.ip_to_cidr, '15.1.2.3', 33) def test_ip_to_cidr_ipv6_default(self): self.assertEqual('::1/128', utils.ip_to_cidr('::1')) def test_ip_to_cidr_ipv6_prefix(self): self.assertEqual('::1/64', utils.ip_to_cidr('::1', 64)) def test_ip_to_cidr_ipv6_bad_prefix(self): self.assertRaises(netaddr.core.AddrFormatError, utils.ip_to_cidr, '2000::1', 129) class TestCidrIsHost(base.BaseTestCase): def test_is_cidr_host_ipv4(self): self.assertTrue(utils.is_cidr_host('15.1.2.3/32')) def test_is_cidr_host_ipv4_not_cidr(self): self.assertRaises(ValueError, utils.is_cidr_host, '15.1.2.3') def test_is_cidr_host_ipv6(self): self.assertTrue(utils.is_cidr_host('2000::1/128')) def test_is_cidr_host_ipv6_netaddr(self): net = netaddr.IPNetwork("2000::1") self.assertTrue(utils.is_cidr_host(net)) def test_is_cidr_host_ipv6_32(self): self.assertFalse(utils.is_cidr_host('2000::1/32')) def test_is_cidr_host_ipv6_not_cidr(self): self.assertRaises(ValueError, utils.is_cidr_host, '2000::1') def test_is_cidr_host_ipv6_not_cidr_netaddr(self): ip_address = netaddr.IPAddress("2000::3") self.assertRaises(ValueError, utils.is_cidr_host, ip_address) class TestIpVersionFromInt(base.BaseTestCase): def test_ip_version_from_int_ipv4(self): self.assertEqual(utils.ip_version_from_int(4), constants.IPv4) def test_ip_version_from_int_ipv6(self): self.assertEqual(utils.ip_version_from_int(6), constants.IPv6) def test_ip_version_from_int_illegal_int(self): self.assertRaises(ValueError, utils.ip_version_from_int, 8) class TestDelayedStringRenderer(base.BaseTestCase): def test_call_deferred_until_str(self): my_func = mock.MagicMock(return_value='Brie cheese!') delayed = utils.DelayedStringRenderer(my_func, 1, 2, key_arg=44) self.assertFalse(my_func.called) string = "Type: %s" % delayed my_func.assert_called_once_with(1, 2, key_arg=44) self.assertEqual("Type: Brie cheese!", string) def test_not_called_with_low_log_level(self): LOG = logging.getLogger(__name__) # make sure we return logging to previous level current_log_level = LOG.logger.getEffectiveLevel() self.addCleanup(LOG.logger.setLevel, current_log_level) my_func = mock.MagicMock() delayed = utils.DelayedStringRenderer(my_func) # set to warning so we shouldn't be logging debug messages LOG.logger.setLevel(logging.logging.WARNING) LOG.debug("Hello %s", delayed) self.assertFalse(my_func.called) # but it should be called with the debug level LOG.logger.setLevel(logging.logging.DEBUG) LOG.debug("Hello %s", delayed) self.assertTrue(my_func.called)
JioCloud/neutron
neutron/tests/unit/common/test_utils.py
Python
apache-2.0
25,134
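The TestParseMappings cases in the file above pin down the expected semantics of neutron's utils.parse_mappings. A minimal standalone reimplementation of those semantics for illustration (not neutron's actual code; error message wording is assumed):

def parse_mappings(mapping_list, unique_values=True):
    """Parse 'key:value' strings into a dict, mirroring the tested behavior."""
    mappings = {}
    for mapping in mapping_list:
        if mapping == '':
            continue
        split_result = mapping.split(':')
        if len(split_result) != 2:
            raise ValueError("Invalid mapping: '%s'" % mapping)
        key, value = split_result
        if not key:
            raise ValueError("Missing key in mapping: '%s'" % mapping)
        if not value:
            raise ValueError("Missing value in mapping: '%s'" % mapping)
        if key in mappings:
            raise ValueError("Key %s in mapping: '%s' not unique" % (key, mapping))
        if unique_values and value in mappings.values():
            raise ValueError("Value %s in mapping: '%s' not unique" % (value, mapping))
        mappings[key] = value
    return mappings

# parse_mappings(['key1:val1', 'key2:val2']) -> {'key1': 'val1', 'key2': 'val2'}
# parse_mappings(['key1:val', 'key2:val'], unique_values=False) also succeeds.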
from __future__ import unicode_literals from datetime import datetime from operator import attrgetter from django.test import TestCase from .models import (Person, Group, Membership, CustomMembership, PersonSelfRefM2M, Friendship, Event, Invitation, Employee, Relationship, Ingredient, Recipe, RecipeIngredient) class M2mThroughTests(TestCase): def setUp(self): self.bob = Person.objects.create(name='Bob') self.jim = Person.objects.create(name='Jim') self.jane = Person.objects.create(name='Jane') self.rock = Group.objects.create(name='Rock') self.roll = Group.objects.create(name='Roll') def test_retrieve_intermediate_items(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) expected = ['Jane', 'Jim'] self.assertQuerysetEqual( self.rock.members.all(), expected, attrgetter("name") ) def test_get_on_intermediate_model(self): Membership.objects.create(person=self.jane, group=self.rock) queryset = Membership.objects.get(person=self.jane, group=self.rock) self.assertEqual( repr(queryset), '<Membership: Jane is a member of Rock>' ) def test_filter_on_intermediate_model(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) queryset = Membership.objects.filter(group=self.rock) expected = [ '<Membership: Jim is a member of Rock>', '<Membership: Jane is a member of Rock>', ] self.assertQuerysetEqual( queryset, expected ) def test_cannot_use_add_on_m2m_with_intermediary_model(self): msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model' with self.assertRaisesMessage(AttributeError, msg): self.rock.members.add(self.bob) self.assertQuerysetEqual( self.rock.members.all(), [] ) def test_cannot_use_create_on_m2m_with_intermediary_model(self): msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model' with self.assertRaisesMessage(AttributeError, msg): self.rock.members.create(name='Annie') self.assertQuerysetEqual( self.rock.members.all(), [] ) def test_cannot_use_remove_on_m2m_with_intermediary_model(self): Membership.objects.create(person=self.jim, group=self.rock) msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model' with self.assertRaisesMessage(AttributeError, msg): self.rock.members.remove(self.jim) self.assertQuerysetEqual( self.rock.members.all(), ['Jim', ], attrgetter("name") ) def test_cannot_use_setattr_on_m2m_with_intermediary_model(self): msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model' members = list(Person.objects.filter(name__in=['Bob', 'Jim'])) with self.assertRaisesMessage(AttributeError, msg): setattr(self.rock, 'members', members) self.assertQuerysetEqual( self.rock.members.all(), [] ) def test_clear_removes_all_the_m2m_relationships(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) self.rock.members.clear() self.assertQuerysetEqual( self.rock.members.all(), [] ) def test_retrieve_reverse_intermediate_items(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jim, group=self.roll) expected = ['Rock', 'Roll'] self.assertQuerysetEqual( self.jim.group_set.all(), expected, attrgetter("name") ) def test_cannot_use_add_on_reverse_m2m_with_intermediary_model(self): msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model' with self.assertRaisesMessage(AttributeError, msg): 
self.bob.group_set.add(self.bob) self.assertQuerysetEqual( self.bob.group_set.all(), [] ) def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self): msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model' with self.assertRaisesMessage(AttributeError, msg): self.bob.group_set.create(name='Funk') self.assertQuerysetEqual( self.bob.group_set.all(), [] ) def test_cannot_use_remove_on_reverse_m2m_with_intermediary_model(self): Membership.objects.create(person=self.bob, group=self.rock) msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model' with self.assertRaisesMessage(AttributeError, msg): self.bob.group_set.remove(self.rock) self.assertQuerysetEqual( self.bob.group_set.all(), ['Rock', ], attrgetter('name') ) def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self): msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model' members = list(Group.objects.filter(name__in=['Rock', 'Roll'])) with self.assertRaisesMessage(AttributeError, msg): setattr(self.bob, 'group_set', members) self.assertQuerysetEqual( self.bob.group_set.all(), [] ) def test_clear_on_reverse_removes_all_the_m2m_relationships(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jim, group=self.roll) self.jim.group_set.clear() self.assertQuerysetEqual( self.jim.group_set.all(), [] ) def test_query_model_by_attribute_name_of_related_model(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) Membership.objects.create(person=self.bob, group=self.roll) Membership.objects.create(person=self.jim, group=self.roll) Membership.objects.create(person=self.jane, group=self.roll) self.assertQuerysetEqual( Group.objects.filter(members__name='Bob'), ['Roll', ], attrgetter("name") ) def test_query_first_model_by_intermediate_model_attribute(self): Membership.objects.create( person=self.jane, group=self.roll, invite_reason="She was just awesome." ) Membership.objects.create( person=self.jim, group=self.roll, invite_reason="He is good." ) Membership.objects.create(person=self.bob, group=self.roll) qs = Group.objects.filter( membership__invite_reason="She was just awesome." ) self.assertQuerysetEqual( qs, ['Roll'], attrgetter("name") ) def test_query_second_model_by_intermediate_model_attribute(self): Membership.objects.create( person=self.jane, group=self.roll, invite_reason="She was just awesome." ) Membership.objects.create( person=self.jim, group=self.roll, invite_reason="He is good." ) Membership.objects.create(person=self.bob, group=self.roll) qs = Person.objects.filter( membership__invite_reason="She was just awesome." 
) self.assertQuerysetEqual( qs, ['Jane'], attrgetter("name") ) def test_query_model_by_related_model_name(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) Membership.objects.create(person=self.bob, group=self.roll) Membership.objects.create(person=self.jim, group=self.roll) Membership.objects.create(person=self.jane, group=self.roll) self.assertQuerysetEqual( Person.objects.filter(group__name="Rock"), ['Jane', 'Jim'], attrgetter("name") ) def test_query_model_by_custom_related_name(self): CustomMembership.objects.create(person=self.bob, group=self.rock) CustomMembership.objects.create(person=self.jim, group=self.rock) self.assertQuerysetEqual( Person.objects.filter(custom__name="Rock"), ['Bob', 'Jim'], attrgetter("name") ) def test_query_model_by_intermediate_can_return_non_unique_queryset(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create( person=self.jane, group=self.rock, date_joined=datetime(2006, 1, 1) ) Membership.objects.create( person=self.bob, group=self.roll, date_joined=datetime(2004, 1, 1)) Membership.objects.create(person=self.jim, group=self.roll) Membership.objects.create( person=self.jane, group=self.roll, date_joined=datetime(2004, 1, 1)) qs = Person.objects.filter( membership__date_joined__gt=datetime(2004, 1, 1) ) self.assertQuerysetEqual( qs, ['Jane', 'Jim', 'Jim'], attrgetter("name") ) def test_custom_related_name_forward_empty_qs(self): self.assertQuerysetEqual( self.rock.custom_members.all(), [] ) def test_custom_related_name_reverse_empty_qs(self): self.assertQuerysetEqual( self.bob.custom.all(), [] ) def test_custom_related_name_forward_non_empty_qs(self): CustomMembership.objects.create(person=self.bob, group=self.rock) CustomMembership.objects.create(person=self.jim, group=self.rock) self.assertQuerysetEqual( self.rock.custom_members.all(), ['Bob', 'Jim'], attrgetter("name") ) def test_custom_related_name_reverse_non_empty_qs(self): CustomMembership.objects.create(person=self.bob, group=self.rock) CustomMembership.objects.create(person=self.jim, group=self.rock) self.assertQuerysetEqual( self.bob.custom.all(), ['Rock'], attrgetter("name") ) def test_custom_related_name_doesnt_conflict_with_fky_related_name(self): CustomMembership.objects.create(person=self.bob, group=self.rock) self.assertQuerysetEqual( self.bob.custom_person_related_name.all(), ['<CustomMembership: Bob is a member of Rock>'] ) def test_through_fields(self): """ Tests that relations with intermediary tables with multiple FKs to the M2M's ``to`` model are possible. 
""" event = Event.objects.create(title='Rockwhale 2014') Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jim) Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jane) self.assertQuerysetEqual( event.invitees.all(), ['Jane', 'Jim'], attrgetter('name') ) class M2mThroughReferentialTests(TestCase): def test_self_referential_empty_qs(self): tony = PersonSelfRefM2M.objects.create(name="Tony") self.assertQuerysetEqual( tony.friends.all(), [] ) def test_self_referential_non_symmentrical_first_side(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) self.assertQuerysetEqual( tony.friends.all(), ['Chris'], attrgetter("name") ) def test_self_referential_non_symmentrical_second_side(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) self.assertQuerysetEqual( chris.friends.all(), [] ) def test_self_referential_non_symmentrical_clear_first_side(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) chris.friends.clear() self.assertQuerysetEqual( chris.friends.all(), [] ) # Since this isn't a symmetrical relation, Tony's friend link still exists. self.assertQuerysetEqual( tony.friends.all(), ['Chris'], attrgetter("name") ) def test_self_referential_symmentrical(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) Friendship.objects.create( first=chris, second=tony, date_friended=datetime.now() ) self.assertQuerysetEqual( tony.friends.all(), ['Chris'], attrgetter("name") ) self.assertQuerysetEqual( chris.friends.all(), ['Tony'], attrgetter("name") ) def test_through_fields_self_referential(self): john = Employee.objects.create(name='john') peter = Employee.objects.create(name='peter') mary = Employee.objects.create(name='mary') harry = Employee.objects.create(name='harry') Relationship.objects.create(source=john, target=peter, another=None) Relationship.objects.create(source=john, target=mary, another=None) Relationship.objects.create(source=john, target=harry, another=peter) self.assertQuerysetEqual( john.subordinates.all(), ['peter', 'mary', 'harry'], attrgetter('name') ) class M2mThroughToFieldsTests(TestCase): def setUp(self): self.pea = Ingredient.objects.create(iname='pea') self.potato = Ingredient.objects.create(iname='potato') self.tomato = Ingredient.objects.create(iname='tomato') self.curry = Recipe.objects.create(rname='curry') RecipeIngredient.objects.create(recipe=self.curry, ingredient=self.potato) RecipeIngredient.objects.create(recipe=self.curry, ingredient=self.pea) RecipeIngredient.objects.create(recipe=self.curry, ingredient=self.tomato) def test_retrieval(self): # Forward retrieval self.assertQuerysetEqual( self.curry.ingredients.all(), [self.pea, self.potato, self.tomato], lambda x: x ) # Backward retrieval self.assertEqual(self.tomato.recipes.get(), self.curry) def test_choices(self): field = Recipe._meta.get_field('ingredients') self.assertEqual( [choice[0] for choice in field.get_choices(include_blank=False)], ['pea', 'potato', 'tomato'] )
iambibhas/django
tests/m2m_through/tests.py
Python
bsd-3-clause
15,767
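# A hedged sketch, not the suite's actual models.py: minimal model definitions the
# m2m_through tests above appear to assume. Field names and related_name values are
# inferred from the queries in the tests (name, invite_reason, date_joined,
# custom_members, custom_person_related_name); treat anything beyond that as an
# assumption. on_delete is spelled out so the sketch runs on current Django.
from datetime import datetime

from django.db import models


class Person(models.Model):
    name = models.CharField(max_length=128)


class Group(models.Model):
    name = models.CharField(max_length=128)
    members = models.ManyToManyField(Person, through='Membership')
    custom_members = models.ManyToManyField(
        Person, through='CustomMembership', related_name='custom')


class Membership(models.Model):
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
    date_joined = models.DateTimeField(default=datetime.now)
    invite_reason = models.CharField(max_length=64, null=True)

    def __str__(self):
        # Matches the repr asserted in test_get_on_intermediate_model above.
        return "%s is a member of %s" % (self.person.name, self.group.name)


class CustomMembership(models.Model):
    person = models.ForeignKey(
        Person, on_delete=models.CASCADE,
        related_name='custom_person_related_name')
    group = models.ForeignKey(Group, on_delete=models.CASCADE)

    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)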
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Search Ads operators."""
import json
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional, Sequence, Union

from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.search_ads import GoogleSearchAdsHook


class GoogleSearchAdsInsertReportOperator(BaseOperator):
    """
    Inserts a report request into the reporting system.

    .. seealso::
        For API documentation check:
        https://developers.google.com/search-ads/v2/reference/reports/request

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleSearchAdsInsertReportOperator`

    :param report: Report to be generated
    :type report: Dict[str, Any]
    :param api_version: The version of the API that will be requested, for example 'v2'.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        "report",
        "impersonation_chain",
    )
    template_ext = (".json",)

    def __init__(
        self,
        *,
        report: Dict[str, Any],
        api_version: str = "v2",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.report = report
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def prepare_template(self) -> None:
        # If .json is passed then we have to read the file
        if isinstance(self.report, str) and self.report.endswith('.json'):
            with open(self.report) as file:
                self.report = json.load(file)

    def execute(self, context: dict):
        hook = GoogleSearchAdsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info("Generating Search Ads report")
        response = hook.insert_report(report=self.report)
        report_id = response.get("id")
        self.xcom_push(context, key="report_id", value=report_id)
        self.log.info("Report generated, id: %s", report_id)
        return response


class GoogleSearchAdsDownloadReportOperator(BaseOperator):
    """
    Downloads a report to GCS bucket.

    .. seealso::
        For API documentation check:
        https://developers.google.com/search-ads/v2/reference/reports/getFile

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleSearchAdsGetfileReportOperator`

    :param report_id: ID of the report.
    :type report_id: str
    :param bucket_name: The bucket to upload to.
    :type bucket_name: str
    :param report_name: The report name to set when uploading the local file. If not provided
        then report_id is used.
    :type report_name: str
    :param gzip: Option to compress local file or file data for upload
    :type gzip: bool
    :param chunk_size: File will be downloaded in chunks of this many bytes.
    :type chunk_size: int
    :param api_version: The version of the API that will be requested, for example 'v2'.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        "report_name",
        "report_id",
        "bucket_name",
        "impersonation_chain",
    )

    def __init__(
        self,
        *,
        report_id: str,
        bucket_name: str,
        report_name: Optional[str] = None,
        gzip: bool = True,
        chunk_size: int = 10 * 1024 * 1024,
        api_version: str = "v2",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.report_id = report_id
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.chunk_size = chunk_size
        self.gzip = gzip
        self.bucket_name = self._set_bucket_name(bucket_name)
        self.report_name = report_name
        self.impersonation_chain = impersonation_chain

    def _resolve_file_name(self, name: str) -> str:
        csv = ".csv"
        gzip = ".gz"
        if not name.endswith(csv):
            name += csv
        if self.gzip:
            name += gzip
        return name

    @staticmethod
    def _set_bucket_name(name: str) -> str:
        bucket = name if not name.startswith("gs://") else name[5:]
        return bucket.strip("/")

    @staticmethod
    def _handle_report_fragment(fragment: bytes) -> bytes:
        fragment_records = fragment.split(b"\n", 1)
        if len(fragment_records) > 1:
            return fragment_records[1]
        return b""

    def execute(self, context: dict):
        hook = GoogleSearchAdsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )

        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )

        # Resolve file name of the report
        report_name = self.report_name or self.report_id
        report_name = self._resolve_file_name(report_name)

        response = hook.get(report_id=self.report_id)
        if not response['isReportReady']:
            raise AirflowException(f'Report {self.report_id} is not ready yet')

        # Resolve report fragments
        fragments_count = len(response["files"])

        # Download chunks of report's data
        self.log.info("Downloading Search Ads report %s", self.report_id)
        with NamedTemporaryFile() as temp_file:
            for i in range(fragments_count):
                byte_content = hook.get_file(report_fragment=i, report_id=self.report_id)
                fragment = byte_content if i == 0 else self._handle_report_fragment(byte_content)
                temp_file.write(fragment)
            temp_file.flush()

            gcs_hook.upload(
                bucket_name=self.bucket_name,
                object_name=report_name,
                gzip=self.gzip,
                filename=temp_file.name,
            )
        self.xcom_push(context, key="file_name", value=report_name)
apache/incubator-airflow
airflow/providers/google/marketing_platform/operators/search_ads.py
Python
apache-2.0
9,547
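# A hedged usage sketch for the two operators above: a minimal DAG that inserts a
# report request and then downloads the finished report to GCS. The dag_id, dates,
# bucket name, and report body are illustrative placeholders, not values from the
# provider's docs; the xcom key "report_id" is the one pushed by
# GoogleSearchAdsInsertReportOperator.execute() above.
from datetime import datetime

from airflow import DAG
from airflow.providers.google.marketing_platform.operators.search_ads import (
    GoogleSearchAdsDownloadReportOperator,
    GoogleSearchAdsInsertReportOperator,
)

with DAG(
    dag_id="example_search_ads_report",  # hypothetical dag id
    start_date=datetime(2021, 1, 1),
    schedule_interval=None,
) as dag:
    insert_report = GoogleSearchAdsInsertReportOperator(
        task_id="insert_report",
        report={
            "reportScope": {"agencyId": "12300000000000000"},  # placeholder id
            "reportType": "keyword",
            "columns": [{"columnName": "keywordText"}],
            "downloadFormat": "csv",
            "maxRowsPerFile": 1000000,
            "statisticsCurrency": "usd",
        },
    )

    # report_id is templated (it is in template_fields above), so the value
    # pushed to XCom by the insert task can be pulled at runtime.
    download_report = GoogleSearchAdsDownloadReportOperator(
        task_id="download_report",
        report_id="{{ task_instance.xcom_pull('insert_report', key='report_id') }}",
        bucket_name="my-reports-bucket",  # placeholder bucket
    )

    insert_report >> download_report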
#
# Copyright (C) 2012, Martin Zibricky
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA


# Install necessary 3rd party Python modules to run all tests.

# This script is supposed to be used in a continuous integration system:
# https://jenkins.shiningpanda.com/pyinstaller/
# Python there is mostly 64bit.


import os

try:
    import PyInstaller
except ImportError:
    # if importing PyInstaller fails, try to load from parent
    # directory to support running without installation
    import imp
    if not hasattr(os, "getuid") or os.getuid() != 0:
        imp.load_module('PyInstaller', *imp.find_module('PyInstaller',
            [os.path.dirname(os.path.dirname(os.path.abspath(__file__)))]))


import PyInstaller.compat as compat


_PACKAGES = [
    'docutils',
    'jinja2',
    'MySQL-python',
    'numpy',
    'PIL',
    'pycrypto',
    #'pyenchant',
    'pyodbc',
    'pytz',
    'sphinx',
    'simplejson',
    'SQLAlchemy',
    #'wxPython',
]


def main():
    for pkg in _PACKAGES:
        print('Installing module... %s' % pkg)
        retcode = compat.exec_command_rc('pip', 'install', pkg)
        if retcode:
            print(' %s installation failed' % pkg)


if __name__ == '__main__':
    main()
joaormatos/anaconda
Anaconda/standalone/trunk/buildtests/setupenv_unix.py
Python
gpl-3.0
1,879
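# A hedged, standard-library-only sketch of the same "install a list of packages"
# loop from the script above, without the PyInstaller.compat dependency. The
# shortened package list is illustrative.
import subprocess
import sys

PACKAGES = ['docutils', 'jinja2', 'pytz', 'simplejson']


def install_all(packages):
    for pkg in packages:
        print('Installing module... %s' % pkg)
        # "python -m pip" avoids depending on a pip executable being on PATH.
        retcode = subprocess.call([sys.executable, '-m', 'pip', 'install', pkg])
        if retcode:
            print(' %s installation failed' % pkg)


if __name__ == '__main__':
    install_all(PACKAGES)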
""" This is an example that uses the MidiToText eventhandler. When an event is triggered on it, it prints the event to the console. It gets the events from the MidiInFile. So it prints all the events from the infile to the console. great for debugging :-s """ # get data test_file = 'test/midifiles/minimal-cubase-type0.mid' # do parsing from .MidiInFile import MidiInFile from .MidiToText import MidiToText # the event handler midiIn = MidiInFile(MidiToText(), test_file) midiIn.read()
JonathanRaiman/Dali
data/score_informed_transcription/midi/example_print_file.py
Python
mit
494
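# A hedged sketch of swapping in a custom event handler: subclassing MidiToText
# and overriding one callback. The note_on(channel, note, velocity) signature and
# the no-arg base __init__ are assumptions about this midi package's handler API,
# inferred from similar event-handler libraries, not confirmed by the file above.
from .MidiInFile import MidiInFile
from .MidiToText import MidiToText


class NoteCounter(MidiToText):
    """Counts note-on events while still printing every event."""

    def __init__(self):
        MidiToText.__init__(self)  # assumes the base handler has a no-arg __init__
        self.notes = 0

    def note_on(self, channel=0, note=0x40, velocity=0x40):
        self.notes += 1
        MidiToText.note_on(self, channel, note, velocity)


handler = NoteCounter()
MidiInFile(handler, 'test/midifiles/minimal-cubase-type0.mid').read()
print('note_on events: %d' % handler.notes)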
""" Test how many times newly loaded binaries are notified; they should be delivered in batches instead of one-by-one. """ from __future__ import print_function import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class ModuleLoadedNotifysTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) NO_DEBUG_INFO_TESTCASE = True # DyanmicLoaderDarwin should batch up notifications about # newly added/removed libraries. Other DynamicLoaders may # not be written this way. @skipUnlessDarwin def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line number to break inside main(). self.line = line_number('main.cpp', '// breakpoint') def test_launch_notifications(self): """Test that lldb broadcasts newly loaded libraries in batches.""" self.build() exe = self.getBuildArtifact("a.out") self.dbg.SetAsync(False) listener = self.dbg.GetListener() listener.StartListeningForEventClass( self.dbg, lldb.SBTarget.GetBroadcasterClassName(), lldb.SBTarget.eBroadcastBitModulesLoaded | lldb.SBTarget.eBroadcastBitModulesUnloaded) # Create a target by the debugger. target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) # break on main breakpoint = target.BreakpointCreateByName('main', 'a.out') event = lldb.SBEvent() # CreateTarget() generated modules-loaded events; consume them & toss while listener.GetNextEvent(event): True error = lldb.SBError() process = target.Launch(listener, None, # argv None, # envp None, # stdin_path None, # stdout_path None, # stderr_path None, # working directory 0, # launch flags False, # Stop at entry error) # error self.assertTrue( process.GetState() == lldb.eStateStopped, PROCESS_STOPPED) total_solibs_added = 0 total_solibs_removed = 0 total_modules_added_events = 0 total_modules_removed_events = 0 while listener.GetNextEvent(event): if lldb.SBTarget.EventIsTargetEvent(event): if event.GetType() == lldb.SBTarget.eBroadcastBitModulesLoaded: solib_count = lldb.SBTarget.GetNumModulesFromEvent(event) total_modules_added_events += 1 total_solibs_added += solib_count if self.TraceOn(): # print all of the binaries that have been added added_files = [] i = 0 while i < solib_count: module = lldb.SBTarget.GetModuleAtIndexFromEvent(i, event) added_files.append(module.GetFileSpec().GetFilename()) i = i + 1 print("Loaded files: %s" % (', '.join(added_files))) if event.GetType() == lldb.SBTarget.eBroadcastBitModulesUnloaded: solib_count = lldb.SBTarget.GetNumModulesFromEvent(event) total_modules_removed_events += 1 total_solibs_removed += solib_count if self.TraceOn(): # print all of the binaries that have been removed removed_files = [] i = 0 while i < solib_count: module = lldb.SBTarget.GetModuleAtIndexFromEvent(i, event) removed_files.append(module.GetFileSpec().GetFilename()) i = i + 1 print("Unloaded files: %s" % (', '.join(removed_files))) # This is testing that we get back a small number of events with the loaded # binaries in batches. Check that we got back more than 1 solib per event. # In practice on Darwin today, we get back two events for a do-nothing c # program: a.out and dyld, and then all the rest of the system libraries. avg_solibs_added_per_event = int(float(total_solibs_added) / float(total_modules_added_events)) self.assertGreater(avg_solibs_added_per_event, 1)
apple/swift-lldb
packages/Python/lldbsuite/test/functionalities/target-new-solib-notifications/TestModuleLoadedNotifys.py
Python
apache-2.0
4,683
# pack.py
from tkinter import *                        # import everything from the tkinter module

root = Tk()
root.title("Login")
f1 = Frame(root); f1.pack()                  # the window is split into 3 stacked Frames; f1 holds the first row's label and entry
f2 = Frame(root); f2.pack()                  # f2 holds the second row's label and entry
f3 = Frame(root); f3.pack()                  # f3 holds the third row's two buttons
Label(f1, text="Username").pack(side=LEFT)   # label placed in f1, docked left
Entry(f1).pack(side=LEFT)                    # single-line entry placed in f1, left-aligned
Label(f2, text="Password").pack(side=LEFT)   # label placed in f2, docked left
Entry(f2, show="*").pack(side=LEFT)          # single-line entry placed in f2, docked left
Button(f3, text="Cancel").pack(side=RIGHT)   # button placed in f3, docked right
Button(f3, text="Log in").pack(side=RIGHT)   # button placed in f3, docked right
root.mainloop()
GH1995/tools
archives/Python_江老师给的代码/chapter16/pack.py
Python
gpl-3.0
782
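# A hedged alternative sketch of the same login form using grid() instead of
# nested Frames with pack(). grid() places widgets by row/column directly, so the
# three helper Frames above become unnecessary; this is the usual trade-off when
# choosing between the two geometry managers.
from tkinter import *

root = Tk()
root.title("Login")
Label(root, text="Username").grid(row=0, column=0, sticky=W)
Entry(root).grid(row=0, column=1)
Label(root, text="Password").grid(row=1, column=0, sticky=W)
Entry(root, show="*").grid(row=1, column=1)
Button(root, text="Log in").grid(row=2, column=0)
Button(root, text="Cancel").grid(row=2, column=1)
root.mainloop()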
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global import for model hyper-parameters.

Using this module any ModelParams can be accessed via GetParams.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import importlib

import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf


def _Import(name):
  """Imports the python module of the given name."""
  tf.logging.info('Importing %s', name)
  try:
    importlib.import_module(name)
    tf.logging.info('Imported %s', name)
  except ImportError as e:
    # It is expected that some imports may be missing.
    tf.logging.info('Could not import %s: %s', name, e)


_TASK_ROOT = 'REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.tasks'


# LINT.IfChange(task_dirs)
_TASK_DIRS = (
    'asr',
    'car',
    'image',
    'lm',
    'mt',
    'punctuator',
)
# LINT.ThenChange(tasks/BUILD:task_dirs)


def ImportAllParams(task_root=_TASK_ROOT, task_dirs=_TASK_DIRS):
  # Import all ModelParams to ensure that they are added to the global registry.
  for task in task_dirs:
    # By our code repository convention, there is a params.py under the task's
    # params directory. params.py imports _all_ modules that may register a
    # model param.
    _Import('{}.{}.params.params'.format(task_root, task))


def ImportParams(model_name, task_root=_TASK_ROOT, task_dirs=_TASK_DIRS):
  """Attempts to only import the files that may contain the model."""
  # 'model_name' follows <task>.<path>.<class name>
  if '.' not in model_name:
    raise ValueError('Invalid model name %s' % model_name)
  model_module = model_name.rpartition('.')[0]
  # Try importing the module directly, in case it's a local import.
  _Import(model_module)

  # Try built-in tasks imports.
  for task in sorted(task_dirs):
    if model_module.startswith(task + '.'):
      path = model_module[len(task) + 1:]
      _Import('{}.{}.params.{}'.format(task_root, task, path))
mlperf/training_results_v0.7
Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-16/lingvo/model_imports.py
Python
apache-2.0
2,689
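# A hedged, self-contained sketch of the import-by-name pattern that
# _Import()/ImportParams() above rely on: model registration happens as a side
# effect of importing a params module, so resolving "task.path.ClassName" only
# requires importing the module prefix. The task_root and module names here are
# illustrative placeholders, not real lingvo tasks.
import importlib


def import_params_module(model_name, task_root='myproject.tasks'):
    if '.' not in model_name:
        raise ValueError('Invalid model name %s' % model_name)
    # "lm.some_path.SomeModel" -> module "lm.some_path", class "SomeModel"
    model_module = model_name.rpartition('.')[0]
    task, _, path = model_module.partition('.')
    # Importing the params module registers the model as a side effect.
    return importlib.import_module('%s.%s.params.%s' % (task_root, task, path))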
import os import sys try: import pypandoc long_description = pypandoc.convert('README.md', 'rst', format='md') except (IOError, ImportError): long_description = open('README.md').read() try: from setuptools import setup, find_packages, Extension except ImportError: sys.stderr.write('Setuptools not found!\n') raise extra_args = ['-std=c++11', '-march=native', '-O3'] if sys.platform == 'darwin': extra_args += ['-mmacosx-version-min=10.9', '-stdlib=libc++'] os.environ['LDFLAGS'] = '-mmacosx-version-min=10.9' module = Extension( '_falconn', sources=['internal/python_wrapper.cc'], extra_compile_args=extra_args, include_dirs=['include', 'external/eigen', 'external/pybind11/include', 'external/simple-serializer']) setup( name='FALCONN', version='1.4.0', author='Ilya Razenshteyn, Ludwig Schmidt', author_email='falconn.lib@gmail.com', url='http://falconn-lib.org/', description= 'A library for similarity search over high-dimensional data based on Locality-Sensitive Hashing (LSH)', long_description=long_description, license='MIT', keywords= 'nearest neighbor search similarity lsh locality-sensitive hashing cosine distance euclidean', packages=find_packages(), include_package_data=True, ext_modules=[module])
FALCONN-LIB/FALCONN
src/python/package/setup.py
Python
mit
1,325
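# A hedged usage sketch for the setup script above: building the extension and
# smoke-testing the install. "falconn" as the importable package name is an
# assumption based on the '_falconn' extension module defined in setup.py, not
# something the script states.
#
#   pip install .                # or: python setup.py build_ext --inplace
#
import falconn  # fails here if the C++11 extension did not compile

print(falconn.__name__)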
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ from collections import OrderedDict import copy import warnings from django.core.exceptions import FieldError from django.db import connections, DEFAULT_DB_ALIAS from django.db.models.constants import LOOKUP_SEP from django.db.models.aggregates import refs_aggregate from django.db.models.expressions import ExpressionNode from django.db.models.fields import FieldDoesNotExist from django.db.models.query_utils import Q from django.db.models.related import PathInfo from django.db.models.sql import aggregates as base_aggregates_module from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE, ORDER_PATTERN, JoinInfo, SelectInfo) from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin, Col from django.db.models.sql.expressions import SQLEvaluator from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode, ExtraWhere, AND, OR, EmptyWhere) from django.utils import six from django.utils.deprecation import RemovedInDjango19Warning from django.utils.encoding import force_text from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] class RawQuery(object): """ A single raw SQL query """ def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.aggregate_select = {} def clone(self, using): return RawQuery(self.sql, using, params=self.params) def convert_values(self, value, field, connection): """Convert the database-returned value into a type that is consistent across database backends. By default, this defers to the underlying backend operations, but it can be overridden by Query classes for specific backends. """ return connection.ops.convert_values(value, field) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.table_name_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<RawQuery: %r>" % (self.sql % tuple(self.params)) def _execute_query(self): self.cursor = connections[self.using].cursor() self.cursor.execute(self.sql, self.params) class Query(object): """ A single SQL query. """ # SQL join types. These are part of the class because their string forms # vary from database to database and can be customised by a subclass. 
INNER = 'INNER JOIN' LOUTER = 'LEFT OUTER JOIN' alias_prefix = 'T' subq_aliases = frozenset([alias_prefix]) query_terms = QUERY_TERMS aggregates_module = base_aggregates_module compiler = 'SQLCompiler' def __init__(self, model, where=WhereNode): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # type they are. The key is the alias of the joined table (possibly # the table name) and the value is JoinInfo from constants.py. self.alias_map = {} self.table_map = {} # Maps table names to list of aliases. self.join_map = {} self.default_cols = True self.default_ordering = True self.standard_ordering = True self.used_aliases = set() self.filter_is_sticky = False self.included_inherited_models = {} # SQL-related attributes # Select and related select clauses as SelectInfo instances. # The select is used for cases where we want to set up the select # clause to contain other than default fields (values(), annotate(), # subqueries...) self.select = [] # The related_select_cols is used for columns needed for # select_related - this is populated in the compile stage. self.related_select_cols = [] self.tables = [] # Aliases in the order they are created. self.where = where() self.where_class = where self.group_by = None self.having = where() self.order_by = [] self.low_mark, self.high_mark = 0, None # Used for offset/limit self.distinct = False self.distinct_fields = [] self.select_for_update = False self.select_for_update_nowait = False self.select_related = False # SQL aggregate-related attributes # The _aggregates will be an OrderedDict when used. Due to the cost # of creating OrderedDict this attribute is created lazily (in # self.aggregates property). self._aggregates = None # Maps alias -> SQL aggregate function self.aggregate_select_mask = None self._aggregate_select_cache = None # Arbitrary maximum limit for select_related. Prevents infinite # recursion. Can be changed by the depth parameter to select_related(). self.max_depth = 5 # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. # The _extra attribute is an OrderedDict, lazily created similarly to # .aggregates self._extra = None # Maps col_alias -> (col_sql, params). self.extra_select_mask = None self._extra_select_cache = None self.extra_tables = () self.extra_order_by = () # A tuple that is a set of model field names and either True, if these # are the fields to defer, or False if these are the only fields to # load. self.deferred_loading = (set(), True) @property def extra(self): if self._extra is None: self._extra = OrderedDict() return self._extra @property def aggregates(self): if self._aggregates is None: self._aggregates = OrderedDict() return self._aggregates def __str__(self): """ Returns the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Returns the query as an SQL string and the parameters that will be substituted into the query. 
""" return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): result = self.clone(memo=memo) memo[id(self)] = result return result def prepare(self): return self def get_compiler(self, using=None, connection=None): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] # Check that the compiler will be able to execute the query for alias, aggregate in self.aggregate_select.items(): connection.ops.check_aggregate_support(aggregate) return connection.ops.compiler(self.compiler)(self, connection, using) def get_meta(self): """ Returns the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ return self.model._meta def clone(self, klass=None, memo=None, **kwargs): """ Creates a copy of the current instance. The 'kwargs' parameter can be used by clients to update attributes after copying has taken place. """ obj = Empty() obj.__class__ = klass or self.__class__ obj.model = self.model obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.table_map = self.table_map.copy() obj.join_map = self.join_map.copy() obj.default_cols = self.default_cols obj.default_ordering = self.default_ordering obj.standard_ordering = self.standard_ordering obj.included_inherited_models = self.included_inherited_models.copy() obj.select = self.select[:] obj.related_select_cols = [] obj.tables = self.tables[:] obj.where = self.where.clone() obj.where_class = self.where_class if self.group_by is None: obj.group_by = None else: obj.group_by = self.group_by[:] obj.having = self.having.clone() obj.order_by = self.order_by[:] obj.low_mark, obj.high_mark = self.low_mark, self.high_mark obj.distinct = self.distinct obj.distinct_fields = self.distinct_fields[:] obj.select_for_update = self.select_for_update obj.select_for_update_nowait = self.select_for_update_nowait obj.select_related = self.select_related obj.related_select_cols = [] obj._aggregates = self._aggregates.copy() if self._aggregates is not None else None if self.aggregate_select_mask is None: obj.aggregate_select_mask = None else: obj.aggregate_select_mask = self.aggregate_select_mask.copy() # _aggregate_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both aggregates and # _aggregate_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. 
obj._aggregate_select_cache = None obj.max_depth = self.max_depth obj._extra = self._extra.copy() if self._extra is not None else None if self.extra_select_mask is None: obj.extra_select_mask = None else: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is None: obj._extra_select_cache = None else: obj._extra_select_cache = self._extra_select_cache.copy() obj.extra_tables = self.extra_tables obj.extra_order_by = self.extra_order_by obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1] if self.filter_is_sticky and self.used_aliases: obj.used_aliases = self.used_aliases.copy() else: obj.used_aliases = set() obj.filter_is_sticky = False if 'alias_prefix' in self.__dict__: obj.alias_prefix = self.alias_prefix if 'subq_aliases' in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.__dict__.update(kwargs) if hasattr(obj, '_setup_query'): obj._setup_query() return obj def convert_values(self, value, field, connection): """Convert the database-returned value into a type that is consistent across database backends. By default, this defers to the underlying backend operations, but it can be overridden by Query classes for specific backends. """ return connection.ops.convert_values(value, field) def resolve_aggregate(self, value, aggregate, connection): """Resolve the value of aggregates returned by the database to consistent (and reasonable) types. This is required because of the predisposition of certain backends to return Decimal and long types when they are not needed. """ if value is None: if aggregate.is_ordinal: return 0 # Return None as-is return value elif aggregate.is_ordinal: # Any ordinal aggregate (e.g., count) returns an int return int(value) elif aggregate.is_computed: # Any computed aggregate (e.g., avg) returns a float return float(value) else: # Return value depends on the type of the field being processed. return self.convert_values(value, aggregate.field, connection) def get_aggregation(self, using, force_subq=False): """ Returns the dictionary with the values of the existing aggregations. """ if not self.aggregate_select: return {} # If there is a group by clause, aggregating does not add useful # information but retrieves only the first row. Aggregate # over the subquery instead. if self.group_by is not None or force_subq: from django.db.models.sql.subqueries import AggregateQuery query = AggregateQuery(self.model) obj = self.clone() if not force_subq: # In forced subq case the ordering and limits will likely # affect the results. obj.clear_ordering(True) obj.clear_limits() obj.select_for_update = False obj.select_related = False obj.related_select_cols = [] relabels = dict((t, 'subquery') for t in self.tables) # Remove any aggregates marked for reduction from the subquery # and move them to the outer AggregateQuery. 
            for alias, aggregate in self.aggregate_select.items():
                if aggregate.is_summary:
                    query.aggregates[alias] = aggregate.relabeled_clone(relabels)
                    del obj.aggregate_select[alias]

            try:
                query.add_subquery(obj, using)
            except EmptyResultSet:
                return dict(
                    (alias, None)
                    for alias in query.aggregate_select
                )
        else:
            query = self
            self.select = []
            self.default_cols = False
            self._extra = {}
            self.remove_inherited_models()

        query.clear_ordering(True)
        query.clear_limits()
        query.select_for_update = False
        query.select_related = False
        query.related_select_cols = []

        result = query.get_compiler(using).execute_sql(SINGLE)
        if result is None:
            result = [None for q in query.aggregate_select.items()]

        return dict(
            (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
            for (alias, aggregate), val
            in zip(query.aggregate_select.items(), result)
        )

    def get_count(self, using):
        """
        Performs a COUNT() query using the current filter constraints.
        """
        obj = self.clone()
        if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
            # If a select clause exists, then the query has already started to
            # specify the columns that are to be returned.
            # In this case, we need to use a subquery to evaluate the count.
            from django.db.models.sql.subqueries import AggregateQuery
            subquery = obj
            subquery.clear_ordering(True)
            subquery.clear_limits()

            obj = AggregateQuery(obj.model)
            try:
                obj.add_subquery(subquery, using=using)
            except EmptyResultSet:
                # add_subquery evaluates the query; if it's an EmptyResultSet
                # then there can be no results, and therefore the count is
                # obviously 0.
                return 0

        obj.add_count_column()
        number = obj.get_aggregation(using=using)[None]

        # Apply offset and limit constraints manually, since using LIMIT/OFFSET
        # in SQL (in variants that provide them) doesn't change the COUNT
        # output.
        number = max(0, number - self.low_mark)
        if self.high_mark is not None:
            number = min(number, self.high_mark - self.low_mark)

        return number

    def has_filters(self):
        return self.where or self.having

    def has_results(self, using):
        q = self.clone()
        if not q.distinct:
            q.clear_select_clause()
        q.clear_ordering(True)
        q.set_limits(high=1)
        compiler = q.get_compiler(using=using)
        return compiler.has_results()

    def combine(self, rhs, connector):
        """
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query. 'rhs' is not modified during a call to this function.

        The 'connector' parameter describes how to connect filters from the
        'rhs' query.
        """
        assert self.model == rhs.model, \
            "Cannot combine queries on two different base models."
        assert self.can_filter(), \
            "Cannot combine queries once a slice has been taken."
        assert self.distinct == rhs.distinct, \
            "Cannot combine a unique query with a non-unique query."
        assert self.distinct_fields == rhs.distinct_fields, \
            "Cannot combine queries with different distinct fields."

        self.remove_inherited_models()
        # Work out how to relabel the rhs aliases, if necessary.
        change_map = {}
        conjunction = (connector == AND)

        # Determine which existing joins can be reused. When combining the
        # query with AND we must recreate all joins for m2m filters. When
        # combining with OR we can reuse joins. The reason is that in AND
        # case a single row can't fulfill a condition like:
        #     revrel__col=1 & revrel__col=2
        # But, there might be two different related rows matching this
        # condition. In OR case a single True is enough, so single row is
        # enough, too.
# # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.tables) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == self.INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). for alias in rhs.tables[1:]: table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. lhs = change_map.get(lhs, lhs) new_alias = self.join( (lhs, table, join_cols), reuse=reuse, nullable=nullable, join_field=join_field) if join_type == self.INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Now relabel a copy of the rhs where-clause and add it to the current # one. if rhs.where: w = rhs.where.clone() w.relabel_aliases(change_map) if not self.where: # Since 'self' matches everything, add an explicit "include # everything" where-constraint so that connections between the # where clauses won't exclude valid results. self.where.add(EverythingNode(), AND) elif self.where: # rhs has an empty where clause. w = self.where_class() w.add(EverythingNode(), AND) else: w = self.where_class() self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. self.select = [] for col, field in rhs.select: if isinstance(col, (list, tuple)): new_col = change_map.get(col[0], col[0]), col[1] self.select.append(SelectInfo(new_col, field)) else: new_col = col.relabeled_clone(change_map) self.select.append(SelectInfo(new_col, field)) if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self._extra and rhs._extra: raise ValueError("When merging querysets using 'or', you " "cannot have extra(select=...) on both sides.") self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def deferred_to_data(self, target, callback): """ Converts the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. 
This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. The "target" parameter is the instance that is populated (in place). The "callback" is a function that is called whenever a (model, field) pair need to be added to "target". It accepts three parameters: "target", and the model and list of fields being added for that model. """ field_names, defer = self.deferred_loading if not field_names: return orig_opts = self.get_meta() seen = {} must_include = {orig_opts.concrete_model: set([orig_opts.pk])} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model._meta.concrete_model opts = orig_opts for name in parts[:-1]: old_model = cur_model source = opts.get_field_by_name(name)[0] if is_reverse_o2o(source): cur_model = source.model else: cur_model = source.rel.to opts = cur_model._meta # Even if we're "just passing through" this model, we must add # both the current model's pk and the related reference field # (if it's not a reverse relation) to the things we select. if not is_reverse_o2o(source): must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field, model, _, _ = opts.get_field_by_name(parts[-1]) if model is None: model = cur_model if not is_reverse_o2o(field): add_to_dict(seen, model, field) if defer: # We need to load all fields for each model, except those that # appear in "seen" (for all models that appear in "seen"). The only # slight complexity here is handling fields that exist on parent # models. workset = {} for model, values in six.iteritems(seen): for field, m in model._meta.get_fields_with_model(): if field in values: continue add_to_dict(workset, m or model, field) for model, values in six.iteritems(must_include): # If we haven't included a model in workset, we don't add the # corresponding must_include fields for that model, since an # empty set means "include all fields". That's why there's no # "else" branch here. if model in workset: workset[model].update(values) for model, values in six.iteritems(workset): callback(target, model, values) else: for model, values in six.iteritems(must_include): if model in seen: seen[model].update(values) else: # As we've passed through this model, but not explicitly # included any fields, we have to make sure it's mentioned # so that only the "must include" fields are pulled in. seen[model] = values # Now ensure that every model in the inheritance chain is mentioned # in the parent list. Again, it must be mentioned to ensure that # only "must include" fields are pulled in. for model in orig_opts.get_parent_list(): if model not in seen: seen[model] = set() for model, values in six.iteritems(seen): callback(target, model, values) def deferred_to_columns_cb(self, target, model, fields): """ Callback used by deferred_to_columns(). The "target" parameter should be a set instance. """ table = model._meta.db_table if table not in target: target[table] = set() for field in fields: target[table].add(field.column) def table_alias(self, table_name, create=False): """ Returns a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. 
""" alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. if alias_list: alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. alias = table_name self.table_map[alias] = [alias] self.alias_refcount[alias] = 1 self.tables.append(alias) return alias, True def ref_alias(self, alias): """ Increases the reference count for this alias. """ self.alias_refcount[alias] += 1 def unref_alias(self, alias, amount=1): """ Decreases the reference count for this alias. """ self.alias_refcount[alias] -= amount def promote_joins(self, aliases): """ Promotes recursively the join type of given aliases and its children to an outer join. If 'unconditional' is False, the join is only promoted if it is nullable or the parent join is an outer join. The children promotion is done to avoid join chains that contain a LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, then we must also promote b->c automatically, or otherwise the promotion of a->b doesn't actually change anything in the query results. """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_cols[0][1] is None: # This is the base table (first FROM entry) - this table # isn't really joined at all in the query, so we should not # alter its join type. continue # Only the first alias (skipped above) should have None join_type assert self.alias_map[alias].join_type is not None parent_alias = self.alias_map[alias].lhs_alias parent_louter = ( parent_alias and self.alias_map[parent_alias].join_type == self.LOUTER) already_louter = self.alias_map[alias].join_type == self.LOUTER if ((self.alias_map[alias].nullable or parent_louter) and not already_louter): data = self.alias_map[alias]._replace(join_type=self.LOUTER) self.alias_map[alias] = data # Join type of 'alias' changed, so re-examine all aliases that # refer to this one. aliases.extend( join for join in self.alias_map.keys() if (self.alias_map[join].lhs_alias == alias and join not in aliases)) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. . """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == self.LOUTER: self.alias_map[alias] = self.alias_map[alias]._replace(join_type=self.INNER) parent_alias = self.alias_map[alias].lhs_alias if self.alias_map[parent_alias].join_type == self.INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ This method will reset reference counts for aliases so that they match the value passed in :param to_counts:. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Changes the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. 
""" assert set(change_map.keys()).intersection(set(change_map.values())) == set() def relabel_column(col): if isinstance(col, (list, tuple)): old_alias = col[0] return (change_map.get(old_alias, old_alias), col[1]) else: return col.relabeled_clone(change_map) # 1. Update references in "select" (normal columns plus aliases), # "group by", "where" and "having". self.where.relabel_aliases(change_map) self.having.relabel_aliases(change_map) if self.group_by: self.group_by = [relabel_column(col) for col in self.group_by] self.select = [SelectInfo(relabel_column(s.col), s.field) for s in self.select] if self._aggregates: self._aggregates = OrderedDict( (key, relabel_column(col)) for key, col in self._aggregates.items()) # 2. Rename the alias in the internal table/alias datastructures. for ident, aliases in self.join_map.items(): del self.join_map[ident] aliases = tuple(change_map.get(a, a) for a in aliases) ident = (change_map.get(ident[0], ident[0]),) + ident[1:] self.join_map[ident] = aliases for old_alias, new_alias in six.iteritems(change_map): alias_data = self.alias_map[old_alias] alias_data = alias_data._replace(rhs_alias=new_alias) self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] self.alias_map[new_alias] = alias_data del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break for pos, alias in enumerate(self.tables): if alias == old_alias: self.tables[pos] = new_alias break for key, alias in self.included_inherited_models.items(): if alias in change_map: self.included_inherited_models[key] = change_map[alias] # 3. Update any joins that refer to the old alias. for alias, data in six.iteritems(self.alias_map): lhs = data.lhs_alias if lhs in change_map: data = data._replace(lhs_alias=change_map[lhs]) self.alias_map[alias] = data def bump_prefix(self, outer_query): """ Changes the alias prefix to the next letter in the alphabet in a way that the outer query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. """ if self.alias_prefix != outer_query.alias_prefix: # No clashes between self and outer query should be possible. return self.alias_prefix = chr(ord(self.alias_prefix) + 1) while self.alias_prefix in self.subq_aliases: self.alias_prefix = chr(ord(self.alias_prefix) + 1) assert self.alias_prefix < 'Z' self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) change_map = OrderedDict() for pos, alias in enumerate(self.tables): new_alias = '%s%d' % (self.alias_prefix, pos) change_map[alias] = new_alias self.tables[pos] = new_alias self.change_aliases(change_map) def get_initial_alias(self): """ Returns the first alias for this query, after increasing its reference count. """ if self.tables: alias = self.tables[0] self.ref_alias(alias) else: alias = self.join((None, self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Returns the number of tables in this query with a non-zero reference count. Note that after execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. 
""" return len([1 for count in self.alias_refcount.values() if count]) def join(self, connection, reuse=None, nullable=False, join_field=None): """ Returns an alias for the join in 'connection', either reusing an existing alias for that join or creating a new one. 'connection' is a tuple (lhs, table, join_cols) where 'lhs' is either an existing table alias or a table name. 'join_cols' is a tuple of tuples containing columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds to the SQL equivalent of:: lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2 The 'reuse' parameter can be either None which means all joins (matching the connection) are reusable, or it can be a set containing the aliases that can be reused. A join is always created as LOUTER if the lhs alias is LOUTER to make sure we do not generate chains like t1 LOUTER t2 INNER t3. All new joins are created as LOUTER if nullable is True. If 'nullable' is True, the join can potentially involve NULL values and is a candidate for promotion (to "left outer") when combining querysets. The 'join_field' is the field we are joining along (if any). """ lhs, table, join_cols = connection assert lhs is None or join_field is not None existing = self.join_map.get(connection, ()) if reuse is None: reuse = existing else: reuse = [a for a in existing if a in reuse] for alias in reuse: if join_field and self.alias_map[alias].join_field != join_field: # The join_map doesn't contain join_field (mainly because # fields in Query structs are problematic in pickling), so # check that the existing join is created using the same # join_field used for the under work join. continue self.ref_alias(alias) return alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(table, create=True) if not lhs: # Not all tables need to be joined to anything. No join type # means the later columns are ignored. join_type = None elif self.alias_map[lhs].join_type == self.LOUTER or nullable: join_type = self.LOUTER else: join_type = self.INNER join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable, join_field) self.alias_map[alias] = join if connection in self.join_map: self.join_map[connection] += (alias,) else: self.join_map[connection] = (alias,) return alias def setup_inherited_models(self): """ If the model that is the basis for this QuerySet inherits other models, we need to ensure that those other models have their tables included in the query. We do this as a separate step so that subclasses know which tables are going to be active in the query, without needing to compute all the select columns (this method is called from pre_sql_setup(), whereas column determination is a later part, and side-effect, of as_sql()). """ opts = self.get_meta() root_alias = self.tables[0] seen = {None: root_alias} for field, model in opts.get_fields_with_model(): if model not in seen: self.join_parent_model(opts, model, root_alias, seen) self.included_inherited_models = seen def join_parent_model(self, opts, model, alias, seen): """ Makes sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if chain is None: return alias curr_opts = opts for int_model in chain: if int_model in seen: return seen[int_model] # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) _, _, _, joins, _ = self.setup_joins( [link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = joins[-1] return alias or seen[None] def remove_inherited_models(self): """ Undoes the effects of setup_inherited_models(). Should be called whenever select columns (self.select) are set explicitly. """ for key, alias in self.included_inherited_models.items(): if key: self.unref_alias(alias) self.included_inherited_models = {} def add_aggregate(self, aggregate, model, alias, is_summary): """ Adds a single aggregate expression to the Query """ opts = model._meta field_list = aggregate.lookup.split(LOOKUP_SEP) if len(field_list) == 1 and self._aggregates and aggregate.lookup in self.aggregates: # Aggregate is over an annotation field_name = field_list[0] col = field_name source = self.aggregates[field_name] if not is_summary: raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % ( aggregate.name, field_name, field_name)) elif ((len(field_list) > 1) or (field_list[0] not in [i.name for i in opts.fields]) or self.group_by is None or not is_summary): # If: # - the field descriptor has more than one part (foo__bar), or # - the field descriptor is referencing an m2m/m2o field, or # - this is a reference to a model field (possibly inherited), or # - this is an annotation over a model field # then we need to explore the joins that are required. # Join promotion note - we must not remove any rows here, so use # outer join if there isn't any existing join. _, sources, opts, join_list, path = self.setup_joins( field_list, opts, self.get_initial_alias()) # Process the join chain to see if it can be trimmed targets, _, join_list = self.trim_joins(sources, join_list, path) col = targets[0].column source = sources[0] col = (join_list[-1], col) else: # The simplest cases. No joins required - # just reference the provided column alias. field_name = field_list[0] source = opts.get_field(field_name) col = field_name # We want to have the alias in SELECT clause even if mask is set. self.append_aggregate_mask([alias]) # Add the aggregate to the query aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary) def prepare_lookup_value(self, value, lookups, can_reuse): # Default lookup if none given is exact. if len(lookups) == 0: lookups = ['exact'] # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value. 
        if value is None:
            if lookups[-1] not in ('exact', 'iexact'):
                raise ValueError("Cannot use None as a query value")
            lookups[-1] = 'isnull'
            value = True
        elif callable(value):
            warnings.warn(
                "Passing callable arguments to queryset is deprecated.",
                RemovedInDjango19Warning, stacklevel=2)
            value = value()
        elif isinstance(value, ExpressionNode):
            # If value is a query expression, evaluate it
            value = SQLEvaluator(value, self, reuse=can_reuse)
        if hasattr(value, 'query') and hasattr(value.query, 'bump_prefix'):
            value = value._clone()
            value.query.bump_prefix(self)
        if hasattr(value, 'bump_prefix'):
            value = value.clone()
            value.bump_prefix(self)
        # For Oracle '' is equivalent to null. The check needs to be done
        # at this stage because join promotion can't be done at compiler
        # stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
        # can do here. Similar thing is done in is_nullable(), too.
        if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
                and lookups[-1] == 'exact' and value == ''):
            value = True
            lookups[-1] = 'isnull'
        return value, lookups

    def solve_lookup_type(self, lookup):
        """
        Solve the lookup type from the lookup (eg: 'foobar__id__icontains')
        """
        lookup_splitted = lookup.split(LOOKUP_SEP)
        if self._aggregates:
            aggregate, aggregate_lookups = refs_aggregate(lookup_splitted, self.aggregates)
            if aggregate:
                return aggregate_lookups, (), aggregate
        _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
        field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
        if len(lookup_parts) == 0:
            lookup_parts = ['exact']
        elif len(lookup_parts) > 1:
            if not field_parts:
                raise FieldError(
                    'Invalid lookup "%s" for model "%s".' %
                    (lookup, self.get_meta().model.__name__))
        return lookup_parts, field_parts, False

    def check_query_object_type(self, value, opts):
        """
        Checks whether the object passed while querying is of the correct type.
        If not, it raises a ValueError specifying the wrong object.
        """
        if hasattr(value, '_meta'):
            if not (value._meta.concrete_model == opts.concrete_model
                    or opts.concrete_model in value._meta.get_parent_list()
                    or value._meta.concrete_model in opts.get_parent_list()):
                raise ValueError(
                    'Cannot query "%s": Must be "%s" instance.' %
                    (value, opts.object_name))

    def check_related_objects(self, field, value, opts):
        """
        Checks the type of object passed to query relations.
        """
        if field.rel:
            # testing for iterable of models
            if hasattr(value, '__iter__'):
                for v in value:
                    self.check_query_object_type(v, opts)
            else:
                # expecting single model instance here
                self.check_query_object_type(value, opts)

    def build_lookup(self, lookups, lhs, rhs):
        lookups = lookups[:]
        while lookups:
            lookup = lookups[0]
            if len(lookups) == 1:
                final_lookup = lhs.get_lookup(lookup)
                if final_lookup:
                    return final_lookup(lhs, rhs)
                # We didn't find a lookup, so we are going to try get_transform
                # + get_lookup('exact').
                lookups.append('exact')
            next = lhs.get_transform(lookup)
            if next:
                lhs = next(lhs, lookups)
            else:
                raise FieldError(
                    "Unsupported lookup '%s' for %s or join on the field not "
                    "permitted." %
                    (lookup, lhs.output_field.__class__.__name__))
            lookups = lookups[1:]

    def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
                     can_reuse=None, connector=AND):
        """
        Builds a WhereNode for a single filter clause, but doesn't add it
        to this Query. Query.add_q() will then add this filter to the where
        or having Node.

        The 'branch_negated' tells us if the current branch contains any
        negations. This will be used to determine if subqueries are needed.
        The 'current_negated' is used to determine if the current filter is
        negated or not and this will be used to determine if IS NULL filtering
        is needed.

        The difference between 'current_negated' and 'branch_negated' is that
        branch_negated is set on the first negation, but current_negated is
        flipped for each negation.

        Note that add_filter will not do any negating itself; that is done
        further up the call stack, by add_q().

        The 'can_reuse' is a set of reusable joins for multijoins.

        The method will create a filter clause that can be added to the
        current query. However, if the filter isn't added to the query then
        the caller is responsible for unreffing the joins used.
        """
        arg, value = filter_expr
        if not arg:
            raise FieldError("Cannot parse keyword query %r" % arg)
        lookups, parts, reffed_aggregate = self.solve_lookup_type(arg)

        # Work out the lookup type and remove it from the end of 'parts',
        # if necessary.
        value, lookups = self.prepare_lookup_value(value, lookups, can_reuse)
        used_joins = getattr(value, '_used_joins', [])

        clause = self.where_class()
        if reffed_aggregate:
            condition = self.build_lookup(lookups, reffed_aggregate, value)
            if not condition:
                # Backwards compat for custom lookups
                assert len(lookups) == 1
                condition = (reffed_aggregate, lookups[0], value)
            clause.add(condition, AND)
            return clause, []

        opts = self.get_meta()
        alias = self.get_initial_alias()
        allow_many = not branch_negated

        try:
            field, sources, opts, join_list, path = self.setup_joins(
                parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)
            self.check_related_objects(field, value, opts)

            # split_exclude() needs to know which joins were generated for the
            # lookup parts
            self._lookup_joins = join_list
        except MultiJoin as e:
            return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                                      can_reuse, e.names_with_path)

        if can_reuse is not None:
            can_reuse.update(join_list)
        used_joins = set(used_joins).union(set(join_list))

        # Process the join list to see if we can remove any non-needed joins from
        # the far end (fewer tables in a query is better).
        targets, alias, join_list = self.trim_joins(sources, join_list, path)

        if hasattr(field, 'get_lookup_constraint'):
            # For now foreign keys get special treatment. This should be
            # refactored when composite fields lands.
            condition = field.get_lookup_constraint(self.where_class, alias, targets,
                                                    sources, lookups, value)
            lookup_type = lookups[-1]
        else:
            assert(len(targets) == 1)
            col = Col(alias, targets[0], field)
            condition = self.build_lookup(lookups, col, value)
            if not condition:
                # Backwards compat for custom lookups
                if lookups[0] not in self.query_terms:
                    raise FieldError(
                        "Join on field '%s' not permitted. Did you "
                        "misspell '%s' for the lookup type?" %
                        (col.output_field.name, lookups[0]))
                if len(lookups) > 1:
                    raise FieldError("Nested lookup '%s' not supported." %
                                     LOOKUP_SEP.join(lookups))
                condition = (Constraint(alias, targets[0].column, field), lookups[0], value)
                lookup_type = lookups[-1]
            else:
                lookup_type = condition.lookup_name

        clause.add(condition, AND)

        require_outer = lookup_type == 'isnull' and value is True and not current_negated
        if current_negated and (lookup_type != 'isnull' or value is False):
            require_outer = True
            if (lookup_type != 'isnull' and (
                    self.is_nullable(targets[0]) or
                    self.alias_map[join_list[-1]].join_type == self.LOUTER)):
                # The condition added here will be SQL like this:
                # NOT (col IS NOT NULL), where the first NOT is added in
                # upper layers of code.
                # The reason for the addition is that if col is null, then
                # col != someval will result in SQL "unknown", which isn't
                # the same as in Python. The Python None handling is wanted,
                # and it can be gotten by
                # (col IS NULL OR col != someval)
                #   <=>
                # NOT (col IS NOT NULL AND col = someval).
                lookup_class = targets[0].get_lookup('isnull')
                clause.add(lookup_class(Col(alias, targets[0], sources[0]), False), AND)
        return clause, used_joins if not require_outer else ()

    def add_filter(self, filter_clause):
        self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))

    def need_having(self, obj):
        """
        Returns whether or not all elements of this q_object need to be put
        together in the HAVING clause.
        """
        if not self._aggregates:
            return False
        if not isinstance(obj, Node):
            return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)[0]
                    or (hasattr(obj[1], 'contains_aggregate')
                        and obj[1].contains_aggregate(self.aggregates)))
        return any(self.need_having(c) for c in obj.children)

    def split_having_parts(self, q_object, negated=False):
        """
        Returns a list of q_objects which need to go into the having clause
        instead of the where clause. Removes the split-out nodes from the
        given q_object. Note that the q_object is altered, so cloning it is
        needed.
        """
        having_parts = []
        for c in q_object.children[:]:
            # When constructing the having nodes we need to take care to
            # preserve the negation status from the upper parts of the tree
            if isinstance(c, Node):
                # For each negated child, flip the in_negated flag.
                in_negated = c.negated ^ negated
                if c.connector == OR and self.need_having(c):
                    # A subtree starting from OR clause must go into having in
                    # whole if any part of that tree references an aggregate.
                    q_object.children.remove(c)
                    having_parts.append(c)
                    c.negated = in_negated
                else:
                    having_parts.extend(
                        self.split_having_parts(c, in_negated)[1])
            elif self.need_having(c):
                q_object.children.remove(c)
                new_q = self.where_class(children=[c], negated=negated)
                having_parts.append(new_q)
        return q_object, having_parts

    def add_q(self, q_object):
        """
        A preprocessor for the internal _add_q(). Responsible for splitting the
        given q_object into where and having parts and setting up some internal
        variables.
        """
        if not self.need_having(q_object):
            where_part, having_parts = q_object, []
        else:
            where_part, having_parts = self.split_having_parts(
                q_object.clone(), q_object.negated)
        # For join promotion this case is doing an AND for the added q_object
        # and existing conditions. So, any existing inner join forces the join
        # type to remain inner. Existing outer joins can however be demoted.
        # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
        # rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
        existing_inner = set(
            (a for a in self.alias_map if self.alias_map[a].join_type == self.INNER))
        clause, require_inner = self._add_q(where_part, self.used_aliases)
        self.where.add(clause, AND)
        for hp in having_parts:
            clause, _ = self._add_q(hp, self.used_aliases)
            self.having.add(clause, AND)
        self.demote_joins(existing_inner)

    def _add_q(self, q_object, used_aliases, branch_negated=False,
               current_negated=False):
        """
        Adds a Q-object to the current filter.
""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: if isinstance(child, Node): child_clause, needed_inner = self._add_q( child, used_aliases, branch_negated, current_negated) joinpromoter.add_votes(needed_inner) else: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, connector=connector) joinpromoter.add_votes(needed_inner) target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walks the names path and turns them PathInfo tuples. Note that a single name in 'names' can generate multiple PathInfos (m2m for example). 'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). Returns a list of PathInfo tuples. In addition returns the final field (the last used join field), and target (which is a field guaranteed to contain the same value as the final field). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name try: field, model, direct, m2m = opts.get_field_by_name(name) except FieldDoesNotExist: # We didn't found the current field, so move position back # one step. pos -= 1 break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model: # The field lives on a base class of the current model. # Skip the chain of proxy to the concrete proxied model proxied_model = opts.concrete_model for int_model in opts.get_base_chain(model): if int_model is proxied_model: opts = int_model._meta else: final_field = opts.parents[int_model] targets = (final_field.rel.get_related_field(),) opts = int_model._meta path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True)) cur_names_with_path[1].append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True)) if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info() if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) break if pos == -1 or (fail_on_missing and pos + 1 != len(names)): self.raise_field_error(opts, name) return path, final_field, targets, names[pos + 1:] def raise_field_error(self, opts, name): available = opts.get_all_field_names() + list(self.aggregate_select) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(available))) def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True): """ Compute the necessary table joins for the passage through the fields given in 'names'. 
        'opts' is the Options class for the current model (which gives the
        table we are starting from), 'alias' is the alias for the table to
        start the joining from.

        The 'can_reuse' defines the reverse foreign key joins we can reuse. It
        can be None in which case all joins are reusable or a set of aliases
        that can be reused. Note that non-reverse foreign keys are always
        reusable when using setup_joins().

        If 'allow_many' is False, then any reverse foreign key seen will
        generate a MultiJoin exception.

        Returns the final field involved in the joins, the target field (used
        for any 'where' constraint), the final 'opts' value, the joins and the
        field path travelled to generate the joins.

        The target field is the field containing the concrete value. The final
        field can be something different - for example, a foreign key pointing
        to that value. The final field is needed, for example, in some value
        conversions (converting 'obj' in fk__id=obj to a pk value using the
        foreign key field).
        """
        joins = [alias]
        # First, generate the path for the names
        path, final_field, targets, rest = self.names_to_path(
            names, opts, allow_many, fail_on_missing=True)

        # Then, add the path to the query's joins. Note that we can't trim
        # joins at this stage - we will need the information about join type
        # of the trimmed joins.
        for pos, join in enumerate(path):
            opts = join.to_opts
            if join.direct:
                nullable = self.is_nullable(join.join_field)
            else:
                nullable = True
            connection = alias, opts.db_table, join.join_field.get_joining_columns()
            reuse = can_reuse if join.m2m else None
            alias = self.join(
                connection, reuse=reuse, nullable=nullable,
                join_field=join.join_field)
            joins.append(alias)
        if hasattr(final_field, 'field'):
            final_field = final_field.field
        return final_field, targets, opts, joins, path

    def trim_joins(self, targets, joins, path):
        """
        The 'targets' parameter is the final field(s) being joined to, 'joins'
        is the full list of join aliases. The 'path' contains the PathInfos
        used to create the joins.

        Returns the final target field and table alias and the new active
        joins.

        We will always trim any direct join if we have the target column
        available already in the previous table. Reverse joins can't be
        trimmed as we don't know if there is anything on the other side of
        the join.
        """
        joins = joins[:]
        for pos, info in enumerate(reversed(path)):
            if len(joins) == 1 or not info.direct:
                break
            join_targets = set(t.column for t in info.join_field.foreign_related_fields)
            cur_targets = set(t.column for t in targets)
            if not cur_targets.issubset(join_targets):
                break
            targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
            self.unref_alias(joins.pop())
        return targets, joins[-1], joins

    def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
        """
        When doing an exclude against any kind of N-to-many relation, we need
        to use a subquery. This method constructs the nested query, given the
        original exclude filter (filter_expr) and the portion up to the first
        N-to-many relation field.

        As an example we could have original filter ~Q(child__name='foo').
        We would get here with filter_expr = child__name, prefix = child and
        can_reuse is a set of joins usable for filters in the original query.

        We will turn this into equivalent of:
            WHERE NOT (pk IN (SELECT parent_id FROM thetable
                              WHERE name = 'foo' AND parent_id IS NOT NULL))

        It might be worth it to consider using WHERE NOT EXISTS as that has
        saner null handling, and is easier for the backend's optimizer to
        handle.
        """
        # Generate the inner query.
query = Query(self.model) query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) query.remove_inherited_models() # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing alias, col = query.select[0].col if self.is_nullable(query.select[0].field): lookup_class = query.select[0].field.get_lookup('isnull') lookup = lookup_class(Col(alias, query.select[0].field, query.select[0].field), False) query.where.add(lookup, AND) if alias in can_reuse: select_field = query.select[0].field pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') lookup = lookup_class(Col(query.select[0].col[0], pk, pk), Col(alias, pk, pk)) query.where.add(lookup, AND) condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where = EmptyWhere() self.having = EmptyWhere() def is_empty(self): return isinstance(self.where, EmptyWhere) or isinstance(self.having, EmptyWhere) def set_limits(self, low=None, high=None): """ Adjusts the limits on the rows retrieved. We use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, they are converted to the appropriate offset and limit values. Any limits passed in here are applied relative to the existing constraints. So low is added to the current low value and both will be clamped to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low def clear_limits(self): """ Clears any existing limits. """ self.low_mark, self.high_mark = 0, None def can_filter(self): """ Returns True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.low_mark and self.high_mark is None def clear_select_clause(self): """ Removes all fields from SELECT clause. """ self.select = [] self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_aggregate_mask(()) def clear_select_fields(self): """ Clears the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. 
""" self.select = [] def add_distinct_fields(self, *field_names): """ Adds and resolves the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Adds the given (model) fields to the select set. The field names are added in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. _, targets, _, joins, path = self.setup_joins( name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins(targets, joins, path) for target in targets: self.select.append(SelectInfo((final_alias, target.column), target)) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted(opts.get_all_field_names() + list(self.extra) + list(self.aggregate_select)) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) self.remove_inherited_models() def add_ordering(self, *ordering): """ Adds items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or ordinals, corresponding to column positions in the 'select' list. If 'ordering' is empty, all ordering is cleared from the query. """ errors = [] for item in ordering: if not ORDER_PATTERN.match(item): errors.append(item) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by.extend(ordering) else: self.default_ordering = False def clear_ordering(self, force_empty): """ Removes any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = [] self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self): """ Expands the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ self.group_by = [] for col, _ in self.select: self.group_by.append(col) def add_count_column(self): """ Converts the query to do count(...) or count(distinct(pk)) in order to get its size. """ if not self.distinct: if not self.select: count = self.aggregates_module.Count('*', is_summary=True) else: assert len(self.select) == 1, \ "Cannot add count col with multiple cols in 'select': %r" % self.select count = self.aggregates_module.Count(self.select[0].col) else: opts = self.get_meta() if not self.select: count = self.aggregates_module.Count( (self.join((None, opts.db_table, None)), opts.pk.column), is_summary=True, distinct=True) else: # Because of SQL portability issues, multi-column, distinct # counts need a sub-query -- see get_count() for details. assert len(self.select) == 1, \ "Cannot add count col with multiple cols in 'select'." count = self.aggregates_module.Count(self.select[0].col, distinct=True) # Distinct handling is done in Count(), so don't do it at this # level. self.distinct = False # Set only aggregate to be the count column. 
# Clear out the select cache to reflect the new unmasked aggregates. self._aggregates = {None: count} self.set_aggregate_mask(None) self.group_by = None def add_select_related(self, fields): """ Sets up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict self.related_select_cols = [] def add_extra(self, select, select_params, where, params, tables, order_by): """ Adds data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = OrderedDict() if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = force_text(entry) entry_params = [] pos = entry.find("%s") while pos != -1: entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) # This is order preserving, since self.extra_select is an OrderedDict. self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """ Remove any fields from the deferred loading set. """ self.deferred_loading = (set(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. The new field names are added to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, those names are removed from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. 
            self.deferred_loading = field_names, False

    def get_loaded_field_names(self):
        """
        If any fields are marked to be deferred, returns a dictionary mapping
        models to a set of names in those fields that will be loaded. If a
        model is not in the returned dictionary, none of its fields are
        deferred.

        If no fields are marked for deferral, returns an empty dictionary.
        """
        # We cache this because we call this function multiple times
        # (compiler.fill_related_selections, query.iterator)
        try:
            return self._loaded_field_names_cache
        except AttributeError:
            collection = {}
            self.deferred_to_data(collection, self.get_loaded_field_names_cb)
            self._loaded_field_names_cache = collection
            return collection

    def get_loaded_field_names_cb(self, target, model, fields):
        """
        Callback used by get_loaded_field_names().
        """
        target[model] = set(f.name for f in fields)

    def set_aggregate_mask(self, names):
        "Set the mask of aggregates that will actually be returned by the SELECT"
        if names is None:
            self.aggregate_select_mask = None
        else:
            self.aggregate_select_mask = set(names)
        self._aggregate_select_cache = None

    def append_aggregate_mask(self, names):
        if self.aggregate_select_mask is not None:
            self.set_aggregate_mask(set(names).union(self.aggregate_select_mask))

    def set_extra_mask(self, names):
        """
        Set the mask of extra select items that will be returned by SELECT.
        We don't actually remove them from the Query since they might be used
        later.
        """
        if names is None:
            self.extra_select_mask = None
        else:
            self.extra_select_mask = set(names)
        self._extra_select_cache = None

    @property
    def aggregate_select(self):
        """
        The OrderedDict of aggregate columns that are not masked, and should
        be used in the SELECT clause. This result is cached for optimization
        purposes.
        """
        if self._aggregate_select_cache is not None:
            return self._aggregate_select_cache
        elif not self._aggregates:
            return {}
        elif self.aggregate_select_mask is not None:
            self._aggregate_select_cache = OrderedDict(
                (k, v) for k, v in self.aggregates.items()
                if k in self.aggregate_select_mask
            )
            return self._aggregate_select_cache
        else:
            return self.aggregates

    @property
    def extra_select(self):
        if self._extra_select_cache is not None:
            return self._extra_select_cache
        if not self._extra:
            return {}
        elif self.extra_select_mask is not None:
            self._extra_select_cache = OrderedDict(
                (k, v) for k, v in self.extra.items()
                if k in self.extra_select_mask
            )
            return self._extra_select_cache
        else:
            return self.extra

    def trim_start(self, names_with_path):
        """
        Trims joins from the start of the join path. The candidates for trim
        are the PathInfos in the names_with_path structure that are m2m joins.

        Also sets the select column so the start matches the join.

        This method is meant to be used for generating the subquery joins &
        cols in split_exclude().

        Returns a lookup usable for doing outerq.filter(lookup=self). Also
        returns whether the joins in the prefix contain a LEFT OUTER join.
        """
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == self.LOUTER:
                contains_louter = True
            self.unref_alias(lookup_tables[trimmed_paths])
        # The path.join_field is a Rel, let's get the other side's field
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(
            join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for LEFT JOINs because we would
        # miss those rows that have nothing on the outer side.
        if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != self.LOUTER:
            select_fields = [r[0] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths + 1]
            self.unref_alias(lookup_tables[trimmed_paths])
            extra_restriction = join_field.get_extra_restriction(
                self.where_class, None, lookup_tables[trimmed_paths + 1])
            if extra_restriction:
                self.where.add(extra_restriction, AND)
        else:
            # TODO: It might be possible to trim more joins from the start of the
            # inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
            select_fields = [r[1] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths]
        self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
        return trimmed_prefix, contains_louter

    def is_nullable(self, field):
        """
        A helper to check if the given field should be treated as nullable.

        Some backends treat '' as null and Django treats such fields as
        nullable for those backends. In such situations field.null can be
        False even if we should treat the field as nullable.
        """
        # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
        # (nor should it have) knowledge of which connection is going to be
        # used. The proper fix would be to defer all decisions where
        # is_nullable() is needed to the compiler stage, but that is not easy
        # to do currently.
        if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
                and field.empty_strings_allowed):
            return True
        else:
            return field.null


def get_order_dir(field, default='ASC'):
    """
    Returns the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    if field[0] == '-':
        return field[1:], dirn[1]
    return field, dirn[0]


def add_to_dict(data, key, value):
    """
    A helper function to add "value" to the set of values for "key", whether or
    not "key" already exists.
    """
    if key in data:
        data[key].add(value)
    else:
        data[key] = set([value])


def is_reverse_o2o(field):
    """
    A little helper to check if the given field is reverse-o2o. The field is
    expected to be some sort of relation field or related object.
    """
    return not hasattr(field, 'rel') and field.field.unique


def alias_diff(refcounts_before, refcounts_after):
    """
    Given the before and after copies of refcounts works out which aliases
    have been added to the after copy.
""" # Use -1 as default value so that any join that is created, then trimmed # is seen as added. return set(t for t in refcounts_after if refcounts_after[t] > refcounts_before.get(t, -1)) class JoinPromoter(object): """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.outer_votes = {} self.inner_votes = {} def add_votes(self, inner_votes): """ Add single vote per item to self.inner_votes. Parameter can be any iterable. """ for voted in inner_votes: self.inner_votes[voted] = self.inner_votes.get(voted, 0) + 1 def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.inner_votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). if self.effective_connector == 'OR' and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. 
            # Or we could replace the __gte clause with an OR clause
            # containing rel_a__col=1 | rel_a__col=2, and again we could
            # safely demote to INNER.
        query.promote_joins(to_promote)
        query.demote_joins(to_demote)
        return to_demote
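

# Editor's illustration (not part of Django): a minimal, standalone sketch of
# the voting rule that JoinPromoter.update_join_types() applies above,
# assuming AND/OR are the plain strings 'AND'/'OR' as used by the where-node
# constants in this module. The helper name `sketch_join_decisions` is
# hypothetical.
def sketch_join_decisions(effective_connector, num_children, inner_votes):
    """Return (to_promote, to_demote) sets per the rules documented above."""
    to_promote, to_demote = set(), set()
    for table, votes in inner_votes.items():
        # OR: a join not required by every child must not filter rows -> outer.
        if effective_connector == 'OR' and votes < num_children:
            to_promote.add(table)
        # AND (or OR with unanimous votes): a matching row is required -> inner.
        if effective_connector == 'AND' or (
                effective_connector == 'OR' and votes == num_children):
            to_demote.add(table)
    return to_promote, to_demote

# For rel_a__col=1 | rel_b__col=2 (two children, one vote each), both joins
# get promoted: sketch_join_decisions('OR', 2, {'rel_a': 1, 'rel_b': 1})
# returns ({'rel_a', 'rel_b'}, set()).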
aleksandra-tarkowska/django
django/db/models/sql/query.py
Python
bsd-3-clause
93,321
#!/usr/bin/python #coding=utf-8 ''' @author: sheng @license: ''' SPELL=u'jūliáo' CN=u'居髎' NAME=u'juliao12' CHANNEL='gallbladder' CHANNEL_FULLNAME='GallbladderChannelofFoot-Shaoyang' SEQ='GB29' if __name__ == '__main__': pass
sinotradition/meridian
meridian/acupoints/juliao12.py
Python
apache-2.0
242
from pywps.Process import WPSProcess


class FirstProcess(WPSProcess):
    def __init__(self):
        WPSProcess.__init__(self,
                            identifier="complexVector",
                            title="First Process",
                            abstract="Get vector input and return it to output",
                            statusSupported=True,
                            storeSupported=True)

        self.indata = self.addComplexInput(identifier="indata",
                                           title="Complex in",
                                           formats=[{"mimeType": "text/xml"},
                                                    {"mimeType": "application/xml"}],
                                           minOccurs=0, maxOccurs=1024)
        self.outdata = self.addComplexOutput(identifier="outdata",
                                             title="Complex out",
                                             formats=[{"mimeType": "text/xml"}])
        self.outdata2 = self.addComplexOutput(identifier="outdata2",
                                              title="Complex out",
                                              formats=[{"mimeType": "application/xml"}])

    def execute(self):
        self.outdata.setValue(self.indata.getValue()[0])
        self.outdata2.setValue(self.indata.getValue()[0])


class SecondProcess(WPSProcess):
    def __init__(self):
        WPSProcess.__init__(self,
                            identifier="complexRaster",
                            title="Second Process")

        # "maxmegabites" (sic) is the keyword spelling this PyWPS version
        # defines, so it must not be "corrected".
        self.indata = self.addComplexInput(identifier="indata",
                                           title="Complex in",
                                           formats=[{"mimeType": "image/tiff"}],
                                           maxmegabites=2)
        self.outdata = self.addComplexOutput(identifier="outdata",
                                             title="Complex out",
                                             formats=[{"mimeType": "image/tiff"}])

    def execute(self):
        self.outdata.setValue(self.indata.getValue())
jachym/PyWPS-SVN
tests/processes/moreInOne.py
Python
gpl-2.0
1,512
#!/usr/bin/env python # # Copyright 2006,2007,2010,2011,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gr_unittest, digital, blocks import pmt import numpy from time import sleep class test_hdlc_framer(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block() def tearDown(self): self.tb = None def test_001(self): #test complementary operation of framer & deframer #want to frame some random data that has enough consecutive bits to #stuff at least a few bits npkts = 20 src_data = [0xFE, 0xDA, 0xAC, 0x29, 0x7F, 0xA2, 0x90, 0x0F, 0xF8] frame = digital.hdlc_framer_pb("wat") deframe = digital.hdlc_deframer_bp(8, 500) debug = blocks.message_debug() self.tb.connect(frame, deframe) self.tb.msg_connect(deframe, "out", debug, "store") self.tb.start() msg = pmt.cons(pmt.PMT_NIL, pmt.init_u8vector(len(src_data),src_data)) for i in range(npkts): frame.to_basic_block()._post(pmt.intern("in"), msg) sleep(0.2) self.tb.stop() self.tb.wait() rxmsg = debug.get_message(0) result_len = pmt.blob_length(pmt.cdr(rxmsg)) msg_data = [] for j in range(result_len): msg_data.append(pmt.u8vector_ref(pmt.cdr(rxmsg), j)) self.assertEqual(src_data, msg_data) if __name__ == '__main__': gr_unittest.run(test_hdlc_framer, "test_hdlc_framer.xml")
awalls-cx18/gnuradio
gr-digital/python/digital/qa_hdlc_framer.py
Python
gpl-3.0
2,222
import sys
sys.path.append("../build")

import os
here = os.path.dirname(os.path.realpath(__file__))

import numpy as np
import matplotlib.pyplot as plt

import pyrfr.regression

data_set_prefix = '%(here)s/../test_data_sets/diabetes_' % {"here": here}
features = np.loadtxt(data_set_prefix + 'features.csv', delimiter=",")
responses = np.loadtxt(data_set_prefix + 'responses.csv', delimiter=",")
types = np.zeros([features.shape[1]], dtype=np.uint)

num_train_samples = 400

indices = np.array(range(features.shape[0]))
np.random.shuffle(indices)

features_train = features[indices[:num_train_samples]]
features_test = features[indices[num_train_samples:]]
responses_train = responses[indices[:num_train_samples]]
responses_test = responses[indices[num_train_samples:]]

data = pyrfr.regression.default_data_container(features_train.shape[1])

for f, r in zip(features_train, responses_train):
    data.add_data_point(f.tolist(), r)

# seed the rng so that the fit is reproducible
rng = pyrfr.regression.default_random_engine(42)

# create an instance of a regression forest using binary splits and the RSS loss
the_forest = pyrfr.regression.binary_rss_forest()
the_forest.options.num_trees = 16

# the forest's parameters
the_forest.options.compute_oob_error = True
the_forest.options.do_bootstrapping = True  # default: false
the_forest.options.num_data_points_per_tree = (data.num_data_points() // 4) * 3  # use 3/4 of the data points per tree
the_forest.options.tree_opts.max_features = data.num_features() // 2  # 0 would mean all the features
the_forest.options.tree_opts.min_samples_to_split = 0  # 0 means split until pure
the_forest.options.tree_opts.min_samples_in_leaf = 0  # 0 means no restriction
the_forest.options.tree_opts.max_depth = 1024  # 0 means no restriction
the_forest.options.tree_opts.epsilon_purity = 1e-8  # when checking for purity, the data points can differ by this epsilon

the_forest.fit(data, rng)

predictions_test = [the_forest.predict(f.tolist()) for f in features_test]
print(np.sqrt(np.mean((predictions_test - responses_test) ** 2)))
print(the_forest.out_of_bag_error())
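
# Editor's note (not part of the original example): a small convenience
# wrapper built only on the predict() call demonstrated above. The helper
# name `predict_batch` is hypothetical.
def predict_batch(forest, feature_matrix):
    """Predict a response for every row of a 2-D numpy array."""
    return np.array([forest.predict(row.tolist()) for row in feature_matrix])

# Equivalent to the list comprehension above, e.g.:
#   rmse = np.sqrt(np.mean((predict_batch(the_forest, features_test) - responses_test) ** 2))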
sfalkner/random_forest_run
pyrfr/examples/pyrfr_oob_error.py
Python
bsd-3-clause
2,227
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Session Handling for SQLAlchemy backend. Initializing: * Call set_defaults with the minimal of the following kwargs: sql_connection, sqlite_db Example:: session.set_defaults( sql_connection="sqlite:///var/lib/solum/sqlite.db", sqlite_db="/var/lib/solum/sqlite.db") Recommended ways to use sessions within this framework: * Don't use them explicitly; this is like running with AUTOCOMMIT=1. model_query() will implicitly use a session when called without one supplied. This is the ideal situation because it will allow queries to be automatically retried if the database connection is interrupted. Note: Automatic retry will be enabled in a future patch. It is generally fine to issue several queries in a row like this. Even though they may be run in separate transactions and/or separate sessions, each one will see the data from the prior calls. If needed, undo- or rollback-like functionality should be handled at a logical level. For an example, look at the code around quotas and reservation_rollback(). Examples:: def get_foo(context, foo): return (model_query(context, models.Foo). filter_by(foo=foo). first()) def update_foo(context, id, newfoo): (model_query(context, models.Foo). filter_by(id=id). update({'foo': newfoo})) def create_foo(context, values): foo_ref = models.Foo() foo_ref.update(values) foo_ref.save() return foo_ref * Within the scope of a single method, keeping all the reads and writes within the context managed by a single session. In this way, the session's __exit__ handler will take care of calling flush() and commit() for you. If using this approach, you should not explicitly call flush() or commit(). Any error within the context of the session will cause the session to emit a ROLLBACK. Database Errors like IntegrityError will be raised in session's __exit__ handler, and any try/except within the context managed by session will not be triggered. And catching other non-database errors in the session will not trigger the ROLLBACK, so exception handlers should always be outside the session, unless the developer wants to do a partial commit on purpose. If the connection is dropped before this is possible, the database will implicitly roll back the transaction. Note: statements in the session scope will not be automatically retried. If you create models within the session, they need to be added, but you do not need to call model.save() :: def create_many_foo(context, foos): session = get_session() with session.begin(): for foo in foos: foo_ref = models.Foo() foo_ref.update(foo) session.add(foo_ref) def update_bar(context, foo_id, newbar): session = get_session() with session.begin(): foo_ref = (model_query(context, models.Foo, session). filter_by(id=foo_id). first()) (model_query(context, models.Bar, session). filter_by(id=foo_ref['bar_id']). 
                 update({'bar': newbar}))

  Note: update_bar is a trivially simple example of using
  "with session.begin". Whereas create_many_foo is a good example of when a
  transaction is needed, it is always best to use as few queries as possible.

  The two queries in update_bar can be better expressed using a single query
  which avoids the need for an explicit transaction. It can be expressed like
  so::

    def update_bar(context, foo_id, newbar):
        subq = (model_query(context, models.Foo.id).
                filter_by(id=foo_id).
                limit(1).
                subquery())
        (model_query(context, models.Bar).
         filter_by(id=subq.as_scalar()).
         update({'bar': newbar}))

  For reference, this emits approximately the following SQL statement::

    UPDATE bar SET bar = ${newbar}
        WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);

  Note: create_duplicate_foo is a trivially simple example of catching an
  exception while using "with session.begin". Here we create two duplicate
  instances with the same primary key; the exception must be caught outside
  of the context managed by a single session::

    def create_duplicate_foo(context):
        foo1 = models.Foo()
        foo2 = models.Foo()
        foo1.id = foo2.id = 1
        session = get_session()
        try:
            with session.begin():
                session.add(foo1)
                session.add(foo2)
        except exception.DBDuplicateEntry as e:
            handle_error(e)

* Passing an active session between methods. Sessions should only be passed
  to private methods. The private method must use a subtransaction; otherwise
  SQLAlchemy will throw an error when you call session.begin() on an existing
  transaction. Public methods should not accept a session parameter and
  should not be involved in sessions within the caller's scope.

  Note that this incurs more overhead in SQLAlchemy than the above means
  due to nesting transactions, and it is not possible to implicitly retry
  failed database operations when using this approach.

  This also makes code somewhat more difficult to read and debug, because a
  single database transaction spans more than one method. Error handling
  becomes less clear in this situation. When this is needed for code clarity,
  it should be clearly documented.

  ::

    def myfunc(foo):
        session = get_session()
        with session.begin():
            # do some database things
            bar = _private_func(foo, session)
        return bar

    def _private_func(foo, session=None):
        if not session:
            session = get_session()
        with session.begin(subtransactions=True):
            # do some other database things
        return bar

There are some things which it is best to avoid:

* Don't keep a transaction open any longer than necessary.

  This means that your "with session.begin()" block should be as short
  as possible, while still containing all the related calls for that
  transaction.

* Avoid "with_lockmode('UPDATE')" when possible.

  In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
  any rows, it will take a gap-lock. This is a form of write-lock on the
  "gap" where no rows exist, and prevents any other writes to that space.
  This can effectively prevent any INSERT into a table by locking the gap
  at the end of the index. Similar problems will occur if the SELECT FOR
  UPDATE has an overly broad WHERE clause, or doesn't properly use an index.

  One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
  number of rows matching a query, and if only one row is returned,
  then issue the SELECT FOR UPDATE.

  The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
  However, this cannot be done until the "deleted" columns are removed and
  proper UNIQUE constraints are added to the tables.
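
  For illustration only (hypothetical table and column names), such an
  upsert looks approximately like this in MySQL::

    INSERT INTO bar (id, bar) VALUES (${id}, ${newbar})
        ON DUPLICATE KEY UPDATE bar = ${newbar};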
Enabling soft deletes: * To use/enable soft-deletes, the SoftDeleteMixin must be added to your model class. For example:: class NovaBase(models.SoftDeleteMixin, models.ModelBase): pass Efficient use of soft deletes: * There are two possible ways to mark a record as deleted:: model.soft_delete() and query.soft_delete(). model.soft_delete() method works with single already fetched entry. query.soft_delete() makes only one db request for all entries that correspond to query. * In almost all cases you should use query.soft_delete(). Some examples:: def soft_delete_bar(): count = model_query(BarModel).find(some_condition).soft_delete() if count == 0: raise Exception("0 entries were soft deleted") def complex_soft_delete_with_synchronization_bar(session=None): if session is None: session = get_session() with session.begin(subtransactions=True): count = (model_query(BarModel). find(some_condition). soft_delete(synchronize_session=True)) # Here synchronize_session is required, because we # don't know what is going on in outer session. if count == 0: raise Exception("0 entries were soft deleted") * There is only one situation where model.soft_delete() is appropriate: when you fetch a single record, work with it, and mark it as deleted in the same transaction. :: def soft_delete_bar_model(): session = get_session() with session.begin(): bar_ref = model_query(BarModel).find(some_condition).first() # Work with bar_ref bar_ref.soft_delete(session=session) However, if you need to work with all entries that correspond to query and then soft delete them you should use query.soft_delete() method:: def soft_delete_multi_models(): session = get_session() with session.begin(): query = (model_query(BarModel, session=session). find(some_condition)) model_refs = query.all() # Work with model_refs query.soft_delete(synchronize_session=False) # synchronize_session=False should be set if there is no outer # session and these entries are not used after this. When working with many rows, it is very important to use query.soft_delete, which issues a single query. Using model.soft_delete(), as in the following example, is very inefficient. :: for bar_ref in bar_refs: bar_ref.soft_delete(session=session) # This will produce count(bar_refs) db requests. 
""" import functools import os.path import re import time from oslo.config import cfg import six from sqlalchemy import exc as sqla_exc from sqlalchemy.interfaces import PoolListener import sqlalchemy.orm from sqlalchemy.pool import NullPool, StaticPool from sqlalchemy.sql.expression import literal_column from solum.openstack.common.db import exception from solum.openstack.common.gettextutils import _ from solum.openstack.common import log as logging from solum.openstack.common import timeutils sqlite_db_opts = [ cfg.StrOpt('sqlite_db', default='solum.sqlite', help='The file name to use with SQLite'), cfg.BoolOpt('sqlite_synchronous', default=True, help='If True, SQLite uses synchronous mode'), ] database_opts = [ cfg.StrOpt('connection', default='sqlite:///' + os.path.abspath(os.path.join(os.path.dirname(__file__), '../', '$sqlite_db')), help='The SQLAlchemy connection string used to connect to the ' 'database', secret=True, deprecated_opts=[cfg.DeprecatedOpt('sql_connection', group='DEFAULT'), cfg.DeprecatedOpt('sql_connection', group='DATABASE'), cfg.DeprecatedOpt('connection', group='sql'), ]), cfg.StrOpt('slave_connection', default='', secret=True, help='The SQLAlchemy connection string used to connect to the ' 'slave database'), cfg.IntOpt('idle_timeout', default=3600, deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', group='DEFAULT'), cfg.DeprecatedOpt('sql_idle_timeout', group='DATABASE'), cfg.DeprecatedOpt('idle_timeout', group='sql')], help='Timeout before idle sql connections are reaped'), cfg.IntOpt('min_pool_size', default=1, deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', group='DEFAULT'), cfg.DeprecatedOpt('sql_min_pool_size', group='DATABASE')], help='Minimum number of SQL connections to keep open in a ' 'pool'), cfg.IntOpt('max_pool_size', default=None, deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', group='DEFAULT'), cfg.DeprecatedOpt('sql_max_pool_size', group='DATABASE')], help='Maximum number of SQL connections to keep open in a ' 'pool'), cfg.IntOpt('max_retries', default=10, deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', group='DEFAULT'), cfg.DeprecatedOpt('sql_max_retries', group='DATABASE')], help='Maximum db connection retries during startup. ' '(setting -1 implies an infinite retry count)'), cfg.IntOpt('retry_interval', default=10, deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', group='DEFAULT'), cfg.DeprecatedOpt('reconnect_interval', group='DATABASE')], help='Interval between retries of opening a sql connection'), cfg.IntOpt('max_overflow', default=None, deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', group='DEFAULT'), cfg.DeprecatedOpt('sqlalchemy_max_overflow', group='DATABASE')], help='If set, use this value for max_overflow with sqlalchemy'), cfg.IntOpt('connection_debug', default=0, deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', group='DEFAULT')], help='Verbosity of SQL debugging information. 
0=None, ' '100=Everything'), cfg.BoolOpt('connection_trace', default=False, deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', group='DEFAULT')], help='Add python stack traces to SQL as comment strings'), cfg.IntOpt('pool_timeout', default=None, deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', group='DATABASE')], help='If set, use this value for pool_timeout with sqlalchemy'), ] CONF = cfg.CONF CONF.register_opts(sqlite_db_opts) CONF.register_opts(database_opts, 'database') LOG = logging.getLogger(__name__) _ENGINE = None _MAKER = None _SLAVE_ENGINE = None _SLAVE_MAKER = None def set_defaults(sql_connection, sqlite_db, max_pool_size=None, max_overflow=None, pool_timeout=None): """Set defaults for configuration variables.""" cfg.set_defaults(database_opts, connection=sql_connection) cfg.set_defaults(sqlite_db_opts, sqlite_db=sqlite_db) # Update the QueuePool defaults if max_pool_size is not None: cfg.set_defaults(database_opts, max_pool_size=max_pool_size) if max_overflow is not None: cfg.set_defaults(database_opts, max_overflow=max_overflow) if pool_timeout is not None: cfg.set_defaults(database_opts, pool_timeout=pool_timeout) def cleanup(): global _ENGINE, _MAKER global _SLAVE_ENGINE, _SLAVE_MAKER if _MAKER: _MAKER.close_all() _MAKER = None if _ENGINE: _ENGINE.dispose() _ENGINE = None if _SLAVE_MAKER: _SLAVE_MAKER.close_all() _SLAVE_MAKER = None if _SLAVE_ENGINE: _SLAVE_ENGINE.dispose() _SLAVE_ENGINE = None class SqliteForeignKeysListener(PoolListener): """Ensures that the foreign key constraints are enforced in SQLite. The foreign key constraints are disabled by default in SQLite, so the foreign key constraints will be enabled here for every database connection """ def connect(self, dbapi_con, con_record): dbapi_con.execute('pragma foreign_keys=ON') def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False, slave_session=False, mysql_traditional_mode=False): """Return a SQLAlchemy session.""" global _MAKER global _SLAVE_MAKER maker = _MAKER if slave_session: maker = _SLAVE_MAKER if maker is None: engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session, mysql_traditional_mode=mysql_traditional_mode) maker = get_maker(engine, autocommit, expire_on_commit) if slave_session: _SLAVE_MAKER = maker else: _MAKER = maker session = maker() return session # note(boris-42): In current versions of DB backends unique constraint # violation messages follow the structure: # # sqlite: # 1 column - (IntegrityError) column c1 is not unique # N columns - (IntegrityError) column c1, c2, ..., N are not unique # # sqlite since 3.7.16: # 1 column - (IntegrityError) UNIQUE constraint failed: k1 # # N columns - (IntegrityError) UNIQUE constraint failed: k1, k2 # # postgres: # 1 column - (IntegrityError) duplicate key value violates unique # constraint "users_c1_key" # N columns - (IntegrityError) duplicate key value violates unique # constraint "name_of_our_constraint" # # mysql: # 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key # 'c1'") # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined # with -' for key 'name_of_our_constraint'") _DUP_KEY_RE_DB = { "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),) } def _raise_if_duplicate_entry_error(integrity_error, engine_name): """Raise exception if two entries are duplicated. 
In this function will be raised DBDuplicateEntry exception if integrity error wrap unique constraint violation. """ def get_columns_from_uniq_cons_or_name(columns): # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2" # where `t` it is table name and columns `c1`, `c2` # are in UniqueConstraint. uniqbase = "uniq_" if not columns.startswith(uniqbase): if engine_name == "postgresql": return [columns[columns.index("_") + 1:columns.rindex("_")]] return [columns] return columns[len(uniqbase):].split("0")[1:] if engine_name not in ["mysql", "sqlite", "postgresql"]: return # FIXME(johannes): The usage of the .message attribute has been # deprecated since Python 2.6. However, the exceptions raised by # SQLAlchemy can differ when using unicode() and accessing .message. # An audit across all three supported engines will be necessary to # ensure there are no regressions. for pattern in _DUP_KEY_RE_DB[engine_name]: match = pattern.match(integrity_error.message) if match: break else: return columns = match.group(1) if engine_name == "sqlite": columns = columns.strip().split(", ") else: columns = get_columns_from_uniq_cons_or_name(columns) raise exception.DBDuplicateEntry(columns, integrity_error) # NOTE(comstud): In current versions of DB backends, Deadlock violation # messages follow the structure: # # mysql: # (OperationalError) (1213, 'Deadlock found when trying to get lock; try ' # 'restarting transaction') <query_str> <query_args> _DEADLOCK_RE_DB = { "mysql": re.compile(r"^.*\(1213, 'Deadlock.*") } def _raise_if_deadlock_error(operational_error, engine_name): """Raise exception on deadlock condition. Raise DBDeadlock exception if OperationalError contains a Deadlock condition. """ re = _DEADLOCK_RE_DB.get(engine_name) if re is None: return # FIXME(johannes): The usage of the .message attribute has been # deprecated since Python 2.6. However, the exceptions raised by # SQLAlchemy can differ when using unicode() and accessing .message. # An audit across all three supported engines will be necessary to # ensure there are no regressions. m = re.match(operational_error.message) if not m: return raise exception.DBDeadlock(operational_error) def _wrap_db_error(f): @functools.wraps(f) def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except UnicodeEncodeError: raise exception.DBInvalidUnicodeParameter() # note(boris-42): We should catch unique constraint violation and # wrap it by our own DBDuplicateEntry exception. Unique constraint # violation is wrapped by IntegrityError. except sqla_exc.OperationalError as e: _raise_if_deadlock_error(e, get_engine().name) # NOTE(comstud): A lot of code is checking for OperationalError # so let's not wrap it for now. raise except sqla_exc.IntegrityError as e: # note(boris-42): SqlAlchemy doesn't unify errors from different # DBs so we must do this. Also in some tables (for example # instance_types) there are more than one unique constraint. This # means we should get names of columns, which values violate # unique constraint, from error message. 
_raise_if_duplicate_entry_error(e, get_engine().name) raise exception.DBError(e) except Exception as e: LOG.exception(_('DB exception wrapped.')) raise exception.DBError(e) return _wrap def get_engine(sqlite_fk=False, slave_engine=False, mysql_traditional_mode=False): """Return a SQLAlchemy engine.""" global _ENGINE global _SLAVE_ENGINE engine = _ENGINE db_uri = CONF.database.connection if slave_engine: engine = _SLAVE_ENGINE db_uri = CONF.database.slave_connection if engine is None: engine = create_engine(db_uri, sqlite_fk=sqlite_fk, mysql_traditional_mode=mysql_traditional_mode) if slave_engine: _SLAVE_ENGINE = engine else: _ENGINE = engine return engine def _synchronous_switch_listener(dbapi_conn, connection_rec): """Switch sqlite connections to non-synchronous mode.""" dbapi_conn.execute("PRAGMA synchronous = OFF") def _add_regexp_listener(dbapi_con, con_record): """Add REGEXP function to sqlite connections.""" def regexp(expr, item): reg = re.compile(expr) return reg.search(six.text_type(item)) is not None dbapi_con.create_function('regexp', 2, regexp) def _thread_yield(dbapi_con, con_record): """Ensure other greenthreads get a chance to be executed. If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will execute instead of time.sleep(0). Force a context switch. With common database backends (eg MySQLdb and sqlite), there is no implicit yield caused by network I/O since they are implemented by C libraries that eventlet cannot monkey patch. """ time.sleep(0) def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): """Ensures that MySQL and DB2 connections are alive. Borrowed from: http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f """ cursor = dbapi_conn.cursor() try: ping_sql = 'select 1' if engine.name == 'ibm_db_sa': # DB2 requires a table expression ping_sql = 'select 1 from (values (1)) AS t1' cursor.execute(ping_sql) except Exception as ex: if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): msg = _('Database server has gone away: %s') % ex LOG.warning(msg) raise sqla_exc.DisconnectionError(msg) else: raise def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy): """Set engine mode to 'traditional'. Required to prevent silent truncates at insert or update operations under MySQL. By default MySQL truncates inserted string if it longer than a declared field just with warning. That is fraught with data corruption. """ dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;") def _is_db_connection_error(args): """Return True if error in connecting to db.""" # NOTE(adam_g): This is currently MySQL specific and needs to be extended # to support Postgres and others. # For the db2, the error code is -30081 since the db2 is still not ready conn_err_codes = ('2002', '2003', '2006', '-30081') for err_code in conn_err_codes: if args.find(err_code) != -1: return True return False def create_engine(sql_connection, sqlite_fk=False, mysql_traditional_mode=False): """Return a new SQLAlchemy engine.""" # NOTE(geekinutah): At this point we could be connecting to the normal # db handle or the slave db handle. Things like # _wrap_db_error aren't going to work well if their # backends don't match. Let's check. 
_assert_matching_drivers() connection_dict = sqlalchemy.engine.url.make_url(sql_connection) engine_args = { "pool_recycle": CONF.database.idle_timeout, "echo": False, 'convert_unicode': True, } # Map our SQL debug level to SQLAlchemy's options if CONF.database.connection_debug >= 100: engine_args['echo'] = 'debug' elif CONF.database.connection_debug >= 50: engine_args['echo'] = True if "sqlite" in connection_dict.drivername: if sqlite_fk: engine_args["listeners"] = [SqliteForeignKeysListener()] engine_args["poolclass"] = NullPool if CONF.database.connection == "sqlite://": engine_args["poolclass"] = StaticPool engine_args["connect_args"] = {'check_same_thread': False} else: if CONF.database.max_pool_size is not None: engine_args['pool_size'] = CONF.database.max_pool_size if CONF.database.max_overflow is not None: engine_args['max_overflow'] = CONF.database.max_overflow if CONF.database.pool_timeout is not None: engine_args['pool_timeout'] = CONF.database.pool_timeout engine = sqlalchemy.create_engine(sql_connection, **engine_args) sqlalchemy.event.listen(engine, 'checkin', _thread_yield) if engine.name in ['mysql', 'ibm_db_sa']: callback = functools.partial(_ping_listener, engine) sqlalchemy.event.listen(engine, 'checkout', callback) if mysql_traditional_mode: sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional) else: LOG.warning(_("This application has not enabled MySQL traditional" " mode, which means silent data corruption may" " occur. Please encourage the application" " developers to enable this mode.")) elif 'sqlite' in connection_dict.drivername: if not CONF.sqlite_synchronous: sqlalchemy.event.listen(engine, 'connect', _synchronous_switch_listener) sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) if (CONF.database.connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb'): _patch_mysqldb_with_stacktrace_comments() try: engine.connect() except sqla_exc.OperationalError as e: if not _is_db_connection_error(e.args[0]): raise remaining = CONF.database.max_retries if remaining == -1: remaining = 'infinite' while True: msg = _('SQL connection failed. 
%s attempts left.') LOG.warning(msg % remaining) if remaining != 'infinite': remaining -= 1 time.sleep(CONF.database.retry_interval) try: engine.connect() break except sqla_exc.OperationalError as e: if (remaining != 'infinite' and remaining == 0) or \ not _is_db_connection_error(e.args[0]): raise return engine class Query(sqlalchemy.orm.query.Query): """Subclass of sqlalchemy.query with soft_delete() method.""" def soft_delete(self, synchronize_session='evaluate'): return self.update({'deleted': literal_column('id'), 'updated_at': literal_column('updated_at'), 'deleted_at': timeutils.utcnow()}, synchronize_session=synchronize_session) class Session(sqlalchemy.orm.session.Session): """Custom Session class to avoid SqlAlchemy Session monkey patching.""" @_wrap_db_error def query(self, *args, **kwargs): return super(Session, self).query(*args, **kwargs) @_wrap_db_error def flush(self, *args, **kwargs): return super(Session, self).flush(*args, **kwargs) @_wrap_db_error def execute(self, *args, **kwargs): return super(Session, self).execute(*args, **kwargs) def get_maker(engine, autocommit=True, expire_on_commit=False): """Return a SQLAlchemy sessionmaker using the given engine.""" return sqlalchemy.orm.sessionmaker(bind=engine, class_=Session, autocommit=autocommit, expire_on_commit=expire_on_commit, query_cls=Query) def _patch_mysqldb_with_stacktrace_comments(): """Adds current stack trace as a comment in queries. Patches MySQLdb.cursors.BaseCursor._do_query. """ import MySQLdb.cursors import traceback old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query def _do_query(self, q): stack = '' for filename, line, method, function in traceback.extract_stack(): # exclude various common things from trace if filename.endswith('session.py') and method == '_do_query': continue if filename.endswith('api.py') and method == 'wrapper': continue if filename.endswith('utils.py') and method == '_inner': continue if filename.endswith('exception.py') and method == '_wrap': continue # db/api is just a wrapper around db/sqlalchemy/api if filename.endswith('db/api.py'): continue # only trace inside solum index = filename.rfind('solum') if index == -1: continue stack += "File:%s:%s Method:%s() Line:%s | " \ % (filename[index:], line, method, function) # strip trailing " | " from stack if stack: stack = stack[:-3] qq = "%s /* %s */" % (q, stack) else: qq = q old_mysql_do_query(self, qq) setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) def _assert_matching_drivers(): """Make sure slave handle and normal handle have the same driver.""" # NOTE(geekinutah): There's no use case for writing to one backend and # reading from another. Who knows what the future holds? if CONF.database.slave_connection == '': return normal = sqlalchemy.engine.url.make_url(CONF.database.connection) slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) assert normal.drivername == slave.drivername
julienvey/solum
solum/openstack/common/db/sqlalchemy/session.py
Python
apache-2.0
33,708
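A minimal usage sketch for the session module above; the connection strings and temporary paths are invented for illustration and are not part of the original file.

# Hypothetical caller of solum.openstack.common.db.sqlalchemy.session; the
# sqlite paths below are illustrative only.
from solum.openstack.common.db.sqlalchemy import session as db_session

db_session.set_defaults(
    sql_connection='sqlite:////tmp/solum-demo.sqlite',
    sqlite_db='/tmp/solum-demo.sqlite')

session = db_session.get_session()
with session.begin():
    # Mapped model instances would be session.add()-ed here; the context
    # manager flushes and commits on exit, as described in the docstring.
    pass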
#!/usr/bin/python import os, sys, re, time import ParseGetPot, Factory from MooseObject import MooseObject from Warehouse import Warehouse """ Parser object for reading GetPot formatted files """ class Parser: def __init__(self, factory, warehouse): self.factory = factory self.warehouse = warehouse self.params_parsed = set() self.params_ignored = set() self.root = None """ Parse the passed filename filling the warehouse with populated InputParameter objects Error codes: 0x00 - Success 0x01 - pyGetpot parsing error 0x02 - Unrecogonized Boolean key/value pair 0x04 - Missing required parameter 0x08 - Bad value 0x10 - Mismatched type 0x20 - Missing Node type parameter If new error codes are added, the static mask value needs to be adjusted """ @staticmethod def getErrorCodeMask(): # See Error codes description above for mask calculation return 0x3F def parse(self, filename): error_code = 0x00 try: self.root = ParseGetPot.readInputFile(filename) except ParseGetPot.ParseException, ex: print "Parse Error in " + filename + ": " + ex.msg return 0x01 # Parse Error error_code = self._parseNode(filename, self.root) if len(self.params_ignored): print 'Warning detected when parsing file "' + os.path.join(os.getcwd(), filename) + '"' print ' Ignored Parameter(s): ', self.params_ignored return error_code def extractParams(self, filename, params, getpot_node): error_code = 0x00 full_name = getpot_node.fullName() # Populate all of the parameters of this test node # using the GetPotParser. We'll loop over the parsed node # so that we can keep track of ignored parameters as well local_parsed = set() for key, value in getpot_node.params.iteritems(): self.params_parsed.add(full_name + '/' + key) local_parsed.add(key) if key in params: if params.type(key) == list: params[key] = value.split(' ') else: if re.match('".*"', value): # Strip quotes params[key] = value[1:-1] else: if key in params.strict_types: # The developer wants to enforce a specific type without setting a valid value strict_type = params.strict_types[key] if strict_type == time.struct_time: # Dates have to be parsed try: params[key] = time.strptime(value, "%m/%d/%Y") except ValueError: print "Bad Value for key '" + full_name + '/' + key + "': " + value params['error_code'] = 0x08 error_code = error_code | params['error_code'] elif strict_type != type(value): print "Mismatched type for key '" + full_name + '/' + key + "': " + value params['error_code'] = 0x10 error_code = error_code | params['error_code'] # Prevent bool types from being stored as strings. This can lead to the # strange situation where string('False') evaluates to true... elif params.isValid(key) and (type(params[key]) == type(bool())): # We support using the case-insensitive strings {true, false} and the string '0', '1'. 
if (value.lower()=='true') or (value=='1'): params[key] = True elif (value.lower()=='false') or (value=='0'): params[key] = False else: print "Unrecognized (key,value) pair: (", key, ',', value, ")" params['error_code'] = 0x02 error_code = error_code | params['error_code'] else: # Otherwise, just do normal assignment params[key] = value else: self.params_ignored.add(key) # Make sure that all required parameters are supplied required_params_missing = params.required_keys() - local_parsed if len(required_params_missing): print 'Error detected when parsing file "' + os.path.join(os.getcwd(), filename) + '"' print ' Required Missing Parameter(s): ', required_params_missing params['error_code'] = 0x04 # Missing required params error_code = params['error_code'] return error_code # private: def _parseNode(self, filename, node): error_code = 0x00 if 'type' in node.params: moose_type = node.params['type'] # Get the valid Params for this type params = self.factory.validParams(moose_type) # Extract the parameters from the Getpot node error_code = error_code | self.extractParams(filename, params, node) # Add factory and warehouse as private params of the object params.addPrivateParam('_factory', self.factory) params.addPrivateParam('_warehouse', self.warehouse) params.addPrivateParam('_parser', self) params.addPrivateParam('_root', self.root) # Build the object moose_object = self.factory.create(moose_type, node.name, params) # Put it in the warehouse self.warehouse.addObject(moose_object) # Are we in a tree node that "looks" like it should contain a buildable object? elif self._looksLikeValidSubBlock(node): print 'Error detected when parsing file "' + os.path.join(os.getcwd(), filename) + '"' print ' Missing "type" parameter in block' error_code = error_code | 0x20 # Loop over the section names and parse them for child in node.children_list: error_code = error_code | self._parseNode(filename, node.children[child]) return error_code # This routine returns a Boolean indicating whether a given block # looks like a valid subblock. In the Testing system, a valid subblock # has a "type" and no children blocks. def _looksLikeValidSubBlock(self, node): if len(node.params.keys()) and len(node.children_list) == 0: return True else: return False
vityurkiv/Ox
python/FactorySystem/Parser.py
Python
lgpl-2.1
5,998
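The Parser above reports failures as bit flags that are OR-ed together; the standalone sketch below (not part of the file) shows how the documented codes relate to getErrorCodeMask().

# Standalone illustration of the bitmask scheme documented in Parser.parse();
# the constant names are descriptive labels, not identifiers from the file.
PARSE_ERROR = 0x01       # pyGetpot parsing error
BAD_BOOLEAN = 0x02       # unrecognized boolean key/value pair
MISSING_REQUIRED = 0x04  # missing required parameter
BAD_VALUE = 0x08         # bad value
MISMATCHED_TYPE = 0x10   # mismatched type
MISSING_TYPE = 0x20      # missing node 'type' parameter

# Parser.getErrorCodeMask() is simply the OR of every defined code.
mask = (PARSE_ERROR | BAD_BOOLEAN | MISSING_REQUIRED |
        BAD_VALUE | MISMATCHED_TYPE | MISSING_TYPE)
assert mask == 0x3F

# Errors accumulate the same way while parsing: a bad value plus a missing
# required parameter yields 0x08 | 0x04 == 0x0C.
assert (BAD_VALUE | MISSING_REQUIRED) == 0x0C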
# Necessary to suppress an error in Python 2.7.3 at the completion of
# python setup.py test.
# See http://bugs.python.org/issue15881#msg170215
import multiprocessing  # NOQA

import distutils.command.clean
import os
import pkg_resources
import setuptools
import subprocess


class Clean(distutils.command.clean.clean):
    def run(self):
        subprocess.call('find . -name *.pyc -delete'.split(' '))
        subprocess.call('rm -rf *.egg/ test_results/ .coverage .noseids'.split(' '))
        distutils.command.clean.clean.run(self)


def read_file(file_name):
    """Utility function to read a file."""
    return open(os.path.join(os.path.dirname(__file__), file_name)).read()


def read_first_line(file_name):
    """Read the first line from the specified file."""
    with open(os.path.join(os.path.dirname(__file__), file_name)) as f:
        return f.readline().strip()


def read_requirements(file_path):
    return [
        i.strip() for i in pkg_resources.resource_string(__name__, file_path).split()
        if i.strip()[0:1] != '#' and i.strip()[0:2] != '--' and len(i.strip()) > 0
    ]


REQUIREMENTS = read_requirements('requirements.txt')
TEST_REQUIREMENTS = read_requirements('requirements-dev.txt')

setuptools.setup(name='PyLCP',
                 version=read_first_line('version_number.txt'),
                 description="Python client library for Points Loyalty Commerce Platform.",
                 long_description=read_file('README.md'),
                 classifiers=[
                     'Development Status :: 5 - Production/Stable',
                     'Environment :: Web Environment',
                     'Intended Audience :: Developers',
                     'License :: OSI Approved :: BSD License',
                     'Natural Language :: English',
                     'Operating System :: POSIX :: Linux',
                     'Programming Language :: Python',
                     'Programming Language :: Python :: 2.7',
                     'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
                     'Topic :: Software Development :: Libraries :: Python Modules'
                 ],
                 keywords='LCP REST',
                 author='Points International',
                 author_email='',
                 url='',
                 license='',
                 packages=setuptools.find_packages(exclude=['tests']),
                 include_package_data=True,
                 zip_safe=False,
                 install_requires=REQUIREMENTS,
                 entry_points="""
                 # -*- Entry points: -*-
                 """,
                 test_suite='nose.collector',
                 tests_require=TEST_REQUIREMENTS,
                 cmdclass={
                     'clean': Clean
                 },
                 )
bradsokol/PyLCP
setup.py
Python
bsd-3-clause
2,835
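read_requirements() in the setup script above keeps only tokens that are neither comments nor pip option flags; the simplified standalone sketch below restates that filter on an invented token list.

# Standalone restatement of the read_requirements() filter above, applied to
# an invented token list instead of a real requirements file.
tokens = ['requests>=2.0', '--index-url', 'six', '#comment', '']

requirements = [
    t.strip() for t in tokens
    if t.strip()[0:1] != '#' and t.strip()[0:2] != '--' and len(t.strip()) > 0
]

assert requirements == ['requests>=2.0', 'six']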
# # Tests for the lambertw function, # Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il # Distributed under the same license as SciPy itself. # # [1] mpmath source code, Subversion revision 992 # http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992 from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_, assert_equal, assert_array_almost_equal from scipy.special import lambertw from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_ from scipy.special._testutils import FuncData def test_values(): assert_(isnan(lambertw(nan))) assert_equal(lambertw(inf,1).real, inf) assert_equal(lambertw(inf,1).imag, 2*pi) assert_equal(lambertw(-inf,1).real, inf) assert_equal(lambertw(-inf,1).imag, 3*pi) assert_equal(lambertw(1.), lambertw(1., 0)) data = [ (0,0, 0), (0+0j,0, 0), (inf,0, inf), (0,-1, -inf), (0,1, -inf), (0,3, -inf), (e,0, 1), (1,0, 0.567143290409783873), (-pi/2,0, 1j*pi/2), (-log(2)/2,0, -log(2)), (0.25,0, 0.203888354702240164), (-0.25,0, -0.357402956181388903), (-1./10000,0, -0.000100010001500266719), (-0.25,-1, -2.15329236411034965), (0.25,-1, -3.00899800997004620-4.07652978899159763j), (-0.25,-1, -2.15329236411034965), (0.25,1, -3.00899800997004620+4.07652978899159763j), (-0.25,1, -3.48973228422959210+7.41405453009603664j), (-4,0, 0.67881197132094523+1.91195078174339937j), (-4,1, -0.66743107129800988+7.76827456802783084j), (-4,-1, 0.67881197132094523-1.91195078174339937j), (1000,0, 5.24960285240159623), (1000,1, 4.91492239981054535+5.44652615979447070j), (1000,-1, 4.91492239981054535-5.44652615979447070j), (1000,5, 3.5010625305312892+29.9614548941181328j), (3+4j,0, 1.281561806123775878+0.533095222020971071j), (-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j), (3+4j,1, -0.11691092896595324+5.61888039871282334j), (3+4j,-1, 0.25856740686699742-3.85211668616143559j), (-0.5,-1, -0.794023632344689368-0.770111750510379110j), (-1./10000,1, -11.82350837248724344+6.80546081842002101j), (-1./10000,-1, -11.6671145325663544), (-1./10000,-2, -11.82350837248724344-6.80546081842002101j), (-1./100000,4, -14.9186890769540539+26.1856750178782046j), (-1./100000,5, -15.0931437726379218666+32.5525721210262290086j), ((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j), ((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j), ((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j), ((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j), (-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j), (-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j), (-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j), (-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j), (pi,0, 1.073658194796149172092178407024821347547745350410314531), # Former bug in generated branch, (-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j), (-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j), (-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j), (-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j), ] data = array(data, dtype=complex_) def w(x, y): return lambertw(x, y.real.astype(int)) olderr = np.seterr(all='ignore') try: FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check() finally: np.seterr(**olderr) def test_ufunc(): assert_array_almost_equal( lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873]) def test_lambertw_ufunc_loop_selection(): # see https://github.com/scipy/scipy/issues/4895 dt = 
np.dtype(np.complex128) assert_equal(lambertw(0, 0, 0).dtype, dt) assert_equal(lambertw([0], 0, 0).dtype, dt) assert_equal(lambertw(0, [0], 0).dtype, dt) assert_equal(lambertw(0, 0, [0]).dtype, dt) assert_equal(lambertw([0], [0], [0]).dtype, dt)
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/scipy/special/tests/test_lambertw.py
Python
mit
4,318
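Independent of the mpmath-derived reference values above, the Lambert W function satisfies W(z) * exp(W(z)) = z on every branch; the tiny standalone check below confirms that identity for one arbitrary point and is not part of the test module.

# Standalone sanity check of the Lambert W defining identity; the sample
# point is arbitrary and not taken from the table above.
import numpy as np
from scipy.special import lambertw

z = 0.25 + 0.1j
w = lambertw(z, k=0)  # principal branch
assert np.allclose(w * np.exp(w), z)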
# -*- coding: utf-8 -*- # # Copyright (C) 2015-2017 Bitergia # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA. # # Authors: # Santiago Dueñas <sduenas@bitergia.com> # Germán Poo-Caamaño <gpoo@gnome.org> # import datetime import email import logging import mailbox import re import sys import xml.etree.ElementTree import dateutil.parser import dateutil.rrule import dateutil.tz import requests from .errors import ParseError logger = logging.getLogger(__name__) DEFAULT_DATETIME = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=dateutil.tz.tzutc()) def check_compressed_file_type(filepath): """Check if filename is a compressed file supported by the tool. This function uses magic numbers (first four bytes) to determine the type of the file. Supported types are 'gz' and 'bz2'. When the filetype is not supported, the function returns `None`. :param filepath: path to the file :returns: 'gz' or 'bz2'; `None` if the type is not supported """ def compressed_file_type(content): magic_dict = { b'\x1f\x8b\x08': 'gz', b'\x42\x5a\x68': 'bz2' } for magic, filetype in magic_dict.items(): if content.startswith(magic): return filetype return None with open(filepath, mode='rb') as f: magic_number = f.read(4) return compressed_file_type(magic_number) def months_range(from_date, to_date): """Generate a months range. Generator of months starting on `from_date` util `to_date`. Each returned item is a tuple of two datatime objects like in (month, month+1). Thus, the result will follow the sequence: ((fd, fd+1), (fd+1, fd+2), ..., (td-2, td-1), (td-1, td)) :param from_date: generate dates starting on this month :param to_date: generate dates until this month :result: a generator of months range """ start = datetime.datetime(from_date.year, from_date.month, 1) end = datetime.datetime(to_date.year, to_date.month, 1) month_gen = dateutil.rrule.rrule(freq=dateutil.rrule.MONTHLY, dtstart=start, until=end) months = [d for d in month_gen] pos = 0 for x in range(1, len(months)): yield months[pos], months[x] pos = x def message_to_dict(msg): """Convert an email message into a dictionary. This function transforms an `email.message.Message` object into a dictionary. Headers are stored as key:value pairs while the body of the message is stored inside `body` key. Body may have two other keys inside, 'plain', for plain body messages and 'html', for HTML encoded messages. The returned dictionary has the type `requests.structures.CaseInsensitiveDict` due to same headers with different case formats can appear in the same message. 
:param msg: email message of type `email.message.Message` :returns : dictionary of type `requests.structures.CaseInsensitiveDict` :raises ParseError: when an error occurs transforming the message to a dictionary """ def parse_headers(msg): headers = {} for header, value in msg.items(): hv = [] for text, charset in email.header.decode_header(value): if type(text) == bytes: charset = charset if charset else 'utf-8' try: text = text.decode(charset, errors='surrogateescape') except (UnicodeError, LookupError): # Try again with a 7bit encoding text = text.decode('ascii', errors='surrogateescape') hv.append(text) v = ' '.join(hv) headers[header] = v if v else None return headers def parse_payload(msg): body = {} if not msg.is_multipart(): payload = decode_payload(msg) subtype = msg.get_content_subtype() body[subtype] = [payload] else: # Include all the attached texts if it is multipart # Ignores binary parts by default for part in email.iterators.typed_subpart_iterator(msg): payload = decode_payload(part) subtype = part.get_content_subtype() body.setdefault(subtype, []).append(payload) return {k: '\n'.join(v) for k, v in body.items()} def decode_payload(msg_or_part): charset = msg_or_part.get_content_charset('utf-8') payload = msg_or_part.get_payload(decode=True) try: payload = payload.decode(charset, errors='surrogateescape') except (UnicodeError, LookupError): # Try again with a 7bit encoding payload = payload.decode('ascii', errors='surrogateescape') return payload # The function starts here message = requests.structures.CaseInsensitiveDict() if isinstance(msg, mailbox.mboxMessage): message['unixfrom'] = msg.get_from() else: message['unixfrom'] = None try: for k, v in parse_headers(msg).items(): message[k] = v message['body'] = parse_payload(msg) except UnicodeError as e: raise ParseError(cause=str(e)) return message def remove_invalid_xml_chars(raw_xml): """Remove control and invalid characters from an xml stream. Looks for invalid characters and subtitutes them with whitespaces. This solution is based on these two posts: Olemis Lang's reponse on StackOverflow (http://stackoverflow.com/questions/1707890) and lawlesst's on GitHub Gist (https://gist.github.com/lawlesst/4110923), that is based on the previous answer. :param xml: XML stream :returns: a purged XML stream """ illegal_unichrs = [(0x00, 0x08), (0x0B, 0x1F), (0x7F, 0x84), (0x86, 0x9F)] illegal_ranges = ['%s-%s' % (chr(low), chr(high)) for (low, high) in illegal_unichrs if low < sys.maxunicode] illegal_xml_re = re.compile('[%s]' % ''.join(illegal_ranges)) purged_xml = '' for c in raw_xml: if illegal_xml_re.search(c) is not None: c = ' ' purged_xml += c return purged_xml def xml_to_dict(raw_xml): """Convert a XML stream into a dictionary. This function transforms a xml stream into a dictionary. The attributes are stored as single elements while child nodes are stored into lists. The text node is stored using the special key '__text__'. This code is based on Winston Ewert's solution to this problem. See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict for more info. The code was licensed as cc by-sa 3.0. 
:param raw_xml: XML stream :returns: a dict with the XML data :raises ParseError: raised when an error occurs parsing the given XML stream """ def node_to_dict(node): d = {} d.update(node.items()) text = getattr(node, 'text', None) if text is not None: d['__text__'] = text childs = {} for child in node: childs.setdefault(child.tag, []).append(node_to_dict(child)) d.update(childs.items()) return d purged_xml = remove_invalid_xml_chars(raw_xml) try: tree = xml.etree.ElementTree.fromstring(purged_xml) except xml.etree.ElementTree.ParseError as e: cause = "XML stream %s" % (str(e)) raise ParseError(cause=cause) d = node_to_dict(tree) return d
sduenas/perceval
perceval/utils.py
Python
gpl-3.0
8,300
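A minimal usage sketch for xml_to_dict() from the module above; the XML payload is invented for illustration and is not part of the original file.

# Illustrative call to perceval.utils.xml_to_dict(); the XML sample is made up.
from perceval.utils import xml_to_dict

raw_xml = '<bug id="1"><comment author="alice">It works</comment></bug>'
data = xml_to_dict(raw_xml)

assert data['id'] == '1'                              # attributes become keys
assert data['comment'][0]['author'] == 'alice'        # children are grouped in lists
assert data['comment'][0]['__text__'] == 'It works'   # text is stored under '__text__'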
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('information', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='informationdetail',
            name='visibility',
            field=models.CharField(default=b'Unpublished', max_length=20, choices=[(b'Published', b'Published'), (b'Unpublished', b'Unpublished')]),
        ),
    ]
zacherytapp/wedding
weddingapp/apps/information/migrations/0002_auto_20151102_0334.py
Python
bsd-3-clause
508
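For context, the field this migration leaves on the model would look roughly like the sketch below; the app's models.py is not included here, so the class is reconstructed from the AlterField arguments and is only an approximation.

# Approximate model state implied by the AlterField above; reconstructed for
# illustration, not copied from the application's models.py.
from django.db import models


class InformationDetail(models.Model):
    visibility = models.CharField(
        max_length=20,
        default='Unpublished',
        choices=[('Published', 'Published'),
                 ('Unpublished', 'Unpublished')],
    )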
# Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Triggers define what causes a Jenkins job to start building. **Component**: triggers :Macro: trigger :Entry Point: jenkins_jobs.triggers Example:: job: name: test_job triggers: - timed: '@daily' """ import six import xml.etree.ElementTree as XML import jenkins_jobs.modules.base from jenkins_jobs.modules import hudson_model from jenkins_jobs.errors import (InvalidAttributeError, JenkinsJobsException, MissingAttributeError) import logging import re try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict logger = logging.getLogger(str(__name__)) def gerrit_handle_legacy_configuration(data): hyphenizer = re.compile("[A-Z]") def hyphenize(attr): """Convert strings like triggerOn to trigger-on. """ return hyphenizer.sub(lambda x: "-%s" % x.group(0).lower(), attr) def convert_dict(d, old_keys): for old_key in old_keys: if old_key in d: new_key = hyphenize(old_key) logger.warn("'%s' is deprecated and will be removed after " "1.0.0, please use '%s' instead", old_key, new_key) d[new_key] = d[old_key] del d[old_key] convert_dict(data, [ 'triggerOnPatchsetUploadedEvent', 'triggerOnChangeAbandonedEvent', 'triggerOnChangeMergedEvent', 'triggerOnChangeRestoredEvent', 'triggerOnCommentAddedEvent', 'triggerOnDraftPublishedEvent', 'triggerOnRefUpdatedEvent', 'triggerApprovalCategory', 'triggerApprovalValue', 'overrideVotes', 'gerritBuildSuccessfulVerifiedValue', 'gerritBuildFailedVerifiedValue', 'failureMessage', 'skipVote', ]) for project in data['projects']: convert_dict(project, [ 'projectCompareType', 'projectPattern', 'branchCompareType', 'branchPattern', ]) old_format_events = OrderedDict( (key, should_register) for key, should_register in six.iteritems(data) if key.startswith('trigger-on-')) trigger_on = data.setdefault('trigger-on', []) if old_format_events: logger.warn("The events: %s; which you used is/are deprecated. " "Please use 'trigger-on' instead.", ', '.join(old_format_events)) if old_format_events and trigger_on: raise JenkinsJobsException( 'Both, the new format (trigger-on) and old format (trigger-on-*) ' 'gerrit events format found. 
Please use either the new or the old ' 'format of trigger events definition.') trigger_on.extend(event_name[len('trigger-on-'):] for event_name, should_register in six.iteritems(old_format_events) if should_register) for idx, event in enumerate(trigger_on): if event == 'comment-added-event': trigger_on[idx] = events = OrderedDict() events['comment-added-event'] = OrderedDict(( ('approval-category', data['trigger-approval-category']), ('approval-value', data['trigger-approval-value']) )) def build_gerrit_triggers(xml_parent, data): available_simple_triggers = { 'change-abandoned-event': 'PluginChangeAbandonedEvent', 'change-merged-event': 'PluginChangeMergedEvent', 'change-restored-event': 'PluginChangeRestoredEvent', 'draft-published-event': 'PluginDraftPublishedEvent', 'patchset-uploaded-event': 'PluginPatchsetCreatedEvent', 'patchset-created-event': 'PluginPatchsetCreatedEvent', 'ref-updated-event': 'PluginRefUpdatedEvent', } tag_namespace = 'com.sonyericsson.hudson.plugins.gerrit.trigger.' \ 'hudsontrigger.events' trigger_on_events = XML.SubElement(xml_parent, 'triggerOnEvents') for event in data.get('trigger-on', []): if isinstance(event, six.string_types): tag_name = available_simple_triggers.get(event) if event == 'patchset-uploaded-event': logger.warn("'%s' is deprecated. Use 'patchset-created-event' " "format instead.", event) if not tag_name: known = ', '.join(available_simple_triggers.keys() + ['comment-added-event', 'comment-added-contains-event']) msg = ("The event '%s' under 'trigger-on' is not one of the " "known: %s.") % (event, known) raise JenkinsJobsException(msg) XML.SubElement(trigger_on_events, '%s.%s' % (tag_namespace, tag_name)) else: if 'patchset-created-event' in event.keys(): pce = event['patchset-created-event'] pc = XML.SubElement( trigger_on_events, '%s.%s' % (tag_namespace, 'PluginPatchsetCreatedEvent')) XML.SubElement(pc, 'excludeDrafts').text = str( pce.get('exclude-drafts', False)).lower() XML.SubElement(pc, 'excludeTrivialRebase').text = str( pce.get('exclude-trivial-rebase', False)).lower() XML.SubElement(pc, 'excludeNoCodeChange').text = str( pce.get('exclude-no-code-change', False)).lower() if 'comment-added-event' in event.keys(): comment_added_event = event['comment-added-event'] cadded = XML.SubElement( trigger_on_events, '%s.%s' % (tag_namespace, 'PluginCommentAddedEvent')) XML.SubElement(cadded, 'verdictCategory').text = \ comment_added_event['approval-category'] XML.SubElement( cadded, 'commentAddedTriggerApprovalValue').text = \ str(comment_added_event['approval-value']) if 'comment-added-contains-event' in event.keys(): comment_added_event = event['comment-added-contains-event'] caddedc = XML.SubElement( trigger_on_events, '%s.%s' % (tag_namespace, 'PluginCommentAddedContainsEvent')) XML.SubElement(caddedc, 'commentAddedCommentContains').text = \ comment_added_event['comment-contains-value'] def build_gerrit_skip_votes(xml_parent, data): outcomes = [('successful', 'onSuccessful'), ('failed', 'onFailed'), ('unstable', 'onUnstable'), ('notbuilt', 'onNotBuilt')] skip_vote_node = XML.SubElement(xml_parent, 'skipVote') skip_vote = data.get('skip-vote', {}) for result_kind, tag_name in outcomes: if skip_vote.get(result_kind, False): XML.SubElement(skip_vote_node, tag_name).text = 'true' else: XML.SubElement(skip_vote_node, tag_name).text = 'false' def gerrit(parser, xml_parent, data): """yaml: gerrit Trigger on a Gerrit event. Requires the Jenkins :jenkins-wiki:`Gerrit Trigger Plugin <Gerrit+Trigger>` version >= 2.6.0. 
:arg list trigger-on: Events to react on. Please use either the new **trigger-on**, or the old **trigger-on-*** events definitions. You cannot use both at once. .. _trigger_on: :Trigger on: * **patchset-created-event** (`dict`) -- Trigger upon patchset creation. :Patchset created: * **exclude-drafts** (`bool`) -- exclude drafts (Default: False) * **exclude-trivial-rebase** (`bool`) -- exclude trivial rebase (Default: False) * **exclude-no-code-change** (`bool`) -- exclude no code change (Default: False) Exclude drafts|trivial-rebase|no-code-change needs Gerrit Trigger v2.12.0 * **patchset-uploaded-event** -- Trigger upon patchset creation (this is a alias for `patchset-created-event`). .. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`. * **change-abandoned-event** -- Trigger on patchset abandoned. Requires Gerrit Trigger Plugin version >= 2.8.0. * **change-merged-event** -- Trigger on change merged * **change-restored-event** -- Trigger on change restored. Requires Gerrit Trigger Plugin version >= 2.8.0 * **draft-published-event** -- Trigger on draft published event. * **ref-updated-event** -- Trigger on ref-updated. * **comment-added-event** (`dict`) -- Trigger on comment added. :Comment added: * **approval-category** (`str`) -- Approval (verdict) category (for example 'APRV', 'CRVW', 'VRIF' -- see `Gerrit access control <http://gerrit.googlecode.com/svn/documentation/2.1/ access-control.html#categories>`_ * **approval-value** -- Approval value for the comment added. * **comment-added-contains-event** (`dict`) -- Trigger on comment added contains Regular Expression. :Comment added contains: * **comment-contains-value** (`str`) -- Comment contains Regular Expression value. :arg bool trigger-on-patchset-uploaded-event: Trigger on patchset upload. .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. :arg bool trigger-on-change-abandoned-event: Trigger on change abandoned. Requires Gerrit Trigger Plugin version >= 2.8.0 .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. :arg bool trigger-on-change-merged-event: Trigger on change merged .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. :arg bool trigger-on-change-restored-event: Trigger on change restored. Requires Gerrit Trigger Plugin version >= 2.8.0 .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. :arg bool trigger-on-comment-added-event: Trigger on comment added .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. :arg bool trigger-on-draft-published-event: Trigger on draft published event .. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`. :arg bool trigger-on-ref-updated-event: Trigger on ref-updated .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. :arg str trigger-approval-category: Approval category for comment added .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. :arg int trigger-approval-value: Approval value for comment added .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`. 
:arg bool override-votes: Override default vote values :arg int gerrit-build-started-verified-value: Started ''Verified'' value :arg int gerrit-build-successful-verified-value: Successful ''Verified'' value :arg int gerrit-build-failed-verified-value: Failed ''Verified'' value :arg int gerrit-build-unstable-verified-value: Unstable ''Verified'' value :arg int gerrit-build-notbuilt-verified-value: Not built ''Verified'' value :arg int gerrit-build-started-codereview-value: Started ''CodeReview'' value :arg int gerrit-build-successful-codereview-value: Successful ''CodeReview'' value :arg int gerrit-build-failed-codereview-value: Failed ''CodeReview'' value :arg int gerrit-build-unstable-codereview-value: Unstable ''CodeReview'' value :arg int gerrit-build-notbuilt-codereview-value: Not built ''CodeReview'' value :arg str failure-message: Message to leave on failure (default '') :arg str successful-message: Message to leave on success (default '') :arg str unstable-message: Message to leave when unstable (default '') :arg str notbuilt-message: Message to leave when not built (default '') :arg str failure-message-file: Sets the filename within the workspace from which to retrieve the unsuccessful review message. (optional) :arg list projects: list of projects to match :Project: * **project-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or ''REG_EXP'' * **project-pattern** (`str`) -- Project name pattern to match * **branch-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or ''REG_EXP'' (not used if `branches` list is specified) .. deprecated:: 1.1.0 Please use :ref:`branches <branches>`. * **branch-pattern** (`str`) -- Branch name pattern to match (not used if `branches` list is specified) .. deprecated:: 1.1.0 Please use :ref:`branches <branches>`. .. _branches: * **branches** (`list`) -- List of branches to match (optional) :Branch: * **branch-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or ''REG_EXP'' (optional) (default ''PLAIN'') * **branch-pattern** (`str`) -- Branch name pattern to match * **file-paths** (`list`) -- List of file paths to match (optional) :File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT'' or ''REG_EXP'' (optional) (default ''PLAIN'') * **pattern** (`str`) -- File path pattern to match * **forbidden-file-paths** (`list`) -- List of file paths to skip triggering (optional) :Forbidden File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT'' or ''REG_EXP'' (optional) (default ''PLAIN'') * **pattern** (`str`) -- File path pattern to match * **topics** (`list`) -- List of topics to match (optional) :File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT'' or ''REG_EXP'' (optional) (default ''PLAIN'') * **pattern** (`str`) -- Topic name pattern to match :arg dict skip-vote: map of build outcomes for which Jenkins must skip vote. Requires Gerrit Trigger Plugin version >= 2.7.0 :Outcome: * **successful** (`bool`) * **failed** (`bool`) * **unstable** (`bool`) * **notbuilt** (`bool`) :arg bool silent: When silent mode is on there will be no communication back to Gerrit, i.e. no build started/failed/successful approve messages etc. If other non-silent jobs are triggered by the same Gerrit event as this job, the result of this job's build will not be counted in the end result of the other jobs. (default false) :arg bool silent-start: Sets silent start mode to on or off. When silent start mode is on there will be no 'build started' messages sent back to Gerrit. 
(default false) :arg bool escape-quotes: escape quotes in the values of Gerrit change parameters (default true) :arg bool no-name-and-email: Do not pass compound 'name and email' parameters (default false) :arg bool readable-message: If parameters regarding multiline text, e.g. commit message, should be as human readable or not. If false, those parameters are Base64 encoded to keep environment variables clean. (default false) :arg str dependency-jobs: All jobs on which this job depends. If a commit should trigger both a dependency and this job, the dependency will be built first. Use commas to separate job names. Beware of cyclic dependencies. (optional) :arg str notification-level: Defines to whom email notifications should be sent. This can either be nobody ('NONE'), the change owner ('OWNER'), reviewers and change owner ('OWNER_REVIEWERS'), all interested users i.e. owning, reviewing, watching, and starring ('ALL') or server default ('SERVER_DEFAULT'). (default 'SERVER_DEFAULT') :arg bool dynamic-trigger-enabled: Enable/disable the dynamic trigger (default false) :arg str dynamic-trigger-url: if you specify this option, the Gerrit trigger configuration will be fetched from there on a regular interval :arg bool trigger-for-unreviewed-patches: trigger patchset-created events for changes that were uploaded while connection to Gerrit was down (default false). Requires Gerrit Trigger Plugin version >= 2.11.0 :arg str custom-url: Custom URL for a message sent to Gerrit. Build details URL will be used if empty. (default '') :arg str server-name: Name of the server to trigger on, or ''__ANY__'' to trigger on any configured Gerrit server (default '__ANY__'). Requires Gerrit Trigger Plugin version >= 2.11.0 You may select one or more Gerrit events upon which to trigger. You must also supply at least one project and branch, optionally more. If you select the comment-added trigger, you should also indicate which approval category and value you want to trigger the job. Until version 0.4.0 of Jenkins Job Builder, camelCase keys were used to configure Gerrit Trigger Plugin, instead of hyphenated-keys. While still supported, camedCase keys are deprecated and should not be used. Support for this will be removed after 1.0.0 is released. Example: .. literalinclude:: /../../tests/triggers/fixtures/gerrit004.yaml :language: yaml """ def get_compare_type(xml_tag, compare_type): valid_compare_types = ['PLAIN', 'ANT', 'REG_EXP'] if compare_type not in valid_compare_types: raise InvalidAttributeError(xml_tag, compare_type, valid_compare_types) return compare_type gerrit_handle_legacy_configuration(data) projects = data['projects'] gtrig = XML.SubElement(xml_parent, 'com.sonyericsson.hudson.plugins.gerrit.trigger.' 'hudsontrigger.GerritTrigger') XML.SubElement(gtrig, 'spec') gprojects = XML.SubElement(gtrig, 'gerritProjects') for project in projects: gproj = XML.SubElement(gprojects, 'com.sonyericsson.hudson.plugins.gerrit.' 
'trigger.hudsontrigger.data.GerritProject') XML.SubElement(gproj, 'compareType').text = get_compare_type( 'project-compare-type', project['project-compare-type']) XML.SubElement(gproj, 'pattern').text = project['project-pattern'] branches = XML.SubElement(gproj, 'branches') project_branches = project.get('branches', []) if 'branch-compare-type' in project and 'branch-pattern' in project: warning = 'branch-compare-type and branch-pattern at project ' \ 'level are deprecated and support will be removed ' \ 'in a later version of Jenkins Job Builder; ' if project_branches: warning += 'discarding values and using values from ' \ 'branches section' else: warning += 'please use branches section instead' logger.warn(warning) if not project_branches: project_branches = [ {'branch-compare-type': project['branch-compare-type'], 'branch-pattern': project['branch-pattern']}] for branch in project_branches: gbranch = XML.SubElement( branches, 'com.sonyericsson.hudson.plugins.' 'gerrit.trigger.hudsontrigger.data.Branch') XML.SubElement(gbranch, 'compareType').text = get_compare_type( 'branch-compare-type', branch['branch-compare-type']) XML.SubElement(gbranch, 'pattern').text = branch['branch-pattern'] project_file_paths = project.get('file-paths', []) if project_file_paths: fps_tag = XML.SubElement(gproj, 'filePaths') for file_path in project_file_paths: fp_tag = XML.SubElement(fps_tag, 'com.sonyericsson.hudson.plugins.' 'gerrit.trigger.hudsontrigger.data.' 'FilePath') XML.SubElement(fp_tag, 'compareType').text = get_compare_type( 'compare-type', file_path.get('compare-type', 'PLAIN')) XML.SubElement(fp_tag, 'pattern').text = file_path['pattern'] project_forbidden_file_paths = project.get('forbidden-file-paths', []) if project_forbidden_file_paths: ffps_tag = XML.SubElement(gproj, 'forbiddenFilePaths') for forbidden_file_path in project_forbidden_file_paths: ffp_tag = XML.SubElement(ffps_tag, 'com.sonyericsson.hudson.plugins.' 'gerrit.trigger.hudsontrigger.data.' 'FilePath') XML.SubElement(ffp_tag, 'compareType').text = get_compare_type( 'compare-type', forbidden_file_path.get('compare-type', 'PLAIN')) XML.SubElement(ffp_tag, 'pattern').text = \ forbidden_file_path['pattern'] topics = project.get('topics', []) if topics: topics_tag = XML.SubElement(gproj, 'topics') for topic in topics: topic_tag = XML.SubElement(topics_tag, 'com.sonyericsson.hudson.plugins.' 'gerrit.trigger.hudsontrigger.data.' 
'Topic') XML.SubElement(topic_tag, 'compareType').text = \ get_compare_type('compare-type', topic.get('compare-type', 'PLAIN')) XML.SubElement(topic_tag, 'pattern').text = topic['pattern'] build_gerrit_skip_votes(gtrig, data) XML.SubElement(gtrig, 'silentMode').text = str( data.get('silent', False)).lower() XML.SubElement(gtrig, 'silentStartMode').text = str( data.get('silent-start', False)).lower() XML.SubElement(gtrig, 'escapeQuotes').text = str( data.get('escape-quotes', True)).lower() XML.SubElement(gtrig, 'noNameAndEmailParameters').text = str( data.get('no-name-and-email', False)).lower() XML.SubElement(gtrig, 'readableMessage').text = str( data.get('readable-message', False)).lower() XML.SubElement(gtrig, 'dependencyJobsNames').text = str( data.get('dependency-jobs', '')) notification_levels = ['NONE', 'OWNER', 'OWNER_REVIEWERS', 'ALL', 'SERVER_DEFAULT'] notification_level = data.get('notification-level', 'SERVER_DEFAULT') if notification_level not in notification_levels: raise InvalidAttributeError('notification-level', notification_level, notification_levels) if notification_level == 'SERVER_DEFAULT': XML.SubElement(gtrig, 'notificationLevel').text = '' else: XML.SubElement(gtrig, 'notificationLevel').text = notification_level XML.SubElement(gtrig, 'dynamicTriggerConfiguration').text = str( data.get('dynamic-trigger-enabled', False)) XML.SubElement(gtrig, 'triggerConfigURL').text = str( data.get('dynamic-trigger-url', '')) XML.SubElement(gtrig, 'allowTriggeringUnreviewedPatches').text = str( data.get('trigger-for-unreviewed-patches', False)).lower() build_gerrit_triggers(gtrig, data) override = str(data.get('override-votes', False)).lower() if override == 'true': for yamlkey, xmlkey in [('gerrit-build-started-verified-value', 'gerritBuildStartedVerifiedValue'), ('gerrit-build-successful-verified-value', 'gerritBuildSuccessfulVerifiedValue'), ('gerrit-build-failed-verified-value', 'gerritBuildFailedVerifiedValue'), ('gerrit-build-unstable-verified-value', 'gerritBuildUnstableVerifiedValue'), ('gerrit-build-notbuilt-verified-value', 'gerritBuildNotBuiltVerifiedValue'), ('gerrit-build-started-codereview-value', 'gerritBuildStartedCodeReviewValue'), ('gerrit-build-successful-codereview-value', 'gerritBuildSuccessfulCodeReviewValue'), ('gerrit-build-failed-codereview-value', 'gerritBuildFailedCodeReviewValue'), ('gerrit-build-unstable-codereview-value', 'gerritBuildUnstableCodeReviewValue'), ('gerrit-build-notbuilt-codereview-value', 'gerritBuildNotBuiltCodeReviewValue')]: if data.get(yamlkey) is not None: # str(int(x)) makes input values like '+1' work XML.SubElement(gtrig, xmlkey).text = str( int(data.get(yamlkey))) XML.SubElement(gtrig, 'buildStartMessage').text = str( data.get('start-message', '')) XML.SubElement(gtrig, 'buildFailureMessage').text = \ data.get('failure-message', '') XML.SubElement(gtrig, 'buildSuccessfulMessage').text = str( data.get('successful-message', '')) XML.SubElement(gtrig, 'buildUnstableMessage').text = str( data.get('unstable-message', '')) XML.SubElement(gtrig, 'buildNotBuiltMessage').text = str( data.get('notbuilt-message', '')) XML.SubElement(gtrig, 'buildUnsuccessfulFilepath').text = str( data.get('failure-message-file', '')) XML.SubElement(gtrig, 'customUrl').text = str(data.get('custom-url', '')) XML.SubElement(gtrig, 'serverName').text = str( data.get('server-name', '__ANY__')) def pollscm(parser, xml_parent, data): """yaml: pollscm Poll the SCM to determine if there has been a change. :Parameter: the polling interval (cron syntax) .. 
deprecated:: 1.3.0. Please use :ref:`cron <cron>`. .. _cron: :arg string cron: the polling interval (cron syntax, required) :arg bool ignore-post-commit-hooks: Ignore changes notified by SCM post-commit hooks. The subversion-plugin supports this since version 1.44. (default false) Example: .. literalinclude:: /../../tests/triggers/fixtures/pollscm002.yaml :language: yaml """ try: cron = data['cron'] ipch = str(data.get('ignore-post-commit-hooks', False)).lower() except KeyError as e: # ensure specific error on the attribute not being set is raised # for new format raise MissingAttributeError(e) except TypeError: # To keep backward compatibility logger.warn("Your pollscm usage is deprecated, please use" " the syntax described in the documentation" " instead") cron = data ipch = 'false' if not cron: raise InvalidAttributeError('cron', cron) scmtrig = XML.SubElement(xml_parent, 'hudson.triggers.SCMTrigger') XML.SubElement(scmtrig, 'spec').text = cron XML.SubElement(scmtrig, 'ignorePostCommitHooks').text = ipch def build_pollurl_content_type(xml_parent, entries, prefix, collection_name, element_name): namespace = 'org.jenkinsci.plugins.urltrigger.content' content_type = XML.SubElement( xml_parent, '{0}.{1}ContentType'.format(namespace, prefix)) if entries: collection = XML.SubElement(content_type, collection_name) for entry in entries: content_entry = XML.SubElement( collection, '{0}.{1}ContentEntry'.format(namespace, prefix)) XML.SubElement(content_entry, element_name).text = entry def pollurl(parser, xml_parent, data): """yaml: pollurl Trigger when the HTTP response from a URL changes. Requires the Jenkins :jenkins-wiki:`URLTrigger Plugin <URLTrigger+Plugin>`. :arg string cron: cron syntax of when to run (default '') :arg string polling-node: Restrict where the polling should run. (optional) :arg list urls: List of URLs to monitor :URL: * **url** (`str`) -- URL to monitor for changes (required) * **proxy** (`bool`) -- Activate the Jenkins proxy (default false) * **timeout** (`int`) -- Connect/read timeout in seconds (default 300) * **username** (`string`) -- User name for basic authentication (optional) * **password** (`string`) -- Password for basic authentication (optional) * **check-status** (`int`) -- Check for a specific HTTP status code (optional) * **check-etag** (`bool`) -- Check the HTTP ETag for changes (default false) * **check-date** (`bool`) -- Check the last modification date of the URL (default false) * **check-content** (`list`) -- List of content type changes to monitor :Content Type: * **simple** (`bool`) -- Trigger on any change to the content of the URL (default false) * **json** (`list`) -- Trigger on any change to the listed JSON paths * **text** (`list`) -- Trigger on any change to the listed regular expressions * **xml** (`list`) -- Trigger on any change to the listed XPath expressions Example: .. 
literalinclude:: /../../tests/triggers/fixtures/pollurl001.yaml """ valid_content_types = { 'simple': ['Simple', '', '', []], 'json': ['JSON', 'jsonPaths', 'jsonPath', None], 'text': ['TEXT', 'regExElements', 'regEx', None], 'xml': ['XML', 'xPaths', 'xPath', None] } urltrig = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.urltrigger.URLTrigger') node = data.get('polling-node') XML.SubElement(urltrig, 'spec').text = data.get('cron', '') XML.SubElement(urltrig, 'labelRestriction').text = str(bool(node)).lower() if node: XML.SubElement(urltrig, 'triggerLabel').text = node entries = XML.SubElement(urltrig, 'entries') urls = data.get('urls', []) if not urls: raise JenkinsJobsException('At least one url must be provided') for url in urls: entry = XML.SubElement(entries, 'org.jenkinsci.plugins.urltrigger.' 'URLTriggerEntry') XML.SubElement(entry, 'url').text = url['url'] XML.SubElement(entry, 'proxyActivated').text = \ str(url.get('proxy', False)).lower() if 'username' in url: XML.SubElement(entry, 'username').text = url['username'] if 'password' in url: XML.SubElement(entry, 'password').text = url['password'] if 'check-status' in url: XML.SubElement(entry, 'checkStatus').text = 'true' XML.SubElement(entry, 'statusCode').text = \ str(url.get('check-status')) else: XML.SubElement(entry, 'checkStatus').text = 'false' XML.SubElement(entry, 'statusCode').text = '200' XML.SubElement(entry, 'timeout').text = \ str(url.get('timeout', 300)) XML.SubElement(entry, 'checkETag').text = \ str(url.get('check-etag', False)).lower() XML.SubElement(entry, 'checkLastModificationDate').text = \ str(url.get('check-date', False)).lower() check_content = url.get('check-content', []) XML.SubElement(entry, 'inspectingContent').text = \ str(bool(check_content)).lower() content_types = XML.SubElement(entry, 'contentTypes') for entry in check_content: type_name = next(iter(entry.keys())) if type_name not in valid_content_types: raise JenkinsJobsException('check-content must be one of : %s' % ', '.join(valid_content_types. keys())) content_type = valid_content_types.get(type_name) if entry[type_name]: sub_entries = content_type[3] if sub_entries is None: sub_entries = entry[type_name] build_pollurl_content_type(content_types, sub_entries, *content_type[0:3]) def timed(parser, xml_parent, data): """yaml: timed Trigger builds at certain times. :Parameter: when to run the job (cron syntax) Example:: triggers: - timed: "@midnight" """ scmtrig = XML.SubElement(xml_parent, 'hudson.triggers.TimerTrigger') XML.SubElement(scmtrig, 'spec').text = data def github(parser, xml_parent, data): """yaml: github Trigger a job when github repository is pushed to. Requires the Jenkins :jenkins-wiki:`GitHub Plugin <GitHub+Plugin>`. Example:: triggers: - github """ ghtrig = XML.SubElement(xml_parent, 'com.cloudbees.jenkins.' 'GitHubPushTrigger') XML.SubElement(ghtrig, 'spec').text = '' def github_pull_request(parser, xml_parent, data): """yaml: github-pull-request Build pull requests in github and report results. Requires the Jenkins :jenkins-wiki:`GitHub Pull Request Builder Plugin <GitHub+pull+request+builder+plugin>`. :arg list admin-list: the users with admin rights (optional) :arg list white-list: users whose pull requests build (optional) :arg list org-list: orgs whose users should be white listed (optional) :arg bool allow-whitelist-orgs-as-admins: members of white listed orgs will have admin rights. 
(default false) :arg string cron: cron syntax of when to run (optional) :arg string trigger-phrase: when filled, commenting this phrase in the pull request will trigger a build (optional) :arg bool only-trigger-phrase: only commenting the trigger phrase in the pull request will trigger a build (default false) :arg bool github-hooks: use github hook (default false) :arg bool permit-all: build every pull request automatically without asking (default false) :arg bool auto-close-on-fail: close failed pull request automatically (default false) :arg list white-list-target-branches: Adding branches to this whitelist allows you to selectively test pull requests destined for these branches only. Supports regular expressions (e.g. 'master', 'feature-.*'). (optional) Example: .. literalinclude:: /../../tests/triggers/fixtures/github-pull-request.yaml """ ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.ghprb.' 'GhprbTrigger') XML.SubElement(ghprb, 'spec').text = data.get('cron', '') admin_string = "\n".join(data.get('admin-list', [])) XML.SubElement(ghprb, 'adminlist').text = admin_string XML.SubElement(ghprb, 'allowMembersOfWhitelistedOrgsAsAdmin').text = str( data.get('allow-whitelist-orgs-as-admins', False)).lower() white_string = "\n".join(data.get('white-list', [])) XML.SubElement(ghprb, 'whitelist').text = white_string org_string = "\n".join(data.get('org-list', [])) XML.SubElement(ghprb, 'orgslist').text = org_string XML.SubElement(ghprb, 'cron').text = data.get('cron', '') XML.SubElement(ghprb, 'triggerPhrase').text = \ data.get('trigger-phrase', '') XML.SubElement(ghprb, 'onlyTriggerPhrase').text = str( data.get('only-trigger-phrase', False)).lower() XML.SubElement(ghprb, 'useGitHubHooks').text = str( data.get('github-hooks', False)).lower() XML.SubElement(ghprb, 'permitAll').text = str( data.get('permit-all', False)).lower() XML.SubElement(ghprb, 'autoCloseFailedPullRequests').text = str( data.get('auto-close-on-fail', False)).lower() white_list_target_branches = data.get('white-list-target-branches', []) if white_list_target_branches: ghprb_wltb = XML.SubElement(ghprb, 'whiteListTargetBranches') for branch in white_list_target_branches: be = XML.SubElement(ghprb_wltb, 'org.jenkinsci.plugins.' 'ghprb.GhprbBranch') XML.SubElement(be, 'branch').text = str(branch) def gitlab_merge_request(parser, xml_parent, data): """yaml: gitlab-merge-request Build merge requests in gitlab and report results. Requires the Jenkins :jenkins-wiki:`Gitlab MergeRequest Builder Plugin. <Gitlab+Merge+Request+Builder+Plugin>`. :arg string cron: cron syntax of when to run (required) :arg string project-path: gitlab-relative path to project (required) Example: .. literalinclude:: \ /../../tests/triggers/fixtures/gitlab-merge-request.yaml """ ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.gitlab.' 'GitlabBuildTrigger') if not data.get('cron', None): raise jenkins_jobs.errors.JenkinsJobsException( 'gitlab-merge-request is missing "cron"') if not data.get('project-path', None): raise jenkins_jobs.errors.JenkinsJobsException( 'gitlab-merge-request is missing "project-path"') # Because of a design limitation in the GitlabBuildTrigger Jenkins plugin # both 'spec' and '__cron' have to be set to the same value to have them # take effect. Also, cron and projectPath are prefixed with underscores # in the plugin, but spec is not. 
XML.SubElement(ghprb, 'spec').text = data.get('cron') XML.SubElement(ghprb, '__cron').text = data.get('cron') XML.SubElement(ghprb, '__projectPath').text = data.get('project-path') def build_result(parser, xml_parent, data): """yaml: build-result Configure jobB to monitor jobA build result. A build is scheduled if there is a new build result that matches your criteria (unstable, failure, ...). Requires the Jenkins :jenkins-wiki:`BuildResultTrigger Plugin <BuildResultTrigger+Plugin>`. :arg list groups: List groups of jobs and results to monitor for :arg list jobs: The jobs to monitor (required) :arg list results: Build results to monitor for (default success) :arg bool combine: Combine all job information. A build will be scheduled only if all conditions are met (default false) :arg str cron: The cron syntax with which to poll the jobs for the supplied result (default '') Example:: triggers: - build-result: combine: true cron: '* * * * *' groups: - jobs: - foo - example results: - unstable - jobs: - foo2 results: - not-built - aborted """ brt = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.' 'buildresulttrigger.BuildResultTrigger') XML.SubElement(brt, 'spec').text = data.get('cron', '') XML.SubElement(brt, 'combinedJobs').text = str( data.get('combine', False)).lower() jobs_info = XML.SubElement(brt, 'jobsInfo') result_dict = {'success': 'SUCCESS', 'unstable': 'UNSTABLE', 'failure': 'FAILURE', 'not-built': 'NOT_BUILT', 'aborted': 'ABORTED'} for group in data['groups']: brti = XML.SubElement(jobs_info, 'org.jenkinsci.plugins.' 'buildresulttrigger.model.' 'BuildResultTriggerInfo') if not group.get('jobs', []): raise jenkins_jobs.errors.\ JenkinsJobsException('Jobs is missing and a required' ' element') jobs_string = ",".join(group['jobs']) XML.SubElement(brti, 'jobNames').text = jobs_string checked_results = XML.SubElement(brti, 'checkedResults') for result in group.get('results', ['success']): if result not in result_dict: raise jenkins_jobs.errors.\ JenkinsJobsException('Result entered is not valid,' ' must be one of: ' + ', '.join(result_dict.keys())) model_checked = XML.SubElement(checked_results, 'org.jenkinsci.' 'plugins.buildresulttrigger.model.' 'CheckedResult') XML.SubElement(model_checked, 'checked').text = result_dict[result] def reverse(parser, xml_parent, data): """yaml: reverse This trigger can be configured in the UI using the checkbox with the following text: 'Build after other projects are built'. Set up a trigger so that when some other projects finish building, a new build is scheduled for this project. This is convenient for running an extensive test after a build is complete, for example. This configuration complements the "Build other projects" section in the "Post-build Actions" of an upstream project, but is preferable when you want to configure the downstream project. :arg str jobs: List of jobs to watch. Can be either a comma separated list or a list. :arg str result: Build results to monitor for between the following options: success, unstable and failure. (default 'success'). Example: .. literalinclude:: /../../tests/triggers/fixtures/reverse.yaml Example List: .. 
literalinclude:: /../../tests/triggers/fixtures/reverse-list.yaml """ reserveBuildTrigger = XML.SubElement( xml_parent, 'jenkins.triggers.ReverseBuildTrigger') supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE'] XML.SubElement(reserveBuildTrigger, 'spec').text = '' jobs = data.get('jobs') if isinstance(jobs, list): jobs = ",".join(jobs) XML.SubElement(reserveBuildTrigger, 'upstreamProjects').text = \ jobs threshold = XML.SubElement(reserveBuildTrigger, 'threshold') result = data.get('result').upper() if result not in supported_thresholds: raise jenkins_jobs.errors.JenkinsJobsException( "Choice should be one of the following options: %s." % ", ".join(supported_thresholds)) XML.SubElement(threshold, 'name').text = \ hudson_model.THRESHOLDS[result]['name'] XML.SubElement(threshold, 'ordinal').text = \ hudson_model.THRESHOLDS[result]['ordinal'] XML.SubElement(threshold, 'color').text = \ hudson_model.THRESHOLDS[result]['color'] XML.SubElement(threshold, 'completeBuild').text = \ str(hudson_model.THRESHOLDS[result]['complete']).lower() def monitor_folders(parser, xml_parent, data): """yaml: monitor-folders Configure Jenkins to monitor folders. Requires the Jenkins :jenkins-wiki:`Filesystem Trigger Plugin <FSTriggerPlugin>`. :arg str path: Folder path to poll. (optional) :arg list includes: Fileset includes setting that specifies the list of includes files. Basedir of the fileset is relative to the workspace root. If no value is set, all files are used. (optional) :arg str excludes: The 'excludes' pattern. A file that matches this mask will not be polled even if it matches the mask specified in 'includes' section. (optional) :arg bool check-modification-date: Check last modification date. (default true) :arg bool check-content: Check content. (default true) :arg bool check-fewer: Check fewer or more files (default true) :arg str cron: cron syntax of when to run (default '') Example: .. literalinclude:: /../../tests/triggers/fixtures/monitor_folders.yaml """ ft = XML.SubElement(xml_parent, ('org.jenkinsci.plugins.fstrigger.' 'triggers.FolderContentTrigger')) path = data.get('path') if path: XML.SubElement(ft, 'path').text = path includes = data.get('includes') if includes: XML.SubElement(ft, 'includes').text = ",".join(includes) excludes = data.get('excludes') if excludes: XML.SubElement(ft, 'excludes').text = excludes XML.SubElement(ft, 'spec').text = data.get('cron', '') XML.SubElement(ft, 'excludeCheckLastModificationDate').text = str( not data.get('check-modification-date', True)).lower() XML.SubElement(ft, 'excludeCheckContent').text = str( not data.get('check-content', True)).lower() XML.SubElement(ft, 'excludeCheckFewerOrMoreFiles').text = str( not data.get('check-fewer', True)).lower() def ivy(parser, xml_parent, data): """yaml: ivy Poll with an Ivy script Requires the Jenkins :jenkins-wiki:`IvyTrigger Plugin <IvyTrigger+Plugin>`. :arg str path: Path of the ivy file. (optional) :arg str settings-path: Ivy Settings Path. (optional) :arg list str properties-file: List of properties file path. Properties will be injected as variables in the ivy settings file. (optional) :arg str properties-content: Properties content. Properties will be injected as variables in the ivy settings file. (optional) :arg bool debug: Active debug mode on artifacts resolution. (default false) :arg download-artifacts: Download artifacts for dependencies to see if they have changed. (default true) :arg bool enable-concurrent: Enable Concurrent Build. 
(default false) :arg str label: Restrict where the polling should run. (default '') :arg str cron: cron syntax of when to run (default '') Example: .. literalinclude:: /../../tests/triggers/fixtures/ivy.yaml """ it = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.ivytrigger.IvyTrigger') mappings = [('path', 'ivyPath', None), ('settings-path', 'ivySettingsPath', None), ('properties-file', 'propertiesFilePath', None), ('properties-content', 'propertiesContent', None), ('debug', 'debug', False), ('download-artifacts', 'downloadArtifacts', True), ('enable-concurrent', 'enableConcurrentBuild', False), ('cron', 'spec', '')] for prop in mappings: opt, xmlopt, default_val = prop[:3] val = data.get(opt, default_val) if val is not None: if type(val) == bool: val = str(val).lower() if type(val) == list: val = ";".join(val) XML.SubElement(it, xmlopt).text = val label = data.get('label') XML.SubElement(it, 'labelRestriction').text = str(bool(label)).lower() if label: XML.SubElement(it, 'triggerLabel').text = label def script(parser, xml_parent, data): """yaml: script Triggers the job using shell or batch script. Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin <ScriptTrigger+Plugin>`. :arg str label: Restrict where the polling should run. (default '') :arg str script: A shell or batch script. (default '') :arg str script-file-path: A shell or batch script path. (default '') :arg str cron: cron syntax of when to run (default '') :arg bool enable-concurrent: Enables triggering concurrent builds. (default false) :arg int exit-code: If the exit code of the script execution returns this expected exit code, a build is scheduled. (default 0) Example: .. literalinclude:: /../../tests/triggers/fixtures/script.yaml """ data = data if data else {} st = XML.SubElement( xml_parent, 'org.jenkinsci.plugins.scripttrigger.ScriptTrigger' ) label = data.get('label') XML.SubElement(st, 'script').text = str(data.get('script', '')) XML.SubElement(st, 'scriptFilePath').text = str( data.get('script-file-path', '')) XML.SubElement(st, 'spec').text = str(data.get('cron', '')) XML.SubElement(st, 'labelRestriction').text = str(bool(label)).lower() if label: XML.SubElement(st, 'triggerLabel').text = label XML.SubElement(st, 'enableConcurrentBuild').text = str( data.get('enable-concurrent', False)).lower() XML.SubElement(st, 'exitCode').text = str(data.get('exit-code', 0)) def gitlab_push(parser, xml_parent, data): data = data if data else {} glt = XML.SubElement( xml_parent, 'com.dabsquared.gitlabjenkins.GitLabPushTrigger' ) glt.set('plugin', 'gitlab-plugin@1.1.26') spec = XML.SubElement(glt, 'spec') triggerOnPush = XML.SubElement(glt, 'triggerOnPush') triggerOnPush.text = str(data.get('triggerOnPush', False)).lower() triggerOnMergeRequest = XML.SubElement(glt, 'triggerOnMergeRequest') triggerOnMergeRequest.text = str(data.get('triggerOnMergeRequest', False)).lower() triggerOpenMergeRequestOnPush = XML.SubElement(glt, 'triggerOpenMergeRequestOnPush') triggerOpenMergeRequestOnPush.text = str(data.get('triggerOpenMergeRequestOnPush', 'never')).lower() triggerOpenMergeRequestOnPush = XML.SubElement(glt, 'ciSkip') triggerOpenMergeRequestOnPush.text = str(data.get('ciSkip', False)).lower() setBuildDescription = XML.SubElement(glt, 'setBuildDescription') setBuildDescription.text = str(data.get('setBuildDescription', False)).lower() addNoteOnMergeRequest = XML.SubElement(glt, 'addNoteOnMergeRequest') addNoteOnMergeRequest.text = str(data.get('addNoteOnMergeRequest', False)).lower() addNoteOnMergeRequest = XML.SubElement(glt, 
'addVoteOnMergeRequest') addNoteOnMergeRequest.text = str(data.get('addVoteOnMergeRequest', False)).lower() allowAllBranches = XML.SubElement(glt, 'allowAllBranches') allowAllBranches.text = str(data.get('allowAllBranches', False)).lower() includeBranchesSpec = XML.SubElement(glt, 'includeBranchesSpec') branches = data.get('includeBranchesSpec', ['master']) includeBranchesSpec.text = ','.join(branches) excludeBranchesSpec = XML.SubElement(glt, 'excludeBranchesSpec') branches = data.get('excludeBranchesSpec', []) excludeBranchesSpec.text = ','.join(branches) def groovy_script(parser, xml_parent, data): """yaml: groovy-script Triggers the job using a groovy script. Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin <ScriptTrigger+Plugin>`. :arg bool system-script: If true, run the groovy script as a system script, the script will have access to the same variables as the Groovy Console. If false, run the groovy script on the executor node, the script will not have access to the hudson or job model. (default false) :arg str script: Content of the groovy script. If the script result is evaluated to true, a build is scheduled. (default '') :arg str script-file-path: Groovy script path. (default '') :arg str property-file-path: Property file path. All properties will be set as parameters for the triggered build. (optional) :arg bool enable-concurrent: Enable concurrent build. (default false) :arg str label: Restrict where the polling should run. (default '') :arg str cron: cron syntax of when to run (default '') Example: .. literalinclude:: /../../tests/triggers/fixtures/groovy-script.yaml """ gst = XML.SubElement( xml_parent, 'org.jenkinsci.plugins.scripttrigger.groovy.GroovyScriptTrigger' ) XML.SubElement(gst, 'groovySystemScript').text = str( data.get('system-script', False)).lower() XML.SubElement(gst, 'groovyExpression').text = str(data.get('script', '')) XML.SubElement(gst, 'groovyFilePath').text = str(data.get( 'script-file-path', '')) if 'property-file-path' in data: XML.SubElement(gst, 'propertiesFilePath').text = str( data.get('property-file-path')) XML.SubElement(gst, 'enableConcurrentBuild').text = str( data.get('enable-concurrent', False)).lower() label = data.get('label') XML.SubElement(gst, 'labelRestriction').text = str(bool(label)).lower() if label: XML.SubElement(gst, 'triggerLabel').text = label XML.SubElement(gst, 'spec').text = str(data.get('cron', '')) class Triggers(jenkins_jobs.modules.base.Base): sequence = 50 component_type = 'trigger' component_list_type = 'triggers' def gen_xml(self, parser, xml_parent, data): triggers = data.get('triggers', []) if not triggers: return trig_e = XML.SubElement(xml_parent, 'triggers', {'class': 'vector'}) for trigger in triggers: self.registry.dispatch('trigger', parser, trig_e, trigger)
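# --- Illustrative sketch (not part of the module above) ----------------------
# A minimal, standalone example of the XML that the `timed` and new-format
# `pollscm` triggers documented above map to. It deliberately does not import
# jenkins_jobs; the helper name below is made up for illustration only.
import xml.etree.ElementTree as ET


def sketch_timed_and_pollscm(timed_cron, pollscm_data):
    """Rebuild the trigger XML shapes shown in timed() and pollscm() above."""
    triggers = ET.Element('triggers', {'class': 'vector'})

    # timed: a single <spec> element holding the cron expression.
    timer = ET.SubElement(triggers, 'hudson.triggers.TimerTrigger')
    ET.SubElement(timer, 'spec').text = timed_cron

    # pollscm (new dict format): cron plus the ignore-post-commit-hooks flag.
    scm = ET.SubElement(triggers, 'hudson.triggers.SCMTrigger')
    ET.SubElement(scm, 'spec').text = pollscm_data['cron']
    ET.SubElement(scm, 'ignorePostCommitHooks').text = str(
        pollscm_data.get('ignore-post-commit-hooks', False)).lower()

    return ET.tostring(triggers, encoding='unicode')


print(sketch_timed_and_pollscm('@midnight',
                               {'cron': 'H/15 * * * *',
                                'ignore-post-commit-hooks': True}))
# ------------------------------------------------------------------------------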
sebbrandt87/jenkins-job-builder
jenkins_jobs/modules/triggers.py
Python
apache-2.0
54,956
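# --- Illustrative sketch (refers to the pollurl trigger in the module above) --
# A standalone approximation of the per-URL <URLTriggerEntry> mapping described
# in the pollurl docstring, including its documented defaults (timeout 300,
# status code 200). It does not import jenkins_jobs; the helper name is made up.
import xml.etree.ElementTree as ET


def sketch_pollurl_entry(url_cfg):
    entry = ET.Element('org.jenkinsci.plugins.urltrigger.URLTriggerEntry')
    ET.SubElement(entry, 'url').text = url_cfg['url']
    ET.SubElement(entry, 'proxyActivated').text = str(
        url_cfg.get('proxy', False)).lower()
    if 'check-status' in url_cfg:
        ET.SubElement(entry, 'checkStatus').text = 'true'
        ET.SubElement(entry, 'statusCode').text = str(url_cfg['check-status'])
    else:
        ET.SubElement(entry, 'checkStatus').text = 'false'
        ET.SubElement(entry, 'statusCode').text = '200'
    ET.SubElement(entry, 'timeout').text = str(url_cfg.get('timeout', 300))
    ET.SubElement(entry, 'checkETag').text = str(
        url_cfg.get('check-etag', False)).lower()
    ET.SubElement(entry, 'checkLastModificationDate').text = str(
        url_cfg.get('check-date', False)).lower()
    return ET.tostring(entry, encoding='unicode')


print(sketch_pollurl_entry({'url': 'https://example.com/status',
                            'check-status': 200,
                            'check-etag': True}))
# ------------------------------------------------------------------------------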
import xml.dom.minidom


class Filter:
    def __init__(self, include, items):
        self.include = include
        self.items = items

    def match(self, msg):
        # A message matches the filter only if every item matches it.
        for item in self.items:
            if not item.match(msg):
                return False
        return True


def parseAll(xml):
    # Placeholder parser: the original code called
    # Filter.parse(xml.getElementsByTagName("filters")), but no such
    # classmethod exists; until real parsing is implemented, a hard-coded
    # user filter is returned instead.
    return Filter(True, [UserFilterItem("alex")])


class FilterItem:
    def match(self, msg):
        return True


class UserFilterItem(FilterItem):
    def __init__(self, value):
        self.name = value

    def match(self, msg):
        return True
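# --- Illustrative usage of the classes above (the message value is made up) ---
flt = Filter(True, [UserFilterItem("alex"), FilterItem()])
print(flt.match("PRIVMSG #channel :hello"))  # True: every item currently matches
# -------------------------------------------------------------------------------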
stenbacka/irc2you
server/filter.py
Python
bsd-3-clause
572
import copy from typing import Type, TypeVar, MutableMapping, Any, Iterable from collections import defaultdict from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query from ..data import Platform from ..dto.staticdata.champion import ChampionDto, ChampionListDto from ..dto.staticdata.rune import RuneDto, RuneListDto, RunePathDto, RunePathsDto from ..dto.staticdata.item import ItemDto, ItemListDto from ..dto.staticdata.summonerspell import SummonerSpellDto, SummonerSpellListDto from ..dto.staticdata.version import VersionListDto from ..dto.staticdata.profileicon import ProfileIconDataDto from ..dto.staticdata.language import LanguagesDto, LanguageStringsDto from ..dto.staticdata.realm import RealmDto from ..dto.staticdata.map import MapDto, MapListDto from .common import HTTPClient, HTTPError from .riotapi.common import _get_latest_version from .uniquekeys import _hash_included_data, convert_region_to_platform try: import ujson as json except ImportError: import json T = TypeVar("T") # Manually add stat runes since Riot doesn't provide static data for them... statperk_health = { "id": 5001, "name": "HealthScaling", "key": "HealthScaling", "shortDesc": "+15-90 Health (based on level)", "longDesc": "+15-90 Health (based on level)", "icon": "/lol-game-data/assets/v1/perk-images/StatMods/StatModsHealthScalingIcon.png", } statperk_armor = { "id": 5002, "name": "Armor", "key": "Armor", "shortDesc": "+6 Armor", "longDesc": "+6 Armor", "icon": "/lol-game-data/assets/v1/perk-images/StatMods/StatModsArmorIcon.png", } statperk_magic_resist = { "id": 5003, "name": "MagicResist", "key": "MagicRes", "shortDesc": "+8 Magic Resist", "longDesc": "+8 Magic Resist", "icon": "/lol-game-data/assets/v1/perk-images/StatMods/StatModsMagicResIcon.png", } statperk_attack_speed = { "id": 5005, "name": "AttackSpeed", "key": "AttackSpeed", "shortDesc": "+10% Attack Speed", "longDesc": "+10% Attack Speed", "icon": "/lol-game-data/assets/v1/perk-images/StatMods/StatModsAttackSpeedIcon.png", } statperk_cdr = { "id": 5007, "name": "CDRScaling", "key": "CDRScaling", "shortDesc": "+1-10% <lol-uikit-tooltipped-keyword key='LinkTooltip_Description_CDR'>CDR</lol-uikit-tooltipped-keyword> (based on level)", "longDesc": "+1-10% <lol-uikit-tooltipped-keyword key='LinkTooltip_Description_CDR'>CDR</lol-uikit-tooltipped-keyword> (based on level)", "icon": "/lol-game-data/assets/v1/perk-images/StatMods/StatModsCDRScalingIcon.png", } statperk_adaptive = { "id": 5008, "name": "Adaptive", "key": "Adaptive", "shortDesc": "+9 <lol-uikit-tooltipped-keyword key='LinkTooltip_Description_Adaptive'><font color='#48C4B7'>Adaptive Force</font></lol-uikit-tooltipped-keyword>", "longDesc": "+9 <lol-uikit-tooltipped-keyword key='LinkTooltip_Description_Adaptive'><font color='#48C4B7'>Adaptive Force</font></lol-uikit-tooltipped-keyword>", "icon": "/lol-game-data/assets/v1/perk-images/StatMods/StatModsAdaptiveForceIcon.png", } statperks = { "id": 5000, "key": "stats", "name": "stats", "icon": "", "slots": [{"runes": [statperk_health, statperk_armor, statperk_magic_resist, statperk_attack_speed, statperk_cdr, statperk_adaptive]}] } class DDragon(DataSource): def __init__(self, http_client: HTTPClient = None) -> None: if http_client is None: self._client = HTTPClient() else: self._client = http_client self._cache = {ChampionListDto: {}, RuneListDto: {}, ItemListDto: {}, SummonerSpellListDto: {}, MapListDto: {}} @DataSource.dispatch def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext 
= None) -> T: pass @DataSource.dispatch def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]: pass def calculate_hash(self, query): hash = list(value for _, value in sorted(query.items())) for i, value in enumerate(hash): if isinstance(value, set): hash[i] = _hash_included_data(value) return tuple(hash) ############# # Champions # ############# _validate_get_champion_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int).or_("name").as_(str).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").also. \ can_have("includedData") @get.register(ChampionDto) @validate_query(_validate_get_champion_query, convert_region_to_platform) def get_champion(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ChampionDto: champions_query = copy.deepcopy(query) if "id" in champions_query: champions_query.pop("id") if "name" in champions_query: champions_query.pop("name") champions = context[context.Keys.PIPELINE].get(ChampionListDto, query=champions_query) def find_matching_attribute(list_of_dtos, attrname, attrvalue): for dto in list_of_dtos: if dto.get(attrname, None) == attrvalue: return dto # The `data` is a list of champion data instances if "id" in query: find = "id", query["id"] elif "name" in query: find = "name", query["name"] else: raise RuntimeError("Impossible!") champion = find_matching_attribute(champions["data"].values(), *find) if champion is None: raise NotFoundError champion["region"] = query["platform"].region.value champion["version"] = query["version"] if "locale" in query: champion["locale"] = query["locale"] if "includedData" in query: champion["includedData"] = query["includedData"] return ChampionDto(champion) _validate_get_champion_list_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").also. \ can_have("includedData") @get.register(ChampionListDto) @validate_query(_validate_get_champion_list_query, convert_region_to_platform) def get_champion_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ChampionListDto: locale = query["locale"] if "locale" in query else query["platform"].default_locale query["locale"] = locale ahash = self.calculate_hash(query) try: return self._cache[ChampionListDto][ahash] except KeyError: pass url = "https://ddragon.leagueoflegends.com/cdn/{version}/data/{locale}/championFull.json".format( version=query["version"], locale=locale ) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e for champ_name, champ in body["data"].items(): champ = ChampionDto(champ) body["data"][champ_name] = champ champ["region"] = query["platform"].region.value body["locale"] = locale body["includedData"] = {"all"} champ["id"], champ["key"] = int(champ["key"]), champ["id"] for skin in champ["skins"]: # id str in DDragon, int in static data. skin["id"] = int(skin["id"]) # Doesn't exist in static data. skin.pop("chromas", None) champ["passive"]["sanitizedDescription"] = champ["passive"]["description"] for recommended in champ['recommended']: # These fields always(?) are the same and don't appear in static data. 
[recommended.pop(key, None) for key in ('sortrank', 'extensionPage', 'customPanel', 'customTag', 'requiredPerk', 'customPanelCurrencyType', 'customPanelBuffCurrencyName')] for block in recommended['blocks']: # These don't appear in static data for whatever reason. [block.pop(key, None) for key in ("recSteps", "minSummonerLevel", "maxSummonerLevel", "showIfSummonerSpell", "hideIfSummonerSpell")] for item in block["items"]: # id str in DDragon, int in static data. item["id"] = int(item["id"]) # Doesn't exist. item.pop("hideCount", None) for spell in champ['spells']: # id -> key spell["key"] = spell.pop("id") # effectBurn is null in DDragon, empty string in static data. spell["effectBurn"][0] = "" # TODO: Sanitizer? spell["sanitizedDescription"] = spell["description"] spell["sanitizedTooltip"] = spell["tooltip"] # non-existent in static data(? used for charge based spells, not sure why static data strips it) spell.pop("maxammo", None) for var in spell["vars"]: # coeff is always a list, even if just one item if not isinstance(var["coeff"], list): var["coeff"] = [var["coeff"]] body["region"] = query["platform"].region.value body["locale"] = locale body["includedData"] = {"all"} result = ChampionListDto(body) self._cache[ChampionListDto][ahash] = result return result ############ # Versions # ############ _validate_get_versions_query = Query. \ has("platform").as_(Platform) @get.register(VersionListDto) @validate_query(_validate_get_versions_query, convert_region_to_platform) def get_versions(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> VersionListDto: url = "https://ddragon.leagueoflegends.com/api/versions.json" try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e return VersionListDto({ "region": query["platform"].region.value, "versions": body }) ########## # Realms # ########## _validate_get_realms_query = Query. \ has("platform").as_(Platform) @get.register(RealmDto) @validate_query(_validate_get_realms_query, convert_region_to_platform) def get_realms(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> RealmDto: region = query["platform"].region url = "https://ddragon.leagueoflegends.com/realms/{region}.json".format(region=region.value.lower()) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e body["region"] = query["platform"].region.value return RealmDto(body) ############# # Languages # ############# _validate_get_languages_query = Query. \ has("platform").as_(Platform) @get.register(LanguagesDto) @validate_query(_validate_get_languages_query, convert_region_to_platform) def get_languages(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> LanguagesDto: url = "https://ddragon.leagueoflegends.com/cdn/languages.json" try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e data = {"region": query["platform"].region.value, "languages": body} return LanguagesDto(data) ######## # Maps # ######## _validate_get_map_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int).or_("name").as_(str).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. 
\ can_have("locale") @get.register(MapDto) @validate_query(_validate_get_map_query, convert_region_to_platform) def get_map(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MapDto: maps_query = copy.deepcopy(query) if "id" in maps_query: maps_query.pop("id") if "name" in maps_query: maps_query.pop("name") maps = context[context.Keys.PIPELINE].get(MapListDto, query=maps_query) def find_matching_attribute(list_of_dtos, attrname, attrvalue): for dto in list_of_dtos: if dto.get(attrname, None) == attrvalue: return dto # The `data` is a list of map data instances if "id" in query: find = "mapId", str(query["id"]) elif "name" in query: find = "mapName", query["name"] else: raise RuntimeError("Impossible!") map = find_matching_attribute(maps["data"].values(), *find) if map is None: raise NotFoundError map["region"] = query["platform"].region.value map["version"] = query["version"] if "locale" in query: map["locale"] = query["locale"] return MapDto(map) _validate_get_map_list_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").as_(str) @get.register(MapListDto) @validate_query(_validate_get_map_list_query, convert_region_to_platform) def get_map_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MapListDto: locale = query["locale"] if "locale" in query else query["platform"].default_locale query["locale"] = locale ahash = self.calculate_hash(query) try: return self._cache[MapListDto][ahash] except KeyError: pass url = "https://ddragon.leagueoflegends.com/cdn/{version}/data/{locale}/map.json".format( version=query["version"], locale=locale ) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e body["region"] = query["platform"].region.value body["locale"] = locale for key, map in body["data"].items(): map = MapDto(map) body["data"][key] = map map["mapName"] = map.pop("MapName") map["mapId"] = map.pop("MapId") result = MapListDto(body) self._cache[MapListDto][ahash] = result return result #################### # Language Strings # #################### _validate_get_language_strings_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").as_(str) @get.register(LanguageStringsDto) @validate_query(_validate_get_language_strings_query, convert_region_to_platform) def get_language_strings(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> LanguageStringsDto: locale = query["locale"] if "locale" in query else query["platform"].default_locale url = "https://ddragon.leagueoflegends.com/cdn/{version}/data/{locale}/language.json".format( version=query["version"], locale=locale ) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e body["region"] = query["platform"].region.value body["locale"] = locale return LanguageStringsDto(body) ######### # Runes # ######### _validate_get_rune_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int).or_("name").as_(str).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").also. 
\ can_have("includedData") @get.register(RuneDto) @validate_query(_validate_get_rune_query, convert_region_to_platform) def get_rune(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> RuneDto: runes_query = copy.deepcopy(query) if "id" in runes_query: runes_query.pop("id") if "name" in runes_query: runes_query.pop("name") runes = context[context.Keys.PIPELINE].get(RuneListDto, query=runes_query) def find_matching_attribute(list_of_dtos, attrname, attrvalue): for dto in list_of_dtos: if dto.get(attrname, None) == attrvalue: return dto # The `data` is a list of rune data instances if "id" in query: find = "id", query["id"] elif "name" in query: find = "name", query["name"] else: raise RuntimeError("Impossible!") if isinstance(runes["data"], list): rune = find_matching_attribute(runes["data"], *find) elif isinstance(runes["data"], dict): rune = find_matching_attribute(runes["data"].values(), *find) else: raise ValueError("The runes data from DDragon came back in an unexpected format. Please report this on Github!") if rune is None: raise NotFoundError rune["region"] = query["platform"].region.value rune["version"] = query["version"] if "locale" in query: rune["locale"] = query["locale"] if "includedData" in query: rune["includedData"] = query["includedData"] return RuneDto(rune) _validate_get_rune_list_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").as_(str).also. \ can_have("includedData") @get.register(RuneListDto) @validate_query(_validate_get_rune_list_query, convert_region_to_platform) def get_rune_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> RuneListDto: locale = query["locale"] if "locale" in query else query["platform"].default_locale query["locale"] = locale ahash = self.calculate_hash(query) try: return self._cache[RuneListDto][ahash] except KeyError: pass url = "https://ddragon.leagueoflegends.com/cdn/{version}/data/{locale}/runesReforged.json".format( version=query["version"], locale=locale ) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e body.append(statperks) for path in body: for tier, subpath in enumerate(path["slots"]): for i, rune in enumerate(subpath["runes"]): rune["path"] = { "key": path["key"], "name": path["name"], "id": path["id"], "icon": path["icon"] } rune["tier"] = tier subpath[i] = RuneDto(rune) body = {"data": [rune for path in body for subpath in path["slots"] for rune in subpath["runes"] ]} body["region"] = query["platform"].region.value body["locale"] = locale body["version"] = query["version"] body["includedData"] = {"all"} result = RuneListDto(body) self._cache[RuneListDto][ahash] = result return result _validate_get_rune_paths_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").as_(str).also. 
\ can_have("includedData") @get.register(RunePathsDto) @validate_query(_validate_get_rune_paths_query, convert_region_to_platform) def get_rune_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> RunePathsDto: pipeline = context[PipelineContext.Keys.PIPELINE] runes = pipeline.get(RuneListDto, copy.deepcopy(query))["data"] paths = defaultdict(dict) for rune in runes: if rune["path"]["id"] not in paths: paths[rune["path"]["id"]] = rune["path"] paths = [RunePathDto(path) for path in paths.values()] paths = RunePathsDto(paths=paths, platform=query["platform"], locale=query.get("locale", None), version=query.get("version", None), includedData=query.get("includedData", None)) return paths ######### # Items # ######### _validate_get_item_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int).or_("name").as_(str).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").also. \ can_have("includedData") @get.register(ItemDto) @validate_query(_validate_get_item_query, convert_region_to_platform) def get_item(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ItemDto: items_query = copy.deepcopy(query) if "id" in items_query: items_query.pop("id") if "name" in items_query: items_query.pop("name") items = context[context.Keys.PIPELINE].get(ItemListDto, query=items_query) def find_matching_attribute(list_of_dtos, attrname, attrvalue): for dto in list_of_dtos: if dto.get(attrname, None) == attrvalue: return dto # The `data` is a list of item data instances if "id" in query: find = "id", query["id"] elif "name" in query: find = "name", query["name"] else: raise RuntimeError("Impossible!") item = find_matching_attribute(items["data"].values(), *find) if item is None: raise NotFoundError item["region"] = query["platform"].region.value item["version"] = query["version"] if "locale" in query: item["locale"] = query["locale"] if "includedData" in query: item["includedData"] = query["includedData"] return ItemDto(item) _validate_get_item_list_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").as_(str).also. \ can_have("includedData") @get.register(ItemListDto) @validate_query(_validate_get_item_list_query, convert_region_to_platform) def get_item_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ItemListDto: locale = query["locale"] if "locale" in query else query["platform"].default_locale query["locale"] = locale ahash = self.calculate_hash(query) try: return self._cache[ItemListDto][ahash] except KeyError: pass url = "https://ddragon.leagueoflegends.com/cdn/{version}/data/{locale}/item.json".format( version=query["version"], locale=locale ) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e body.pop("basic") for group in body["groups"]: # key in static data -> id on DDragon group["key"] = group.pop("id") for item_id, item in body["data"].items(): item = ItemDto(item) body["data"][item_id] = item item["id"] = int(item_id) # TODO: Sanitizer? item["sanitizedDescription"] = item["description"] if item["id"] == 3632: # This item doesn't have a name. 
item["name"] = "" if "tags" not in item: item["tags"] = [] if "depth" not in item: item["depth"] = 1 if "colloq" not in item: item["colloq"] = "" if "plaintext" not in item: item["plaintext"] = "" body["region"] = query["platform"].region.value body["locale"] = locale body["includedData"] = {"all"} result = ItemListDto(body) self._cache[ItemListDto][ahash] = result return result ################### # Summoner Spells # ################### _validate_get_summoner_spell_query = Query. \ has("platform").as_(Platform).also. \ has("id").as_(int).or_("name").as_(str).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").also. \ can_have("includedData") @get.register(SummonerSpellDto) @validate_query(_validate_get_summoner_spell_query, convert_region_to_platform) def get_summoner_spell(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> SummonerSpellDto: summoner_spells_query = copy.deepcopy(query) if "id" in summoner_spells_query: summoner_spells_query.pop("id") if "name" in summoner_spells_query: summoner_spells_query.pop("name") summoner_spells = context[context.Keys.PIPELINE].get(SummonerSpellListDto, query=summoner_spells_query) def find_matching_attribute(list_of_dtos, attrname, attrvalue): for dto in list_of_dtos: if dto.get(attrname, None) == attrvalue: return dto # The `data` is a list of summoner_spell data instances if "id" in query: find = "id", query["id"] elif "name" in query: find = "name", query["name"] else: raise RuntimeError("Impossible!") summoner_spell = find_matching_attribute(summoner_spells["data"].values(), *find) if summoner_spell is None: raise NotFoundError summoner_spell["region"] = query["platform"].region.value summoner_spell["version"] = query["version"] if "locale" in query: summoner_spell["locale"] = query["locale"] if "includedData" in query: summoner_spell["includedData"] = query["includedData"] return SummonerSpellDto(summoner_spell) _validate_get_summoner_spell_list_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").as_(str).also. \ can_have("includedData") @get.register(SummonerSpellListDto) @validate_query(_validate_get_summoner_spell_list_query, convert_region_to_platform) def get_summoner_spell_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> SummonerSpellListDto: locale = query["locale"] if "locale" in query else query["platform"].default_locale query["locale"] = locale ahash = self.calculate_hash(query) try: return self._cache[SummonerSpellListDto][ahash] except KeyError: pass url = "https://ddragon.leagueoflegends.com/cdn/{version}/data/{locale}/summoner.json".format( version=query["version"], locale=locale ) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e for ss_name, ss in body["data"].items(): ss = SummonerSpellDto(ss) body["data"][ss_name] = ss # key and id are switched between DDragon and static data. Also, id is of type int, instead of str. ss["id"], ss["key"] = int(ss["key"]), ss["id"] # effectBurn"s first element is an null in DDragon, but an empty string in static data.. ss["effectBurn"][0] = "" # Usually -1, doesn"t exist in static data. ss.pop("maxammo") # TODO: Sanitizer? 
ss["sanitizedDescription"] = ss["description"] ss["sanitizedTooltip"] = ss["tooltip"] body["region"] = query["platform"].region.value body["locale"] = locale body["includedData"] = {"all"} result = SummonerSpellListDto(body) self._cache[SummonerSpellListDto][ahash] = result return result ################# # Profile Icons # ################# _validate_get_profile_icon_query = Query. \ has("platform").as_(Platform).also. \ can_have("version").with_default(_get_latest_version, supplies_type=str).also. \ can_have("locale").as_(str) @get.register(ProfileIconDataDto) @validate_query(_validate_get_profile_icon_query, convert_region_to_platform) def get_profile_icon(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ProfileIconDataDto: locale = query["locale"] if "locale" in query else query["platform"].default_locale url = "https://ddragon.leagueoflegends.com/cdn/{version}/data/{locale}/profileicon.json".format( version=query["version"], locale=locale ) try: body = json.loads(self._client.get(url)[0]) except HTTPError as e: raise NotFoundError(str(e)) from e body["region"] = query["platform"].region.value body["locale"] = locale body["version"] = query["version"] for pi in body["data"].values(): pi["region"] = body["region"] pi["version"] = body["version"] pi["locale"] = locale return ProfileIconDataDto(body)
sserrot/champion_relationships
venv/Lib/site-packages/cassiopeia/datastores/ddragon.py
Python
mit
30,073
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import re from time import sleep from ansible.module_utils.cloud import CloudRetry try: import boto import boto.ec2 #boto does weird import stuff HAS_BOTO = True except ImportError: HAS_BOTO = False try: import boto3 import botocore HAS_BOTO3 = True except: HAS_BOTO3 = False try: from distutils.version import LooseVersion HAS_LOOSE_VERSION = True except: HAS_LOOSE_VERSION = False from ansible.module_utils.six import string_types, binary_type, text_type class AnsibleAWSError(Exception): pass def _botocore_exception_maybe(): """ Allow for boto3 not being installed when using these utils by wrapping botocore.exceptions instead of assigning from it directly. """ if HAS_BOTO3: return botocore.exceptions.ClientError return type(None) class AWSRetry(CloudRetry): base_class = _botocore_exception_maybe() @staticmethod def status_code_from_exception(error): return error.response['Error']['Code'] @staticmethod def found(response_code): # This list of failures is based on this API Reference # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html retry_on = [ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', 'InternalFailure', 'InternalError' ] not_found = re.compile(r'^\w+.NotFound') if response_code in retry_on or not_found.search(response_code): return True else: return False def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): try: return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) except ValueError: module.fail_json(msg='There is an issue in the code of the module. 
You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call') def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params): profile = params.pop('profile_name', None) if conn_type not in ['both', 'resource', 'client']: raise ValueError('There is an issue in the calling code. You ' 'must specify either both, resource, or client to ' 'the conn_type parameter in the boto3_conn function ' 'call') if conn_type == 'resource': resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params) return resource elif conn_type == 'client': client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params) return client else: client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params) resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params) return client, resource boto3_inventory_conn = _boto3_conn def aws_common_argument_spec(): return dict( ec2_url=dict(), aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), validate_certs=dict(default=True, type='bool'), security_token=dict(aliases=['access_token'], no_log=True), profile=dict(), ) def ec2_argument_spec(): spec = aws_common_argument_spec() spec.update( dict( region=dict(aliases=['aws_region', 'ec2_region']), ) ) return spec def get_aws_connection_info(module, boto3=False): # Check module args for credentials, then check environment vars # access_key ec2_url = module.params.get('ec2_url') access_key = module.params.get('aws_access_key') secret_key = module.params.get('aws_secret_key') security_token = module.params.get('security_token') region = module.params.get('region') profile_name = module.params.get('profile') validate_certs = module.params.get('validate_certs') if not ec2_url: if 'AWS_URL' in os.environ: ec2_url = os.environ['AWS_URL'] elif 'EC2_URL' in os.environ: ec2_url = os.environ['EC2_URL'] if not access_key: if 'AWS_ACCESS_KEY_ID' in os.environ: access_key = os.environ['AWS_ACCESS_KEY_ID'] elif 'AWS_ACCESS_KEY' in os.environ: access_key = os.environ['AWS_ACCESS_KEY'] elif 'EC2_ACCESS_KEY' in os.environ: access_key = os.environ['EC2_ACCESS_KEY'] else: # in case access_key came in as empty string access_key = None if not secret_key: if 'AWS_SECRET_ACCESS_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] elif 'AWS_SECRET_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_KEY'] elif 'EC2_SECRET_KEY' in os.environ: secret_key = os.environ['EC2_SECRET_KEY'] else: # in case secret_key came in as empty string secret_key = None if not region: if 'AWS_REGION' in os.environ: region = os.environ['AWS_REGION'] elif 'AWS_DEFAULT_REGION' in os.environ: region = os.environ['AWS_DEFAULT_REGION'] elif 'EC2_REGION' in os.environ: region = os.environ['EC2_REGION'] else: if not boto3: # boto.config.get returns None if config not found region = boto.config.get('Boto', 'aws_region') if not region: region = boto.config.get('Boto', 'ec2_region') elif HAS_BOTO3: # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. region = botocore.session.get_session().get_config_variable('region') else: module.fail_json(msg="Boto3 is required for this module. 
Please install boto3 and try again") if not security_token: if 'AWS_SECURITY_TOKEN' in os.environ: security_token = os.environ['AWS_SECURITY_TOKEN'] elif 'AWS_SESSION_TOKEN' in os.environ: security_token = os.environ['AWS_SESSION_TOKEN'] elif 'EC2_SECURITY_TOKEN' in os.environ: security_token = os.environ['EC2_SECURITY_TOKEN'] else: # in case security_token came in as empty string security_token = None if HAS_BOTO3 and boto3: boto_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key, aws_session_token=security_token) boto_params['verify'] = validate_certs if profile_name: boto_params['profile_name'] = profile_name else: boto_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key, security_token=security_token) # only set profile_name if passed as an argument if profile_name: boto_params['profile_name'] = profile_name boto_params['validate_certs'] = validate_certs for param, value in boto_params.items(): if isinstance(value, binary_type): boto_params[param] = text_type(value, 'utf-8', 'strict') return region, ec2_url, boto_params def get_ec2_creds(module): ''' for compatibility mode with old modules that don't/can't yet use ec2_connect method ''' region, ec2_url, boto_params = get_aws_connection_info(module) return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region def boto_fix_security_token_in_profile(conn, profile_name): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + profile_name if boto.config.has_option(profile, 'aws_security_token'): conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token')) return conn def connect_to_aws(aws_module, region, **params): conn = aws_module.connect_to_region(region, **params) if not conn: if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__)) else: raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__)) if params.get('profile_name'): conn = boto_fix_security_token_in_profile(conn, params['profile_name']) return conn def ec2_connect(module): """ Return an ec2 connection""" region, ec2_url, boto_params = get_aws_connection_info(module) # If we have a region specified, connect to its endpoint. if region: try: ec2 = connect_to_aws(boto.ec2, region, **boto_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) # Otherwise, no region so we fallback to the old connection method elif ec2_url: try: ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") return ec2 def paging(pause=0, marker_property='marker'): """ Adds paging to boto retrieval functions that support a 'marker' this is configurable as not all boto functions seem to use the same name. 
""" def wrapper(f): def page(*args, **kwargs): results = [] marker = None while True: try: new = f(*args, marker=marker, **kwargs) marker = getattr(new, marker_property) results.extend(new) if not marker: break elif pause: sleep(pause) except TypeError: # Older version of boto do not allow for marker param, just run normally results = f(*args, **kwargs) break return results return page return wrapper def camel_dict_to_snake_dict(camel_dict): def camel_to_snake(name): import re first_cap_re = re.compile('(.)([A-Z][a-z]+)') all_cap_re = re.compile('([a-z0-9])([A-Z])') s1 = first_cap_re.sub(r'\1_\2', name) return all_cap_re.sub(r'\1_\2', s1).lower() def value_is_list(camel_list): checked_list = [] for item in camel_list: if isinstance(item, dict): checked_list.append(camel_dict_to_snake_dict(item)) elif isinstance(item, list): checked_list.append(value_is_list(item)) else: checked_list.append(item) return checked_list snake_dict = {} for k, v in camel_dict.items(): if isinstance(v, dict): snake_dict[camel_to_snake(k)] = camel_dict_to_snake_dict(v) elif isinstance(v, list): snake_dict[camel_to_snake(k)] = value_is_list(v) else: snake_dict[camel_to_snake(k)] = v return snake_dict def snake_dict_to_camel_dict(snake_dict): def camelize(complex_type): if complex_type is None: return new_type = type(complex_type)() if isinstance(complex_type, dict): for key in complex_type: new_type[camel(key)] = camelize(complex_type[key]) elif isinstance(complex_type, list): for i in range(len(complex_type)): new_type.append(camelize(complex_type[i])) else: return complex_type return new_type def camel(words): return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:]) return camelize(snake_dict) def ansible_dict_to_boto3_filter_list(filters_dict): """ Convert an Ansible dict of filters to list of dicts that boto3 can use Args: filters_dict (dict): Dict of AWS filters. Basic Usage: >>> filters = {'some-aws-id', 'i-01234567'} >>> ansible_dict_to_boto3_filter_list(filters) { 'some-aws-id': 'i-01234567' } Returns: List: List of AWS filters and their values [ { 'Name': 'some-aws-id', 'Values': [ 'i-01234567', ] } ] """ filters_list = [] for k,v in filters_dict.items(): filter_dict = {'Name': k} if isinstance(v, string_types): filter_dict['Values'] = [v] else: filter_dict['Values'] = v filters_list.append(filter_dict) return filters_list def boto3_tag_list_to_ansible_dict(tags_list): """ Convert a boto3 list of resource tags to a flat dict of key:value pairs Args: tags_list (list): List of dicts representing AWS tags. Basic Usage: >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}] >>> boto3_tag_list_to_ansible_dict(tags_list) [ { 'Key': 'MyTagKey', 'Value': 'MyTagValue' } ] Returns: Dict: Dict of key:value pairs representing AWS tags { 'MyTagKey': 'MyTagValue', } """ tags_dict = {} for tag in tags_list: if 'key' in tag: tags_dict[tag['key']] = tag['value'] elif 'Key' in tag: tags_dict[tag['Key']] = tag['Value'] return tags_dict def ansible_dict_to_boto3_tag_list(tags_dict): """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts Args: tags_dict (dict): Dict representing AWS resource tags. 
Basic Usage: >>> tags_dict = {'MyTagKey': 'MyTagValue'} >>> ansible_dict_to_boto3_tag_list(tags_dict) { 'MyTagKey': 'MyTagValue' } Returns: List: List of dicts containing tag keys and values [ { 'Key': 'MyTagKey', 'Value': 'MyTagValue' } ] """ tags_list = [] for k,v in tags_dict.items(): tags_list.append({'Key': k, 'Value': v}) return tags_list def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True): """ Return list of security group IDs from security group names. Note that security group names are not unique across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in a try block """ def get_sg_name(sg, boto3): if boto3: return sg['GroupName'] else: return sg.name def get_sg_id(sg, boto3): if boto3: return sg['GroupId'] else: return sg.id sec_group_id_list = [] if isinstance(sec_group_list, string_types): sec_group_list = [sec_group_list] # Get all security groups if boto3: if vpc_id: filters = [ { 'Name': 'vpc-id', 'Values': [ vpc_id, ] } ] all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups'] else: all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups'] else: if vpc_id: filters = { 'vpc-id': vpc_id } all_sec_groups = ec2_connection.get_all_security_groups(filters=filters) else: all_sec_groups = ec2_connection.get_all_security_groups() unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups) sec_group_name_list = list(set(sec_group_list) - set(unmatched)) if len(unmatched) > 0: # If we have unmatched names that look like an ID, assume they are import re sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)] still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)] if len(still_unmatched) > 0: raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched)) sec_group_id_list += [ str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list ] return sec_group_id_list def sort_json_policy_dict(policy_dict): """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but different orders will return true Args: policy_dict (dict): Dict representing IAM JSON policy. Basic Usage: >>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]} >>> sort_json_policy_dict(my_iam_policy) Returns: Dict: Will return a copy of the policy as a Dict but any List will be sorted { 'Principle': { 'AWS': [ '7', '14', '31', '101' ] } } """ def value_is_list(my_list): checked_list = [] for item in my_list: if isinstance(item, dict): checked_list.append(sort_json_policy_dict(item)) elif isinstance(item, list): checked_list.append(value_is_list(item)) else: checked_list.append(item) # Sort list. If it's a list of dictionaries, sort by tuple of key-value # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries. 
checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x) return checked_list ordered_policy_dict = {} for key, value in policy_dict.items(): if isinstance(value, dict): ordered_policy_dict[key] = sort_json_policy_dict(value) elif isinstance(value, list): ordered_policy_dict[key] = value_is_list(value) else: ordered_policy_dict[key] = value return ordered_policy_dict def map_complex_type(complex_type, type_map): """ Allows to cast elements within a dictionary to a specific type Example of usage: DEPLOYMENT_CONFIGURATION_TYPE_MAP = { 'maximum_percent': 'int', 'minimum_healthy_percent': 'int' } deployment_configuration = map_complex_type(module.params['deployment_configuration'], DEPLOYMENT_CONFIGURATION_TYPE_MAP) This ensures all keys within the root element are casted and valid integers """ if complex_type is None: return new_type = type(complex_type)() if isinstance(complex_type, dict): for key in complex_type: if key in type_map: if isinstance(type_map[key], list): new_type[key] = map_complex_type( complex_type[key], type_map[key][0]) else: new_type[key] = map_complex_type( complex_type[key], type_map[key]) else: return complex_type elif isinstance(complex_type, list): for i in range(len(complex_type)): new_type.append(map_complex_type( complex_type[i], type_map)) elif type_map: return globals()['__builtins__'][type_map](complex_type) return new_type
Inspq/ansible
lib/ansible/module_utils/ec2.py
Python
gpl-3.0
21,973
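The module_utils file above is not executed on its own; individual Ansible AWS modules import its helpers. The sketch below is not part of the original file: it shows how a hypothetical module could combine ec2_argument_spec(), get_aws_connection_info() and boto3_conn(). The instance_id option and the describe_instances call are illustrative assumptions.

# Minimal sketch of a consuming module (assumed layout, not from lib/ansible/module_utils/ec2.py).
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, boto3_tag_list_to_ansible_dict,
                                      camel_dict_to_snake_dict, ec2_argument_spec,
                                      get_aws_connection_info)


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(instance_id=dict(required=True)))  # hypothetical module option
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    # Resolve credentials/region from module args or the environment, then open a client.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='ec2',
                        region=region, endpoint=ec2_url, **aws_connect_kwargs)

    reservations = client.describe_instances(InstanceIds=[module.params['instance_id']])
    instance = reservations['Reservations'][0]['Instances'][0]
    module.exit_json(changed=False,
                     instance=camel_dict_to_snake_dict(instance),
                     tags=boto3_tag_list_to_ansible_dict(instance.get('Tags', [])))


if __name__ == '__main__':
    main()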
#! /usr/bin/env python # coding: utf-8 # # CCI's boto convenience functions. # # SVN: $Id: cci_boto.py 43293 2014-08-18 17:40:22Z svnsync $ # Created: 2014.06.11 # Copyright: Steven E. Pav, 2014 # Author: Steven E. Pav # Comments: Steven E. Pav from boto.glacier import connect_to_region import time from os.path import isfile from datetime import datetime # copied from glacier.py distributed with boto: def connect(region, debug_level=0, access_key=None, secret_key=None): """ Connect to a specific region """ layer2 = connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key, debug=debug_level) if layer2 is None: raise Exception('Invalid region %s, or bad creds' % region) return layer2 def fetch_vault(layer2, vault_name): """ Internal get_vault """ vault = layer2.get_vault(vault_name) if vault is None : raise Exception('Unknown vault %s' % vault_name) return vault def upload_files(layer2, vault_name, filenames, verbosity=1): my_vault = layer2.get_vault(vault_name) okfn = [fn for fn in filenames if isfile(fn)] outids = [my_vault.upload_archive(fn, description=fn) for fn in okfn] if verbosity > 0: for iii in range(len(okfn)): print 'Uploading %s to %s, id=%s' % (okfn[iii], vault_name, outids[iii]) return outids def init_inventory_job(layer2, vault_name): """ Initiate a request for vault_inventory """ vault = fetch_vault(layer2, vault_name) job_id = vault.retrieve_inventory() return job_id def init_retrieval_job(layer2, vault_name, archive_id): """ Initiate a request for an archive """ vault = fetch_vault(layer2, vault_name) job_id = vault.retrieve_archive(archive_id) return job_id # but see http://stackoverflow.com/a/14924210/164611 # for a better way to do this kind of thing ... def wait_job(layer2, vault_name, job_id, sleepsec=60, timeout=86400): """ Wait for a job to terminate. """ t0 = datetime.now() vault = fetch_vault(layer2, vault_name) while 1: job = vault.get_job(job_id) if not job.completed: tf = datetime.now() delta = tf - t0 if (delta.seconds > timeout): # 2FIX: should this just return false? raise Exception('timed out') time.sleep(sleepsec) else: break wasok = job.completed return wasok def get_inventory(layer2, vault_name): """ get inventory of vault, blocking on return. """ # get inventory: job_id = init_inventory_job(layer2, vault_name) wasok = wait_job(layer2, vault_name, job_id) if not wasok : raise Exception('some problem waiting on job') return get_job_result(layer2, vault_name, job_id) def get_job_result(layer2, vault_name, job_id): """ Get the job output """ layer1 = layer2.layer1 retval = layer1.get_job_output(vault_name, job_id) return retval # module as script if __name__ == "__main__": pass #for vim modeline: (do not edit) # vim:ts=4:sw=4:sts=4:tw=79:sta:et:ai:nu:fdm=indent:syn=python:ft=python:tag=.py_tags;:cin:fo=croql
shabbychef/boto_wrapper
bin/cci_boto.py
Python
gpl-2.0
3,239
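The __main__ block of the script above is a no-op, so the following sketch (not from the original file) shows how its helpers are meant to compose. The region, vault name, file list and credentials are placeholders, and it assumes the script is importable as a module named cci_boto.

# Sketch only: placeholder region, vault, credentials and filenames.
from cci_boto import connect, upload_files, init_retrieval_job, wait_job

layer2 = connect('us-east-1', access_key='AKIA...', secret_key='...')

# Upload local files as Glacier archives and remember their archive ids.
archive_ids = upload_files(layer2, 'photos-vault', ['a.tar.gz', 'b.tar.gz'], verbosity=0)

# Kick off an archive retrieval and block until Glacier completes the job
# (this typically takes hours; wait_job polls once a minute by default).
job_id = init_retrieval_job(layer2, 'photos-vault', archive_ids[0])
wait_job(layer2, 'photos-vault', job_id)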
from cs50 import SQL from flask import session from helpers import usd db = SQL("sqlite:///finance.db") def get_cash(): return float(db.execute("SELECT cash FROM users WHERE id = :id", id=session["user_id"])[0]["cash"]) def get_username(): return db.execute("SELECT username FROM users WHERE id = :id", id=session["user_id"] )[0]["username"]
marzique/cs50_finance
sqlquery.py
Python
mit
382
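Both helpers above assume a logged-in user, i.e. session['user_id'] has been set. The sketch below (not part of the repository) shows how a view in the same application might use them; it assumes the stock CS50 finance layout where helpers also provides a login_required decorator.

# Sketch only: assumes the usual CS50 finance app structure.
from flask import Flask
from helpers import login_required, usd
from sqlquery import get_cash, get_username

app = Flask(__name__)
app.secret_key = 'dev'  # placeholder


@app.route('/balance')
@login_required
def balance():
    # Show the current user's cash balance, formatted as USD.
    return '%s has %s in cash' % (get_username(), usd(get_cash()))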
#!/usr/bin/env python

import socket


def tcp_server():
    """Accept TCP connections and exchange two pairs of messages with each client."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('192.168.0.101', 8889))
    s.listen(10)
    while True:
        c, a = s.accept()
        print 'got connection from', a
        c.send('hello, this is server')
        r = c.recv(400)
        print 'received from client', r
        c.send('hello again, this is server')
        r = c.recv(400)
        print 'received from client again', r

tcp_server()
oxnz/NZChat
cast/x.py
Python
mit
409
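The script above (cast/x.py) implements the listening side of the exchange: it binds, listens and accepts. A peer that connects to it has to mirror the send/recv order. The client below is an assumption, not part of the repository, written in the same Python 2 style with the address hard-coded to match the server.

# Client-side counterpart (assumed); IP and port must match the server above.
import socket

def tcp_client():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('192.168.0.101', 8889))
    print 'server says', s.recv(400)      # "hello, this is server"
    s.send('hello, this is client')
    print 'server says', s.recv(400)      # "hello again, this is server"
    s.send('hello again, this is client')
    s.close()

tcp_client()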
# from sys import argv as a # script, first, second, third = a # print ("The script is called:", script) # print ("Your first variable is:", first) # print ("Your second variable is:", second) # print ("Your third variable is:", third) from sys import argv script, txt1, txt2 = argv print (argv)
ismk/Python-Examples
learn python the hard way/ex13.py
Python
mit
315
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2013, 2014 CERN # Author: Pawel Szostek (pawel.szostek@cern.ch) # Multi-tool support by Javier D. Garcia-Lasheras (javier@garcialasheras.com) # # This file is part of Hdlmake. # # Hdlmake is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Hdlmake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Hdlmake. If not, see <http://www.gnu.org/licenses/>. from action import Action import logging import os import sys import global_mod from srcfile import SourceFileFactory import importlib class GenerateRemoteSynthesisMakefile(Action): def _check_manifest(self): if not self.top_module.action == "synthesis": logging.error("action must be equal to \"synthesis\"") sys.exit("Exiting") if not self.top_module.syn_project: logging.error("syn_project must be set in the manifest.") sys.exit("Exiting") def run(self): self._check_all_fetched_or_quit() self._check_manifest() tool_object = global_mod.tool_module.ToolControls() self._generate_remote_synthesis_makefile(tool_object) def _search_tcl_file(self, directory=None): # This function is used in _generate_remote_ise_makefile if directory is None: directory = "." filenames = os.listdir(directory) tcls = [] for filename in filenames: file_parts = filename.split('.') if file_parts[len(file_parts)-1] == "tcl": tcls.append(filename) if len(tcls) == 0: return None if len(tcls) > 1: logging.warning("Multiple tcls in the current directory: " + str(tcls) + "\n" + "Picking the first one: " + tcls[0]) return tcls[0] def _generate_tcl(self): # This function is used in _generate_remote_ise_makefile f = open("run.tcl", "w") f.write("project open " + self.top_module.syn_project + '\n') f.write("process run {Generate Programming File} -force rerun_all\n") f.close() def _generate_remote_synthesis_makefile(self, tool_object): logging.info("Generating makefile for remote synthesis.") top_mod = self.modules_pool.get_top_module() tcl = self._search_tcl_file() if tcl is None: self._generate_tcl() tcl = "run.tcl" files = self.modules_pool.build_global_file_set() sff = SourceFileFactory() files.add(sff.new(tcl, module=None)) files.add(sff.new(top_mod.syn_project, module=None)) tool_object.generate_remote_synthesis_makefile(files=files, name=top_mod.syn_name, cwd=os.getcwd(), user=self.env["rsynth_user"], server=self.env["rsynth_server"]) logging.info("Remote synthesis makefile generated.")
JamesHyunKim/myhdl
hdlmake/action/remote_synthesis.py
Python
gpl-3.0
3,420
from flask import Blueprint, jsonify, request

routes_api = Blueprint('routes_api', __name__)


@routes_api.route('/v1/routes', methods=['GET'])
def routes_get():
    '''
    Get a list of routes

    It is the handler for GET /v1/routes
    '''
    return jsonify()
ridindirtyatl/truffle-api
routes.py
Python
agpl-3.0
261
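On its own the blueprint above serves nothing; it must be registered on a Flask application. A minimal sketch follows; the import path and run() arguments are assumptions.

# Minimal sketch: assumes the blueprint module is importable as `routes`.
from flask import Flask
from routes import routes_api

app = Flask(__name__)
app.register_blueprint(routes_api)

if __name__ == '__main__':
    # GET /v1/routes now reaches routes_get() and returns the empty JSON body.
    app.run(debug=True)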
from __future__ import unicode_literals from .application import Application from .current import get_app, set_app, NoRunningApplicationError from .dummy import DummyApplication from .run_in_terminal import run_in_terminal, run_coroutine_in_terminal __all__ = [ # Application. 'Application', # Current. 'get_app', 'set_app', 'NoRunningApplicationError', # Dummy. 'DummyApplication', # Run_in_terminal 'run_coroutine_in_terminal', 'run_in_terminal', ]
lmregus/Portfolio
python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/application/__init__.py
Python
mit
499
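The names re-exported above are prompt_toolkit's public entry points for the running application. The sketch below, not taken from the package, shows run_in_terminal being used from a key binding: it suspends the prompt, runs the callable on the real terminal, then redraws. The key choice and prompt text are arbitrary.

# Sketch of run_in_terminal usage inside a key binding (prompt_toolkit 2.x style).
from prompt_toolkit import prompt
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit.key_binding import KeyBindings

bindings = KeyBindings()


@bindings.add('c-t')
def _(event):
    # Temporarily leave the prompt, print on the plain terminal, then redraw.
    run_in_terminal(lambda: print('time check'))


text = prompt('> ', key_bindings=bindings)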
#!/usr/bin/python #-*- coding: utf-8 -*- #encoding=utf-8 ''' /* * Copyright (c) 2018, https://github.com/nebulaim * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ ''' import glob, re, binascii, os, sys ''' { langPackString key: "lng_about_done" [STRING], value: "Done" [STRING], }, ''' if (len(sys.argv) !=2): print('Input file required.') sys.exit(1) def ToCamelName(name): ss = name.split("_") for i in range(len(ss)): if (i == 0): continue s = ss[i] ss[i] = s[0:1].upper() + s[1:] return ''.join(ss) langPackStrings = [] langPackStringPluralizeds = [] with open(sys.argv[1]) as f: lastName = '' for line in f: line=line.strip() line=line.strip(',') if (line == '{ langPackString'): langPackStrings.append({'key':'', 'value':''}) lastName = 'langPackString' continue if (line == '{ langPackStringPluralized'): langPackStringPluralizeds.append({'key':'', 'zero_value':'', 'one_value':'', 'two_value':'', 'few_value':'', 'many_value':'', 'other_value':''}) lastName = 'langPackStringPluralized' continue if (line.find('flags:') >=0 ): continue if (line == '},'): continue idx = line.rfind('[') line = line[0:idx-1] if (line == ''): continue idx = line.find(':') s = [] if (idx > 0): s = [line[0:idx], line[idx+2:]] else: continue if (len(s) == 0): continue s[1] = s[1].strip('"') #print s if (lastName == 'langPackString'): langPackStrings[-1][s[0]] = s[1] if (lastName == 'langPackStringPluralized'): langPackStringPluralizeds[-1][s[0]] = s[1] #print langPackStrings #print langPackStringPluralizeds #print len(langPackStrings) + len(langPackStringPluralizeds) print'''# lang_pack_en.toml langCode = "en" version = 77 ''' for d in langPackStrings: print '[[Strings]]' for k, v in d.items(): print('%s = "%s"' % (ToCamelName(k), v)) print('') for d in langPackStringPluralizeds: print '[[StringPluralizeds]]' for k, v in d.items(): print('%s = "%s"' % (ToCamelName(k), v)) print('')
nebulaim/telegramd
server/biz_server/langpack/model/codegen_lang_pack.py
Python
apache-2.0
2,877
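The key-name conversion the script performs can be illustrated with a couple of checks; these are not in the original file. The first underscore-separated chunk keeps its case and the remaining chunks are capitalised.

# Illustration only (assumes the script's namespace is available):
assert ToCamelName('lng_about_done') == 'lngAboutDone'
assert ToCamelName('lng_action_add_user') == 'lngActionAddUser'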
""" Openepr Console based on rconsole A Python console you can embed in a program and attach to remotely. """ import getopt import sys import os import rfoo.utils.rconsole as rconsole def print_usage(): scriptName = os.path.basename(sys.argv[0]) sys.stdout.write(""" Start remote console: %(name)s [-h] [-pPORT] -h, --help Print this help. -pPORT Set PORT. """ % {'name': scriptName}) def main(): """Parse options and run script.""" try: options, args = getopt.getopt( sys.argv[1:], 'hp:', ['help'] ) options = dict(options) except getopt.GetoptError: print_usage() return 2 if '-h' in options or '--help' in options: print_usage() return if '-p' in options: port = int(options.get('-p')) else: port = rconsole.PORT try: rconsole.interact(port=port) except: print '' if __name__ == '__main__': main()
chengdh/openerp-ktv
openerp-console.py
Python
agpl-3.0
1,002
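The console above only attaches to a process that is already serving rconsole; the target process (for example the OpenERP server) has to start the embedded server first. A sketch of that side, assuming rfoo's documented spawn_server() entry point.

# Inside the process you want to inspect (sketch; the port value is an assumption):
import rfoo.utils.rconsole as rconsole

rconsole.spawn_server(port=rconsole.PORT)   # starts a background console thread
# ... the application keeps running; the script above can now attach with -p<port>.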
import numpy as np import scipy.sparse as sp from scipy import linalg from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn import datasets from sklearn.metrics import mean_squared_error from sklearn.metrics.scorer import SCORERS from sklearn.linear_model.base import LinearRegression from sklearn.linear_model.ridge import ridge_regression from sklearn.linear_model.ridge import Ridge from sklearn.linear_model.ridge import _RidgeGCV from sklearn.linear_model.ridge import RidgeCV from sklearn.linear_model.ridge import RidgeClassifier from sklearn.linear_model.ridge import RidgeClassifierCV from sklearn.linear_model.ridge import _solve_dense_cholesky from sklearn.linear_model.ridge import _solve_dense_cholesky_kernel from sklearn.cross_validation import KFold diabetes = datasets.load_diabetes() X_diabetes, y_diabetes = diabetes.data, diabetes.target ind = np.arange(X_diabetes.shape[0]) rng = np.random.RandomState(0) rng.shuffle(ind) ind = ind[:200] X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind] iris = datasets.load_iris() X_iris = sp.csr_matrix(iris.data) y_iris = iris.target DENSE_FILTER = lambda X: X SPARSE_FILTER = lambda X: sp.csr_matrix(X) def test_ridge(): """Ridge regression convergence test using score TODO: for this test to be robust, we should use a dataset instead of np.random. """ rng = np.random.RandomState(0) alpha = 1.0 for solver in ("svd", "sparse_cg", "dense_cholesky", "lsqr"): # With more samples than features n_samples, n_features = 6, 5 y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) ridge = Ridge(alpha=alpha, solver=solver) ridge.fit(X, y) assert_equal(ridge.coef_.shape, (X.shape[1], )) assert_greater(ridge.score(X, y), 0.47) if solver == "dense_cholesky": # Currently the only solver to support sample_weight. ridge.fit(X, y, sample_weight=np.ones(n_samples)) assert_greater(ridge.score(X, y), 0.47) # With more features than samples n_samples, n_features = 5, 10 y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) ridge = Ridge(alpha=alpha, solver=solver) ridge.fit(X, y) assert_greater(ridge.score(X, y), .9) if solver == "dense_cholesky": # Currently the only solver to support sample_weight. 
ridge.fit(X, y, sample_weight=np.ones(n_samples)) assert_greater(ridge.score(X, y), 0.9) def test_primal_dual_relationship(): y = y_diabetes.reshape(-1, 1) coef = _solve_dense_cholesky(X_diabetes, y, alpha=[1e-2]) K = np.dot(X_diabetes, X_diabetes.T) dual_coef = _solve_dense_cholesky_kernel(K, y, alpha=[1e-2]) coef2 = np.dot(X_diabetes.T, dual_coef).T assert_array_almost_equal(coef, coef2) def test_ridge_singular(): # test on a singular matrix rng = np.random.RandomState(0) n_samples, n_features = 6, 6 y = rng.randn(n_samples / 2) y = np.concatenate((y, y)) X = rng.randn(n_samples / 2, n_features) X = np.concatenate((X, X), axis=0) ridge = Ridge(alpha=0) ridge.fit(X, y) assert_greater(ridge.score(X, y), 0.9) def test_ridge_sample_weights(): rng = np.random.RandomState(0) for solver in ("dense_cholesky", ): for n_samples, n_features in ((6, 5), (5, 10)): for alpha in (1.0, 1e-2): y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) sample_weight = 1 + rng.rand(n_samples) coefs = ridge_regression(X, y, alpha=alpha, sample_weight=sample_weight, solver=solver) # Sample weight can be implemented via a simple rescaling # for the square loss. coefs2 = ridge_regression( X * np.sqrt(sample_weight)[:, np.newaxis], y * np.sqrt(sample_weight), alpha=alpha, solver=solver) assert_array_almost_equal(coefs, coefs2) # Test for fit_intercept = True est = Ridge(alpha=alpha, solver=solver) est.fit(X, y, sample_weight=sample_weight) # Check using Newton's Method # Quadratic function should be solved in a single step. # Initialize sample_weight = np.sqrt(sample_weight) X_weighted = sample_weight[:, np.newaxis] * ( np.column_stack((np.ones(n_samples), X))) y_weighted = y * sample_weight # Gradient is (X*coef-y)*X + alpha*coef_[1:] # Remove coef since it is initialized to zero. grad = -np.dot(y_weighted, X_weighted) # Hessian is (X.T*X) + alpha*I except that the first # diagonal element should be zero, since there is no # penalization of intercept. diag = alpha * np.ones(n_features + 1) diag[0] = 0. hess = np.dot(X_weighted.T, X_weighted) hess.flat[::n_features + 2] += diag coef_ = - np.dot(linalg.inv(hess), grad) assert_almost_equal(coef_[0], est.intercept_) assert_array_almost_equal(coef_[1:], est.coef_) def test_ridge_shapes(): """Test shape of coef_ and intercept_ """ rng = np.random.RandomState(0) n_samples, n_features = 5, 10 X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) Y1 = y[:, np.newaxis] Y = np.c_[y, 1 + y] ridge = Ridge() ridge.fit(X, y) assert_equal(ridge.coef_.shape, (n_features,)) assert_equal(ridge.intercept_.shape, ()) ridge.fit(X, Y1) assert_equal(ridge.coef_.shape, (1, n_features)) assert_equal(ridge.intercept_.shape, (1, )) ridge.fit(X, Y) assert_equal(ridge.coef_.shape, (2, n_features)) assert_equal(ridge.intercept_.shape, (2, )) def test_ridge_intercept(): """Test intercept with multiple targets GH issue #708 """ rng = np.random.RandomState(0) n_samples, n_features = 5, 10 X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) Y = np.c_[y, 1. + y] ridge = Ridge() ridge.fit(X, y) intercept = ridge.intercept_ ridge.fit(X, Y) assert_almost_equal(ridge.intercept_[0], intercept) assert_almost_equal(ridge.intercept_[1], intercept + 1.) 
def test_toy_ridge_object(): """Test BayesianRegression ridge classifier TODO: test also n_samples > n_features """ X = np.array([[1], [2]]) Y = np.array([1, 2]) clf = Ridge(alpha=0.0) clf.fit(X, Y) X_test = [[1], [2], [3], [4]] assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4]) assert_equal(len(clf.coef_.shape), 1) assert_equal(type(clf.intercept_), np.float64) Y = np.vstack((Y, Y)).T clf.fit(X, Y) X_test = [[1], [2], [3], [4]] assert_equal(len(clf.coef_.shape), 2) assert_equal(type(clf.intercept_), np.ndarray) def test_ridge_vs_lstsq(): """On alpha=0., Ridge and OLS yield the same solution.""" rng = np.random.RandomState(0) # we need more samples than features n_samples, n_features = 5, 4 y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) ridge = Ridge(alpha=0., fit_intercept=False) ols = LinearRegression(fit_intercept=False) ridge.fit(X, y) ols.fit(X, y) assert_almost_equal(ridge.coef_, ols.coef_) ridge.fit(X, y) ols.fit(X, y) assert_almost_equal(ridge.coef_, ols.coef_) def test_ridge_individual_penalties(): """Tests the ridge object using individual penalties""" rng = np.random.RandomState(42) n_samples, n_features, n_targets = 20, 10, 5 X = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_targets) penalties = np.arange(n_targets) coef_cholesky = np.array([ Ridge(alpha=alpha, solver="dense_cholesky").fit(X, target).coef_ for alpha, target in zip(penalties, y.T)]) coefs_indiv_pen = [ Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_ for solver in ['svd', 'sparse_cg', 'lsqr', 'dense_cholesky']] for coef_indiv_pen in coefs_indiv_pen: assert_array_almost_equal(coef_cholesky, coef_indiv_pen) # Test error is raised when number of targets and penalties do not match. ridge = Ridge(alpha=penalties[:3]) assert_raises(ValueError, ridge.fit, X, y) def _test_ridge_loo(filter_): # test that can work with both dense or sparse matrices n_samples = X_diabetes.shape[0] ret = [] ridge_gcv = _RidgeGCV(fit_intercept=False) ridge = Ridge(alpha=1.0, fit_intercept=False) # generalized cross-validation (efficient leave-one-out) decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes) errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp) values, c = ridge_gcv._values(1.0, y_diabetes, *decomp) # brute-force leave-one-out: remove one example at a time errors2 = [] values2 = [] for i in range(n_samples): sel = np.arange(n_samples) != i X_new = X_diabetes[sel] y_new = y_diabetes[sel] ridge.fit(X_new, y_new) value = ridge.predict([X_diabetes[i]])[0] error = (y_diabetes[i] - value) ** 2 errors2.append(error) values2.append(value) # check that efficient and brute-force LOO give same results assert_almost_equal(errors, errors2) assert_almost_equal(values, values2) # generalized cross-validation (efficient leave-one-out, # SVD variation) decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes) errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp) values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp) # check that efficient and SVD efficient LOO give same results assert_almost_equal(errors, errors3) assert_almost_equal(values, values3) # check best alpha ridge_gcv.fit(filter_(X_diabetes), y_diabetes) alpha_ = ridge_gcv.alpha_ ret.append(alpha_) # check that we get same best alpha with custom loss_func f = ignore_warnings ridge_gcv2 = RidgeCV(fit_intercept=False, loss_func=mean_squared_error) f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes) assert_equal(ridge_gcv2.alpha_, alpha_) # check that we get same best alpha with custom score_func func = 
lambda x, y: -mean_squared_error(x, y) ridge_gcv3 = RidgeCV(fit_intercept=False, score_func=func) f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes) assert_equal(ridge_gcv3.alpha_, alpha_) # check that we get same best alpha with a scorer scorer = SCORERS['mean_squared_error'] ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer) ridge_gcv4.fit(filter_(X_diabetes), y_diabetes) assert_equal(ridge_gcv4.alpha_, alpha_) # check that we get same best alpha with sample weights ridge_gcv.fit(filter_(X_diabetes), y_diabetes, sample_weight=np.ones(n_samples)) assert_equal(ridge_gcv.alpha_, alpha_) # simulate several responses Y = np.vstack((y_diabetes, y_diabetes)).T ridge_gcv.fit(filter_(X_diabetes), Y) Y_pred = ridge_gcv.predict(filter_(X_diabetes)) ridge_gcv.fit(filter_(X_diabetes), y_diabetes) y_pred = ridge_gcv.predict(filter_(X_diabetes)) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=5) return ret def _test_ridge_cv(filter_): n_samples = X_diabetes.shape[0] ridge_cv = RidgeCV() ridge_cv.fit(filter_(X_diabetes), y_diabetes) ridge_cv.predict(filter_(X_diabetes)) assert_equal(len(ridge_cv.coef_.shape), 1) assert_equal(type(ridge_cv.intercept_), np.float64) cv = KFold(n_samples, 5) ridge_cv.set_params(cv=cv) ridge_cv.fit(filter_(X_diabetes), y_diabetes) ridge_cv.predict(filter_(X_diabetes)) assert_equal(len(ridge_cv.coef_.shape), 1) assert_equal(type(ridge_cv.intercept_), np.float64) def _test_ridge_diabetes(filter_): ridge = Ridge(fit_intercept=False) ridge.fit(filter_(X_diabetes), y_diabetes) return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5) def _test_multi_ridge_diabetes(filter_): # simulate several responses Y = np.vstack((y_diabetes, y_diabetes)).T n_features = X_diabetes.shape[1] ridge = Ridge(fit_intercept=False) ridge.fit(filter_(X_diabetes), Y) assert_equal(ridge.coef_.shape, (2, n_features)) Y_pred = ridge.predict(filter_(X_diabetes)) ridge.fit(filter_(X_diabetes), y_diabetes) y_pred = ridge.predict(filter_(X_diabetes)) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def _test_ridge_classifiers(filter_): n_classes = np.unique(y_iris).shape[0] n_features = X_iris.shape[1] for clf in (RidgeClassifier(), RidgeClassifierCV()): clf.fit(filter_(X_iris), y_iris) assert_equal(clf.coef_.shape, (n_classes, n_features)) y_pred = clf.predict(filter_(X_iris)) assert_greater(np.mean(y_iris == y_pred), .79) n_samples = X_iris.shape[0] cv = KFold(n_samples, 5) clf = RidgeClassifierCV(cv=cv) clf.fit(filter_(X_iris), y_iris) y_pred = clf.predict(filter_(X_iris)) assert_true(np.mean(y_iris == y_pred) >= 0.8) def _test_tolerance(filter_): ridge = Ridge(tol=1e-5) ridge.fit(filter_(X_diabetes), y_diabetes) score = ridge.score(filter_(X_diabetes), y_diabetes) ridge2 = Ridge(tol=1e-3) ridge2.fit(filter_(X_diabetes), y_diabetes) score2 = ridge2.score(filter_(X_diabetes), y_diabetes) assert_true(score >= score2) def test_dense_sparse(): for test_func in (_test_ridge_loo, _test_ridge_cv, _test_ridge_diabetes, _test_multi_ridge_diabetes, _test_ridge_classifiers, _test_tolerance): # test dense matrix ret_dense = test_func(DENSE_FILTER) # test sparse matrix ret_sparse = test_func(SPARSE_FILTER) # test that the outputs are the same if ret_dense is not None and ret_sparse is not None: assert_array_almost_equal(ret_dense, ret_sparse, decimal=3) def test_ridge_cv_sparse_svd(): X = sp.csr_matrix(X_diabetes) ridge = RidgeCV(gcv_mode="svd") assert_raises(TypeError, ridge.fit, X) def test_class_weights(): """ Test class weights. 
""" X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = RidgeClassifier(class_weight=None) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf = RidgeClassifier(class_weight={1: 0.001}) clf.fit(X, y) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) # check if class_weight = 'auto' can handle negative labels. clf = RidgeClassifier(class_weight='auto') clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # class_weight = 'auto', and class_weight = None should return # same values when y has equal number of all labels X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]]) y = [1, 1, -1, -1] clf = RidgeClassifier(class_weight=None) clf.fit(X, y) clfa = RidgeClassifier(class_weight='auto') clfa.fit(X, y) assert_equal(len(clfa.classes_), 2) assert_array_almost_equal(clf.coef_, clfa.coef_) assert_array_almost_equal(clf.intercept_, clfa.intercept_) def test_class_weights_cv(): """ Test class weights for cross validated ridge classifier. """ X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1]) clf.fit(X, y) # we give a small weights to class 1 clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10]) clf.fit(X, y) assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1])) def test_ridgecv_store_cv_values(): """ Test _RidgeCV's store_cv_values attribute. """ rng = rng = np.random.RandomState(42) n_samples = 8 n_features = 5 x = rng.randn(n_samples, n_features) alphas = [1e-1, 1e0, 1e1] n_alphas = len(alphas) r = RidgeCV(alphas=alphas, store_cv_values=True) # with len(y.shape) == 1 y = rng.randn(n_samples) r.fit(x, y) assert_equal(r.cv_values_.shape, (n_samples, n_alphas)) # with len(y.shape) == 2 n_responses = 3 y = rng.randn(n_samples, n_responses) r.fit(x, y) assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
treycausey/scikit-learn
sklearn/linear_model/tests/test_ridge.py
Python
bsd-3-clause
17,457
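For orientation, the estimators these tests exercise are used as follows. This is a minimal sketch with synthetic data; the alpha grid, shapes and coefficients are arbitrary choices, not taken from the test suite.

# Minimal sketch of the estimators under test.
import numpy as np
from sklearn.linear_model import Ridge, RidgeCV

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
true_coef = np.array([1.0, -2.0, 0.5, 0.0, 3.0])
y = np.dot(X, true_coef) + 0.1 * rng.randn(50)

# Plain ridge regression with a fixed penalty.
ridge = Ridge(alpha=1.0).fit(X, y)
print(ridge.coef_)

# RidgeCV picks the penalty by (efficient leave-one-out) cross-validation.
ridge_cv = RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0]).fit(X, y)
print(ridge_cv.alpha_)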
from httpobs.website.decorators import add_response_headers, sanitized_api_response __all__ = ['add_response_headers', 'sanitized_api_response']
april/http-observatory
httpobs/website/__init__.py
Python
mpl-2.0
157
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2014 University of Dundee & Open Microscopy Environment. # All Rights Reserved. # Use is subject to license terms supplied in LICENSE.txt # """ FOR TRAINING PURPOSES ONLY! """ from omero.gateway import BlitzGateway from Parse_OMERO_Properties import USERNAME, PASSWORD, HOST, PORT from Parse_OMERO_Properties import imageId # Create a connection # ================================================================= conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT) conn.connect() # Create an image from scratch # ================================================================= # This example demonstrates the usage of the convenience method # createImageFromNumpySeq() Here we create a multi-dimensional image from a # hard-coded array of data. from numpy import array, int8 import omero sizeX, sizeY, sizeZ, sizeC, sizeT = 5, 4, 1, 2, 1 plane1 = array( [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=int8) plane2 = array( [[5, 6, 7, 8, 9], [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [0, 1, 2, 3, 4]], dtype=int8) planes = [plane1, plane2] def planeGen(): """generator will yield planes""" for p in planes: yield p desc = "Image created from a hard-coded arrays" i = conn.createImageFromNumpySeq( planeGen(), "numpy image", sizeZ, sizeC, sizeT, description=desc, dataset=None) print 'Created new Image:%s Name:"%s"' % (i.getId(), i.getName()) # Set the pixel size using units (new in 5.1.0) # ================================================================= # Lengths are specified by value and a unit enumeration # Here we set the pixel size X and Y to be 9.8 Angstroms from omero.model.enums import UnitsLength # Re-load the image to avoid update conflicts i = conn.getObject("Image", i.getId()) u = omero.model.LengthI(9.8, UnitsLength.ANGSTROM) p = i.getPrimaryPixels()._obj p.setPhysicalSizeX(u) p.setPhysicalSizeY(u) conn.getUpdateService().saveObject(p) # Create an Image from an existing image # ================================================================= # We are going to create a new image by passing the method a 'generator' of 2D # planes This will come from an existing image, by taking the average of 2 # channels. zctList = [] image = conn.getObject('Image', imageId) sizeZ, sizeC, sizeT = image.getSizeZ(), image.getSizeC(), image.getSizeT() dataset = image.getParent() pixels = image.getPrimaryPixels() newSizeC = 1 def planeGen(): """ set up a generator of 2D numpy arrays. The createImage method below expects planes in the order specified here (for z.. for c.. for t..) """ for z in range(sizeZ): # all Z sections # Illustrative purposes only, since we only have 1 channel for c in range(newSizeC): for t in range(sizeT): # all time-points channel0 = pixels.getPlane(z, 0, t) channel1 = pixels.getPlane(z, 1, t) # Here we can manipulate the data in many different ways. As # an example we are doing "average" # average of 2 channels newPlane = (channel0 + channel1) / 2 print "newPlane for z,t:", z, t, newPlane.dtype, \ newPlane.min(), newPlane.max() yield newPlane desc = ("Image created from Image ID: %s by averaging Channel 1 and Channel 2" % imageId) i = conn.createImageFromNumpySeq( planeGen(), "new image", sizeZ, newSizeC, sizeT, description=desc, dataset=dataset) # Close connection: # ================================================================= # When you are done, close the session to free up server resources. conn._closeSession()
joansmith/openmicroscopy
examples/Training/python/Create_Image.py
Python
gpl-2.0
3,793
class Solution(object):
    def compareVersion(self, version1, version2):
        """
        :type version1: str
        :type version2: str
        :rtype: int
        """
        vl1 = version1.split('.')
        vl2 = version2.split('.')
        mix = min(len(vl1), len(vl2))
        for i in range(mix):
            if int(vl1[i]) > int(vl2[i]):
                return 1
            if int(vl1[i]) < int(vl2[i]):
                return -1
        # The common prefix is equal; the longer version is greater only if any
        # of its remaining revisions is non-zero ("1.0" == "1.0.0", "1.0.1" > "1.0").
        if any(int(v) != 0 for v in vl1[mix:]):
            return 1
        if any(int(v) != 0 for v in vl2[mix:]):
            return -1
        return 0
CharlotteLock/LeetCode
165. Compare Version Numbers.py
Python
gpl-3.0
671
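A few illustrative checks of the expected behaviour (numeric rather than lexicographic ordering, trailing zero revisions compare equal); these are not part of the original submission.

# Illustrative checks only.
s = Solution()
assert s.compareVersion('1.2', '1.10') == -1   # 2 < 10 numerically
assert s.compareVersion('1.0', '1.0.0') == 0   # trailing zeros are ignored
assert s.compareVersion('1.0.1', '1') == 1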
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA # This file is part of urlgrabber, a high-level cross-protocol url-grabber # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko # Copyright 2009 Red Hat inc, pycurl code written by Seth Vidal """A high-level cross-protocol url-grabber. GENERAL ARGUMENTS (kwargs) Where possible, the module-level default is indicated, and legal values are provided. copy_local = 0 [0|1] ignored except for file:// urls, in which case it specifies whether urlgrab should still make a copy of the file, or simply point to the existing copy. The module level default for this option is 0. close_connection = 0 [0|1] tells URLGrabber to close the connection after a file has been transferred. This is ignored unless the download happens with the http keepalive handler (keepalive=1). Otherwise, the connection is left open for further use. The module level default for this option is 0 (keepalive connections will not be closed). keepalive = 1 [0|1] specifies whether keepalive should be used for HTTP/1.1 servers that support it. The module level default for this option is 1 (keepalive is enabled). progress_obj = None a class instance that supports the following methods: po.start(filename, url, basename, size, now, text) # length will be None if unknown po.update(read) # read == bytes read so far po.end() multi_progress_obj = None a class instance that supports the following methods: mo.start(total_files, total_size) mo.newMeter() => meter mo.removeMeter(meter) mo.end() The 'meter' object is similar to progress_obj, but multiple instances may be created and updated at the same time. When downloading multiple files in parallel and multi_progress_obj is None progress_obj is used in compatibility mode: finished files are shown but there's no in-progress display. text = None specifies alternative text to be passed to the progress meter object. If not given, the default progress meter will use the basename of the file. throttle = 1.0 a number - if it's an int, it's the bytes/second throttle limit. If it's a float, it is first multiplied by bandwidth. If throttle == 0, throttling is disabled. If None, the module-level default (which can be set on default_grabber.throttle) is used. See BANDWIDTH THROTTLING for more information. timeout = 300 a positive integer expressing the number of seconds to wait before timing out attempts to connect to a server. If the value is None or 0, connection attempts will not time out. The timeout is passed to the underlying pycurl object as its CONNECTTIMEOUT option, see the curl documentation on CURLOPT_CONNECTTIMEOUT for more information. http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUT minrate = 1000 This sets the low speed threshold in bytes per second. 
If the server is sending data slower than this for at least `timeout' seconds, the library aborts the connection. bandwidth = 0 the nominal max bandwidth in bytes/second. If throttle is a float and bandwidth == 0, throttling is disabled. If None, the module-level default (which can be set on default_grabber.bandwidth) is used. See BANDWIDTH THROTTLING for more information. range = None a tuple of the form (first_byte, last_byte) describing a byte range to retrieve. Either or both of the values may set to None. If first_byte is None, byte offset 0 is assumed. If last_byte is None, the last byte available is assumed. Note that the range specification is python-like in that (0,10) will yield the first 10 bytes of the file. If set to None, no range will be used. reget = None [None|'simple'|'check_timestamp'] whether to attempt to reget a partially-downloaded file. Reget only applies to .urlgrab and (obviously) only if there is a partially downloaded file. Reget has two modes: 'simple' -- the local file will always be trusted. If there are 100 bytes in the local file, then the download will always begin 100 bytes into the requested file. 'check_timestamp' -- the timestamp of the server file will be compared to the timestamp of the local file. ONLY if the local file is newer than or the same age as the server file will reget be used. If the server file is newer, or the timestamp is not returned, the entire file will be fetched. NOTE: urlgrabber can do very little to verify that the partial file on disk is identical to the beginning of the remote file. You may want to either employ a custom "checkfunc" or simply avoid using reget in situations where corruption is a concern. user_agent = 'urlgrabber/VERSION' a string, usually of the form 'AGENT/VERSION' that is provided to HTTP servers in the User-agent header. The module level default for this option is "urlgrabber/VERSION". http_headers = None a tuple of 2-tuples, each containing a header and value. These will be used for http and https requests only. For example, you can do http_headers = (('Pragma', 'no-cache'),) ftp_headers = None this is just like http_headers, but will be used for ftp requests. proxies = None a dictionary that maps protocol schemes to proxy hosts. For example, to use a proxy server on host "foo" port 3128 for http and https URLs: proxies={ 'http' : 'http://foo:3128', 'https' : 'http://foo:3128' } note that proxy authentication information may be provided using normal URL constructs: proxies={ 'http' : 'http://user:host@foo:3128' } libproxy = False Use the libproxy module (if installed) to find proxies. The libproxy code is only used if the proxies dictionary does not provide any proxies. no_cache = False When True, server-side cache will be disabled for http and https requests. This is equivalent to setting http_headers = (('Pragma', 'no-cache'),) prefix = None a url prefix that will be prepended to all requested urls. For example: g = URLGrabber(prefix='http://foo.com/mirror/') g.urlgrab('some/file.txt') ## this will fetch 'http://foo.com/mirror/some/file.txt' This option exists primarily to allow identical behavior to MirrorGroup (and derived) instances. Note: a '/' will be inserted if necessary, so you cannot specify a prefix that ends with a partial file or directory name. opener = None No-op when using the curl backend (default) cache_openers = True No-op when using the curl backend (default) data = None Only relevant for the HTTP family (and ignored for other protocols), this allows HTTP POSTs. 
When the data kwarg is present (and not None), an HTTP request will automatically become a POST rather than GET. This is done by direct passthrough to urllib2. If you use this, you may also want to set the 'Content-length' and 'Content-type' headers with the http_headers option. Note that python 2.2 handles the case of these badly and if you do not use the proper case (shown here), your values will be overridden with the defaults. urlparser = URLParser() The URLParser class handles pre-processing of URLs, including auth-handling for user/pass encoded in http urls, file handing (that is, filenames not sent as a URL), and URL quoting. If you want to override any of this behavior, you can pass in a replacement instance. See also the 'quote' option. quote = None Whether or not to quote the path portion of a url. quote = 1 -> quote the URLs (they're not quoted yet) quote = 0 -> do not quote them (they're already quoted) quote = None -> guess what to do This option only affects proper urls like 'file:///etc/passwd'; it does not affect 'raw' filenames like '/etc/passwd'. The latter will always be quoted as they are converted to URLs. Also, only the path part of a url is quoted. If you need more fine-grained control, you should probably subclass URLParser and pass it in via the 'urlparser' option. username = None username to use for simple http auth - is automatically quoted for special characters password = None password to use for simple http auth - is automatically quoted for special characters ssl_ca_cert = None this option can be used if M2Crypto is available and will be ignored otherwise. If provided, it will be used to create an SSL context. If both ssl_ca_cert and ssl_context are provided, then ssl_context will be ignored and a new context will be created from ssl_ca_cert. ssl_context = None No-op when using the curl backend (default) ssl_verify_peer = True Check the server's certificate to make sure it is valid with what our CA validates ssl_verify_host = True Check the server's hostname to make sure it matches the certificate DN ssl_key = None Path to the key the client should use to connect/authenticate with ssl_key_type = 'PEM' PEM or DER - format of key ssl_cert = None Path to the ssl certificate the client should use to to authenticate with ssl_cert_type = 'PEM' PEM or DER - format of certificate ssl_key_pass = None password to access the ssl_key size = None size (in bytes) or Maximum size of the thing being downloaded. This is mostly to keep us from exploding with an endless datastream max_header_size = 2097152 Maximum size (in bytes) of the headers. ip_resolve = 'whatever' What type of name to IP resolving to use, default is to do both IPV4 and IPV6. async = (key, limit) When this option is set, the urlgrab() is not processed immediately but queued. parallel_wait() then processes grabs in parallel, limiting the numer of connections in each 'key' group to at most 'limit'. max_connections The global connection limit. timedhosts The filename of the host download statistics. If defined, urlgrabber will update the stats at the end of every download. At the end of parallel_wait(), the updated stats are saved. If synchronous grabs are used, you should call th_save(). default_speed, half_life These options only affect the async mirror selection code. The default_speed option sets the speed estimate for mirrors we have never downloaded from, and defaults to 1 MBps. 
The speed estimate also drifts exponentially from the speed actually measured to the default speed, with default period of 30 days. ftp_disable_epsv = False False, True This options disables Extended Passive Mode (the EPSV command) which does not work correctly on some buggy ftp servers. RETRY RELATED ARGUMENTS retry = None the number of times to retry the grab before bailing. If this is zero, it will retry forever. This was intentional... really, it was :). If this value is not supplied or is supplied but is None retrying does not occur. retrycodes = [-1,2,4,5,6,7] a sequence of errorcodes (values of e.errno) for which it should retry. See the doc on URLGrabError for more details on this. You might consider modifying a copy of the default codes rather than building yours from scratch so that if the list is extended in the future (or one code is split into two) you can still enjoy the benefits of the default list. You can do that with something like this: retrycodes = urlgrabber.grabber.URLGrabberOptions().retrycodes if 12 not in retrycodes: retrycodes.append(12) checkfunc = None a function to do additional checks. This defaults to None, which means no additional checking. The function should simply return on a successful check. It should raise URLGrabError on an unsuccessful check. Raising of any other exception will be considered immediate failure and no retries will occur. If it raises URLGrabError, the error code will determine the retry behavior. Negative error numbers are reserved for use by these passed in functions, so you can use many negative numbers for different types of failure. By default, -1 results in a retry, but this can be customized with retrycodes. If you simply pass in a function, it will be given exactly one argument: a CallbackObject instance with the .url attribute defined and either .filename (for urlgrab) or .data (for urlread). For urlgrab, .filename is the name of the local file. For urlread, .data is the actual string data. If you need other arguments passed to the callback (program state of some sort), you can do so like this: checkfunc=(function, ('arg1', 2), {'kwarg': 3}) if the downloaded file has filename /tmp/stuff, then this will result in this call (for urlgrab): function(obj, 'arg1', 2, kwarg=3) # obj.filename = '/tmp/stuff' # obj.url = 'http://foo.com/stuff' NOTE: both the "args" tuple and "kwargs" dict must be present if you use this syntax, but either (or both) can be empty. failure_callback = None The callback that gets called during retries when an attempt to fetch a file fails. The syntax for specifying the callback is identical to checkfunc, except for the attributes defined in the CallbackObject instance. The attributes for failure_callback are: exception = the raised exception url = the url we're trying to fetch tries = the number of tries so far (including this one) retry = the value of the retry option retry_no_cache = the value of the retry_no_cache option The callback is present primarily to inform the calling program of the failure, but if it raises an exception (including the one it's passed) that exception will NOT be caught and will therefore cause future retries to be aborted. The callback is called for EVERY failure, including the last one. On the last try, the callback can raise an alternate exception, but it cannot (without severe trickiness) prevent the exception from being raised. failfunc = None The callback that gets called when urlgrab request fails. If defined, urlgrab() calls it instead of raising URLGrabError. 
Callback syntax is identical to failure_callback. Contrary to failure_callback, it's called only once. It's primary purpose is to use urlgrab() without a try/except block. interrupt_callback = None This callback is called if KeyboardInterrupt is received at any point in the transfer. Basically, this callback can have three impacts on the fetch process based on the way it exits: 1) raise no exception: the current fetch will be aborted, but any further retries will still take place 2) raise a URLGrabError: if you're using a MirrorGroup, then this will prompt a failover to the next mirror according to the behavior of the MirrorGroup subclass. It is recommended that you raise URLGrabError with code 15, 'user abort'. If you are NOT using a MirrorGroup subclass, then this is the same as (3). 3) raise some other exception (such as KeyboardInterrupt), which will not be caught at either the grabber or mirror levels. That is, it will be raised up all the way to the caller. This callback is very similar to failure_callback. They are passed the same arguments, so you could use the same function for both. retry_no_cache = False When True, automatically enable no_cache for future retries if checkfunc performs an unsuccessful check. This option is useful if your application expects a set of files from the same server to form an atomic unit and you write your checkfunc to ensure each file being downloaded belongs to such a unit. If transparent proxy caching is in effect, the files can become out-of-sync, disrupting the atomicity. Enabling this option will prevent that, while ensuring that you still enjoy the benefits of caching when possible. BANDWIDTH THROTTLING urlgrabber supports throttling via two values: throttle and bandwidth Between the two, you can either specify and absolute throttle threshold or specify a theshold as a fraction of maximum available bandwidth. throttle is a number - if it's an int, it's the bytes/second throttle limit. If it's a float, it is first multiplied by bandwidth. If throttle == 0, throttling is disabled. If None, the module-level default (which can be set with set_throttle) is used. bandwidth is the nominal max bandwidth in bytes/second. If throttle is a float and bandwidth == 0, throttling is disabled. If None, the module-level default (which can be set with set_bandwidth) is used. Note that when multiple downloads run simultaneously (multiprocessing or the parallel urlgrab() feature is used) the total bandwidth might exceed the throttle limit. You may want to also set max_connections=1 or scale your throttle option down accordingly. THROTTLING EXAMPLES: Lets say you have a 100 Mbps connection. This is (about) 10^8 bits per second, or 12,500,000 Bytes per second. You have a number of throttling options: *) set_bandwidth(12500000); set_throttle(0.5) # throttle is a float This will limit urlgrab to use half of your available bandwidth. *) set_throttle(6250000) # throttle is an int This will also limit urlgrab to use half of your available bandwidth, regardless of what bandwidth is set to. *) set_throttle(6250000); set_throttle(1.0) # float Use half your bandwidth *) set_throttle(6250000); set_throttle(2.0) # float Use up to 12,500,000 Bytes per second (your nominal max bandwidth) *) set_throttle(6250000); set_throttle(0) # throttle = 0 Disable throttling - this is more efficient than a very large throttle setting. *) set_throttle(0); set_throttle(1.0) # throttle is float, bandwidth = 0 Disable throttling - this is the default when the module is loaded. 
SUGGESTED AUTHOR IMPLEMENTATION (THROTTLING) While this is flexible, it's not extremely obvious to the user. I suggest you implement a float throttle as a percent to make the distinction between absolute and relative throttling very explicit. Also, you may want to convert the units to something more convenient than bytes/second, such as kbps or kB/s, etc. """ import os import sys import urlparse import time import string import urllib import urllib2 from httplib import responses import mimetools import thread import types import stat import pycurl from ftplib import parse150 from StringIO import StringIO from httplib import HTTPException import socket, select, fcntl from byterange import range_tuple_normalize, range_tuple_to_header, RangeError try: import xattr if not hasattr(xattr, 'set'): xattr = None # This is a "newer" API. except ImportError: xattr = None ######################################################################## # MODULE INITIALIZATION ######################################################################## try: exec('from ' + (__name__.split('.'))[0] + ' import __version__') except: __version__ = '???' try: # this part isn't going to do much - need to talk to gettext from i18n import _ except ImportError, msg: def _(st): return st ######################################################################## # functions for debugging output. These functions are here because they # are also part of the module initialization. DEBUG = None def set_logger(DBOBJ): """Set the DEBUG object. This is called by _init_default_logger when the environment variable URLGRABBER_DEBUG is set, but can also be called by a calling program. Basically, if the calling program uses the logging module and would like to incorporate urlgrabber logging, then it can do so this way. It's probably not necessary as most internal logging is only for debugging purposes. The passed-in object should be a logging.Logger instance. It will be pushed into the keepalive and byterange modules if they're being used. The mirror module pulls this object in on import, so you will need to manually push into it. In fact, you may find it tidier to simply push your logging object (or objects) into each of these modules independently. """ global DEBUG DEBUG = DBOBJ def _init_default_logger(logspec=None): '''Examines the environment variable URLGRABBER_DEBUG and creates a logging object (logging.logger) based on the contents. It takes the form URLGRABBER_DEBUG=level,filename where "level" can be either an integer or a log level from the logging module (DEBUG, INFO, etc). If the integer is zero or less, logging will be disabled. Filename is the filename where logs will be sent. If it is "-", then stdout will be used. If the filename is empty or missing, stderr will be used. If the variable cannot be processed or the logging module cannot be imported (python < 2.3) then logging will be disabled. Here are some examples: URLGRABBER_DEBUG=1,debug.txt # log everything to debug.txt URLGRABBER_DEBUG=WARNING,- # log warning and higher to stdout URLGRABBER_DEBUG=INFO # log info and higher to stderr This function is called during module initialization. It is not intended to be called from outside. 
The only reason it is a function at all is to keep the module-level namespace tidy and to collect the code into a nice block.''' try: if logspec is None: logspec = os.environ['URLGRABBER_DEBUG'] dbinfo = logspec.split(',') import logging level = logging._levelNames.get(dbinfo[0], None) if level is None: level = int(dbinfo[0]) if level < 1: raise ValueError() formatter = logging.Formatter('%(asctime)s %(message)s') if len(dbinfo) > 1: filename = dbinfo[1] else: filename = '' if filename == '': handler = logging.StreamHandler(sys.stderr) elif filename == '-': handler = logging.StreamHandler(sys.stdout) else: handler = logging.FileHandler(filename) handler.setFormatter(formatter) DBOBJ = logging.getLogger('urlgrabber') DBOBJ.propagate = False DBOBJ.addHandler(handler) DBOBJ.setLevel(level) except (KeyError, ImportError, ValueError): DBOBJ = None set_logger(DBOBJ) def _log_package_state(): if not DEBUG: return DEBUG.debug('urlgrabber version = %s' % __version__) DEBUG.debug('trans function "_" = %s' % _) _init_default_logger() _log_package_state() # normally this would be from i18n or something like it ... def _(st): return st ######################################################################## # END MODULE INITIALIZATION ######################################################################## ######################################################################## # UTILITY FUNCTIONS ######################################################################## # These functions are meant to be utilities for the urlgrabber library to use. def _to_utf8(obj, errors='replace'): '''convert 'unicode' to an encoded utf-8 byte string ''' # stolen from yum.i18n if isinstance(obj, unicode): obj = obj.encode('utf-8', errors) return obj def exception2msg(e): try: return str(e) except UnicodeEncodeError: # always use byte strings return unicode(e).encode('utf8') ######################################################################## # END UTILITY FUNCTIONS ######################################################################## class URLGrabError(IOError): """ URLGrabError error codes: URLGrabber error codes (0 -- 255) 0 - everything looks good (you should never see this) 1 - malformed url 2 - local file doesn't exist 3 - request for non-file local file (dir, etc) 4 - IOError on fetch 5 - OSError on fetch 6 - no content length header when we expected one 7 - HTTPException 8 - Exceeded read limit (for urlread) 9 - Requested byte range not satisfiable. 10 - Byte range requested, but range support unavailable 11 - Illegal reget mode 12 - Socket timeout 13 - malformed proxy url 14 - HTTPError (includes .code and .exception attributes) 15 - user abort 16 - error writing to local file MirrorGroup error codes (256 -- 511) 256 - No more mirrors left to try Custom (non-builtin) classes derived from MirrorGroup (512 -- 767) [ this range reserved for application-specific error codes ] Retry codes (< 0) -1 - retry the download, unknown reason Note: to test which group a code is in, you can simply do integer division by 256: e.errno / 256 Negative codes are reserved for use by functions passed in to retrygrab with checkfunc. The value -1 is built in as a generic retry code and is already included in the retrycodes list. Therefore, you can create a custom check function that simply returns -1 and the fetch will be re-tried. For more customized retries, you can use other negative number and include them in retry-codes. This is nice for outputting useful messages about what failed. 
You can use these error codes like so: try: urlgrab(url) except URLGrabError, e: if e.errno == 3: ... # or print e.strerror # or simply print e #### print '[Errno %i] %s' % (e.errno, e.strerror) """ def __init__(self, *args): IOError.__init__(self, *args) self.url = "No url specified" class CallbackObject: """Container for returned callback data. This is currently a dummy class into which urlgrabber can stuff information for passing to callbacks. This way, the prototype for all callbacks is the same, regardless of the data that will be passed back. Any function that accepts a callback function as an argument SHOULD document what it will define in this object. It is possible that this class will have some greater functionality in the future. """ def __init__(self, **kwargs): self.__dict__.update(kwargs) def urlgrab(url, filename=None, **kwargs): """grab the file at <url> and make a local copy at <filename> If filename is none, the basename of the url is used. urlgrab returns the filename of the local file, which may be different from the passed-in filename if the copy_local kwarg == 0. See module documentation for a description of possible kwargs. """ return default_grabber.urlgrab(url, filename, **kwargs) def urlopen(url, **kwargs): """open the url and return a file object If a progress object or throttle specifications exist, then a special file object will be returned that supports them. The file object can be treated like any other file object. See module documentation for a description of possible kwargs. """ return default_grabber.urlopen(url, **kwargs) def urlread(url, limit=None, **kwargs): """read the url into a string, up to 'limit' bytes If the limit is exceeded, an exception will be thrown. Note that urlread is NOT intended to be used as a way of saying "I want the first N bytes" but rather 'read the whole file into memory, but don't use too much' See module documentation for a description of possible kwargs. """ return default_grabber.urlread(url, limit, **kwargs) class URLParser: """Process the URLs before passing them to urllib2. This class does several things: * add any prefix * translate a "raw" file to a proper file: url * handle any http or https auth that's encoded within the url * quote the url Only the "parse" method is called directly, and it calls sub-methods. An instance of this class is held in the options object, which means that it's easy to change the behavior by sub-classing and passing the replacement in. It need only have a method like: url, parts = urlparser.parse(url, opts) """ def parse(self, url, opts): """parse the url and return the (modified) url and its parts Note: a raw file WILL be quoted when it's converted to a URL. 
However, other urls (ones which come with a proper scheme) may or may not be quoted according to opts.quote opts.quote = 1 --> quote it opts.quote = 0 --> do not quote it opts.quote = None --> guess """ url = _to_utf8(url) quote = opts.quote if opts.prefix: url = self.add_prefix(url, opts.prefix) parts = urlparse.urlparse(url) (scheme, host, path, parm, query, frag) = parts if not scheme or (len(scheme) == 1 and scheme in string.letters): # if a scheme isn't specified, we guess that it's "file:" if url[0] not in '/\\': url = os.path.abspath(url) url = 'file:' + urllib.pathname2url(url) parts = urlparse.urlparse(url) quote = 0 # pathname2url quotes, so we won't do it again if scheme in ['http', 'https']: parts = self.process_http(parts, url) if quote is None: quote = self.guess_should_quote(parts) if quote: parts = self.quote(parts) url = urlparse.urlunparse(parts) return url, parts def add_prefix(self, url, prefix): if prefix[-1] == '/' or url[0] == '/': url = prefix + url else: url = prefix + '/' + url return url def process_http(self, parts, url): (scheme, host, path, parm, query, frag) = parts # TODO: auth-parsing here, maybe? pycurl doesn't really need it return (scheme, host, path, parm, query, frag) def quote(self, parts): """quote the URL This method quotes ONLY the path part. If you need to quote other parts, you should override this and pass in your derived class. The other alternative is to quote other parts before passing into urlgrabber. """ (scheme, host, path, parm, query, frag) = parts path = urllib.quote(path) return (scheme, host, path, parm, query, frag) hexvals = '0123456789ABCDEF' def guess_should_quote(self, parts): """ Guess whether we should quote a path. This amounts to guessing whether it's already quoted. find ' ' -> 1 find '%' -> 1 find '%XX' -> 0 else -> 1 """ (scheme, host, path, parm, query, frag) = parts if ' ' in path: return 1 ind = string.find(path, '%') if ind > -1: while ind > -1: if len(path) < ind+3: return 1 code = path[ind+1:ind+3].upper() if code[0] not in self.hexvals or \ code[1] not in self.hexvals: return 1 ind = string.find(path, '%', ind+1) return 0 return 1 class URLGrabberOptions: """Class to ease kwargs handling.""" def __init__(self, delegate=None, **kwargs): """Initialize URLGrabberOptions object. Set default values for all options and then update options specified in kwargs. """ self.delegate = delegate if delegate is None: self._set_defaults() self._set_attributes(**kwargs) def __getattr__(self, name): if self.delegate and hasattr(self.delegate, name): return getattr(self.delegate, name) raise AttributeError, name def raw_throttle(self): """Calculate raw throttle value from throttle and bandwidth values. """ if self.throttle <= 0: return 0 elif type(self.throttle) == type(0): return float(self.throttle) else: # throttle is a float return self.bandwidth * self.throttle def find_proxy(self, url, scheme): """Find the proxy to use for this URL. Use the proxies dictionary first, then libproxy. 
""" self.proxy = None if scheme not in ('ftp', 'http', 'https'): return if self.proxies: proxy = self.proxies.get(scheme) if proxy is None: if scheme == 'http': proxy = self.proxies.get('https') elif scheme == 'https': proxy = self.proxies.get('http') if proxy == '_none_': proxy = '' self.proxy = proxy return if self.libproxy: global _libproxy_cache if _libproxy_cache is None: try: import libproxy _libproxy_cache = libproxy.ProxyFactory() except: _libproxy_cache = False if _libproxy_cache: for proxy in _libproxy_cache.getProxies(url): if proxy.startswith('http://'): if DEBUG: DEBUG.info('using proxy "%s" for url %s' % (proxy, url)) self.proxy = proxy break def derive(self, **kwargs): """Create a derived URLGrabberOptions instance. This method creates a new instance and overrides the options specified in kwargs. """ return URLGrabberOptions(delegate=self, **kwargs) def _set_attributes(self, **kwargs): """Update object attributes with those provided in kwargs.""" self.__dict__.update(kwargs) if kwargs.has_key('range'): # normalize the supplied range value self.range = range_tuple_normalize(self.range) if not self.reget in [None, 'simple', 'check_timestamp']: raise URLGrabError(11, _('Illegal reget mode: %s') \ % (self.reget, )) def _set_defaults(self): """Set all options to their default values. When adding new options, make sure a default is provided here. """ self.progress_obj = None self.multi_progress_obj = None self.throttle = 1.0 self.bandwidth = 0 self.retry = None self.retrycodes = [-1,2,4,5,6,7] self.checkfunc = None self.failfunc = _do_raise self.copy_local = 0 self.close_connection = 0 self.range = None self.user_agent = 'urlgrabber/%s' % __version__ self.ip_resolve = None self.keepalive = 1 self.proxies = None self.libproxy = False self.proxy = None self.reget = None self.failure_callback = None self.interrupt_callback = None self.prefix = None self.opener = None self.cache_openers = True self.timeout = 300 self.minrate = None self.text = None self.http_headers = None self.ftp_headers = None self.data = None self.urlparser = URLParser() self.quote = None self.username = None self.password = None self.ssl_ca_cert = None # sets SSL_CAINFO - path to certdb self.ssl_context = None # no-op in pycurl self.ssl_verify_peer = True # check peer's cert for authenticityb self.ssl_verify_host = True # make sure who they are and who the cert is for matches self.ssl_key = None # client key self.ssl_key_type = 'PEM' #(or DER) self.ssl_cert = None # client cert self.ssl_cert_type = 'PEM' # (or DER) self.ssl_key_pass = None # password to access the key self.size = None # if we know how big the thing we're getting is going # to be. 
this is ultimately a MAXIMUM size for the file self.max_header_size = 2097152 #2mb seems reasonable for maximum header size self.async = None # blocking by default self.mirror_group = None self.max_connections = 5 self.timedhosts = None self.half_life = 30*24*60*60 # 30 days self.default_speed = 500e3 # 500 kBps self.ftp_disable_epsv = False self.no_cache = False self.retry_no_cache = False def __repr__(self): return self.format() def format(self, indent=' '): keys = self.__dict__.keys() if self.delegate is not None: keys.remove('delegate') keys.sort() s = '{\n' for k in keys: s = s + indent + '%-15s: %s,\n' % \ (repr(k), repr(self.__dict__[k])) if self.delegate: df = self.delegate.format(indent + ' ') s = s + indent + '%-15s: %s\n' % ("'delegate'", df) s = s + indent + '}' return s def _do_raise(obj): raise obj.exception def _run_callback(cb, obj): if not cb: return if callable(cb): return cb(obj) cb, arg, karg = cb return cb(obj, *arg, **karg) class URLGrabber(object): """Provides easy opening of URLs with a variety of options. All options are specified as kwargs. Options may be specified when the class is created and may be overridden on a per request basis. New objects inherit default values from default_grabber. """ def __init__(self, **kwargs): self.opts = URLGrabberOptions(**kwargs) def _retry(self, opts, func, *args): tries = 0 while 1: # there are only two ways out of this loop. The second has # several "sub-ways" # 1) via the return in the "try" block # 2) by some exception being raised # a) an excepton is raised that we don't "except" # b) a callback raises ANY exception # c) we're not retry-ing or have run out of retries # d) the URLGrabError code is not in retrycodes # beware of infinite loops :) tries = tries + 1 exception = None callback = None if DEBUG: DEBUG.info('attempt %i/%s: %s', tries, opts.retry, args[0]) try: r = apply(func, (opts,) + args, {}) if DEBUG: DEBUG.info('success') return r except URLGrabError, e: exception = e callback = opts.failure_callback except KeyboardInterrupt, e: exception = e callback = opts.interrupt_callback if not callback: raise if DEBUG: DEBUG.info('exception: %s', exception) if callback: if DEBUG: DEBUG.info('calling callback: %s', callback) obj = CallbackObject(exception=exception, url=args[0], tries=tries, retry=opts.retry, retry_no_cache=opts.retry_no_cache) _run_callback(callback, obj) if (opts.retry is None) or (tries == opts.retry): if DEBUG: DEBUG.info('retries exceeded, re-raising') raise retrycode = getattr(exception, 'errno', None) if (retrycode is not None) and (retrycode not in opts.retrycodes): if DEBUG: DEBUG.info('retrycode (%i) not in list %s, re-raising', retrycode, opts.retrycodes) raise if retrycode is not None and retrycode < 0 and opts.retry_no_cache: opts.no_cache = True def urlopen(self, url, opts=None, **kwargs): """open the url and return a file object If a progress object or throttle value specified when this object was created, then a special file object will be returned that supports them. The file object can be treated like any other file object. 
""" url = _to_utf8(url) opts = (opts or self.opts).derive(**kwargs) if DEBUG: DEBUG.debug('combined options: %s' % repr(opts)) (url,parts) = opts.urlparser.parse(url, opts) opts.find_proxy(url, parts[0]) def retryfunc(opts, url): return PyCurlFileObject(url, filename=None, opts=opts) return self._retry(opts, retryfunc, url) def urlgrab(self, url, filename=None, opts=None, **kwargs): """grab the file at <url> and make a local copy at <filename> If filename is none, the basename of the url is used. urlgrab returns the filename of the local file, which may be different from the passed-in filename if copy_local == 0. """ url = _to_utf8(url) opts = (opts or self.opts).derive(**kwargs) if DEBUG: DEBUG.debug('combined options: %s' % repr(opts)) (url,parts) = opts.urlparser.parse(url, opts) (scheme, host, path, parm, query, frag) = parts opts.find_proxy(url, scheme) if filename is None: filename = os.path.basename( urllib.unquote(path) ) if not filename: # This is better than nothing. filename = 'index.html' if scheme == 'file' and not opts.copy_local: # just return the name of the local file - don't make a # copy currently path = urllib.url2pathname(path) if host: path = os.path.normpath('//' + host + path) if not os.path.exists(path): err = URLGrabError(2, _('Local file does not exist: %s') % (path, )) err.url = url raise err elif not os.path.isfile(path): err = URLGrabError(3, _('Not a normal file: %s') % (path, )) err.url = url raise err elif not opts.range: if not opts.checkfunc is None: obj = CallbackObject(filename=path, url=url) _run_callback(opts.checkfunc, obj) return path if opts.async: opts.url = url opts.filename = filename opts.size = int(opts.size or 0) _async_queue.append(opts) return filename def retryfunc(opts, url, filename): fo = PyCurlFileObject(url, filename, opts) try: fo._do_grab() if fo._tm_last: dlsz = fo._tm_last[0] - fo._tm_first[0] dltm = fo._tm_last[1] - fo._tm_first[1] _TH.update(url, dlsz, dltm, None) if not opts.checkfunc is None: obj = CallbackObject(filename=filename, url=url) _run_callback(opts.checkfunc, obj) finally: fo.close() return filename try: return self._retry(opts, retryfunc, url, filename) except URLGrabError, e: _TH.update(url, 0, 0, e) opts.exception = e return _run_callback(opts.failfunc, opts) def urlread(self, url, limit=None, opts=None, **kwargs): """read the url into a string, up to 'limit' bytes If the limit is exceeded, an exception will be thrown. Note that urlread is NOT intended to be used as a way of saying "I want the first N bytes" but rather 'read the whole file into memory, but don't use too much' """ url = _to_utf8(url) opts = (opts or self.opts).derive(**kwargs) if DEBUG: DEBUG.debug('combined options: %s' % repr(opts)) (url,parts) = opts.urlparser.parse(url, opts) opts.find_proxy(url, parts[0]) if limit is not None: limit = limit + 1 def retryfunc(opts, url, limit): fo = PyCurlFileObject(url, filename=None, opts=opts) s = '' try: # this is an unfortunate thing. Some file-like objects # have a default "limit" of None, while the built-in (real) # file objects have -1. They each break the other, so for # now, we just force the default if necessary. 
if limit is None: s = fo.read() else: s = fo.read(limit) if not opts.checkfunc is None: obj = CallbackObject(data=s, url=url) _run_callback(opts.checkfunc, obj) finally: fo.close() return s s = self._retry(opts, retryfunc, url, limit) if limit and len(s) > limit: err = URLGrabError(8, _('Exceeded limit (%i): %s') % (limit, url)) err.url = url raise err return s def _make_callback(self, callback_obj): # not used, left for compatibility if callable(callback_obj): return callback_obj, (), {} else: return callback_obj # create the default URLGrabber used by urlXXX functions. # NOTE: actual defaults are set in URLGrabberOptions default_grabber = URLGrabber() class PyCurlFileObject(object): def __init__(self, url, filename, opts): self.fo = None self._hdr_dump = '' self._parsed_hdr = None self.url = url self.scheme = urlparse.urlsplit(self.url)[0] self.filename = filename self.append = False self.reget_time = None self.opts = opts if self.opts.reget == 'check_timestamp': raise NotImplementedError, "check_timestamp regets are not implemented in this ver of urlgrabber. Please report this." self._complete = False self._rbuf = '' self._rbufsize = 1024*8 self._ttime = time.time() self._tsize = 0 self._amount_read = 0 self._reget_length = 0 self._range = None self._prog_running = False self._error = (None, None) self.size = 0 self._hdr_ended = False self._tm_first = None self._tm_last = None self._do_open() def __getattr__(self, name): """This effectively allows us to wrap at the instance level. Any attribute not found in _this_ object will be searched for in self.fo. This includes methods.""" if hasattr(self.fo, name): return getattr(self.fo, name) raise AttributeError, name def _retrieve(self, buf): try: tm = self._amount_read + len(buf), time.time() if self._tm_first is None: self._tm_first = tm else: self._tm_last = tm if not self._prog_running: if self.opts.progress_obj: size = self.size + self._reget_length self.opts.progress_obj.start(self._prog_reportname, urllib.unquote(self.url), self._prog_basename, size=size, text=self.opts.text) self._prog_running = True self.opts.progress_obj.update(self._amount_read) self._amount_read += len(buf) try: if self._range: # client-side ranges pos = self._amount_read - len(buf) start = self._range[0] - pos stop = self._range[1] - pos if start < len(buf) and stop > 0: self.fo.write(buf[max(start, 0):stop]) else: self.fo.write(buf) except IOError, e: self._cb_error = URLGrabError(16, exception2msg(e)) return -1 return len(buf) except KeyboardInterrupt: return -1 def _hdr_retrieve(self, buf): if self._hdr_ended: self._hdr_dump = '' self.size = 0 self._hdr_ended = False if self._over_max_size(cur=len(self._hdr_dump), max_size=self.opts.max_header_size): return -1 try: # we have to get the size before we do the progress obj start # but we can't do that w/o making it do 2 connects, which sucks # so we cheat and stuff it in here in the hdr_retrieve if self.scheme in ['http','https']: if buf.lower().find('content-length:') != -1: length = buf.split(':')[1] self.size = int(length) elif (self.append or self.opts.range) and self._hdr_dump == '' and ' 200 ' in buf: # reget was attempted but server sends it all # undo what we did in _build_range() self.append = False self.reget_time = None self._amount_read = 0 self._reget_length = 0 self._range = self.opts.range self.fo.truncate(0) elif self.scheme in ['ftp']: s = None if buf.startswith('213 '): s = buf[3:].strip() if len(s) >= 14: s = None # ignore MDTM responses elif buf.startswith('150 '): s = parse150(buf) if s: 
self.size = int(s) if buf.lower().find('location') != -1: location = ':'.join(buf.split(':')[1:]) location = location.strip() self.scheme = urlparse.urlsplit(location)[0] self.url = location self._hdr_dump += buf if len(self._hdr_dump) != 0 and buf == '\r\n': self._hdr_ended = True if DEBUG: DEBUG.debug('header ended:') return len(buf) except KeyboardInterrupt: return pycurl.READFUNC_ABORT def _return_hdr_obj(self): if self._parsed_hdr: return self._parsed_hdr statusend = self._hdr_dump.find('\n') statusend += 1 # ridiculous as it may seem. hdrfp = StringIO() hdrfp.write(self._hdr_dump[statusend:]) hdrfp.seek(0) self._parsed_hdr = mimetools.Message(hdrfp) return self._parsed_hdr hdr = property(_return_hdr_obj) http_code = property(fget= lambda self: self.curl_obj.getinfo(pycurl.RESPONSE_CODE)) def _set_opts(self, opts={}): # XXX if not opts: opts = self.opts # keepalives if not opts.keepalive: self.curl_obj.setopt(pycurl.FORBID_REUSE, 1) # defaults we're always going to set self.curl_obj.setopt(pycurl.NOPROGRESS, False) self.curl_obj.setopt(pycurl.NOSIGNAL, True) self.curl_obj.setopt(pycurl.WRITEFUNCTION, self._retrieve) self.curl_obj.setopt(pycurl.HEADERFUNCTION, self._hdr_retrieve) self.curl_obj.setopt(pycurl.PROGRESSFUNCTION, self._progress_update) self.curl_obj.setopt(pycurl.FAILONERROR, True) self.curl_obj.setopt(pycurl.OPT_FILETIME, True) self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True) if DEBUG and DEBUG.level <= 10: self.curl_obj.setopt(pycurl.VERBOSE, True) if opts.user_agent: self.curl_obj.setopt(pycurl.USERAGENT, opts.user_agent) if opts.ip_resolve: # Default is: IPRESOLVE_WHATEVER ipr = opts.ip_resolve.lower() if ipr == 'whatever': # Do we need this? self.curl_obj.setopt(pycurl.IPRESOLVE,pycurl.IPRESOLVE_WHATEVER) if ipr == 'ipv4': self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) if ipr == 'ipv6': self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V6) # maybe to be options later self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True) self.curl_obj.setopt(pycurl.MAXREDIRS, 5) # timeouts timeout = 300 if hasattr(opts, 'timeout'): timeout = int(opts.timeout or 0) self.curl_obj.setopt(pycurl.CONNECTTIMEOUT, timeout) self.curl_obj.setopt(pycurl.LOW_SPEED_LIMIT, opts.minrate or 1000) self.curl_obj.setopt(pycurl.LOW_SPEED_TIME, timeout) # ssl options if self.scheme == 'https': if opts.ssl_ca_cert: # this may do ZERO with nss according to curl docs self.curl_obj.setopt(pycurl.CAPATH, opts.ssl_ca_cert) self.curl_obj.setopt(pycurl.CAINFO, opts.ssl_ca_cert) self.curl_obj.setopt(pycurl.SSL_VERIFYPEER, opts.ssl_verify_peer) if opts.ssl_verify_host: # 1 is meaningless to curl self.curl_obj.setopt(pycurl.SSL_VERIFYHOST, 2) if opts.ssl_key: self.curl_obj.setopt(pycurl.SSLKEY, opts.ssl_key) if opts.ssl_key_type: self.curl_obj.setopt(pycurl.SSLKEYTYPE, opts.ssl_key_type) if opts.ssl_cert: self.curl_obj.setopt(pycurl.SSLCERT, opts.ssl_cert) # if we have a client side cert - turn off reuse b/c nss is odd self.curl_obj.setopt(pycurl.FORBID_REUSE, 1) if opts.ssl_cert_type: self.curl_obj.setopt(pycurl.SSLCERTTYPE, opts.ssl_cert_type) if opts.ssl_key_pass: self.curl_obj.setopt(pycurl.SSLKEYPASSWD, opts.ssl_key_pass) #headers: if self.scheme in ('http', 'https'): headers = [] if opts.http_headers is not None: for (tag, content) in opts.http_headers: headers.append('%s:%s' % (tag, content)) if opts.no_cache: headers.append('Pragma:no-cache') if headers: self.curl_obj.setopt(pycurl.HTTPHEADER, headers) # ranges: if opts.range or opts.reget: range_str = self._build_range() if range_str: 
self.curl_obj.setopt(pycurl.RANGE, range_str) # throttle/bandwidth if hasattr(opts, 'raw_throttle') and opts.raw_throttle(): self.curl_obj.setopt(pycurl.MAX_RECV_SPEED_LARGE, int(opts.raw_throttle())) # proxy if opts.proxy is not None: self.curl_obj.setopt(pycurl.PROXY, opts.proxy) self.curl_obj.setopt(pycurl.PROXYAUTH, # All but Kerberos. BZ 769254 pycurl.HTTPAUTH_ANY - pycurl.HTTPAUTH_GSSNEGOTIATE) if opts.username and opts.password: if self.scheme in ('http', 'https'): self.curl_obj.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_ANY) if opts.username and opts.password: # apparently when applying them as curlopts they do not require quoting of any kind userpwd = '%s:%s' % (opts.username, opts.password) self.curl_obj.setopt(pycurl.USERPWD, userpwd) #posts - simple - expects the fields as they are if opts.data: self.curl_obj.setopt(pycurl.POST, True) self.curl_obj.setopt(pycurl.POSTFIELDS, _to_utf8(opts.data)) # ftp if opts.ftp_disable_epsv: self.curl_obj.setopt(pycurl.FTP_USE_EPSV, False) # our url self.curl_obj.setopt(pycurl.URL, self.url) def _do_perform(self): if self._complete: return try: self.curl_obj.perform() except pycurl.error, e: # XXX - break some of these out a bit more clearly # to other URLGrabErrors from # http://curl.haxx.se/libcurl/c/libcurl-errors.html # this covers e.args[0] == 22 pretty well - which will be common code = self.http_code errcode = e.args[0] errurl = urllib.unquote(self.url) if self._error[0]: errcode = self._error[0] if errcode == 23 and 200 <= code <= 299: # this is probably wrong but ultimately this is what happens # we have a legit http code and a pycurl 'writer failed' code # which almost always means something aborted it from outside # since we cannot know what it is -I'm banking on it being # a ctrl-c. XXXX - if there's a way of going back two raises to # figure out what aborted the pycurl process FIXME raise getattr(self, '_cb_error', KeyboardInterrupt) elif errcode == 28: err = URLGrabError(12, _('Timeout on %s: %s') % (errurl, e)) err.url = errurl raise err elif errcode == 42: # this is probably wrong but ultimately this is what happens # we have a legit http code and a pycurl 'writer failed' code # which almost always means something aborted it from outside # since we cannot know what it is -I'm banking on it being # a ctrl-c. 
XXXX - if there's a way of going back two raises to # figure out what aborted the pycurl process FIXME raise KeyboardInterrupt else: pyerr2str = { 5 : _("Couldn't resolve proxy"), 6 : _("Couldn't resolve host"), 7 : _("Couldn't connect"), 8 : _("Bad reply to FTP server"), 9 : _("Access denied"), 11 : _("Bad reply to FTP pass"), 13 : _("Bad reply to FTP pasv"), 14 : _("Bad reply to FTP 227"), 15 : _("Couldn't get FTP host"), 17 : _("Couldn't set FTP type"), 18 : _("Partial file"), 19 : _("FTP RETR command failed"), 22 : _("HTTP returned error"), 23 : _("Write error"), 25 : _("Upload failed"), 26 : _("Read error"), 27 : _("Out of Memory"), 28 : _("Operation timed out"), 30 : _("FTP PORT command failed"), 31 : _("FTP REST command failed"), 33 : _("Range failed"), 34 : _("HTTP POST failed"), 35 : _("SSL CONNECT failed"), 36 : _("Couldn't resume download"), 37 : _("Couldn't read file"), 42 : _("Aborted by callback"), 47 : _("Too many redirects"), 51 : _("Peer certificate failed verification"), 52 : _("Got nothing: SSL certificate expired?"), 53 : _("SSL engine not found"), 54 : _("SSL engine set failed"), 55 : _("Network error send()"), 56 : _("Network error recv()"), 58 : _("Local certificate failed"), 59 : _("SSL set cipher failed"), 60 : _("Local CA certificate failed"), 61 : _("HTTP bad transfer encoding"), 63 : _("Maximum file size exceeded"), 64 : _("FTP SSL failed"), 67 : _("Authentication failure"), 70 : _("Out of disk space on server"), 73 : _("Remove file exists"), 77 : _("Problem with the SSL CA cert (path? access rights?)"), } errstr = str(e.args[1]) or pyerr2str.get(errcode, '<Unknown>') if code and not 200 <= code <= 299: msg = '%s Error %d - %s' % (self.scheme.upper(), code, self.scheme in ('http', 'https') and responses.get(code) or errstr) else: msg = 'curl#%s - "%s"' % (errcode, errstr) code = errcode err = URLGrabError(14, msg) err.url = errurl err.code = code raise err else: if self._error[1]: msg = self._error[1] err = URLGrabError(14, msg) err.url = urllib.unquote(self.url) raise err def _do_open(self): self.curl_obj = _curl_cache self.curl_obj.reset() # reset all old settings away, just in case # setup any ranges self._set_opts() self._do_grab() return self.fo def _add_headers(self): pass def _build_range(self): reget_length = 0 rt = None if self.opts.reget and type(self.filename) in types.StringTypes: # we have reget turned on and we're dumping to a file try: s = os.stat(self.filename) except OSError: pass else: self.reget_time = s[stat.ST_MTIME] reget_length = s[stat.ST_SIZE] # Set initial length when regetting self._amount_read = reget_length self._reget_length = reget_length # set where we started from, too rt = reget_length, '' self.append = 1 if self.opts.range: rt = self.opts.range if rt[0] is None: rt = (0, rt[1]) rt = (rt[0] + reget_length, rt[1]) if rt: header = range_tuple_to_header(rt) if header: return header.split('=')[1] def _make_request(self, req, opener): #XXXX # This doesn't do anything really, but we could use this # instead of do_open() to catch a lot of crap errors as # mstenner did before here return (self.fo, self.hdr) try: if self.opts.timeout: old_to = socket.getdefaulttimeout() socket.setdefaulttimeout(self.opts.timeout) try: fo = opener.open(req) finally: socket.setdefaulttimeout(old_to) else: fo = opener.open(req) hdr = fo.info() except ValueError, e: err = URLGrabError(1, _('Bad URL: %s : %s') % (self.url, e, )) err.url = self.url raise err except RangeError, e: err = URLGrabError(9, _('%s on %s') % (e, self.url)) err.url = self.url raise err 
except urllib2.HTTPError, e: new_e = URLGrabError(14, _('%s on %s') % (e, self.url)) new_e.code = e.code new_e.exception = e new_e.url = self.url raise new_e except IOError, e: if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout): err = URLGrabError(12, _('Timeout on %s: %s') % (self.url, e)) err.url = self.url raise err else: err = URLGrabError(4, _('IOError on %s: %s') % (self.url, e)) err.url = self.url raise err except OSError, e: err = URLGrabError(5, _('%s on %s') % (e, self.url)) err.url = self.url raise err except HTTPException, e: err = URLGrabError(7, _('HTTP Exception (%s) on %s: %s') % \ (e.__class__.__name__, self.url, e)) err.url = self.url raise err else: return (fo, hdr) def _do_grab(self): """dump the file to a filename or StringIO buffer""" if self._complete: return _was_filename = False if type(self.filename) in types.StringTypes and self.filename: _was_filename = True self._prog_reportname = str(self.filename) self._prog_basename = os.path.basename(self.filename) if self.append: mode = 'ab' else: mode = 'wb' if DEBUG: DEBUG.info('opening local file "%s" with mode %s' % \ (self.filename, mode)) try: self.fo = open(self.filename, mode) except IOError, e: err = URLGrabError(16, _(\ 'error opening local file from %s, IOError: %s') % (self.url, e)) err.url = self.url raise err else: self._prog_reportname = 'MEMORY' self._prog_basename = 'MEMORY' self.fo = StringIO() # if this is to be a tempfile instead.... # it just makes crap in the tempdir #fh, self._temp_name = mkstemp() #self.fo = open(self._temp_name, 'wb') try: self._do_perform() except URLGrabError, e: self.fo.flush() self.fo.close() raise e if _was_filename: # close it up self.fo.flush() self.fo.close() # Set the URL where we got it from: if xattr is not None: # See: http://www.freedesktop.org/wiki/CommonExtendedAttributes try: xattr.set(self.filename, 'user.xdg.origin.url', self.url) except: pass # URL too long. = IOError ... ignore everything. # set the time mod_time = self.curl_obj.getinfo(pycurl.INFO_FILETIME) if mod_time != -1: try: os.utime(self.filename, (mod_time, mod_time)) except OSError, e: err = URLGrabError(16, _(\ 'error setting timestamp on file %s from %s, OSError: %s') % (self.filename, self.url, e)) err.url = self.url raise err # re open it try: self.fo = open(self.filename, 'r') except IOError, e: err = URLGrabError(16, _(\ 'error opening file from %s, IOError: %s') % (self.url, e)) err.url = self.url raise err else: #self.fo = open(self._temp_name, 'r') self.fo.seek(0) self._complete = True def _fill_buffer(self, amt=None): """fill the buffer to contain at least 'amt' bytes by reading from the underlying file object. If amt is None, then it will read until it gets nothing more. It updates the progress meter and throttles after every self._rbufsize bytes.""" # the _rbuf test is only in this first 'if' for speed. It's not # logically necessary if self._rbuf and not amt is None: L = len(self._rbuf) if amt > L: amt = amt - L else: return # if we've made it here, then we don't have enough in the buffer # and we need to read more. 
if not self._complete: self._do_grab() #XXX cheater - change on ranges buf = [self._rbuf] bufsize = len(self._rbuf) while amt is None or amt: # first, delay if necessary for throttling reasons if self.opts.raw_throttle(): diff = self._tsize/self.opts.raw_throttle() - \ (time.time() - self._ttime) if diff > 0: time.sleep(diff) self._ttime = time.time() # now read some data, up to self._rbufsize if amt is None: readamount = self._rbufsize else: readamount = min(amt, self._rbufsize) try: new = self.fo.read(readamount) except socket.error, e: err = URLGrabError(4, _('Socket Error on %s: %s') % (self.url, e)) err.url = self.url raise err except socket.timeout, e: raise URLGrabError(12, _('Timeout on %s: %s') % (self.url, e)) err.url = self.url raise err except IOError, e: raise URLGrabError(4, _('IOError on %s: %s') %(self.url, e)) err.url = self.url raise err newsize = len(new) if not newsize: break # no more to read if amt: amt = amt - newsize buf.append(new) bufsize = bufsize + newsize self._tsize = newsize self._amount_read = self._amount_read + newsize #if self.opts.progress_obj: # self.opts.progress_obj.update(self._amount_read) self._rbuf = string.join(buf, '') return def _progress_update(self, download_total, downloaded, upload_total, uploaded): if self._over_max_size(cur=self._amount_read-self._reget_length): return -1 try: if self._prog_running: downloaded += self._reget_length self.opts.progress_obj.update(downloaded) except (KeyboardInterrupt, IOError): return -1 def _over_max_size(self, cur, max_size=None): if not max_size: if not self.opts.size: max_size = self.size else: max_size = self.opts.size if not max_size: return False # if we have None for all of the Max then this is dumb if cur > int(float(max_size) * 1.10): msg = _("Downloaded more than max size for %s: %s > %s") \ % (self.url, cur, max_size) self._error = (pycurl.E_FILESIZE_EXCEEDED, msg) return True return False def read(self, amt=None): self._fill_buffer(amt) if amt is None: s, self._rbuf = self._rbuf, '' else: s, self._rbuf = self._rbuf[:amt], self._rbuf[amt:] return s def readline(self, limit=-1): if not self._complete: self._do_grab() return self.fo.readline() i = string.find(self._rbuf, '\n') while i < 0 and not (0 < limit <= len(self._rbuf)): L = len(self._rbuf) self._fill_buffer(L + self._rbufsize) if not len(self._rbuf) > L: break i = string.find(self._rbuf, '\n', L) if i < 0: i = len(self._rbuf) else: i = i+1 if 0 <= limit < len(self._rbuf): i = limit s, self._rbuf = self._rbuf[:i], self._rbuf[i:] return s def close(self): if self._prog_running: self.opts.progress_obj.end(self._amount_read) self.fo.close() def geturl(self): """ Provide the geturl() method, used to be got from urllib.addinfourl, via. urllib.URLopener.* """ return self.url if hasattr(pycurl, 'GLOBAL_ACK_EINTR'): # fail immediately on ctrl-c pycurl.global_init(pycurl.GLOBAL_DEFAULT | pycurl.GLOBAL_ACK_EINTR) _curl_cache = pycurl.Curl() # make one and reuse it over and over and over def reset_curl_obj(): """To make sure curl has reread the network/dns info we force a reload""" global _curl_cache _curl_cache.close() _curl_cache = pycurl.Curl() _libproxy_cache = None ##################################################################### # DEPRECATED FUNCTIONS def set_throttle(new_throttle): """Deprecated. Use: default_grabber.throttle = new_throttle""" default_grabber.throttle = new_throttle def set_bandwidth(new_bandwidth): """Deprecated. 
Use: default_grabber.bandwidth = new_bandwidth""" default_grabber.bandwidth = new_bandwidth def set_progress_obj(new_progress_obj): """Deprecated. Use: default_grabber.progress_obj = new_progress_obj""" default_grabber.progress_obj = new_progress_obj def set_user_agent(new_user_agent): """Deprecated. Use: default_grabber.user_agent = new_user_agent""" default_grabber.user_agent = new_user_agent def retrygrab(url, filename=None, copy_local=0, close_connection=0, progress_obj=None, throttle=None, bandwidth=None, numtries=3, retrycodes=[-1,2,4,5,6,7], checkfunc=None): """Deprecated. Use: urlgrab() with the retry arg instead""" kwargs = {'copy_local' : copy_local, 'close_connection' : close_connection, 'progress_obj' : progress_obj, 'throttle' : throttle, 'bandwidth' : bandwidth, 'retry' : numtries, 'retrycodes' : retrycodes, 'checkfunc' : checkfunc } return urlgrab(url, filename, **kwargs) ##################################################################### # Serializer + parser: A replacement of the rather bulky Json code. # # - handles basic python literals, lists and tuples. # - serialized strings never contain ' ' or '\n' # ##################################################################### _quoter_map = {} for c in '%[(,)] \n': _quoter_map[c] = '%%%02x' % ord(c) del c def _dumps(v): if v is None: return 'None' if v is True: return 'True' if v is False: return 'False' if type(v) in (int, long, float): return str(v) if type(v) == unicode: v = v.encode('UTF8') if type(v) == str: def quoter(c): return _quoter_map.get(c, c) return "'%s'" % ''.join(map(quoter, v)) if type(v) == tuple: return "(%s)" % ','.join(map(_dumps, v)) if type(v) == list: return "[%s]" % ','.join(map(_dumps, v)) raise TypeError, 'Can\'t serialize %s' % v def _loads(s): def decode(v): if v == 'None': return None if v == 'True': return True if v == 'False': return False try: return int(v) except ValueError: pass try: return float(v) except ValueError: pass if len(v) >= 2 and v[0] == v[-1] == "'": ret = []; i = 1 while True: j = v.find('%', i) ret.append(v[i:j]) # skips the final "'" if j == -1: break ret.append(chr(int(v[j + 1:j + 3], 16))) i = j + 3 v = ''.join(ret) return v stk = None l = [] i = j = 0 while True: if j == len(s) or s[j] in ',)]': if j > i: l.append(decode(s[i:j])) if j == len(s): break if s[j] in ')]': if s[j] == ')': l = tuple(l) stk[0].append(l) l, stk = stk i = j = j + 1 elif s[j] in '[(': stk = l, stk l = [] i = j = j + 1 else: j += 1 # safe because '[(,)]' are quoted if stk: raise ValueError if len(l) == 1: l = l[0] return l ##################################################################### # External downloader process ##################################################################### def _readlines(fd): buf = os.read(fd, 4096) if not buf: return None # whole lines only, no buffering while buf[-1] != '\n': buf += os.read(fd, 4096) return buf[:-1].split('\n') import subprocess class _ExternalDownloader: def __init__(self): self.popen = subprocess.Popen( '/usr/libexec/urlgrabber-ext-down', stdin = subprocess.PIPE, stdout = subprocess.PIPE, ) self.stdin = self.popen.stdin.fileno() self.stdout = self.popen.stdout.fileno() self.running = {} self.cnt = 0 # list of options we pass to downloader _options = ( 'url', 'filename', 'timeout', 'minrate', 'close_connection', 'keepalive', 'throttle', 'bandwidth', 'range', 'reget', 'user_agent', 'http_headers', 'ftp_headers', 'proxy', 'prefix', 'username', 'password', 'ssl_ca_cert', 'ssl_cert', 'ssl_cert_type', 'ssl_key', 'ssl_key_type', 'ssl_key_pass', 
'ssl_verify_peer', 'ssl_verify_host', 'size', 'max_header_size', 'ip_resolve', 'ftp_disable_epsv', 'no_cache', ) def start(self, opts): arg = [] for k in self._options: v = getattr(opts, k) if v is None: continue arg.append('%s=%s' % (k, _dumps(v))) if opts.progress_obj and opts.multi_progress_obj: arg.append('progress_obj=True') arg = ' '.join(arg) if DEBUG: DEBUG.info('attempt %i/%s: %s', opts.tries, opts.retry, opts.url) self.cnt += 1 self.running[self.cnt] = opts os.write(self.stdin, arg +'\n') def perform(self): ret = [] lines = _readlines(self.stdout) if not lines: if DEBUG: DEBUG.info('downloader died') raise KeyboardInterrupt for line in lines: # parse downloader output line = line.split(' ', 6) _id, size = map(int, line[:2]) if len(line) == 2: self.running[_id]._progress.update(size) continue # job done opts = self.running.pop(_id) if line[4] == 'OK': ug_err = None if DEBUG: DEBUG.info('success') else: ug_err = URLGrabError(int(line[4]), line[6]) if line[5] != '0': ug_err.code = int(line[5]) if DEBUG: DEBUG.info('failure: %s', ug_err) _TH.update(opts.url, int(line[2]), float(line[3]), ug_err, opts.async[0]) ret.append((opts, size, ug_err)) return ret def abort(self): self.popen.stdin.close() self.popen.stdout.close() self.popen.wait() class _ExternalDownloaderPool: def __init__(self): self.epoll = select.epoll() self.running = {} self.cache = {} def start(self, opts): host = urlparse.urlsplit(opts.url).netloc dl = self.cache.pop(host, None) if not dl: dl = _ExternalDownloader() fl = fcntl.fcntl(dl.stdin, fcntl.F_GETFD) fcntl.fcntl(dl.stdin, fcntl.F_SETFD, fl | fcntl.FD_CLOEXEC) self.epoll.register(dl.stdout, select.EPOLLIN) self.running[dl.stdout] = dl dl.start(opts) def perform(self): ret = [] for fd, event in self.epoll.poll(): if event & select.EPOLLHUP: if DEBUG: DEBUG.info('downloader died') raise KeyboardInterrupt assert event & select.EPOLLIN done = self.running[fd].perform() if not done: continue assert len(done) == 1 ret.extend(done) # dl finished, move it to the cache host = urlparse.urlsplit(done[0][0].url).netloc if host in self.cache: self.cache[host].abort() self.epoll.unregister(fd) self.cache[host] = self.running.pop(fd) return ret def abort(self): for dl in self.running.values(): self.epoll.unregister(dl.stdout) dl.abort() for dl in self.cache.values(): dl.abort() ##################################################################### # High level async API ##################################################################### _async_queue = [] def parallel_wait(meter=None): '''Process queued requests in parallel. 
''' # calculate total sizes meters = {} for opts in _async_queue: if opts.progress_obj and opts.multi_progress_obj: count, total = meters.get(opts.multi_progress_obj) or (0, 0) meters[opts.multi_progress_obj] = count + 1, total + opts.size # start multi-file meters for meter in meters: count, total = meters[meter] meter.start(count, total) dl = _ExternalDownloaderPool() host_con = {} # current host connection counts single = set() # hosts in single connection mode retry_queue = [] def start(opts, tries): opts.tries = tries try: dl.start(opts) except OSError, e: # can't spawn downloader, give up immediately opts.exception = URLGrabError(5, exception2msg(e)) _run_callback(opts.failfunc, opts) return key, limit = opts.async host_con[key] = host_con.get(key, 0) + 1 if opts.progress_obj: if opts.multi_progress_obj: opts._progress = opts.multi_progress_obj.newMeter() opts._progress.start(text=opts.text) else: opts._progress = time.time() # no updates def perform(): for opts, size, ug_err in dl.perform(): key, limit = opts.async host_con[key] -= 1 if ug_err is None: if opts.checkfunc: try: _run_callback(opts.checkfunc, opts) except URLGrabError, ug_err: pass if opts.progress_obj: if opts.multi_progress_obj: if ug_err: opts._progress.failure(None) else: opts.multi_progress_obj.re.total += size - opts.size # correct totals opts._progress.end(size) opts.multi_progress_obj.removeMeter(opts._progress) else: opts.progress_obj.start(text=opts.text, now=opts._progress) opts.progress_obj.update(size) opts.progress_obj.end(size) del opts._progress if ug_err is None: continue if limit != 1 and key not in single and ug_err.errno in (12, 14): # One possible cause is connection-limited server. # Turn on the max_connections=1 override. BZ 853432 if DEBUG: DEBUG.info('max_connections(%s) %s => 1', key, limit) single.add(key) # When using multi-downloader the parent's _curl_cache # object is idle. Kill it, as it might use keepalive=1. 
reset_curl_obj() retry = opts.retry or 0 if opts.failure_callback: opts.exception = ug_err try: _run_callback(opts.failure_callback, opts) except URLGrabError, ug_err: retry = 0 # no retries if opts.tries < retry and ug_err.errno in opts.retrycodes: if ug_err.errno < 0 and opts.retry_no_cache: opts.no_cache = True start(opts, opts.tries + 1) # simple retry continue if opts.mirror_group: mg, errors, failed, removed = opts.mirror_group errors.append((opts.url, exception2msg(ug_err))) failed[key] = failed.get(key, 0) + 1 opts.mirror = key opts.exception = ug_err action = mg.default_action or {} if mg.failure_callback: opts.tries = len(errors) action = dict(action) # update only the copy action.update(_run_callback(mg.failure_callback, opts)) if not action.get('fail', 0): # mask this mirror and retry if action.get('remove', 1): removed.add(key) retry_queue.append(opts) continue # fail=1 from callback ug_err.errors = errors # urlgrab failed opts.exception = ug_err _run_callback(opts.failfunc, opts) try: retry_idx = idx = 0 while True: if retry_idx < len(retry_queue): # retries first opts = retry_queue[retry_idx] retry_idx += 1 elif idx < len(_async_queue): # handle next request opts = _async_queue[idx] idx += 1 else: # both queues are empty if not dl.running: break perform() continue # check global limit while len(dl.running) >= default_grabber.opts.max_connections: perform() if DEBUG: DEBUG.info('max_connections: %d/%d', len(dl.running), default_grabber.opts.max_connections) if opts.mirror_group: mg, errors, failed, removed = opts.mirror_group # find the best mirror best = None best_speed = None for mirror in mg.mirrors: key = mirror['mirror'] if key in removed: continue # estimate mirror speed speed, fail = _TH.estimate(key) speed /= 1 + host_con.get(key, 0) # order by: least failures, private flag, best speed # ignore 'private' flag if there were failures private = not fail and mirror.get('kwargs', {}).get('private', False) speed = -failed.get(key, 0), private, speed if best is None or speed > best_speed: best = mirror best_speed = speed if best is None: opts.exception = URLGrabError(256, _('No more mirrors to try.')) opts.exception.errors = errors _run_callback(opts.failfunc, opts) continue # update the grabber object, apply mirror kwargs grabber = best.get('grabber') or mg.grabber opts.delegate = grabber.opts.derive(**best.get('kwargs', {})) # update the current mirror and limit key = best['mirror'] limit = best.get('kwargs', {}).get('max_connections') opts.async = key, limit # update URL and proxy url = mg._join_url(key, opts.relative_url) url, parts = opts.urlparser.parse(url, opts) opts.find_proxy(url, parts[0]) opts.url = url # check host limit, then start key, limit = opts.async if key in single: limit = 1 while host_con.get(key, 0) >= (limit or 2): perform() if DEBUG: DEBUG.info('max_connections(%s): %d/%s', key, host_con.get(key, 0), limit) start(opts, 1) except IOError, e: if e.errno != 4: raise raise KeyboardInterrupt finally: dl.abort() for meter in meters: meter.end() del _async_queue[:] _TH.save() ##################################################################### # Host bandwidth estimation ##################################################################### class _TH: hosts = {} dirty = None @staticmethod def load(): filename = default_grabber.opts.timedhosts if filename and _TH.dirty is None: try: now = int(time.time()) for line in open(filename): try: host, speed, fail, ts = line.rsplit(' ', 3) _TH.hosts[host] = int(speed), int(fail), min(int(ts), now) except ValueError: 
if DEBUG: DEBUG.info('Error parsing timedhosts: line "%s"', line) except IOError: pass _TH.dirty = False @staticmethod def save(): filename = default_grabber.opts.timedhosts if filename and _TH.dirty is True: tmp = '%s.%d' % (filename, os.getpid()) try: f = open(tmp, 'w') for host in _TH.hosts: f.write(host + ' %d %d %d\n' % _TH.hosts[host]) f.close() os.rename(tmp, filename) except IOError: pass _TH.dirty = False @staticmethod def update(url, dl_size, dl_time, ug_err, baseurl=None): # Use hostname from URL. If it's a file:// URL, use baseurl. # If no baseurl, do not update timedhosts. host = urlparse.urlsplit(url).netloc.split('@')[-1] or baseurl if not host: return _TH.load() speed, fail, ts = _TH.hosts.get(host) or (0, 0, 0) now = time.time() if ug_err is None: # defer first update if the file was small. BZ 851178. if not ts and dl_size < 1e6: return # k1: the older, the less useful # k2: <500ms readings are less reliable # speeds vary, use 10:1 smoothing k1 = 2**((ts - now) / default_grabber.opts.half_life) k2 = min(dl_time / .500, 1.0) / 10 if k2 > 0: speed = (k1 * speed + k2 * dl_size / dl_time) / (k1 + k2) fail = 0 elif getattr(ug_err, 'code', None) == 404: if not ts: return # 1st update, avoid speed=0 fail = 0 # alive, at least else: fail += 1 # seems dead _TH.hosts[host] = speed, fail, now _TH.dirty = True @staticmethod def estimate(baseurl): _TH.load() # Use just the hostname, unless it's a file:// baseurl. host = urlparse.urlsplit(baseurl).netloc.split('@')[-1] or baseurl default_speed = default_grabber.opts.default_speed try: speed, fail, ts = _TH.hosts[host] except KeyError: return default_speed, 0 speed *= 2**-fail k = 2**((ts - time.time()) / default_grabber.opts.half_life) speed = k * speed + (1 - k) * default_speed return speed, fail ##################################################################### # TESTING def _main_test(): try: url, filename = sys.argv[1:3] except ValueError: print 'usage:', sys.argv[0], \ '<url> <filename> [copy_local=0|1] [close_connection=0|1]' sys.exit() kwargs = {} for a in sys.argv[3:]: k, v = string.split(a, '=', 1) kwargs[k] = int(v) set_throttle(1.0) set_bandwidth(32 * 1024) print "throttle: %s, throttle bandwidth: %s B/s" % (default_grabber.throttle, default_grabber.bandwidth) try: from progress import text_progress_meter except ImportError, e: pass else: kwargs['progress_obj'] = text_progress_meter() try: name = apply(urlgrab, (url, filename), kwargs) except URLGrabError, e: print e else: print 'LOCAL FILE:', name def _retry_test(): try: url, filename = sys.argv[1:3] except ValueError: print 'usage:', sys.argv[0], \ '<url> <filename> [copy_local=0|1] [close_connection=0|1]' sys.exit() kwargs = {} for a in sys.argv[3:]: k, v = string.split(a, '=', 1) kwargs[k] = int(v) try: from progress import text_progress_meter except ImportError, e: pass else: kwargs['progress_obj'] = text_progress_meter() def cfunc(filename, hello, there='foo'): print hello, there import random rnum = random.random() if rnum < .5: print 'forcing retry' raise URLGrabError(-1, 'forcing retry') if rnum < .75: print 'forcing failure' raise URLGrabError(-2, 'forcing immediate failure') print 'success' return kwargs['checkfunc'] = (cfunc, ('hello',), {'there':'there'}) try: name = apply(retrygrab, (url, filename), kwargs) except URLGrabError, e: print e else: print 'LOCAL FILE:', name def _file_object_test(filename=None): import cStringIO if filename is None: filename = __file__ print 'using file "%s" for comparisons' % filename fo = open(filename) s_input = fo.read() 
fo.close() for testfunc in [_test_file_object_smallread, _test_file_object_readall, _test_file_object_readline, _test_file_object_readlines]: fo_input = cStringIO.StringIO(s_input) fo_output = cStringIO.StringIO() wrapper = PyCurlFileObject(fo_input, None, 0) print 'testing %-30s ' % testfunc.__name__, testfunc(wrapper, fo_output) s_output = fo_output.getvalue() if s_output == s_input: print 'passed' else: print 'FAILED' def _test_file_object_smallread(wrapper, fo_output): while 1: s = wrapper.read(23) fo_output.write(s) if not s: return def _test_file_object_readall(wrapper, fo_output): s = wrapper.read() fo_output.write(s) def _test_file_object_readline(wrapper, fo_output): while 1: s = wrapper.readline() fo_output.write(s) if not s: return def _test_file_object_readlines(wrapper, fo_output): li = wrapper.readlines() fo_output.write(string.join(li, '')) if __name__ == '__main__': _main_test() _retry_test() _file_object_test('test')
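# ILLUSTRATIVE USAGE SKETCH
# A minimal, hedged example of driving the high-level API defined earlier in
# this file: urlgrab() for whole files, urlread() for small in-memory fetches
# and urlopen() for a file-like object.  The URL and local paths are
# hypothetical, and this helper is illustrative only (it is never called).
def _usage_sketch():
    try:
        # reget='simple' resumes a partial download; timeout is in seconds
        name = urlgrab('http://example.com/big.iso', '/tmp/big.iso',
                       reget='simple', timeout=120)
        head = urlread('http://example.com/README', limit=4096)
        fo = urlopen('http://example.com/stream')
        first_line = fo.readline()
        fo.close()
        return name, head, first_line
    except URLGrabError, e:
        # errno < 256 are grabber errors, 256-511 are MirrorGroup errors
        print '[Errno %i] %s' % (e.errno, e.strerror)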
henrysher/urlgrabber
urlgrabber/grabber.py
Python
lgpl-2.1
96,279
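The _TH helper in the grabber.py record above keeps a per-host (speed, fail, timestamp) triple and folds each new download measurement into the stored speed using a half-life decay plus a reliability weight for very short reads. Below is a minimal standalone sketch of that smoothing idea; the HALF_LIFE and default_speed values are placeholder assumptions (urlgrabber reads them from default_grabber.opts), and the host table is a plain dict rather than the module's persisted timedhosts file.

import time

HALF_LIFE = 30 * 24 * 60 * 60   # assumed decay window in seconds (urlgrabber uses opts.half_life)
hosts = {}                      # host -> (speed in B/s, consecutive fails, last timestamp)

def update_speed(host, dl_size, dl_time, now=None):
    """Blend a new measurement into the stored estimate, as _TH.update does."""
    now = now or time.time()
    speed, fail, ts = hosts.get(host, (0.0, 0, 0.0))
    k1 = 2 ** ((ts - now) / HALF_LIFE)   # the older the stored estimate, the less it counts
    k2 = min(dl_time / 0.5, 1.0) / 10    # sub-500 ms reads are less reliable; 10:1 smoothing
    if k2 > 0:
        speed = (k1 * speed + k2 * dl_size / dl_time) / (k1 + k2)
    hosts[host] = (speed, 0, now)

def estimate_speed(host, default_speed=100000.0, now=None):
    """Decay the stored estimate toward a default and halve it per recorded failure."""
    now = now or time.time()
    try:
        speed, fail, ts = hosts[host]
    except KeyError:
        return default_speed
    speed *= 2 ** -fail
    k = 2 ** ((ts - now) / HALF_LIFE)
    return k * speed + (1 - k) * default_speed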
from .qmpdsocket import QMPDSocket
assert QMPDSocket
duganchen/qmpdsocket
qmpdsocket/__init__.py
Python
mit
53
# # Test database for rsvndump # written by Jonas Gehring # import os import test_api def info(): return "Copying test with modifications on sub-directories [bugreport from Valentin Haenel]" def setup(step,log): if step == 0: os.mkdir("a") os.mkdir("a/dir1") os.mkdir("a/dir2") f = open("a/dir1/file1","wb") print >>f, "file1" f = open("a/dir1/file2","wb") print >>f, "file2" f = open("a/dir2/file3","wb") print >>f, "file3" test_api.run("svn", "add", "a", output = log) return True elif step == 1: test_api.run("svn", "cp", "a", "b", output=log) test_api.run("svn", "propset", "svn:ignore", "bogus", "b/dir1", output=log) return True else: return False # Runs the test def run(id, args = []): # Set up the test repository test_api.setup_repos(id, setup) odump_path = test_api.dump_original(id) rdump_path = test_api.dump_rsvndump(id, args) vdump_path = test_api.dump_reload(id, rdump_path) return test_api.diff(id, odump_path, vdump_path)
jgehring/rsvndump
tests/db/tests/copy_modify3.py
Python
gpl-3.0
990
import unittest import common import trezorlib.ckd_public as bip32 import trezorlib.types_pb2 as proto_types import binascii class TestMsgGetaddress(common.TrezorTest): def test_show(self): self.setup_mnemonic_nopin_nopassphrase() self.assertEqual(self.client.get_address('Bitcoin', [1], show_display=True), '1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb') self.assertEqual(self.client.get_address('Bitcoin', [2], show_display=True), '15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG') self.assertEqual(self.client.get_address('Bitcoin', [3], show_display=True), '1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5') def test_show_multisig_3(self): self.setup_mnemonic_nopin_nopassphrase() node = bip32.deserialize('xpub661MyMwAqRbcF1zGijBb2K6x9YiJPh58xpcCeLvTxMX6spkY3PcpJ4ABcCyWfskq5DDxM3e6Ez5ePCqG5bnPUXR4wL8TZWyoDaUdiWW7bKy') multisig = proto_types.MultisigRedeemScriptType( pubkeys=[proto_types.HDNodePathType(node=node, address_n=[1]), proto_types.HDNodePathType(node=node, address_n=[2]), proto_types.HDNodePathType(node=node, address_n=[3])], signatures=['', '', ''], m=2, ) for i in [1, 2, 3]: self.assertEqual(self.client.get_address('Bitcoin', [i], show_display=True, multisig=multisig), '3E7GDtuHqnqPmDgwH59pVC7AvySiSkbibz') def test_show_multisig_15(self): self.setup_mnemonic_nopin_nopassphrase() node = bip32.deserialize('xpub661MyMwAqRbcF1zGijBb2K6x9YiJPh58xpcCeLvTxMX6spkY3PcpJ4ABcCyWfskq5DDxM3e6Ez5ePCqG5bnPUXR4wL8TZWyoDaUdiWW7bKy') pubs = [] for x in range(15): pubs.append(proto_types.HDNodePathType(node=node, address_n=[x])) multisig = proto_types.MultisigRedeemScriptType( pubkeys=pubs, signatures=[''] * 15, m=15, ) for i in range(15): self.assertEqual(self.client.get_address('Bitcoin', [i], show_display=True, multisig=multisig), '3QaKF8zobqcqY8aS6nxCD5ZYdiRfL3RCmU') if __name__ == '__main__': unittest.main()
runn1ng/python-trezor
tests/test_msg_getaddress_show.py
Python
lgpl-3.0
2,252
# -*- coding: utf-8 -*- #!/usr/bin/python import numpy as np import scipy from sklearn import preprocessing from sklearn.feature_extraction import DictVectorizer from sklearn.cross_validation import train_test_split from sklearn.metrics import classification_report, confusion_matrix from collections import Counter from scipy.stats.stats import pearsonr import data_readers import feature_extractors as fe import label_transformers as lt import training_functions as training import utils def build_dataset(reader, phi_list, class_func, vectorizer=None, verbose=False): """Core general function for building experimental hand-generated feature datasets. Parameters ---------- reader : iterator Should follow the format of data_readers. This is the dataset we'll be featurizing. phi_list : array of feature functions (default: [`manual_content_flags`]) Any function that takes a string as input and returns a bool/int/float-valued dict as output. class_func : function on the labels A function that modifies the labels based on the experimental design. If `class_func` returns None for a label, then that item is ignored. vectorizer : sklearn.feature_extraction.DictVectorizer If this is None, then a new `DictVectorizer` is created and used to turn the list of dicts created by `phi` into a feature matrix. This happens when we are training. If this is not None, then it's assumed to be a `DictVectorizer` and used to transform the list of dicts. This happens in assessment, when we take in new instances and need to featurize them as we did in training. Returns ------- dict A dict with keys 'X' (the feature matrix), 'y' (the list of labels), 'vectorizer' (the `DictVectorizer`), and 'raw_examples' (the example strings, for error analysis). """ labels = [] feat_dicts = [] raw_examples = [] rows = [] for i, (paragraph, parse, label) in enumerate(reader()): if i % 100 == 0: print " Starting feature extraction for unit #%d " % (i+1) cls = class_func(label) #print label, cls if cls != None: labels.append(cls) raw_examples.append(paragraph) if verbose: print cls, ":", paragraph features = Counter() for phi in phi_list: cur_feats = phi(paragraph, parse) if cur_feats is None: continue # If we won't accidentally blow away data, merge 'em. overlap_feature_names = features.viewkeys() & cur_feats.viewkeys() if verbose and len(overlap_feature_names) > 0: print "Note: Overlap features are ", overlap_feature_names features |= cur_feats rows.append(cur_feats['row']) feat_dicts.append(features) if verbose: print features print print "Completed all feature extraction: %d units" % (i+1) # In training, we want a new vectorizer, but in # assessment, we featurize using the existing vectorizer: feat_matrix = None if vectorizer == None: vectorizer = DictVectorizer(sparse=True) feat_matrix = vectorizer.fit_transform(feat_dicts) else: feat_matrix = vectorizer.transform(feat_dicts) return {'X': feat_matrix, 'y': labels, 'vectorizer': vectorizer, 'raw_examples': raw_examples} def experiment_features( train_reader=data_readers.toy, assess_reader=None, train_size=0.7, phi_list=[fe.manual_content_flags], class_func=lt.identity_class_func, train_func=training.fit_logistic_at_with_crossvalidation, score_func=scipy.stats.stats.pearsonr, verbose=True): """Generic experimental framework for hand-crafted features. Either assesses with a random train/test split of `train_reader` or with `assess_reader` if it is given. Parameters ---------- train_reader : data iterator (default: `train_reader`) Iterator for training data. 
assess_reader : iterator or None (default: None) If None, then the data from `train_reader` are split into a random train/test split, with the the train percentage determined by `train_size`. If not None, then this should be an iterator for assessment data (e.g., `dev_reader`). train_size : float (default: 0.7) If `assess_reader` is None, then this is the percentage of `train_reader` devoted to training. If `assess_reader` is not None, then this value is ignored. phi_list : array of feature functions (default: [`manual_content_flags`]) Any function that takes a string as input and returns a bool/int/float-valued dict as output. class_func : function on the labels A function that modifies the labels based on the experimental design. If `class_func` returns None for a label, then that item is ignored. train_func : model wrapper (default: `fit_logistic_at_with_crossvalidation`) Any function that takes a feature matrix and a label list as its values and returns a fitted model with a `predict` function that operates on feature matrices. score_metric : function name (default: `utils.safe_weighted_f1`) This should be an `sklearn.metrics` scoring function. The default is weighted average F1. verbose : bool (default: True) Whether to print out the model assessment to standard output. Prints ------- To standard output, if `verbose=True` Model confusion matrix and a model precision/recall/F1 report. Returns ------- float The overall scoring metric for assess set as determined by `score_metric`. float The overall Cronbach's alpha for assess set np.array The confusion matrix (rows are truth, columns are predictions) list of dictionaries A list of {truth:_ , prediction:_, example:_} dicts on the assessment data """ # Train dataset: train = build_dataset(train_reader, phi_list, class_func, vectorizer=None, verbose=verbose) # Manage the assessment set-up: indices = np.arange(0, len(train['y'])) X_train = train['X'] y_train = np.array(train['y']) train_examples = np.array(train['raw_examples']) X_assess = None y_assess = None assess_examples = None if assess_reader == None: print " Raw y training distribution:" print " ", np.bincount(y_train)[1:] indices_train, indices_assess, y_train, y_assess = train_test_split( indices, y_train, train_size=train_size, stratify=y_train) X_assess = X_train[indices_assess] assess_examples = train_examples[indices_assess] X_train = X_train[indices_train] train_examples = train_examples[indices_train] print " Train y distribution:" print " ", np.bincount(y_train)[1:] print " Test y distribution:" print " ", np.bincount(y_assess)[1:] else: assess = build_dataset( assess_reader, phi_list, class_func, vectorizer=train['vectorizer']) X_assess, y_assess, assess_examples = assess['X'], assess['y'], np.array(assess['raw_examples']) # Normalize: nonzero_cells = len(X_train.nonzero()[0]) total_cells = 1.*X_train.shape[0] * X_train.shape[1] proportion_nonzero = nonzero_cells/total_cells print "sparsity: %g/1 are nonzero" % proportion_nonzero if proportion_nonzero > 0.5: # if dense matrix X_train = X_train.toarray() X_assess = X_assess.toarray() scaler = preprocessing.StandardScaler().fit(X_train) X_train = scaler.transform(X_train) X_assess = scaler.transform(X_assess) else: scaler = preprocessing.MaxAbsScaler().fit(X_train) X_train = scaler.transform(X_train) X_assess = scaler.transform(X_assess) # Train: mod = train_func(X_train, y_train) # Predictions: predictions_on_assess = mod.predict(X_assess) assess_performance = get_score_example_pairs(y_assess, predictions_on_assess, 
assess_examples) predictions_on_train = mod.predict(X_train) train_performance = get_score_example_pairs(y_train, predictions_on_train, train_examples) # Report: if verbose: print "\n-- TRAINING RESULTS --" print_verbose_overview(y_train, predictions_on_train) print "\n-- ASSESSMENT RESULTS --" print_verbose_overview(y_assess, predictions_on_assess) try: the_score = score_func(y_assess, predictions_on_assess) except: the_score = (0,0) # Return the overall results on the assessment data: return the_score, \ utils.cronbach_alpha(y_assess, predictions_on_assess), \ confusion_matrix(y_assess, predictions_on_assess), \ assess_performance def get_score_example_pairs(y, y_hat, examples): """ Return a list of dicts: {truth score, predicted score, example} """ paired_results = sorted(zip(y, y_hat), key=lambda x: x[0]-x[1]) performance = [] for i, (truth, prediction) in enumerate(paired_results): performance.append({"truth": truth, "prediction": prediction, "example": examples[i]}) return performance def print_verbose_overview(y, yhat): """ Print a performance overview """ print "Correlation: ", pearsonr(y, yhat)[0] print "Alpha: ", utils.cronbach_alpha(y, yhat) print "Classification report:" print classification_report(y, yhat, digits=3) print "Confusion matrix:" print confusion_matrix(y, yhat) print " (Rows are truth; columns are predictions)" def experiment_features_iterated( train_reader=data_readers.toy, assess_reader=None, train_size=0.7, phi_list=[fe.manual_content_flags], class_func=lt.identity_class_func, train_func=training.fit_logistic_at_with_crossvalidation, score_func=utils.safe_weighted_f1, verbose=True, iterations=1): """ Generic iterated experimental framework for hand-crafted features. """ correlation_overall = [] cronbach_overall = [] conf_matrix_overall = None assess_performance = [] while len(correlation_overall) < iterations: print "\nStarting iteration: %d/%d" % (len(correlation_overall)+1, iterations) try: correlation_local, cronbach_local, conf_matrix_local, perf_local = experiment_features( train_reader=train_reader, assess_reader=assess_reader, train_size=train_size, phi_list=phi_list, class_func=class_func, train_func=train_func, score_func=score_func, verbose=verbose) correlation_overall.append(correlation_local[0]) cronbach_overall.append(cronbach_local) assess_performance.extend(perf_local) if conf_matrix_overall is None: conf_matrix_overall = conf_matrix_local else: conf_matrix_overall += conf_matrix_local except (ValueError,UserWarning) as e: print e if verbose: print "\n-- OVERALL --" print correlation_overall print cronbach_overall print conf_matrix_overall return correlation_overall, cronbach_overall, conf_matrix_overall, assess_performance
ptoman/icgauge
icgauge/experiment_frameworks.py
Python
mit
11,989
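The icgauge build_dataset function above fits a DictVectorizer on the training feature dicts and then reuses that same fitted vectorizer to transform assessment data, so both sets share one feature space. A short sketch of that fit-once/transform-later pattern with toy feature dicts (not the icgauge readers or feature extractors):

from sklearn.feature_extraction import DictVectorizer

train_dicts = [{"len": 12, "has_because": 1}, {"len": 7, "has_because": 0}]
assess_dicts = [{"len": 9, "has_because": 1, "unseen_feature": 3}]

# Training: create the vectorizer and learn the feature-name -> column mapping.
vectorizer = DictVectorizer(sparse=True)
X_train = vectorizer.fit_transform(train_dicts)

# Assessment: reuse the fitted vectorizer; features it never saw in training are
# dropped, so assessment rows line up with the training columns.
X_assess = vectorizer.transform(assess_dicts)

print(X_train.shape, X_assess.shape)   # same number of columns in both matrices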
from __future__ import division import numpy as np np.seterr(divide='ignore') # these warnings are usually harmless for this code from matplotlib import pyplot as plt import matplotlib import os matplotlib.rcParams['font.size'] = 8 import pyhsmm from pyhsmm.util.text import progprint_xrange save_images = False #### load data data = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt')) #### EM N = 4 obs_dim = data.shape[1] obs_hypparams = {'mu_0':np.zeros(obs_dim), 'sigma_0':np.eye(obs_dim), 'kappa_0':0.25, 'nu_0':obs_dim+2} obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in xrange(N)] # Build the HMM model that will represent the fitmodel fitmodel = pyhsmm.models.HMM( alpha=50.,init_state_concentration=50., # these are only used for initialization obs_distns=obs_distns) fitmodel.add_data(data) print 'Gibbs sampling for initialization' for idx in progprint_xrange(25): fitmodel.resample_model() plt.figure() fitmodel.plot() plt.gcf().suptitle('Gibbs-sampled initialization') print 'EM' likes = fitmodel.EM_fit() plt.figure() fitmodel.plot() plt.gcf().suptitle('EM fit') plt.figure() plt.plot(likes) plt.gcf().suptitle('log likelihoods during EM') plt.show()
theDataGeek/pyhsmm
examples/hmm-EM.py
Python
mit
1,294
""" Support for monitoring a Smappee energy sensor. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.smappee/ """ import logging from datetime import timedelta from homeassistant.components.smappee import DATA_SMAPPEE from homeassistant.helpers.entity import Entity DEPENDENCIES = ['smappee'] _LOGGER = logging.getLogger(__name__) SENSOR_PREFIX = 'Smappee' SENSOR_TYPES = { 'solar': ['Solar', 'mdi:white-balance-sunny', 'local', 'W', 'solar'], 'active_power': ['Active Power', 'mdi:power-plug', 'local', 'W', 'active_power'], 'current': ['Current', 'mdi:gauge', 'local', 'A', 'current'], 'voltage': ['Voltage', 'mdi:gauge', 'local', 'V', 'voltage'], 'active_cosfi': ['Power Factor', 'mdi:gauge', 'local', '%', 'active_cosfi'], 'alwayson_today': ['Always On Today', 'mdi:gauge', 'remote', 'kWh', 'alwaysOn'], 'solar_today': ['Solar Today', 'mdi:white-balance-sunny', 'remote', 'kWh', 'solar'], 'power_today': ['Power Today', 'mdi:power-plug', 'remote', 'kWh', 'consumption'], 'water_sensor_1': ['Water Sensor 1', 'mdi:water', 'water', 'm3', 'value1'], 'water_sensor_2': ['Water Sensor 2', 'mdi:water', 'water', 'm3', 'value2'], 'water_sensor_temperature': ['Water Sensor Temperature', 'mdi:temperature-celsius', 'water', '°', 'temperature'], 'water_sensor_humidity': ['Water Sensor Humidity', 'mdi:water-percent', 'water', '%', 'humidity'], 'water_sensor_battery': ['Water Sensor Battery', 'mdi:battery', 'water', '%', 'battery'], } SCAN_INTERVAL = timedelta(seconds=30) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Smappee sensor.""" smappee = hass.data[DATA_SMAPPEE] dev = [] if smappee.is_remote_active: for location_id in smappee.locations.keys(): for sensor in SENSOR_TYPES: if 'remote' in SENSOR_TYPES[sensor]: dev.append(SmappeeSensor(smappee, location_id, sensor, SENSOR_TYPES[sensor])) elif 'water' in SENSOR_TYPES[sensor]: for items in smappee.info[location_id].get('sensors'): dev.append(SmappeeSensor( smappee, location_id, '{}:{}'.format(sensor, items.get('id')), SENSOR_TYPES[sensor])) if smappee.is_local_active: for location_id in smappee.locations.keys(): for sensor in SENSOR_TYPES: if 'local' in SENSOR_TYPES[sensor]: if smappee.is_remote_active: dev.append(SmappeeSensor(smappee, location_id, sensor, SENSOR_TYPES[sensor])) else: dev.append(SmappeeSensor(smappee, None, sensor, SENSOR_TYPES[sensor])) add_entities(dev, True) class SmappeeSensor(Entity): """Implementation of a Smappee sensor.""" def __init__(self, smappee, location_id, sensor, attributes): """Initialize the Smappee sensor.""" self._smappee = smappee self._location_id = location_id self._attributes = attributes self._sensor = sensor self.data = None self._state = None self._name = self._attributes[0] self._icon = self._attributes[1] self._type = self._attributes[2] self._unit_of_measurement = self._attributes[3] self._smappe_name = self._attributes[4] @property def name(self): """Return the name of the sensor.""" if self._location_id: location_name = self._smappee.locations[self._location_id] else: location_name = 'Local' return "{} {} {}".format(SENSOR_PREFIX, location_name, self._name) @property def icon(self): """Icon to use in the frontend.""" return self._icon @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return the 
state attributes of the device.""" attr = {} if self._location_id: attr['Location Id'] = self._location_id attr['Location Name'] = self._smappee.locations[self._location_id] return attr def update(self): """Get the latest data from Smappee and update the state.""" self._smappee.update() if self._sensor in ['alwayson_today', 'solar_today', 'power_today']: data = self._smappee.consumption[self._location_id] if data: consumption = data.get('consumptions')[-1] _LOGGER.debug("%s %s", self._sensor, consumption) value = consumption.get(self._smappe_name) self._state = round(value / 1000, 2) elif self._sensor == 'active_cosfi': cosfi = self._smappee.active_cosfi() _LOGGER.debug("%s %s", self._sensor, cosfi) if cosfi: self._state = round(cosfi, 2) elif self._sensor == 'current': current = self._smappee.active_current() _LOGGER.debug("%s %s", self._sensor, current) if current: self._state = round(current, 2) elif self._sensor == 'voltage': voltage = self._smappee.active_voltage() _LOGGER.debug("%s %s", self._sensor, voltage) if voltage: self._state = round(voltage, 3) elif self._sensor == 'active_power': data = self._smappee.instantaneous _LOGGER.debug("%s %s", self._sensor, data) if data: value1 = [float(i['value']) for i in data if i['key'].endswith('phase0ActivePower')] value2 = [float(i['value']) for i in data if i['key'].endswith('phase1ActivePower')] value3 = [float(i['value']) for i in data if i['key'].endswith('phase2ActivePower')] active_power = sum(value1 + value2 + value3) / 1000 self._state = round(active_power, 2) elif self._sensor == 'solar': data = self._smappee.instantaneous _LOGGER.debug("%s %s", self._sensor, data) if data: value1 = [float(i['value']) for i in data if i['key'].endswith('phase3ActivePower')] value2 = [float(i['value']) for i in data if i['key'].endswith('phase4ActivePower')] value3 = [float(i['value']) for i in data if i['key'].endswith('phase5ActivePower')] power = sum(value1 + value2 + value3) / 1000 self._state = round(power, 2) elif self._type == 'water': sensor_name, sensor_id = self._sensor.split(":") data = self._smappee.sensor_consumption[self._location_id]\ .get(int(sensor_id)) if data: tempdata = data.get('records') if tempdata: consumption = tempdata[-1] _LOGGER.debug("%s (%s) %s", sensor_name, sensor_id, consumption) value = consumption.get(self._smappe_name) self._state = value
PetePriority/home-assistant
homeassistant/components/smappee/sensor.py
Python
apache-2.0
7,878
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from pyspark import SparkContext from pyspark.sql import DataFrame, SQLContext from pyspark.storagelevel import StorageLevel def _from_java_gf(jgf, sqlContext): """ (internal) creates a python GraphFrame wrapper from a java GraphFrame. :param jgf: """ pv = DataFrame(jgf.vertices(), sqlContext) pe = DataFrame(jgf.edges(), sqlContext) return GraphFrame(pv, pe) def _java_api(jsc): javaClassName = "org.graphframes.GraphFramePythonAPI" return jsc._jvm.Thread.currentThread().getContextClassLoader().loadClass(javaClassName) \ .newInstance() class GraphFrame(object): """ Represents a graph with vertices and edges stored as DataFrames. :param v: :class:`DataFrame` holding vertex information. Must contain a column named "id" that stores unique vertex IDs. :param e: :class:`DataFrame` holding edge information. Must contain two columns "src" and "dst" storing source vertex IDs and destination vertex IDs of edges, respectively. >>> localVertices = [(1,"A"), (2,"B"), (3, "C")] >>> localEdges = [(1,2,"love"), (2,1,"hate"), (2,3,"follow")] >>> v = sqlContext.createDataFrame(localVertices, ["id", "name"]) >>> e = sqlContext.createDataFrame(localEdges, ["src", "dst", "action"]) >>> g = GraphFrame(v, e) """ def __init__(self, v, e): self._vertices = v self._edges = e self._sqlContext = v.sql_ctx self._sc = self._sqlContext._sc self._sc._jvm.org.apache.spark.ml.feature.Tokenizer() self._jvm_gf_api = _java_api(self._sc) self._jvm_graph = self._jvm_gf_api.createGraph(v._jdf, e._jdf) self.ID = self._jvm_gf_api.ID() self.SRC = self._jvm_gf_api.SRC() self.DST = self._jvm_gf_api.DST() self._ATTR = self._jvm_gf_api.ATTR() assert self.ID in v.columns,\ "Vertex ID column '%s' missing from vertex DataFrame, which has columns: %s" %\ (self.ID, ",".join(v.columns)) assert self.SRC in e.columns,\ "Source vertex ID column '%s' missing from edge DataFrame, which has columns: %s" %\ (self.SRC, ",".join(e.columns)) assert self.DST in e.columns,\ "Destination vertex ID column '%s' missing from edge DataFrame, which has columns: %s"%\ (self.DST, ",".join(e.columns)) @property def vertices(self): """ :class:`DataFrame` holding vertex information, with unique column "id" for vertex IDs. """ return self._vertices @property def edges(self): """ :class:`DataFrame` holding edge information, with unique columns "src" and "dst" storing source vertex IDs and destination vertex IDs of edges, respectively. """ return self._edges def __repr__(self): return self._jvm_graph.toString() def cache(self): """ Persist the dataframe representation of vertices and edges of the graph with the default storage level. 
""" self._jvm_graph.cache() return self def persist(self, storageLevel=StorageLevel.MEMORY_ONLY): """Persist the dataframe representation of vertices and edges of the graph with the given storage level. """ javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel) self._jvm_graph.persist(javaStorageLevel) return self def unpersist(self, blocking=False): """Mark the dataframe representation of vertices and edges of the graph as non-persistent, and remove all blocks for it from memory and disk. """ self._jvm_graph.unpersist(blocking) return self @property def outDegrees(self): """ The out-degree of each vertex in the graph, returned as a DataFrame with two columns: - "id": the ID of the vertex - "outDegree" (integer) storing the out-degree of the vertex Note that vertices with 0 out-edges are not returned in the result. :return: DataFrame with new vertices column "outDegree" """ jdf = self._jvm_graph.outDegrees() return DataFrame(jdf, self._sqlContext) @property def inDegrees(self): """ The in-degree of each vertex in the graph, returned as a DataFame with two columns: - "id": the ID of the vertex - "inDegree" (int) storing the in-degree of the vertex Note that vertices with 0 in-edges are not returned in the result. :return: DataFrame with new vertices column "inDegree" """ jdf = self._jvm_graph.inDegrees() return DataFrame(jdf, self._sqlContext) @property def degrees(self): """ The degree of each vertex in the graph, returned as a DataFrame with two columns: - "id": the ID of the vertex - 'degree' (integer) the degree of the vertex Note that vertices with 0 edges are not returned in the result. :return: DataFrame with new vertices column "degree" """ jdf = self._jvm_graph.degrees() return DataFrame(jdf, self._sqlContext) @property def triplets(self): """ The triplets (source vertex)-[edge]->(destination vertex) for all edges in the graph. Returned as a :class:`DataFrame` with three columns: - "src": source vertex with schema matching 'vertices' - "edge": edge with schema matching 'edges' - 'dst': destination vertex with schema matching 'vertices' :return: DataFrame with columns 'src', 'edge', and 'dst' """ jdf = self._jvm_graph.triplets() return DataFrame(jdf, self._sqlContext) def find(self, pattern): """ Motif finding. See Scala documentation for more details. :param pattern: String describing the motif to search for. :return: DataFrame with one Row for each instance of the motif found """ jdf = self._jvm_graph.find(pattern) return DataFrame(jdf, self._sqlContext) def bfs(self, fromExpr, toExpr, edgeFilter=None, maxPathLength=10): """ Breadth-first search (BFS). See Scala documentation for more details. :return: DataFrame with one Row for each shortest path between matching vertices. """ builder = self._jvm_graph.bfs()\ .fromExpr(fromExpr)\ .toExpr(toExpr)\ .maxPathLength(maxPathLength) if edgeFilter is not None: builder.edgeFilter(edgeFilter) jdf = builder.run() return DataFrame(jdf, self._sqlContext) # Standard algorithms def connectedComponents(self): """ Computes the connected components of the graph. See Scala documentation for more details. :return: DataFrame with new vertices column "component" """ jdf = self._jvm_graph.connectedComponents().run() return DataFrame(jdf, self._sqlContext) def labelPropagation(self, maxIter): """ Runs static label propagation for detecting communities in networks. See Scala documentation for more details. 
:param maxIter: the number of iterations to be performed :return: DataFrame with new vertices column "label" """ jdf = self._jvm_graph.labelPropagation().maxIter(maxIter).run() return DataFrame(jdf, self._sqlContext) def pageRank(self, resetProbability = 0.15, sourceId = None, maxIter = None, tol = None): """ Runs the PageRank algorithm on the graph. Note: Exactly one of fixed_num_iter or tolerance must be set. See Scala documentation for more details. :param resetProbability: Probability of resetting to a random vertex. :param sourceId: (optional) the source vertex for a personalized PageRank. :param maxIter: If set, the algorithm is run for a fixed number of iterations. This may not be set if the `tol` parameter is set. :param tol: If set, the algorithm is run until the given tolerance. This may not be set if the `numIter` parameter is set. :return: GraphFrame with new vertices column "pagerank" and new edges column "weight" """ builder = self._jvm_graph.pageRank().resetProbability(resetProbability) if sourceId is not None: builder = builder.sourceId(sourceId) if maxIter is not None: builder = builder.maxIter(maxIter) assert tol is None, "Exactly one of maxIter or tol should be set." else: assert tol is not None, "Exactly one of maxIter or tol should be set." builder = builder.tol(tol) jgf = builder.run() return _from_java_gf(jgf, self._sqlContext) def shortestPaths(self, landmarks): """ Runs the shortest path algorithm from a set of landmark vertices in the graph. See Scala documentation for more details. :param landmarks: a set of one or more landmarks :return: DataFrame with new vertices column "distances" """ jdf = self._jvm_graph.shortestPaths().landmarks(landmarks).run() return DataFrame(jdf, self._sqlContext) def stronglyConnectedComponents(self, maxIter): """ Runs the strongly connected components algorithm on this graph. See Scala documentation for more details. :param maxIter: the number of iterations to run :return: DataFrame with new vertex column "component" """ jdf = self._jvm_graph.stronglyConnectedComponents().maxIter(maxIter).run() return DataFrame(jdf, self._sqlContext) def svdPlusPlus(self, rank = 10, maxIter = 2, minValue = 0.0, maxValue = 5.0, gamma1 = 0.007, gamma2 = 0.007, gamma6 = 0.005, gamma7 = 0.015): """ Runs the SVD++ algorithm. See Scala documentation for more details. :return: Tuple of DataFrame with new vertex columns storing learned model, and loss value """ # This call is actually useless, because one needs to build the configuration first... builder = self._jvm_graph.svdPlusPlus() builder.rank(rank).maxIter(maxIter).minValue(minValue).maxValue(maxValue) builder.gamma1(gamma1).gamma2(gamma2).gamma6(gamma6).gamma7(gamma7) jdf = builder.run() loss = builder.loss() v = DataFrame(jdf, self._sqlContext) return (v, loss) def triangleCount(self): """ Counts the number of triangles passing through each vertex in this graph. See Scala documentation for more details. :return: DataFrame with new vertex column "count" """ jdf = self._jvm_graph.triangleCount().run() return DataFrame(jdf, self._sqlContext) def _test(): import doctest import graphframe globs = graphframe.__dict__.copy() globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2) globs['sqlContext'] = SQLContext(globs['sc']) (failure_count, test_count) = doctest.testmod( globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
DataReplyUK/datareplyuk
GenesAssociation/spark-2.0.0-bin-hadoop2.7/python/pyspark/graphframes/graphframe.py
Python
apache-2.0
12,224
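The pageRank wrapper in the graphframe.py record above enforces that exactly one of maxIter or tol is supplied. The sketch below reuses the toy graph from the class docstring to show both call styles; it assumes a local Spark session with the GraphFrames package on the classpath, as in the module's _test helper.

from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext("local[4]", "pagerank-example")
sqlContext = SQLContext(sc)

localVertices = [(1, "A"), (2, "B"), (3, "C")]
localEdges = [(1, 2, "love"), (2, 1, "hate"), (2, 3, "follow")]
v = sqlContext.createDataFrame(localVertices, ["id", "name"])
e = sqlContext.createDataFrame(localEdges, ["src", "dst", "action"])
g = GraphFrame(v, e)

# Fixed number of iterations: set maxIter and leave tol unset.
pr_fixed = g.pageRank(resetProbability=0.15, maxIter=10)

# Run to convergence: set tol and leave maxIter unset; passing both triggers
# the AssertionError in the wrapper above.
pr_tol = g.pageRank(resetProbability=0.15, tol=0.01)

pr_fixed.vertices.select("id", "pagerank").show()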
# Copyright (C) 2015 Allen Li # # This file is part of Dantalian. # # Dantalian is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Dantalian is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Dantalian. If not, see <http://www.gnu.org/licenses/>. """ This module contains shared path-related functionality. """ from itertools import count import os import posixpath def readlink(path): """Follow all symlinks and return the target of the last link.""" while posixpath.islink(path): path = os.readlink(path) return path def listdirpaths(path): """Like os.listdir(), except return pathnames instead of filenames. Returns: A generator yielding paths. """ for entry in os.listdir(path): yield posixpath.join(path, entry) def free_name(dirpath, name): """Find a free filename in the given directory. Given a desired filename, this function attempts to find a filename that is not currently being used in the given directory, adding an incrementing index to the filename as necessary. Note that the returned filename might not work due to race conditions. Program accordingly. Args: dirpath: Pathname of directory to look in. name: Desired filename. Returns: Filename. """ files = os.listdir(dirpath) if name not in files: return name base, ext = posixpath.splitext(name) i = count(1) while True: name = ''.join((base, '.', str(next(i)), ext)) if name not in files: return name def free_name_do(dirpath, name, callback): """Repeatedly attempt to do something while finding a free filename. Returns: Path of successful new name. """ while True: dst = posixpath.join(dirpath, free_name(dirpath, name)) try: callback(dst) except FileExistsError: continue else: return dst
darkfeline/dantalian
src/dantalian/pathlib.py
Python
gpl-3.0
2,387
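free_name in the dantalian pathlib record above resolves filename collisions by splicing an incrementing index between the stem and the extension. A small self-contained sketch of the same idea against an in-memory set of names (a hypothetical helper, not dantalian's directory-scanning version):

import posixpath
from itertools import count

def free_name_in(existing, name):
    """Return `name`, or `stem.N.ext` for the first N not in `existing`."""
    if name not in existing:
        return name
    base, ext = posixpath.splitext(name)
    for i in count(1):
        candidate = "%s.%d%s" % (base, i, ext)
        if candidate not in existing:
            return candidate

taken = {"photo.jpg", "photo.1.jpg"}
print(free_name_in(taken, "photo.jpg"))   # -> photo.2.jpg
print(free_name_in(taken, "notes.txt"))   # -> notes.txt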
# This file is part of Moksha. # Copyright (C) 2008-2010 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The Moksha Feed Tree Controller .. moduleauthor:: Luke Macken <lmacken@redhat.com> """ import moksha import logging from tg import expose, tmpl_context from pylons import cache from orbited import json from moksha.lib.base import Controller from moksha.lib.helpers import to_unicode from moksha.widgets.feeds import feed_entries_tree, moksha_feedreader from tg import expose, validate from formencode import validators log = logging.getLogger('moksha.hub') class FeedController(Controller): @expose('mako:moksha.apps.mokshafeeds.templates.index') @validate({'name': validators.UnicodeString()}) def index(self, name='world', *args, **kw): return dict(name=name) @expose() def init_tree(self, key, fresh=False, **kw): c = cache.get_cache('feeds') if fresh: return self._get_feed_titles(fresh=fresh) else: return c.get_value(key='feed_titles', createfunc=self._get_feed_titles, expiretime=3600) def _get_feed_titles(self, fresh=False): results = [] for feed in moksha.feed_storage.keys(): if not feed: raise Exception('None feed?!') if fresh: print "Getting fresh data" feed_data = moksha.feed_cache.fetch(feed) else: print "Getting cached data" timestamp, feed_data = moksha.feed_storage[feed] if not feed_data: log.debug("no feed_data, refetching") #feed_data = moksha.feed_cache.fetch(feed) #if not feed_data: # log.debug("NO FEED DATA AFTER FRESH FETCH!!!!") continue channel = feed_data.get('channel') if not channel: continue title = channel.get('title') results.append({ 'title': title, 'key': feed, 'isLazy': False, 'isFolder': True, }) return json.encode(results) @expose() def get_feed(self, key, *args, **kw): if '::' in key: url, title = key.split('::') for entry in moksha.feed_storage[url][1]['entries']: content = entry.get('content', entry.get('summary')) content = '<span style="line-height:100%%;">%s</span>' % content if entry['title'] == title: return json.encode([{'title': content, 'isLazy': False}]) raise Exception("Cannot find entry by title: %s" % title) feed = moksha.feed_storage[key][1] entries = [] for entry in feed['entries']: entries.append({ 'title': entry.get('title'), 'isLazy': True, 'isFolder': True, 'key': "%s::%s" % (key, entry.get('title')), }) return json.encode(entries) @expose('mako:moksha.templates.widget') def get_entries(self, key, **kw): tmpl_context.widget = feed_entries_tree feed = moksha.feed_storage[key][1] entries = [] for entry in feed['entries']: entries.append({ 'title': entry.get('title'), 'isLazy': False, 'isFolder': False, 'rootVisible': False, 'key': "%s::%s" % (key, entry.get('title','').replace(' ','')), }) return dict(options={'tree_children': entries}) @expose() def get_entry(self, key, **kw): if '::' in key: url, title = key.split('::') for entry in moksha.feed_storage[url][1]['entries']: content = entry.get('content', entry.get('summary')) if isinstance(content, list): if isinstance(content[0], dict) and \ content[0].has_key('value'): content = 
content[0]['value'] content = """ <blockquote><h3><a href="%s">%s</a></h3><br/>%s</blockquote> """ % (entry.get('link', url), entry['title'], content) if entry['title'].replace(' ', '') == to_unicode(title): return content raise Exception("Cannot find entry by title: %s" % title) else: raise Exception("Invalid entry key: %r" % key)
lmacken/moksha
moksha/apps/feeds/moksha/apps/feeds/controllers/root.py
Python
apache-2.0
5,074
import logging import testUtils as utils import time import threading import pytest from conftest import IPADDRESS1, \ RESOURCE, \ DUMMYVAL, \ OSCORECLIENTCONTEXT from coap import coapDefines as d, \ coapException as e, \ coapOption as o, \ coapObjectSecurity as oscore #============================ logging ========================================= log = logging.getLogger(utils.getMyLoggerName()) log.addHandler(utils.NullHandler()) #============================ define ========================================== IPADDRESS_INVALID = 'bbbb::1' #============================ tests =========================================== def test_GET(logFixture,snoopyDispatcher,twoEndPoints): (coap1,coap2,securityEnabled) = twoEndPoints # adjust timeouts so test is faster coap2.ackTimeout = 2 coap2.respTimeout = 2 options = [] if securityEnabled: context = oscore.SecurityContext(OSCORECLIENTCONTEXT) options = [o.ObjectSecurity(context=context)] # have coap2 do a get with pytest.raises(e.coapTimeout): reply = coap2.GET( uri = 'coap://[{0}]:{1}/{2}/'.format(IPADDRESS_INVALID,d.DEFAULT_UDP_PORT,RESOURCE), confirmable = True, options=options, )
openwsn-berkeley/coap
tests/func/test_timeout_CON.py
Python
bsd-3-clause
1,466
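The coap test above relies on pytest.raises as a context manager to assert that a GET against an unreachable address times out. A minimal sketch of that assertion pattern with a stand-in exception and function (the names below are invented for illustration, not the coap library's API):

import pytest

class CoapTimeout(Exception):
    """Stand-in for the library's timeout exception (e.coapTimeout above)."""

def get_with_timeout(address):
    # Pretend every request to the unreachable address times out.
    raise CoapTimeout("no response from %s" % address)

def test_get_times_out():
    # The test only passes if CoapTimeout is raised inside the with-block.
    with pytest.raises(CoapTimeout):
        get_with_timeout("bbbb::1")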
# coding=utf-8 from functools import wraps from flask import abort, flash, redirect, url_for, current_app from flask.ext.login import current_user from .models import Permission def permission_required(permission): def decorator(f): @wraps(f) def decorated_function(*args,**kwargs): if not current_user.can(permission): abort(403) return f(*args, **kwargs) return decorated_function return decorator def admin_required(f): return permission_required(Permission.ADMINISTER)(f) def login_required(func): @wraps(func) def decorated_view(*args, **kwargs): if current_app.login_manager._login_disabled: return func(*args, **kwargs) elif not current_user.is_authenticated: flash('您好,请先登陆.') return redirect(url_for('auth.login')) return func(*args, **kwargs) return decorated_view
tianmaxingkonggrant/tianmaflaskblog
app/decorator.py
Python
mit
824
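permission_required in the decorator module above wraps a view so that a 403 is returned when current_user lacks the permission, and admin_required is the same decorator preset with Permission.ADMINISTER. A hedged usage sketch follows; the blueprint, routes, and the MODERATE_COMMENTS flag are made up for illustration and are not part of the record above.

# Hypothetical view module using the decorators defined above.
from flask import Blueprint

from .decorator import admin_required, permission_required
from .models import Permission

admin_bp = Blueprint('admin', __name__)

@admin_bp.route('/moderate')
@permission_required(Permission.MODERATE_COMMENTS)   # assumed permission flag
def moderate():
    return "moderation dashboard"

@admin_bp.route('/admin')
@admin_required
def admin_panel():
    # Only reached when current_user.can(Permission.ADMINISTER) is true;
    # otherwise the decorator aborts with 403.
    return "admin panel"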
from zerver.lib.test_classes import WebhookTestCase class DialogflowHookTests(WebhookTestCase): URL_TEMPLATE = "/api/v1/external/dialogflow?api_key={api_key}&email=AARON@zulip.com" WEBHOOK_DIR_NAME = "dialogflow" def test_dialogflow_default(self) -> None: email = self.example_user("aaron").email self.url = self.build_webhook_url( email=email, username="aaron", user_ip="127.0.0.1", ) expected_message = "The weather sure looks great !" self.send_and_test_private_message("default", expected_message) def test_dialogflow_alternate_result(self) -> None: email = self.example_user("aaron").email self.url = self.build_webhook_url( email=email, username="aaron", user_ip="127.0.0.1", ) expected_message = "Weather in New Delhi is nice!" self.send_and_test_private_message("alternate_result", expected_message) def test_dialogflow_error_status(self) -> None: email = self.example_user("aaron").email self.url = self.build_webhook_url( email=email, username="aaron", user_ip="127.0.0.1", ) expected_message = "403 - Access Denied" self.send_and_test_private_message("error_status", expected_message) def test_dialogflow_exception(self) -> None: email = self.example_user("aaron").email self.url = self.build_webhook_url( email=email, username="aaron", user_ip="127.0.0.1", ) expected_message = "Dialogflow couldn't process your query." self.send_and_test_private_message("exception", expected_message)
eeshangarg/zulip
zerver/webhooks/dialogflow/tests.py
Python
apache-2.0
1,732
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_system_dedicated_mgmt short_description: Configure dedicated management in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify system feature and dedicated_mgmt category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.9" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true system_dedicated_mgmt: description: - Configure dedicated management. default: null type: dict suboptions: default_gateway: description: - Default gateway for dedicated management interface. type: str dhcp_end_ip: description: - DHCP end IP for dedicated management. type: str dhcp_netmask: description: - DHCP netmask. type: str dhcp_server: description: - Enable/disable DHCP server on management interface. type: str choices: - enable - disable dhcp_start_ip: description: - DHCP start IP for dedicated management. type: str interface: description: - Dedicated management interface. Source system.interface.name. type: str status: description: - Enable/disable dedicated management. type: str choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure dedicated management. 
fortios_system_dedicated_mgmt: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" system_dedicated_mgmt: default_gateway: "<your_own_value>" dhcp_end_ip: "<your_own_value>" dhcp_netmask: "<your_own_value>" dhcp_server: "enable" dhcp_start_ip: "<your_own_value>" interface: "<your_own_value> (source system.interface.name)" status: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_system_dedicated_mgmt_data(json): option_list = ['default_gateway', 'dhcp_end_ip', 'dhcp_netmask', 'dhcp_server', 'dhcp_start_ip', 'interface', 'status'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def system_dedicated_mgmt(data, fos): vdom = data['vdom'] system_dedicated_mgmt_data = data['system_dedicated_mgmt'] filtered_data = underscore_to_hyphen(filter_system_dedicated_mgmt_data(system_dedicated_mgmt_data)) return fos.set('system', 'dedicated-mgmt', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_system(data, fos): if data['system_dedicated_mgmt']: resp = system_dedicated_mgmt(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", 
"default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "system_dedicated_mgmt": { "required": False, "type": "dict", "default": None, "options": { "default_gateway": {"required": False, "type": "str"}, "dhcp_end_ip": {"required": False, "type": "str"}, "dhcp_netmask": {"required": False, "type": "str"}, "dhcp_server": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "dhcp_start_ip": {"required": False, "type": "str"}, "interface": {"required": False, "type": "str"}, "status": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_system(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_system(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
thaim/ansible
lib/ansible/modules/network/fortios/fortios_system_dedicated_mgmt.py
Python
mit
10,367
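The FortiOS module above first filters the supplied suboptions down to the allowed keys and then rewrites underscores to hyphens before posting to the FortiGate API. A tiny standalone illustration of those two helpers' combined effect (values are placeholders, and filter_keys is a simplified stand-in for filter_system_dedicated_mgmt_data):

def filter_keys(data, allowed):
    # Keep only known, non-None parameters, as the module's filter function does.
    return {k: v for k, v in data.items() if k in allowed and v is not None}

def underscore_to_hyphen(data):
    # Recursively rename keys from Ansible style to FortiOS API style.
    if isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v) for k, v in data.items()}
    if isinstance(data, list):
        return [underscore_to_hyphen(v) for v in data]
    return data

raw = {'default_gateway': '10.0.0.1', 'dhcp_server': 'enable', 'interface': None}
allowed = ['default_gateway', 'dhcp_server', 'interface']
print(underscore_to_hyphen(filter_keys(raw, allowed)))
# -> {'default-gateway': '10.0.0.1', 'dhcp-server': 'enable'}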
from ._paths import *
from ._pscmanipulate import *
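The Ren'Py record above ends with its Backup class, which snapshots every renpy module's attributes (pickling the values so restores survive an in-place reload) and later writes them back over the live modules. Below is a much-reduced sketch of that snapshot/restore idea for a single module; it is illustrative only and omits Ren'Py's blacklists, alias bookkeeping, and removal of newly added names. The mygame.config module in the usage comment is hypothetical.

import pickle
import types

def snapshot(mod):
    """Pickle a module's plain attributes so they can be restored later."""
    state = {}
    for name, value in vars(mod).items():
        if name.startswith("__") or isinstance(value, types.ModuleType):
            continue
        try:
            state[name] = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        except Exception:
            pass  # unpicklable values are simply skipped in this sketch
    return state

def restore(mod, state):
    """Write the snapshotted values back over the live module."""
    for name, blob in state.items():
        setattr(mod, name, pickle.loads(blob))

# Usage: take the snapshot right after import, restore on "reload".
# import mygame.config as config      # hypothetical module
# saved = snapshot(config)
# ...mutate config at runtime...
# restore(config, saved)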
exe0cdc/PyscesToolbox
psctb/modeltools/__init__.py
Python
bsd-3-clause
52
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # This file ensures that renpy packages will be imported in the right # order. from __future__ import print_function import sys import os import copy import types import threading import cPickle ################################################################################ # Version information ################################################################################ # Version numbers. try: from renpy.vc_version import vc_version; vc_version except ImportError: vc_version = 0 # The tuple giving the version number. version_tuple = (6, 99, 13, vc_version) # The name of this version. version_name = "We came in peace." # A string giving the version number only (7.0.1.123). version_only = ".".join(str(i) for i in version_tuple) # A verbose string giving the version. version = "Ren'Py " + version_only # Other versions. script_version = 5003000 savegame_suffix = "-LT1.save" bytecode_version = 1 ################################################################################ # Platform Information ################################################################################ # Information about the platform we're running on. We break the platforms # up into 5 groups - windows-like, mac-like, linux-like, android-like, # and ios-like. windows = False macintosh = False linux = False android = False ios = False import platform def get_windows_version(): """ When called on windows, returns the windows version. 
""" import ctypes class OSVERSIONINFOEXW(ctypes.Structure): _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong), ('dwMajorVersion', ctypes.c_ulong), ('dwMinorVersion', ctypes.c_ulong), ('dwBuildNumber', ctypes.c_ulong), ('dwPlatformId', ctypes.c_ulong), ('szCSDVersion', ctypes.c_wchar*128), ('wServicePackMajor', ctypes.c_ushort), ('wServicePackMinor', ctypes.c_ushort), ('wSuiteMask', ctypes.c_ushort), ('wProductType', ctypes.c_byte), ('wReserved', ctypes.c_byte)] try: os_version = OSVERSIONINFOEXW() os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version) retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version)) # Om failure, assume we have a newer version of windows if retcode != 0: return (10, 0) return (os_version.dwMajorVersion, os_version.dwMinorVersion) except: return (10, 0) if platform.win32_ver()[0]: windows = get_windows_version() elif "RENPY_IOS" in os.environ: ios = True elif platform.mac_ver()[0]: macintosh = True elif "ANDROID_PRIVATE" in os.environ: android = True else: linux = True # A flag that's true if we're on a smartphone or tablet-like platform. mobile = android or ios # A flag that's set to true if the game directory is bundled inside a mac app. macapp = False ################################################################################ # Backup Data for Reload ################################################################################ # True if we're done with safe mode checks. safe_mode_checked = False # True if autoreload mode is enabled. This has to live here, because it # needs to survive through an utter restart. autoreload = False # A dict that persists through utter restarts. Accessible to all code as # renpy.session. session = { } # A list of modules beginning with "renpy" that we don't want # to backup. backup_blacklist = { "renpy", "renpy.object", "renpy.log", "renpy.bootstrap", "renpy.debug", "renpy.display", "renpy.display.pgrender", "renpy.display.scale", "renpy.display.presplash", "renpy.display.test", "renpy.text.ftfont", "renpy.test", "renpy.test.testast", "renpy.test.testexecution", "renpy.test.testkey", "renpy.test.testmouse", "renpy.test.testparser", "renpycoverage", } type_blacklist = ( types.ModuleType, ) name_blacklist = { "renpy.loadsave.autosave_not_running", "renpy.python.unicode_re", "renpy.python.string_re", "renpy.python.store_dicts", "renpy.python.store_modules", "renpy.text.text.VERT_FORWARD", "renpy.text.text.VERT_REVERSE", "renpy.savelocation.scan_thread_condition", "renpy.savelocation.disk_lock", "renpy.character.TAG_RE", "renpy.display.im.cache", "renpy.display.render.blit_lock", "renpy.display.render.IDENTITY", "renpy.loader.auto_lock", "renpy.display.screen.cprof", "renpy.audio.audio.lock", } class Backup(): """ This represents a backup of all of the fields in the python modules comprising Ren'Py, shortly after they were imported. This attempts to preserve object aliasing, but not object identity. If renpy.mod.a is renpy.mod.b before the restore, the same will be true after the restore - even though renpy.mod.a will have changed identity. """ def __init__(self): # A map from (module, field) to the id of the object in that field. self.variables = { } # A map from id(object) to objects. This is discarded after being # pickled. self.objects = { } # A map from module to the set of names in that module. self.names = { } if mobile: return for m in sys.modules.values(): if m is None: continue self.backup_module(m) # A pickled version of self.objects. 
self.objects_pickle = cPickle.dumps(self.objects, cPickle.HIGHEST_PROTOCOL) self.objects = None def backup_module(self, mod): """ Makes a backup of `mod`, which must be a Python module. """ try: name = mod.__name__ except: return if not name.startswith("renpy"): return if name in backup_blacklist: return if name.startswith("renpy.styledata"): return self.names[mod] = set(vars(mod).keys()) for k, v in vars(mod).iteritems(): if k.startswith("__") and k.endswith("__"): continue if isinstance(v, type_blacklist): continue if name + "." + k in name_blacklist: continue idv = id(v) self.variables[mod, k] = idv self.objects[idv] = v # If we have a problem pickling things, uncomment the next block. try: cPickle.dumps(v, cPickle.HIGHEST_PROTOCOL) except: print("Cannot pickle", name + "." + k, "=", repr(v)) print("Reduce Ex is:", repr(v.__reduce_ex__(cPickle.HIGHEST_PROTOCOL))) def restore(self): """ Restores the modules to a state similar to the state of the modules when the backup was created. """ if not self.names: return # Remove new variables from the module. for mod, names in self.names.iteritems(): modvars = vars(mod) for name in set(modvars.keys()) - names: del modvars[name] objects = cPickle.loads(self.objects_pickle) for k, v in self.variables.iteritems(): mod, field = k setattr(mod, field, objects[v]) # A backup of the Ren'Py modules after initial import. backup = None ################################################################################ # Import ################################################################################ def update_path(package): """ Update the __path__ of package, to import binary modules from a libexec directory. """ name = package.__name__.split(".") import _renpy libexec = os.path.dirname(_renpy.__file__) package.__path__.append(os.path.join(libexec, *name)) # Also find encodings, to deal with the way py2exe lays things out. import encodings libexec = os.path.dirname(encodings.__path__[0]) package.__path__.append(os.path.join(libexec, *name)) def import_all(): # Note: If we add a new update_path, we have to add an equivalent # hook in the renpython hooks dir. import renpy # @UnresolvedImport update_path(renpy) import renpy.arguments # @UnresolvedImport import renpy.config import renpy.log import renpy.display import renpy.debug # Should probably be early, as we will add it as a base to serialized things. import renpy.object import renpy.game import renpy.preferences # Adds in the Ren'Py loader. import renpy.loader import renpy.pyanalysis import renpy.ast import renpy.atl import renpy.curry import renpy.color import renpy.easy import renpy.execution import renpy.loadsave import renpy.savelocation # @UnresolvedImport import renpy.persistent import renpy.scriptedit import renpy.parser import renpy.python import renpy.script import renpy.statements import renpy.styledata # @UnresolvedImport update_path(renpy.styledata) import renpy.style renpy.styledata.import_style_functions() sys.modules['renpy.styleclass'] = renpy.style import renpy.substitutions import renpy.translation import renpy.translation.scanstrings import renpy.translation.generation import renpy.translation.dialogue import renpy.translation.extract import renpy.translation.merge import renpy.display # @UnresolvedImport @Reimport update_path(renpy.display) import renpy.display.presplash import renpy.display.pgrender import renpy.display.scale import renpy.display.module import renpy.display.render # Most display stuff depends on this. 
@UnresolvedImport import renpy.display.core # object @UnresolvedImport import renpy.text update_path(renpy.text) import renpy.text.ftfont import renpy.text.font import renpy.text.textsupport import renpy.text.texwrap import renpy.text.text import renpy.text.extras sys.modules['renpy.display.text'] = renpy.text.text import renpy.gl update_path(renpy.gl) import renpy.angle update_path(renpy.angle) import renpy.display.layout import renpy.display.viewport import renpy.display.transform import renpy.display.motion # layout @UnresolvedImport import renpy.display.behavior # layout @UnresolvedImport import renpy.display.transition # core, layout @UnresolvedImport import renpy.display.movetransition # core @UnresolvedImport import renpy.display.im import renpy.display.imagelike import renpy.display.image # core, behavior, im, imagelike @UnresolvedImport import renpy.display.video import renpy.display.focus import renpy.display.anim import renpy.display.particle import renpy.display.joystick import renpy.display.controller import renpy.display.minigame import renpy.display.screen import renpy.display.dragdrop import renpy.display.imagemap import renpy.display.predict import renpy.display.emulator import renpy.display.tts import renpy.display.gesture import renpy.display.error # Note: For windows to work, renpy.audio.audio needs to be after # renpy.display.module. import renpy.audio update_path(renpy.audio) import renpy.audio.audio import renpy.audio.music import renpy.audio.sound import renpy.ui import renpy.screenlang import renpy.sl2 update_path(renpy.sl2) import renpy.sl2.slast import renpy.sl2.slparser import renpy.sl2.slproperties import renpy.sl2.sldisplayables import renpy.lint import renpy.warp import renpy.editor import renpy.memory import renpy.exports import renpy.character # depends on exports. @UnresolvedImport import renpy.add_from import renpy.dump import renpy.minstore # depends on lots. @UnresolvedImport import renpy.defaultstore # depends on everything. @UnresolvedImport import renpy.test import renpy.test.testmouse import renpy.test.testfocus import renpy.test.testkey import renpy.test.testast import renpy.test.testparser import renpy.test.testexecution import renpy.main # Back up the Ren'Py modules. global backup if not mobile: backup = Backup() post_import() def post_import(): """ This is called after import or reload, to do further initialization of various modules. """ import renpy # @UnresolvedImport # Create the store. renpy.python.create_store("store") # Import the contents of renpy.defaultstore into renpy.store, and set # up an alias as we do. renpy.store = sys.modules['store'] renpy.exports.store = renpy.store sys.modules['renpy.store'] = sys.modules['store'] import subprocess sys.modules['renpy.subprocess'] = subprocess for k, v in renpy.defaultstore.__dict__.iteritems(): renpy.store.__dict__.setdefault(k, v) # Import everything into renpy.exports, provided it isn't # already there. for k, v in globals().iteritems(): vars(renpy.exports).setdefault(k, v) def reload_all(): """ Resets all modules to the state they were in right after import_all returned. """ if mobile: raise Exception("Reloading is not supported on mobile platforms.") import renpy.style import renpy.display # Clear all pending exceptions. sys.exc_clear() # Reset the styles. renpy.style.reset() # @UndefinedVariable # Shut down the cache thread. renpy.display.im.cache.quit() # Shut down the importer. renpy.loader.quit_importer() # Free memory. renpy.exports.free_memory() # GC renders. 
renpy.display.render.screen_render = None renpy.display.render.mark_sweep() # Get rid of the draw module and interface. renpy.display.draw.deinit() renpy.display.draw = None renpy.display.interface = None # Delete the store modules. for i in sys.modules.keys(): if i.startswith("store") or i == "renpy.store": m = sys.modules[i] if m is not None: m.__dict__.reset() del sys.modules[i] # Restore the state of all modules from backup. backup.restore() renpy.display.im.reset_module() post_import() # Re-initialize the importer. renpy.loader.init_importer() ################################################################################ # Fix things for code analysis ################################################################################ def setup_modulefinder(modulefinder): """ Informs modulefinder about the location of modules in nonstandard places. """ import _renpy libexec = os.path.dirname(_renpy.__file__) for i in [ "display", "gl", "angle", "text", "styledata" ]: displaypath = os.path.join(libexec, "renpy", i) if os.path.exists(displaypath): modulefinder.AddPackagePath('renpy.' + i, displaypath) def import_cython(): """ Never called, but necessary to ensure that modulefinder will properly grab the various cython modules. """ import renpy.arguments import renpy.display.accelerator import renpy.display.render import renpy.gl.gl import renpy.gl.gl1 import renpy.gl.gldraw import renpy.gl.glenviron_fixed import renpy.gl.glenviron_limited import renpy.gl.glenviron_shader import renpy.gl.glrtt_copy import renpy.gl.glrtt_fbo import renpy.gl.gltexture import renpy.angle.gl import renpy.angle.gldraw import renpy.angle.glenviron_shader import renpy.angle.glrtt_copy import renpy.angle.glrtt_fbo import renpy.angle.gltexture if False: import renpy.defaultstore as store
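# A minimal, standalone sketch of the snapshot/restore idea that the Backup
# class above implements for Ren'Py's modules. All names here are
# hypothetical, and the real class additionally round-trips values through a
# pickle (breaking identity while preserving aliasing), which this sketch
# skips for brevity.

def _snapshot_demo():
    import types

    def snapshot(mod):
        # Record the names and values of the module's non-dunder globals.
        return dict((k, v) for k, v in vars(mod).items()
                    if not (k.startswith("__") and k.endswith("__")))

    def restore(mod, saved):
        # Drop names added since the snapshot, then reset the saved values.
        for name in list(vars(mod)):
            if name.startswith("__") and name.endswith("__"):
                continue
            if name not in saved:
                delattr(mod, name)
        for name, value in saved.items():
            setattr(mod, name, value)

    m = types.ModuleType("demo")
    m.a = 1
    saved = snapshot(m)
    m.a = 2  # mutate an existing global
    m.b = 3  # add a new one
    restore(m, saved)
    assert m.a == 1 and not hasattr(m, "b")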
kfcpaladin/sze-the-game
renpy/__init__.py
Python
mit
17,178
# encoding: utf-8 """ Test suite for pptx.text.layout module """ from __future__ import absolute_import, print_function, unicode_literals import pytest from pptx.text.layout import ( _BinarySearchTree, _Line, _LineSource, TextFitter ) from ..unitutil.mock import ( call, class_mock, function_mock, initializer_mock, instance_mock, method_mock, property_mock ) class DescribeTextFitter(object): def it_can_determine_the_best_fit_font_size(self, best_fit_fixture): text, extents, max_size, font_file = best_fit_fixture[:4] _LineSource_, _init_, line_source_ = best_fit_fixture[4:7] _best_fit_font_size_, font_size_ = best_fit_fixture[7:] font_size = TextFitter.best_fit_font_size( text, extents, max_size, font_file ) _LineSource_.assert_called_once_with(text) _init_.assert_called_once_with(line_source_, extents, font_file) _best_fit_font_size_.assert_called_once_with(max_size) assert font_size is font_size_ def it_finds_best_fit_font_size_to_help_best_fit(self, _best_fit_fixture): text_fitter, max_size, _BinarySearchTree_ = _best_fit_fixture[:3] sizes_, predicate_, font_size_ = _best_fit_fixture[3:] font_size = text_fitter._best_fit_font_size(max_size) _BinarySearchTree_.from_ordered_sequence.assert_called_once_with( range(1, max_size+1) ) sizes_.find_max.assert_called_once_with(predicate_) assert font_size is font_size_ def it_provides_a_fits_inside_predicate_fn(self, fits_pred_fixture): text_fitter, point_size = fits_pred_fixture[:2] _rendered_size_, expected_bool_value = fits_pred_fixture[2:] predicate = text_fitter._fits_inside_predicate result = predicate(point_size) text_fitter._wrap_lines.assert_called_once_with( text_fitter._line_source, point_size ) _rendered_size_.assert_called_once_with( 'Ty', point_size, text_fitter._font_file ) assert result is expected_bool_value def it_provides_a_fits_in_width_predicate_fn(self, fits_cx_pred_fixture): text_fitter, point_size, line = fits_cx_pred_fixture[:3] _rendered_size_, expected_value = fits_cx_pred_fixture[3:] predicate = text_fitter._fits_in_width_predicate(point_size) result = predicate(line) _rendered_size_.assert_called_once_with( line.text, point_size, text_fitter._font_file ) assert result is expected_value def it_wraps_lines_to_help_best_fit(self, wrap_fixture): text_fitter, line_source, point_size, remainder = wrap_fixture text_fitter._wrap_lines(line_source, point_size) assert text_fitter._break_line.call_args_list == [ call(line_source, point_size), call(remainder, point_size) ] def it_breaks_off_a_line_to_help_wrap(self, break_fixture): text_fitter, line_source_, point_size = break_fixture[:3] _BinarySearchTree_, bst_, predicate_ = break_fixture[3:6] max_value_ = break_fixture[6] value = text_fitter._break_line(line_source_, point_size) _BinarySearchTree_.from_ordered_sequence.assert_called_once_with( line_source_ ) text_fitter._fits_in_width_predicate.assert_called_once_with( point_size ) bst_.find_max.assert_called_once_with(predicate_) assert value is max_value_ # fixtures --------------------------------------------- @pytest.fixture def best_fit_fixture(self, _LineSource_, _init_, _best_fit_font_size_): text, extents, max_size = 'Foobar', (19, 20), 42 font_file = 'foobar.ttf' line_source_ = _LineSource_.return_value font_size_ = _best_fit_font_size_.return_value return ( text, extents, max_size, font_file, _LineSource_, _init_, line_source_, _best_fit_font_size_, font_size_ ) @pytest.fixture def _best_fit_fixture(self, _BinarySearchTree_, _fits_inside_predicate_): text_fitter = TextFitter(None, (None, None), None) max_size = 42 
sizes_ = _BinarySearchTree_.from_ordered_sequence.return_value predicate_ = _fits_inside_predicate_.return_value font_size_ = sizes_.find_max.return_value return ( text_fitter, max_size, _BinarySearchTree_, sizes_, predicate_, font_size_ ) @pytest.fixture def break_fixture( self, line_source_, _BinarySearchTree_, bst_, _fits_in_width_predicate_): text_fitter = TextFitter(None, (None, None), None) point_size = 21 _BinarySearchTree_.from_ordered_sequence.return_value = bst_ predicate_ = _fits_in_width_predicate_.return_value max_value_ = bst_.find_max.return_value return ( text_fitter, line_source_, point_size, _BinarySearchTree_, bst_, predicate_, max_value_ ) @pytest.fixture(params=[(49, True), (50, True), (51, False)]) def fits_cx_pred_fixture(self, request, _rendered_size_): rendered_width, expected_value = request.param text_fitter = TextFitter(None, (50, None), 'foobar.ttf') point_size, line = 12, _Line('foobar', None) _rendered_size_.return_value = (rendered_width, None) return ( text_fitter, point_size, line, _rendered_size_, expected_value ) @pytest.fixture(params=[ ((66, 99), 6, ('foo', 'bar'), False), ((66, 100), 6, ('foo', 'bar'), True), ((66, 101), 6, ('foo', 'bar'), True), ]) def fits_pred_fixture( self, request, line_source_, _wrap_lines_, _rendered_size_): extents, point_size, text_lines, expected_value = request.param text_fitter = TextFitter(line_source_, extents, 'foobar.ttf') _wrap_lines_.return_value = text_lines _rendered_size_.return_value = (None, 50) return text_fitter, point_size, _rendered_size_, expected_value @pytest.fixture def wrap_fixture(self, _break_line_): text_fitter = TextFitter(None, (None, None), None) point_size = 21 line_source, remainder = _LineSource('foo bar'), _LineSource('bar') _break_line_.side_effect = [ ('foo', remainder), ('bar', _LineSource('')), ] return text_fitter, line_source, point_size, remainder # fixture components ----------------------------------- @pytest.fixture def _best_fit_font_size_(self, request): return method_mock(request, TextFitter, '_best_fit_font_size') @pytest.fixture def _BinarySearchTree_(self, request): return class_mock(request, 'pptx.text.layout._BinarySearchTree') @pytest.fixture def _break_line_(self, request): return method_mock(request, TextFitter, '_break_line') @pytest.fixture def bst_(self, request): return instance_mock(request, _BinarySearchTree) @pytest.fixture def _fits_in_width_predicate_(self, request): return method_mock(request, TextFitter, '_fits_in_width_predicate') @pytest.fixture def _fits_inside_predicate_(self, request): return property_mock(request, TextFitter, '_fits_inside_predicate') @pytest.fixture def _init_(self, request): return initializer_mock(request, TextFitter) @pytest.fixture def _LineSource_(self, request, line_source_): return class_mock( request, 'pptx.text.layout._LineSource', return_value=line_source_ ) @pytest.fixture def line_source_(self, request): return instance_mock(request, _LineSource) @pytest.fixture def _rendered_size_(self, request): return function_mock(request, 'pptx.text.layout._rendered_size') @pytest.fixture def _wrap_lines_(self, request): return method_mock(request, TextFitter, '_wrap_lines') class Describe_BinarySearchTree(object): def it_can_construct_from_an_ordered_sequence(self): bst = _BinarySearchTree.from_ordered_sequence(range(10)) def in_order(node): """ Traverse the tree depth first to produce a list of its values, in order. 
""" result = [] if node is None: return result result.extend(in_order(node._lesser)) result.append(node.value) result.extend(in_order(node._greater)) return result assert bst.value == 9 assert bst._lesser.value == 4 assert bst._greater is None assert in_order(bst) == list(range(10)) def it_can_find_the_max_value_satisfying_a_predicate(self, max_fixture): bst, predicate, expected_value = max_fixture assert bst.find_max(predicate) == expected_value # fixtures --------------------------------------------- @pytest.fixture(params=[ (range(10), lambda n: n < 6.5, 6), (range(10), lambda n: n > 9.9, None), (range(10), lambda n: n < 0.0, None), ]) def max_fixture(self, request): seq, predicate, expected_value = request.param bst = _BinarySearchTree.from_ordered_sequence(seq) return bst, predicate, expected_value class Describe_LineSource(object): def it_generates_text_remainder_pairs(self): line_source = _LineSource('foo bar baz') expected = ( ('foo', _LineSource('bar baz')), ('foo bar', _LineSource('baz')), ('foo bar baz', _LineSource('')), ) assert all((a == b) for a, b in zip(expected, line_source)) # produces different results on Linux, fails Travis-CI # from pptx.text.layout import _rendered_size # from ..unitutil.file import testfile # class Describe_rendered_size(object): # def it_calculates_the_rendered_size_of_text_at_point_size(self, fixture): # text, point_size, font_file, expected_value = fixture # extents = _rendered_size(text, point_size, font_file) # assert extents == expected_value # # fixtures --------------------------------------------- # @pytest.fixture(params=[ # ('Typical', 18, (673100, 254000)), # ('foo bar baz', 12, (698500, 165100)), # ]) # def fixture(self, request): # text, point_size, expected_value = request.param # font_file = testfile('calibriz.ttf') # return text, point_size, font_file, expected_value
kevingu1003/python-pptx
tests/text/test_layout.py
Python
mit
10,588
import numpy as np

__all__ = ['gp_predict', 'gp_covariance']


def gp_kernel(x, y, kernel_type='gaussian', **kwargs):
    """ Gaussian process kernel score """
    if kernel_type == 'gaussian':
        beta = kwargs.get('beta', 0.3)
        # Note: the exponent uses the Euclidean distance itself (power 0.5 of
        # the squared distance), i.e. the exponential form of the kernel.
        return np.exp(-((x[0] - y[0]) ** 2 +
                        (x[1] - y[1]) ** 2) ** (0.5) / (2 * beta ** 2))
    else:
        raise NotImplementedError('Kernel ({}) not implemented'
                                  .format(kernel_type))


def gp_covariance(x, y, kernel_type='gaussian', **kwargs):
    """ Compute Gram matrix for GP """
    # Forward the keyword arguments so that e.g. `beta` reaches the kernel
    # instead of silently falling back to its default value.
    return np.array([[gp_kernel(xi, yi, kernel_type, **kwargs)
                      for xi in x] for yi in y])


def gp_predict(target, train_data, gram_matrix, train_labels):
    """ Predict the value of a node with Gaussian process regression
    from neighboring nodes

    Parameters
    ------------
    target : array-like, shape (2)
        [abs, ord] of target point
    train_data : array-like, shape (2 x N)
        training data, [abs, ord] of the points in a certain radius of the
        target point
    train_labels : array-like, shape (N)
        Values of the training data
    gram_matrix : array-like, shape (N x N)
        The Gram matrix

    Returns
    --------
    y_pred : float
        Predicted value for the target
    sigma_new : float
        Variance of target point prediction
    """
    k = [gp_kernel(target, yy) for yy in train_data]
    Sinv = np.linalg.pinv(gram_matrix)
    y_pred = np.dot(k, Sinv).dot(train_labels)  # y* = k^T K^{-1} y
    sigma_new = gp_kernel(target, target) - np.dot(k, Sinv).dot(k)

    return y_pred, sigma_new
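# A short usage sketch (hypothetical data): build the Gram matrix over a few
# training points with the default kernel settings, then predict the value at
# a new 2-D location. Note that gp_predict evaluates the kernel with its
# defaults, so the Gram matrix should be built with the same settings.
if __name__ == '__main__':
    train_x = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]
    train_y = np.array([1.0, 2.0, 0.5])

    K = gp_covariance(train_x, train_x)
    y_pred, sigma = gp_predict((0.5, 0.5), train_x, K, train_y)
    print('prediction: {:.3f}, variance: {:.3f}'.format(y_pred, sigma))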
makokal/scalable-irl
sirl/algorithms/function_approximation.py
Python
bsd-3-clause
1,647
#!/usr/bin/env python3 import argparse import sys import re import logging from models import LineChange module = sys.modules['__main__'].__file__ log = logging.getLogger(module) class DiffParser: def __init__(self, filename=None, diff_content=None): if filename != None: self.load_file(filename) elif diff_content != None: self.load_diff_content(diff_content) self.changes = {} self.added_files = set([]) self.removed_files = set([]) def eof(self): return self.line_idx >= len(self.lines) def load_diff_content(self, content): self.lines = content self.line_idx = 0 def load_file(self, filename): log.debug("Parsing {}".format(filename)) with open(filename) as f: self.lines = f.readlines() self.line_idx = 0 def parse_short_commit_hash(self): diff_commits_re = re.compile('^index ([a-f,0-9]{7})\.\.([a-f,0-9]{7}).*$') new_file_re = re.compile('^new file mode [0-9]{6}$') delete_file_re = re.compile('^deleted file mode [0-9]{6}$') line = self.lines[self.line_idx] if new_file_re.match(line) != None: self.added_files.add(self.current_file) elif delete_file_re.match(line) != None: self.removed_files.add(self.current_file) if new_file_re.match(line) != None or delete_file_re.match(line) != None: self.line_idx += 1 line = self.lines[self.line_idx] commits_match = diff_commits_re.match(line) if commits_match != None: pass # self.from_commit = commits_match.group(1) # self.to_commit = commits_match.group(2) else: log.error("Something went wrong when parsing commit!") def parse_next_commit(self, added, removed, modified): if self.eof(): return None commit_re = re.compile('^@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@.*$') filename_re = re.compile('^diff --git a/(.*?) b/(.*?)$') long_re = re.compile('^From ([a-f,0-9]{40}) [A-Z][a-z]{2} [A-Z][a-z]{2} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} [0-9]{4}$') match = None while not self.eof() and match == None: if filename_re.match(self.lines[self.line_idx]) != None: break match = commit_re.match(self.lines[self.line_idx]) self.line_idx += 1 if match == None: return None before_line_n = int(match.group(1)) before_offset = int(match.group(2)) after_line_n = int(match.group(3)) after_offset = int(match.group(4)) before_finish_line_n = before_line_n + before_offset after_finish_line_n = after_line_n + after_offset removed_in_new_file = {} while not self.eof(): line = self.lines[self.line_idx] match = commit_re.match(line) if match != None: break match = filename_re.match(line) if match != None: break match = long_re.match(line) if match != None: # Remove empty line which is insert for spacing after_line_n -= 1 before_line_n -= 1 break if line.startswith("+"): if after_line_n in removed_in_new_file and len(removed_in_new_file[after_line_n]) > 0: del removed[removed_in_new_file[after_line_n].pop()] modified[after_line_n] = LineChange(after_line_n, LineChange.ChangeType.modified, self.current_file, self.current_commit) else: added[after_line_n] = LineChange(after_line_n, LineChange.ChangeType.added, self.current_file, self.current_commit) after_line_n += 1 elif line.startswith("-"): if after_line_n in added: del added[after_line_n] modified[after_line_n] = LineChange(after_line_n, LineChange.ChangeType.modified, self.current_file, self.current_commit) else: commit_hash = self.current_commit + "~1" if self.current_commit != None else None removed[before_line_n] = LineChange(before_line_n, LineChange.ChangeType.deleted, self.current_file, commit_hash) if not after_line_n in removed_in_new_file: removed_in_new_file[after_line_n] = [] 
removed_in_new_file[after_line_n].append(before_line_n) before_line_n += 1 else: before_line_n += 1 after_line_n += 1 self.line_idx += 1 # Does not work when multiple commits change the same lines if not (after_finish_line_n == after_line_n and before_finish_line_n == before_line_n) \ and not self.current_file in self.removed_files and not self.current_file in self.added_files: log.warning("Something went wrong with parsing commits in {} {} S {} {} B {} v {} A {} v {}".format( self.current_file, self.current_commit, before_finish_line_n - before_offset, after_finish_line_n - after_offset, before_finish_line_n, before_line_n, after_finish_line_n, after_line_n)) def parse_next_file_changes(self): if self.eof(): return None filename_re = re.compile('^diff --git a/(.*?) b/(.*?)$') long_re = re.compile('^From ([a-f,0-9]{40}) [A-Z][a-z]{2} [A-Z][a-z]{2} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} [0-9]{4}$') self.current_file = None self.current_commit = None # self.to_commit = None # self.from_commit = None match = None while not self.eof() and match == None: if long_re.match(self.lines[self.line_idx]) != None: self.current_commit = long_re.match(self.lines[self.line_idx]).group(1) else: match = filename_re.match(self.lines[self.line_idx]) self.line_idx += 1 if match == None: return None self.current_file = match.group(2) log.debug("Current file set to {} ".format(self.current_file)) added, removed, modified = {}, {}, {} # self.parse_short_commit_hash() # if self.to_commit == None or self.from_commit == None: # log.error("Failure loading commits for {} from '{}'".format(self.current_file, self.lines[self.line_idx])) # return None self.line_idx += 1 while not self.eof(): match = filename_re.match(self.lines[self.line_idx]) if match != None: break if long_re.match(self.lines[self.line_idx]) != None: self.current_commit = None break self.parse_next_commit(added, removed, modified) if not self.current_file in self.changes: self.changes[self.current_file] = added, removed, modified else: # TODO maybe check for errors self.changes[self.current_file][0].update(added) self.changes[self.current_file][1].update(removed) self.changes[self.current_file][2].update(modified) self.current_file = None return True def get_all_changes(self): changes = [] for file in self.changes: changes += list(self.changes[file][0].values()) + \ list(self.changes[file][1].values()) + \ list(self.changes[file][2].values()) return changes def parse(self): while not self.eof() and self.parse_next_file_changes(): pass return self.get_all_changes() def main(): parser = argparse.ArgumentParser(description='Parses a diff, returns changed lines') parser.add_argument('diff', type=str, help='the filename of the diff to parse') parser.add_argument('-v', '--verbose', action='count', default=0, help="increases log verbosity for each occurence.") args = parser.parse_args() log.setLevel([logging.WARNING, logging.INFO, logging.DEBUG][min(2,args.verbose)]) parser = DiffParser(filename=args.diff) log.info("{}".format(parser.parse())) if __name__ == "__main__": logging.basicConfig(stream=sys.stderr, format='%(name)s %(levelname)s %(message)s') main()
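# A short usage sketch (hypothetical diff): the parser can also be fed an
# in-memory list of lines through `diff_content` instead of a file path.
# Assumes `models.LineChange` is importable, as in the imports above; call
# _demo() to try it (main() above still provides the real CLI).
def _demo():
    diff_lines = [
        'diff --git a/foo.py b/foo.py\n',
        'index 1234567..89abcde 100644\n',
        '--- a/foo.py\n',
        '+++ b/foo.py\n',
        '@@ -1,2 +1,2 @@\n',
        '-old line\n',
        '+new line\n',
        ' context\n',
    ]
    parser = DiffParser(diff_content=diff_lines)
    for change in parser.parse():
        # Expect a single LineChange marking line 1 of foo.py as modified.
        print(change)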
chrisma/marvin
parse.py
Python
mit
8,492
"""Module containing class `RecordingImporter`.""" from pathlib import Path import itertools import logging import os from django.db import transaction from vesper.command.command import CommandExecutionError from vesper.django.app.models import ( DeviceConnection, Job, Recording, RecordingChannel, RecordingFile) from vesper.singleton.recording_manager import recording_manager from vesper.util.bunch import Bunch import vesper.command.command_utils as command_utils import vesper.command.recording_utils as recording_utils import vesper.util.file_type_utils as file_type_utils import vesper.util.signal_utils as signal_utils import vesper.util.time_utils as time_utils class RecordingImporter: """ Importer for recordings already stored in files on the Vesper server. The recordings to be imported are specified in the `paths` argument as server-side directory and file paths. Files from directories can be imported either recursively or non-recursively according to the `recursive` argument. The import does not copy or move recordings: it stores the existing paths of their files for future reference. The importer obtains recording metadata for imported files with the aid of a recording file parser extension, specified by the `recording_file_parser` argument. """ extension_name = 'Recording Importer' def __init__(self, args): self.paths = command_utils.get_required_arg('paths', args) self.recursive = command_utils.get_optional_arg( 'recursive', args, True) spec = command_utils.get_optional_arg('recording_file_parser', args) self.file_parser = recording_utils.create_recording_file_parser(spec) def execute(self, job_info): self._job = Job.objects.get(id=job_info.job_id) self._logger = logging.getLogger() try: recordings = self._get_new_recordings() self._log_header(recordings) with transaction.atomic(): self._import_recordings(recordings) except Exception as e: self._logger.error( f'Recording import failed with an exception.\n' f'The exception message was:\n' f' {str(e)}\n' f'The archive was not modified.\n' f'See below for exception traceback.') raise else: self._log_imports(recordings) return True def _get_new_recordings(self): files = self._get_unimported_disk_files() return recording_utils.group_recording_files(files) def _get_unimported_disk_files(self): db_files = RecordingFile.objects.all() db_file_paths = frozenset(r.path for r in db_files) disk_file_infos = list(itertools.chain.from_iterable( self._get_path_recording_file_infos(path) for path in self.paths)) unimported_disk_files = [] for info in disk_file_infos: path = str(info.relative_path) if path not in db_file_paths: file = self._parse_recording_file(info.absolute_path) file.path = info.relative_path _set_recording_file_channel_info(file) unimported_disk_files.append(file) return unimported_disk_files def _get_path_recording_file_infos(self, path): if os.path.isdir(path): return self._get_dir_recording_file_infos(path) else: file = self._get_recording_file_info(path) return [] if file is None else [file] def _get_dir_recording_file_infos(self, path): files = [] for (dir_path, dir_names, file_names) in os.walk(path): for file_name in file_names: file_path = os.path.join(dir_path, file_name) file = self._get_recording_file_info(Path(file_path)) if file is not None: files.append(file) if not self.recursive: # Stop `os.walk` from descending into subdirectories. 
del dir_names[:] return files def _get_recording_file_info(self, file_path): if not file_type_utils.is_wave_file(file_path): return None else: return self._get_recording_file_path_info(file_path) def _get_recording_file_path_info(self, file_path): if file_path.is_absolute(): if not file_path.exists(): raise CommandExecutionError( f'Purported recording file "{file_path}" does not exist.') return self._get_absolute_path_info(file_path) else: # path is relative return self._get_relative_path_info(file_path) def _get_relative_path_info(self, file_path): rm = recording_manager try: abs_path = rm.get_absolute_recording_file_path(file_path) except ValueError: self._handle_bad_recording_file_path( file_path, 'could not be found in', rm) return Bunch(absolute_path=abs_path, relative_path=file_path) def _handle_bad_recording_file_path(self, file_path, condition, manager): dir_paths = manager.recording_dir_paths if len(dir_paths) == 1: s = f'the recording directory "{dir_paths[0]}"' else: path_list = str(list(dir_paths)) s = f'any of the recording directories {path_list}' raise CommandExecutionError( f'Recording file "{file_path}" {condition} {s}.') def _get_absolute_path_info(self, file_path): rm = recording_manager try: _, rel_path = rm.get_relative_recording_file_path(file_path) except ValueError: self._handle_bad_recording_file_path(file_path, 'is not in', rm) return Bunch(absolute_path=file_path, relative_path=rel_path) def _parse_recording_file(self, file_path): try: file = self.file_parser.parse_file(str(file_path)) except ValueError as e: raise CommandExecutionError( f'Error parsing recording file "{file_path}": {str(e)}') if file.recorder is None: file.recorder = _get_recorder(file) return file def _partition_recordings(self, recordings): new_recordings = [] old_recordings = [] for r in recordings: if self._recording_exists(r): old_recordings.append(r) else: new_recordings.append(r) return (new_recordings, old_recordings) def _recording_exists(self, recording): try: db_recording = Recording.objects.get( station=recording.station, recorder=recording.recorder, start_time=recording.start_time) except Recording.DoesNotExist: return False else: # found recording in database with same station, recorder, # and start time as files on disk if db_recording.length != recording.length: # length of recording in database differs from length # of files # In this case we warn about the problem but do not self._logger.warning( f'For recording "{str(db_recording)}", length ' f'{db_recording.length} of recording indicated in ' f'archive database differs from length ' f'{recording.length} of files found on disk. 
No ' f'action will be taken regarding this recording.') return True def _log_header(self, recordings): log = self._logger.info recording_count = len(recordings) if recording_count == 0: log('Found no new recordings at the specified paths.') log('No recordings will be imported.') else: log(f'Found {recording_count} new recordings at the ' f'specified paths.') log('The new recordings will be imported.') def _import_recordings(self, recordings): for r in recordings: end_time = signal_utils.get_end_time( r.start_time, r.length, r.sample_rate) creation_time = time_utils.get_utc_now() recording = Recording( station=r.station, recorder=r.recorder, num_channels=r.num_channels, length=r.length, sample_rate=r.sample_rate, start_time=r.start_time, end_time=end_time, creation_time=creation_time, creating_job=self._job) recording.save() r.model = recording for channel_num in range(r.num_channels): recorder_channel_num = r.recorder_channel_nums[channel_num] mic_output = r.mic_outputs[channel_num] channel = RecordingChannel( recording=recording, channel_num=channel_num, recorder_channel_num=recorder_channel_num, mic_output=mic_output) channel.save() start_index = 0 for file_num, f in enumerate(r.files): # We store all paths in the archive database as POSIX # paths, even on Windows, for portability, since Python's # `pathlib` module recognizes the slash as a path separator # on all platforms, but not the backslash. path = f.path.as_posix() file = RecordingFile( recording=recording, file_num=file_num, start_index=start_index, length=f.length, path=path) file.save() start_index += f.length def _log_imports(self, recordings): for r in recordings: log = self._logger.info log(f'Imported recording {str(r.model)} with files:') for f in r.files: log(f' {f.path.as_posix()}') def _get_recorder(file): end_time = signal_utils.get_end_time( file.start_time, file.length, file.sample_rate) station_recorders = file.station.get_station_devices( 'Audio Recorder', file.start_time, end_time) if len(station_recorders) == 0: raise CommandExecutionError( f'Could not find recorder for recording file "{file.path}".') elif len(station_recorders) > 1: raise CommandExecutionError( f'Found more than one possible recorder for file "{file.path}".') else: return station_recorders[0].device def _set_recording_file_channel_info(file): mic_outputs = _get_recorder_mic_outputs(file.recorder, file.start_time) if file.recorder_channel_nums is None: # file name did not indicate recorder channel numbers if len(mic_outputs) != file.num_channels: # number of connected mic outputs does not match number # of file channels raise CommandExecutionError( f'Could not infer recorder channel numbers for ' f'recording file "{file.path}".') else: # number of connected mic outputs matches number of file # channels # We assume that recorder inputs map to file channel numbers # in increasing order. file.recorder_channel_nums = tuple(sorted(mic_outputs.keys())) file.mic_outputs = tuple( _get_mic_output(mic_outputs, i, file.path) for i in file.recorder_channel_nums) def _get_recorder_mic_outputs(recorder, time): """ Gets a mapping from recorder input channel numbers to connected microphone outputs for the specified recorder and time. 
""" connections = DeviceConnection.objects.filter( input__device=recorder, output__device__model__type='Microphone', start_time__lte=time, end_time__gt=time) # print('recording_importer.get_recorder_mic_outputs', connections.query) return dict((c.input.channel_num, c.output) for c in connections) def _get_mic_output(mic_outputs, channel_num, file_path): try: return mic_outputs[channel_num] except KeyError: raise CommandExecutionError( f'Could not find microphone output connected to recorder input ' f'{channel_num} for recording file "{file_path}".')
HaroldMills/Vesper
vesper/command/recording_importer.py
Python
mit
13,776
# author   : Johann-Mattis List
# email    : mattis.list@uni-marburg.de
# created  : 2015-03-23 14:23
# modified : 2015-03-23 14:23
"""
Compare the glosses of two wordlists and print the matching entries.
"""

__author__="Johann-Mattis List"
__date__="2015-03-23"

from lingpy import *
import sys

list1 = csv2list(sys.argv[1])
header1 = list1[0]
list2 = csv2list(sys.argv[2])
header2 = list2[0]

gidx1 = header1.index('GLOSS')
gidx2 = header2.index('GLOSS')

def find_break(word):
    """Return the part of a gloss before the first bracket or separator."""
    out = ''
    for char in word:
        if char not in '([{,;':
            out += char
        else:
            break
    return out.strip()

matches = {}
for i,a in enumerate(list1[1:]):
    for j,b in enumerate(list2[1:]):
        wA = find_break(a[gidx1])
        wB = find_break(b[gidx2])

        if wA == wB or wA in b[gidx2].split(' ') or wB in a[gidx1].split(' '):
            try:
                matches[i+1] += [(wA,a[gidx1],j,wB,b[gidx2])]
            except KeyError:
                matches[i+1] = [(wA,a[gidx1],j,wB,b[gidx2])]
        else:
            if i+1 not in matches:
                matches[i+1] = []

# If a gloss matched several candidates, keep only the candidate with the
# smallest normalized edit distance.
for m in matches:
    if len(matches[m]) > 1:
        dst = []
        for k in range(len(matches[m])):
            dst += [edit_dist(matches[m][k][0], matches[m][k][4],
                normalized=True)]
        mind = min(dst)
        midx = dst.index(mind)
        matches[m] = [matches[m][midx]]

count = 0
for k in matches:
    if matches[k]:
        for line in matches[k]:
            print(k,'\t',line[1],'\t',line[2],'\t',line[4])
    else:
        print(k,'\t',list1[k][gidx1],'\t','?\t?')
        count += 1
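# A short illustration (hypothetical glosses) of what find_break does: it
# truncates a gloss at the first bracket or separator, so variant renderings
# of the same concept can still match.
assert find_break('to cut (wood)') == 'to cut'
assert find_break('hand, arm') == 'hand'
assert find_break('dog') == 'dog'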
digling/sinotibetan
scripts/compare_lists.py
Python
gpl-2.0
1,606
# -*- coding: utf-8 -*- # # satcfe/tests/conftest.py # # Copyright 2015 Base4 Sistemas Ltda ME # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from decimal import Decimal import pytest from unidecode import unidecode from satcomum import constantes from satcfe.base import BibliotecaSAT from satcfe.clientelocal import ClienteSATLocal from satcfe.entidades import CFeCancelamento from satcfe.entidades import CFeVenda from satcfe.entidades import Destinatario from satcfe.entidades import Emitente from satcfe.entidades import LocalEntrega from satcfe.entidades import Detalhamento from satcfe.entidades import ProdutoServico from satcfe.entidades import Imposto from satcfe.entidades import ICMSSN102 from satcfe.entidades import PISSN from satcfe.entidades import COFINSSN from satcfe.entidades import MeioPagamento def pytest_addoption(parser): parser.addoption('--cnpj-ac', action='store', default='16716114000172', help='CNPJ da empresa desenvolvedora da AC (apenas digitos)') parser.addoption('--emitente-cnpj', action='store', default='08723218000186', help='CNPJ do estabelecimento emitente (apenas digitos)') parser.addoption('--emitente-ie', action='store', default='149626224113', help='Inscricao estadual do emitente (apenas digitos)') parser.addoption('--emitente-im', action='store', default='123123', help='Inscricao municipal do emitente (apenas digitos)') parser.addoption('--emitente-uf', action='store', default='SP', help='Sigla da unidade federativa do estabelecimento emitente') parser.addoption('--emitente-issqn-regime', action='store', default='3', help='Regime especial de tributacao do ISSQN ({}) do emitente, ' 'em casos de testes de emissao de venda e/ou ' 'cancelamento'.format(_valores_possiveis( constantes.C15_CREGTRIBISSQN_EMIT))) parser.addoption('--emitente-issqn-rateio', action='store', default='N', help='Indicador de rateio do desconto sobre o subtotal para ' 'produtos tributados no ISSQN ({}) do emitente, ' 'em casos de testes de emissao de venda e/ou ' 'cancelamento'.format(_valores_possiveis( constantes.C16_INDRATISSQN_EMIT))) parser.addoption('--codigo-ativacao', action='store', default='12345678', help='Codigo de ativacao configurado no equipamento SAT') parser.addoption('--assinatura-ac', action='store', default=constantes.ASSINATURA_AC_TESTE, help='Conteudo da assinatura da AC') parser.addoption('--numero-caixa', action='store', default=1, type=int, help='Numero do caixa de origem') parser.addoption('--lib-caminho', action='store', default='sat.dll', help='Caminho para a biblioteca SAT') parser.addoption('--lib-convencao', action='store', choices=[constantes.STANDARD_C, constantes.WINDOWS_STDCALL], default=constantes.STANDARD_C, type=int, help='Convencao de chamada para a biblioteca SAT ' '({})'.format(_valores_possiveis( constantes.CONVENCOES_CHAMADA))) # TODO: implementar testes para acesso compartilhado ao equipamento SAT # --sathub-host 127.0.0.1 # --sathub-port 8080 # --sathub-baseurl /hub/v1/ # --sathub-username # --sathub-password # opções para ignorar funções SAT específicas 
parser.addoption('--skip-funcoes-sat', action='store_true', help='Ignora testes de todas as funcoes SAT evitando qualquer ' 'acesso ao equipamento') parser.addoption('--skip-ativarsat', action='store_true', help='Ignora funcao `AtivarSAT`') parser.addoption('--skip-comunicarcertificadoicpbrasil', action='store_true', help='Ignora funcao `ComunicarCertificadoICPBRASIL`') parser.addoption('--skip-enviardadosvenda', action='store_true', help='Ignora funcao `EnviarDadosVenda`') parser.addoption('--skip-cancelarultimavenda', action='store_true', help='Ignora funcao `CancelarUltimaVenda`') parser.addoption('--skip-consultarsat', action='store_true', help='Ignora funcao `ConsultarSAT`') parser.addoption('--skip-testefimafim', action='store_true', help='Ignora funcao `TesteFimAFim`') parser.addoption('--skip-consultarstatusoperacional', action='store_true', help='Ignora funcao `ConsultarStatusOperacional`') parser.addoption('--skip-consultarnumerosessao', action='store_true', help='Ignora funcao `ConsultarNumeroSessao`') parser.addoption('--skip-configurarinterfacederede', action='store_true', help='Ignora funcao `ConfigurarInterfaceDeRede`') parser.addoption('--skip-associarassinatura', action='store_true', help='Ignora funcao `AssociarAssinatura`') parser.addoption('--skip-atualizarsoftwaresat', action='store_true', help='Ignora funcao `AtualizarSoftwareSAT`') parser.addoption('--skip-extrairlogs', action='store_true', help='Ignora funcao `ExtrairLogs`') parser.addoption('--skip-bloquearsat', action='store_true', help='Ignora funcao `BloquearSAT`') parser.addoption('--skip-desbloquearsat', action='store_true', help='Ignora funcao `DesbloquearSAT`') parser.addoption('--skip-trocarcodigodeativacao', action='store_true', help='Ignora funcao `TrocarCodigoDeAtivacao`') @pytest.fixture(scope='module') def clientesatlocal(request): funcoes = ClienteSATLocal( BibliotecaSAT( request.config.getoption('--lib-caminho'), convencao=request.config.getoption('--lib-convencao')), codigo_ativacao=request.config.getoption('--codigo-ativacao')) return funcoes @pytest.fixture(scope='module') def cfevenda(request): _opcao = request.config.getoption cfe = CFeVenda( CNPJ=_opcao('--cnpj-ac'), signAC=_opcao('--assinatura-ac'), numeroCaixa=_opcao('--numero-caixa'), emitente=Emitente( CNPJ=_opcao('--emitente-cnpj'), IE=_opcao('--emitente-ie'), IM=_opcao('--emitente-im'), cRegTribISSQN=_opcao('--emitente-issqn-regime'), indRatISSQN=_opcao('--emitente-issqn-rateio')), destinatario=Destinatario( CPF='11122233396', xNome=u'João de Teste'), entrega=LocalEntrega( xLgr='Rua Armando Gulim', nro='65', xBairro=u'Parque Glória III', xMun='Catanduva', UF='SP'), detalhamentos=[ Detalhamento( produto=ProdutoServico( cProd='123456', xProd='BORRACHA STAEDTLER pvc-free', CFOP='5102', uCom='UN', qCom=Decimal('1.0000'), vUnCom=Decimal('5.75'), indRegra='A'), imposto=Imposto( icms=ICMSSN102(Orig='2', CSOSN='500'), pis=PISSN(CST='49'), cofins=COFINSSN(CST='49'))), ], pagamentos=[ MeioPagamento( cMP=constantes.WA03_DINHEIRO, vMP=Decimal('10.00')), ]) return cfe def _valores_possiveis(opcoes): return '; '.join(['{}-{}'.format(v, unidecode(s)) for v, s in opcoes])
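# A minimal sketch (hypothetical test module) of how the fixtures above are
# consumed; pytest injects them by parameter name, so a test only needs to
# declare the fixtures it uses:
#
#     def test_enviar_dados_venda(clientesatlocal, cfevenda):
#         resposta = clientesatlocal.enviar_dados_venda(cfevenda)
#         assert resposta is not None
#
# (`enviar_dados_venda` is the expected client method here; adjust to the
# actual ClienteSATLocal API if it differs.)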
kmee/satcfe
satcfe/tests/conftest.py
Python
apache-2.0
8,960
#!/usr/bin/python
# coding: utf-8

"""
Generate graph of profit against Stratagem data for 2012.
"""

__author__ = 'riko'


import datetime

import matplotlib.pyplot as plt
import numpy as np
import pickle

import data_tools as dt
import models
import settings as stg


################################################################################
#                               Set variables.                                 #
################################################################################

SURFACE = None
model = models.DoubleEloModel()

################################################################################
#                               Run analysis.                                  #
################################################################################

data_file = stg.ROOT_PATH + "data/more_data.p"
data = pickle.load(open(data_file, "rb"))

bo3_mask = (data["match_type"] == "bo3")
data = data.rename(columns={"ID1":"Winner", "ID2":"Loser", "DATE_G_y":"Date",
                            "K1":"Winner_odds", "K2":"Loser_odds",
                            "match_type":"Best_of", "ID_C": "Surface"})
# Use .loc to avoid assigning through a chained index.
data.loc[bo3_mask, "Best_of"] = 3
data.loc[~bo3_mask, "Best_of"] = 5

if SURFACE is not None:
    data = data[data["Surface"] == SURFACE]

data = data.sort("Date")

train_range = [datetime.date(2003, 1, 1), datetime.date(2012, 1, 1)]
train_data = dt.filter_data_time_range(data, train_range)
model.train(train_data, True)

test_range = [datetime.date(2012, 1, 1), datetime.date(2015, 1, 1)]
test_data = dt.filter_data_time_range(data, test_range)
df = model.test(test_data, True)

n = np.size(df["bet_amount"])
print "n: ", n

v_df = df
bet_on_p1_mask = ( 1.0 / np.array(v_df["win_prob"]) < np.array(v_df["Winner_odds"]) ) * df["bet_amount"]
bet_on_p2_mask = ( 1.0 / (1.-np.array(v_df["win_prob"])) < np.array(v_df["Loser_odds"]) ) * df["bet_amount"]

ret = np.sum(v_df[bet_on_p1_mask > 0]["Winner_odds"] * df["bet_amount"])
bet_amount = np.sum(bet_on_p1_mask) + np.sum(bet_on_p2_mask)
bets_done = np.sum(bet_on_p1_mask > 0) + np.sum(bet_on_p2_mask > 0)
r = bets_done, bet_amount, ret

print "bets taken: ", r[0]
print "return: ", r[2] / r[1] - 1.0

result = np.cumsum(bet_on_p1_mask * (v_df["Winner_odds"]-1) - bet_on_p2_mask, axis=0)
x = [i for i in range(1, np.size(result) + 1)]

# Plot the cumulative profit over the test matches.
fig, ax = plt.subplots()
plt.plot(x, result, '-r')
ax.yaxis.grid(True, which='major')
plt.xlabel('Match number', fontsize=10)
plt.ylabel('Cumulative profit', fontsize=10)
plt.show()
erix5son/Tennis-Modelling
demos/double_elo_profit.py
Python
mit
2,546
#!/usr/bin/env python3
"""inject_repos.py - CI secret repos injection.
"""
import yaml
from lxml import etree
import argparse
from six import iteritems


def main():
    repos_file, beaker_file = parse_args()
    repos = load_secret_data(repos_file)
    inject_repos(repos, beaker_file)


def parse_args():
    description_msg = 'Resolve and filter secret data'
    parser = argparse.ArgumentParser(description=description_msg)
    parser.add_argument(
        "-f", "--secret-file", type=str,
        help=("Path to secret file.")
    )
    parser.add_argument(
        "-b", "--beaker-file", type=str,
        help=("Path to beaker file.")
    )
    args = parser.parse_args()
    return args.secret_file, args.beaker_file


def load_secret_data(file_to_load=None):
    """Load yaml file from a given location

    :param str file_to_load: (optional) Path to the file we need to load.

    :rtype: list
    :returns: A list with the file's data. An empty list if data was not
              found.
    """
    try:
        with open(file_to_load, 'r') as sf:
            return yaml.safe_load(sf)
    except IOError:
        return []


def inject_repos(repos, beaker_file):
    # Keep CDATA sections intact so the beaker XML round-trips unchanged.
    parser = etree.XMLParser(strip_cdata=False)
    tree = etree.parse(beaker_file, parser)
    root = tree.getroot()
    for repo_name, url in iteritems(repos):
        # Fixed positional path into the beaker job XML; the addressed node
        # is the one that holds the <repo> entries.
        etree.SubElement(root[1][0][4], "repo",
                         attrib={"name": repo_name, "url": url})
    tree.write(
        beaker_file, pretty_print=True,
        xml_declaration=True, encoding="utf-8"
    )


if __name__ == "__main__":
    main()
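# A short, self-contained sketch (hypothetical XML) of the SubElement call
# used above: appending <repo> entries to a node of an existing tree. The
# real script addresses its target node positionally (root[1][0][4]) inside
# the beaker job XML; call _demo() to try this version.
def _demo():
    root = etree.fromstring(
        '<job><recipeSet><recipe><repos/></recipe></recipeSet></job>'
    )
    repos_node = root.find('.//repos')
    for name, url in {'extra': 'http://example.com/repo'}.items():
        etree.SubElement(repos_node, 'repo', attrib={'name': name, 'url': url})
    print(etree.tostring(root, pretty_print=True).decode('utf-8'))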
oVirt/jenkins
stdci_libs/inject_repos.py
Python
gpl-3.0
1,611
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bill_board', '0003_auto_20151025_1339'), ] operations = [ migrations.AlterField( model_name='request', name='requires_presence', field=models.BooleanField(default=False), ), ]
refugeehackathon/interpreteer-backend
bill_board/migrations/0004_auto_20151109_1707.py
Python
gpl-2.0
420
# -*- coding: utf-8 -*- __author__ = "Sergey Aganezov" __email__ = "aganezov@gwu.edu" __status__ = "develop"
aganezov/fga
tests/assembly/fgr/__init__.py
Python
gpl-3.0
110
#!/usr/bin/env python

# Copyright (C) 2006-2021  Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/

from essentia_test import *


class TestLoudnessVickers(TestCase):

    def testEmpty(self):
        self.assertEqual(LoudnessVickers()([]), -100)

    def testOne(self):
        self.assertAlmostEqual(LoudnessVickers()([1]), -32.0094032288)

    def testSilence(self):
        self.assertEqual(LoudnessVickers()([0]*2000), -100)

    def testInvalidSampleRate(self):
        self.assertConfigureFails(LoudnessVickers(), {'sampleRate': 44101})
        self.assertConfigureFails(LoudnessVickers(), {'sampleRate': 44099})

    def testDifferentFrequencies(self):
        # The loudness of a 1000 Hz signal should be higher than the loudness
        # of a 100 Hz signal.
        from math import sin, pi
        sr = 44100
        size = 1*sr
        sine1 = [sin(2.0*pi*100.0*i/sr) for i in range(size)]
        sine2 = [sin(2.0*pi*1000.0*i/sr) for i in range(size)]
        fc1 = FrameCutter()
        fc2 = FrameCutter()
        frame1 = fc1(sine1)
        frame2 = fc2(sine2)
        while len(frame1) != 0 and len(frame2) != 0:
            # assertTrue(a, b) would treat b as the failure message and always
            # pass; compare the two loudness values explicitly instead.
            self.assertTrue(
                LoudnessVickers()(frame2) > LoudnessVickers()(frame1))
            frame1 = fc1(sine1)
            frame2 = fc2(sine2)

    def testFullScaleSquare(self):
        # the vicker's loudness of a full scale square wave should
        # be 0dB, but it isn't (?)
        sr = 44100
        freq = 1000
        step = 0.5*sr/freq
        size = 1*sr
        val = 1
        square = zeros(size)
        for i in range(size):
            square[i] = val
            if i%step < 1.0 :
                val *= -1

        result = 0
        nFrame = 0
        for frame in FrameGenerator(square):
            # Check each frame, not the whole signal, against the expected
            # loudness.
            self.assertAlmostEqual(LoudnessVickers()(frame), result, 0.15)
            nFrame += 1


suite = allTests(TestLoudnessVickers)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
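# A minimal usage sketch outside the test harness: computing the Vickers
# loudness of a signal frame by frame, using the same FrameCutter pattern as
# testDifferentFrequencies above (assumes the standard-mode essentia Python
# bindings imported via essentia_test). Call _demo() to try it.
def _demo():
    from math import sin, pi
    sr = 44100
    sine = [sin(2.0 * pi * 1000.0 * i / sr) for i in range(sr)]
    fc = FrameCutter()
    loudness = LoudnessVickers()
    values = []
    frame = fc(sine)
    while len(frame) != 0:
        values.append(loudness(frame))
        frame = fc(sine)
    print(sum(values) / len(values))  # average frame loudness in dB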
MTG/essentia
test/src/unittests/temporal/test_loudnessvickers.py
Python
agpl-3.0
2,639
import sqlite3 def main(): conn = sqlite3.connect("../database") cursor = conn.cursor() # I claim this gives the current score. Another formulation is # select trackid, score, max(scoreid) from scores group by trackid; # cursor.execute("""select trackid, score from scores # group by trackid order by scoreid""") # cursor.execute("""select scores.trackid, score, path from scores, tracks # where scores.trackid = tracks.trackid # group by scores.trackid order by scoreid""") cursor.execute("""select score, path from tracks where score is not null and missing is not 1""") results = cursor.fetchall() for result in results: print(str(result[0]) + "\t" + result[1]) if __name__ == '__main__': main()
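# A small, self-contained check (hypothetical schema) of the "current score"
# claim in the comments above: with one scores row appended per change, the
# row carrying max(scoreid) per trackid is the latest score. Call _demo() to
# try it against an in-memory database.
def _demo():
    conn = sqlite3.connect(":memory:")
    c = conn.cursor()
    c.execute("create table scores (scoreid integer primary key, "
              "trackid integer, score integer)")
    c.executemany("insert into scores (trackid, score) values (?, ?)",
                  [(1, 10), (2, 50), (1, 80)])
    # SQLite returns the bare columns from the row that holds the max().
    c.execute("select trackid, score, max(scoreid) from scores "
              "group by trackid")
    print(c.fetchall())  # track 1 -> 80 (latest), track 2 -> 50
    conn.close()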
erbridge/NQr
src/export.py
Python
bsd-3-clause
834
#! /usr/bin/env python

# The MIT License (MIT)
#
# Copyright (c) 2014 Wen Shan Chang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Class used to draw the quadtree. This was cobbled together quickly and is
# fairly 'hacky'.
#

__author__ = 'Chang'

import time
import Queue

from Tkinter import Tk, Canvas, Button, Label, StringVar, NW, SE, FLAT


class DisplayQuadTree(object):
    def __init__(self, width, height, scale=1, show_grid=False):
        self._scale = scale
        self._grid_width = 1 if show_grid else 0
        self.size = (width*scale, height*scale)
        self._is_animate = False
        self._anime_time = 0.1

        self._tk_root = Tk()
        self._canvas = Canvas(self._tk_root, width=self.size[0], height=self.size[1])
        self._canvas.pack()

        self._button = Button(self._tk_root, text='Finished', command=self._stop_animate)
        self._button.configure(width=10, relief=FLAT)
        self._canvas.create_window(10, 10, anchor=NW, window=self._button)

        self._labelstr = StringVar()
        self._labelstr.set('0%')
        self._label = Label(self._tk_root, textvariable=self._labelstr)
        self._canvas.create_window(self.size[0] - 10, self.size[1] - 10, anchor=SE, window=self._label)

    def static(self, qtree):
        """ Draw the final output. """
        if qtree is None:
            return

        stack = [qtree]
        while len(stack):
            qnode = stack.pop()

            if qnode.children:
                for child in qnode.children:
                    stack.append(child)
            else:
                colour = '#{0:02x}{1:02x}{2:02x}'.format(*qnode.ave_color)
                self._canvas.create_rectangle(qnode.x * self._scale,
                                              qnode.y * self._scale,
                                              (qnode.x + qnode.width) * self._scale,
                                              (qnode.y + qnode.height) * self._scale,
                                              width=self._grid_width,
                                              outline='grey',
                                              fill=colour)
        self._labelstr.set('100%')

    def _stop_animate(self):
        """ Stop the animation. Called by the "Stop" button """
        self._is_animate = False
        self._button.configure(text='Finished')

    def animate(self, qtree):
        """ Animate the development of the quadtree as it was generated.
        Press the "Stop" button to stop the animation.
        """
        if qtree is None:
            return

        self._is_animate = True
        self._button.configure(text='Stop')

        q = Queue.Queue()
        q.put(qtree)
        count, max_qtrees = 0.0, qtree.sq_num
        while not q.empty() and self._is_animate:
            qnode = q.get()

            if qnode.children:
                for child in qnode.children:
                    q.put(child)

            count += 1.0
            colour = '#{0:02x}{1:02x}{2:02x}'.format(*qnode.ave_color)
            self._canvas.create_rectangle(qnode.x * self._scale,
                                          qnode.y * self._scale,
                                          (qnode.x + qnode.width) * self._scale,
                                          (qnode.y + qnode.height) * self._scale,
                                          width=self._grid_width,
                                          outline='grey',
                                          fill=colour)
            self._canvas.update()
            self._labelstr.set('{}%'.format(int(count/max_qtrees * 100)))
            time.sleep(self._anime_time)

        self._stop_animate()

    def show(self):
        """ Call this to start the display. """
        self._tk_root.mainloop()
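# A short usage sketch (hypothetical node type): DisplayQuadTree only relies
# on a few attributes of its quadtree nodes (x, y, width, height, ave_color,
# children, and sq_num for animate()), so a simple stand-in class is enough
# to try the static renderer. Call _demo() to open the window.
class _DemoNode(object):
    def __init__(self, x, y, width, height, ave_color, children=None):
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.ave_color = ave_color
        self.children = children or []
        self.sq_num = 5  # total node count, used by animate() for progress


def _demo():
    children = [_DemoNode(0, 0, 8, 8, (200, 40, 40)),
                _DemoNode(8, 0, 8, 8, (40, 200, 40)),
                _DemoNode(0, 8, 8, 8, (40, 40, 200)),
                _DemoNode(8, 8, 8, 8, (200, 200, 40))]
    root = _DemoNode(0, 0, 16, 16, (128, 128, 128), children)
    display = DisplayQuadTree(16, 16, scale=20, show_grid=True)
    display.static(root)
    display.show()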
wschang/DrawQtree
display_qtree.py
Python
mit
4,937
from __future__ import division import importlib import re import time class SkeletonMode(object): """Skeleton (base) mode. This mode can take two commands: - change to another mode - print help It is also able to send messages to the channel. This mode must superclass all other modes, or you will likely get undesired behavior.""" def __init__(self, bot): self.bot = bot # information about where we are in the meeting self._in_meeting = False def msg(self, channel, msg, *args): """Send a message to the given channel.""" # Unicode makes Twisted (or SOMETHING) sad. ASCII. self.bot.msg(channel, (msg % args).encode('ascii', 'ignore')) def exec_command(self, command, command_type, user, channel, *args): """Execute an arbitrary command, provided it is found on the mode.""" # if this is a command beginning with a comma, # then inform the user that the comma is superfluous if command.startswith(','): self.msg(user, 'A leading comma is only necessary for chair ' 'commands.') return # find the correct command and execute it method = '%s_%s' % (command_type, command) if hasattr(self, method): if command_type == 'chair': return getattr(self, method)(user, channel, *args) else: return getattr(self, method)(user, *args) # whups, we clearly weren't able to find the command...bork out help_command = 'help' if command_type == 'chair': help_command = ',' + help_command self.msg(channel, "Sorry, I don't recognize that command. Issue `%s` for a command list." % help_command) else: self.msg(user, "Sorry, I don't recognize that command. Issue `%s` for a command list." % help_command) def chair_mode(self, user, channel, new_mode=None, _silent=False): """Set the channel's mode. If no mode is provided, print out the mode we're in now. If the requested mode is "none", then set us into the base mode.""" # if no argument is given, print out the mode that # we are in now if not new_mode: mode_name = self.bot.mode.__class__.__module__.__name__.lower() if mode_name == 'base': mode_name = '(none)' self.msg(channel, "Current mode: %s" % mode_name[:-4]) return # okay, we were asked to *set* the mode -- do that now # sanity check: however, if we were given "none", that just # means set in base mode if new_mode.lower() == 'none': self.bot.mode = SkeletonMode(self.bot) if not _silent: self.msg(channel, 'Mode deactivated.') return try: mod = importlib.import_module('pycon_bot.modes.%s' % new_mode) self.bot.mode = mod.Mode(self.bot) self.msg(channel, 'Activated %s mode.' % new_mode) except (ImportError, AttributeError) as e: self.msg(channel, 'Unable to load mode `%s`: %s' % (new_mode, e)) def chair_help(self, user, channel, command=None): """Return a list of chair commands that we currently understand. If a specific command is given, print its docstring.""" return self._help(user, channel, 'chair', command=command) def private_help(self, user, command=None): """Return a list of private message commands that we currently understand. If a specific command is specified, print its docstring.""" return self._help(user, user, 'private', command=command) def _help(self, user, channel, command_type, command=None): # if an argument is given, print help about that specific command if command: command = command.replace(',', '') method = getattr(self, '%s_%s' % (command_type, command), None) # sanity check: does this method actually exist? if not method: help_command = 'help' if command_type == 'chair': help_command = ',%s' % help_command self.msg(channel, 'This command does not exist. Issue `%s` by itself for a command list.' 
% help_command) return # okay, now take the docstring and present it as help; however # we need to reformat my docstrings to be more IRC friendly -- specifically: # - change single `\n` to just spaces # - change double `\n` to single `\n` help_text = method.__doc__ help_text = re.sub(r'\\n[ ]+\\n', '|---|', help_text) help_text = re.sub(r'\s+', ' ', help_text) help_text = help_text.replace('|---|', '\n') self.msg(channel, help_text) return # okay, give a list of the commands available commands = [] for attr in dir(self): if callable(getattr(self, attr)) and attr.startswith('%s_' % command_type): if command_type == 'chair': command_name = ',%s' % attr[len(command_type) + 1:] else: command_name = attr[len(command_type) + 1:] commands.append(command_name) commands.sort() # now print out the list of commands to the channel self.msg(channel, 'I recognize the following %s commands:' % command_type) msg_queue = ' ' for i in range(0, len(commands)): command = commands[i] msg_queue += command if i % 3 != 2 and i != len(commands) - 1: msg_queue += (' ' * (20 - (len(command) * 2))) else: self.msg(channel, msg_queue) msg_queue = ' ' class BaseMode(SkeletonMode): """Base class for all modes, handling all the base commands.""" def __init__(self, bot): super(BaseMode, self).__init__(bot) self.reported_in = set() self.nonvoters = set() @property def nonvoter_list(self): return ', '.join(self.nonvoters) if self.nonvoters else 'none' def names(self, channel): """Prompt everyone in the channel to write their names. Note who has done so in order to easily compile a non-voter list.""" self.msg(channel, 'Please write your full name in the channel, for the meeting records.') self.bot.state_handler = self.handler_user_names def chair_nonvoter(self, user, channel, *users): """Set the given user to a non-voter. If no user is specified, then print the list of all non-voters. Exception: If we're just starting the meeting, then set anyone who has not reported in to be a non-voter.""" # this is a special command if we're in the "reporting in" phase; # set as a non-voter everyone who hasn't reported in yet # note: also adds as a non-voter the person who ran the command if self.bot.state_handler == self.handler_user_names and not users: def _(names): laggards = set(names) - self.reported_in - self.nonvoters laggards.remove(self.bot.nickname) laggards.add(user) if laggards: self.nonvoters.update(laggards) self.msg(channel, 'Will no longer pester %s.' % ', '.join(laggards)) self.bot.names(channel).addCallback(_) return # run normally users = set(users) users.discard(self.bot.nickname) if not users: self.msg(channel, "Nonvoters: %s.", self.nonvoter_list) return self.nonvoters.update(users) self.msg(channel, "Will no longer pester %s.", ', '.join(users)) def chair_voter(self, user, channel, *users): """Set a given user to be a voter. 
If no user is specified, print the list of all voters.""" users = set(users) users.discard(self.bot.nickname) if not users: self.msg(channel, "Nonvoters: %s.", self.nonvoter_list) return if '*' in users: self.nonvoters.clear() self.msg(channel, "Will now pester everyone.") else: self.nonvoters.difference_update(users) self.msg(channel, "Will now pester %s.", ', '.join(users)) def chair_pester(self, user, channel): """Pester the laggards.""" # special case: if we're in the "reporting in" phase, then check for that # instead of checking for votes like we'd normally do if self.bot.state_handler == self.handler_user_names: def _(names): laggards = set(names) - self.reported_in - self.nonvoters laggards.remove(self.bot.nickname) if laggards: self.msg(channel, '%s: ping' % ', '.join(laggards)) else: self.msg(channel, 'Everyone is accounted for!') self.bot.names(channel).addCallback(_) return else: # okay, this is the normal situation case def _(names): laggards = (set(names) - set(self.current_votes.keys()) - self.nonvoters) laggards.remove(self.bot.nickname) if laggards: self.msg(channel, "Didn't vote: %s.", ", ".join(laggards)) else: self.msg(channel, "Everyone voted.") # actually do the pestering self.bot.names(channel).addCallback(_) def handler_user_names(self, user, channel, message): """As users write their names, note that they've reported in, so we can see who isn't here and set them as non-voters.""" # this user has now reported in self.reported_in.add(user) # if this user is in the non-voter list, fix that if user in self.nonvoters and user not in self.bot.superusers: self.chair_voter(user, channel, user) def _seconds_to_text(self, seconds): """Convert a number of seconds, specified as an int or string, to a pretty string.""" # let's get started seconds = int(seconds) time_text = '' # sanity check: 0 seconds is a corner case; just return it back statically if seconds == 0: return '0 seconds' # deal with the minutes portion if seconds // 60 > 0: time_text += '%d minute' % (seconds // 60) if seconds // 60 != 1: time_text += 's' if seconds % 60: time_text += ', ' # deal with the seconds portion if seconds % 60: time_text += '%d second' % (seconds % 60) if seconds % 60 != 1: time_text += 's' return time_text def _minutes_to_text(self, minutes): """Convert a number of minutes, specified as a float, int, or string, to a pretty string.""" seconds = int(float(minutes) * 60) return self._seconds_to_text(seconds) def _english_list(self, l, conjunction='and'): """Return a string with a comma-separated list, with an "and" between the penultimate and ultimate list items.""" # sanity check: if there is only one list item, do nothing # except convert to a string if len(l) == 1: return '{0}'.format(*l) # sanity check: if there are two items, then join them with "and" but # don't use commas if len(l) == 2: return '{0} {conjunction} {1}'.format(*l, conjunction=conjunction) # okay, there are three or more items: I want the format to be # "a, b, c, and d" return '{initial}, {conjunction} {last}'.format( conjunction=conjunction, initial=', '.join(l[:-1]), last=l[-1], )
PyCon/pc-bot
pycon_bot/modes/base.py
Python
bsd-3-clause
12,485
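The exec_command dispatch in the pycon_bot mode module above resolves each command to a method named '<command_type>_<command>'. The following self-contained sketch mirrors that convention so the help and mode machinery can be read against a concrete call; the _StubBot and DemoMode classes are illustrative stand-ins and are not part of pycon_bot.

class _StubBot(object):
    """Minimal stand-in for the IRC bot object that SkeletonMode expects."""
    nickname = 'pycon-bot'

    def msg(self, channel, text):
        print('[%s] %s' % (channel, text))


class DemoMode(object):
    """Toy mode using the same '<type>_<command>' dispatch convention."""

    def __init__(self, bot):
        self.bot = bot

    def chair_ping(self, user, channel):
        self.bot.msg(channel, '%s: pong' % user)

    def exec_command(self, command, command_type, user, channel, *args):
        method = '%s_%s' % (command_type, command)   # e.g. 'chair_ping'
        if hasattr(self, method):
            return getattr(self, method)(user, channel, *args)
        self.bot.msg(channel, 'Unknown command: %s' % command)


DemoMode(_StubBot()).exec_command('ping', 'chair', 'alice', '#pycon-pc')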
#!/usr/bin/env python ############################################################# # ubi_reader # (c) 2013 Jason Pruitt (jrspruitt@gmail.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ############################################################# import os import sys import argparse from ubi_io import ubi_file from ubifs import ubifs, get_leb_size from ui.common import extract_files, output_dir if __name__ == '__main__': description = """Extract file contents of UBIFS image.""" usage = 'ubifs_extract_files.py [options] filepath' parser = argparse.ArgumentParser(usage=usage, description=description) parser.add_argument('-l', '--log-file', dest='logpath', help='Log output to file output/LOGPATH. (default: ubifs_output.log)') parser.add_argument('-k', '--keep-permissions', action='store_true', dest='permissions', help='Maintain file permissions, requires running as root. (default: False)') parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', help='Suppress warnings and non-fatal errors. (default: False)') parser.add_argument('-e', '--leb-size', type=int, dest='block_size', help='Specify LEB size.') parser.add_argument('-o', '--output-dir', dest='output_path', help='Specify output directory path.') parser.add_argument('filepath', help='File to extract file contents of.') if len(sys.argv) == 1: parser.print_help() sys.exit() args = parser.parse_args() if args.filepath: path = args.filepath if not os.path.exists(path): parser.error("File path doesn't exist.") if args.output_path: output_path = args.output_path else: img_name = os.path.splitext(os.path.basename(path))[0] output_path = os.path.join(output_dir, img_name) if args.logpath: log_to_file = True log_file = args.logpath else: log_to_file = None log_file = None # Determine block size if not provided if args.block_size: block_size = args.block_size else: block_size = get_leb_size(path) if not os.path.exists(output_path): os.makedirs(output_path) perms = args.permissions quiet = args.quiet # Create file object ufsfile = ubi_file(path, block_size) # Create UBIFS object uubifs = ubifs(ufsfile) # Set up logging uubifs.log.log_file = log_file uubifs.log.log_to_file = log_to_file uubifs.quiet = quiet if not os.path.exists(output_path): os.makedirs(output_path) elif os.listdir(output_path): parser.error('Volume output directory is not empty. %s' % output_path) # Run extract all files print 'Writing to: %s' % output_path extract_files(uubifs, output_path, perms) sys.exit()
leonsio/YAHM
share/tools/ubi_reader/ubifs_extract_files.py
Python
cc0-1.0
3,501
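The argparse block in ubifs_extract_files.py above defines the script's command-line surface. The sketch below shows typical invocations plus an equivalent programmatic use of the same helpers the script imports; all paths and the LEB size are placeholders.

# Illustrative command lines (paths and LEB size are placeholders):
#   python ubifs_extract_files.py firmware.ubifs
#   python ubifs_extract_files.py -e 126976 -o output/firmware firmware.ubifs
#   sudo python ubifs_extract_files.py -k firmware.ubifs   # keep permissions (needs root)

# Programmatic sketch wiring together the same helpers the script uses:
from ubi_io import ubi_file
from ubifs import ubifs, get_leb_size
from ui.common import extract_files

path = 'firmware.ubifs'                      # placeholder image path
block_size = get_leb_size(path)              # or pass an explicit LEB size
uubifs = ubifs(ubi_file(path, block_size))
extract_files(uubifs, 'output/firmware', False)   # False: do not keep file permissions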
import os import sys import os.path import errno import shutil from distutils import util from Config import Config supportedLanguages = ["C", "C++", "Java", "Python"] class ConfigInfo: ''' This class contains information about the config file while providing options to directly access the flags section of the .ini file. ''' def __init__(self, newFile): self.setConfigFile(newFile) def setConfigFile(self, newFile): self.CONFIG = newFile self.cfg = Config(self.CONFIG) option_flags = self.cfg.ConfigSectionMap("Flags") self.SEP = option_flags['sep'] self.DEBUG = bool(util.strtobool(option_flags['debug'])) self.DEBUGLITE = bool(util.strtobool(option_flags['debuglite'])) self.DATABASE = bool(util.strtobool(option_flags['database'])) self.CSV = bool(util.strtobool(option_flags['csv'])) self.LOGTIME = bool(util.strtobool(option_flags['logtime'])) class cd: """Context manager for changing the current working directory""" def __init__(self, newPath): self.newPath = newPath def __enter__(self): self.savedPath = os.getcwd() os.chdir(self.newPath) def __exit__(self, etype, value, traceback): os.chdir(self.savedPath) #Generic create directory function def create_dir(path): try: print path os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise def copy_dir(src, dst): try: shutil.copytree(src, dst) except OSError as exc: # python >2.5 if exc.errno == errno.ENOTDIR: shutil.copy(src, dst) else: raise def cleanup(path): if os.path.isdir(path): print "!!! Cleaning up " , path shutil.rmtree(path) # var = raw_input("Path %s exists; do you want to delete it?" % (path)) # print "you entered", var # if var.lower().startswith('y'): # print "!!! Cleaning up " , path # shutil.rmtree(path) elif os.path.isfile(path): print "!!! Removing " , path os.remove(path) all_extension = ['.c', '.cc', '.cpp', '.c++', '.cp', '.cxx', '.h', '.ic', \ # '.cpp_' , '.cpp1' , '.cpp2' , '.cppclean' , \ # '.cpp_NvidiaAPI_sample' , '.cpp-s8inyu' , '.cpp-woains' , \ '.cs' , '.csharp' , '.m' , \ '.java' , '.scala' , '.scla' , \ '.go' , '.javascript' , '.js' , '.coffee' , '.coffeescript' , \ '.rb' , '.php' , '.pl' , '.py' , \ '.cljx' , '.cljscm' , '.clj' , '.cljc' , '.cljs' , \ '.erl' , '.hs' ] #cpp_extension = [ '.c', '.cc', '.cpp', '.c++', '.cp', '.cxx', '.h', '.ic'] cpp_extension = [ '.c', '.cc', '.cpp', '.c++', '.cp', '.cxx']
caseycas/gitcproc
src/util/Util.py
Python
bsd-3-clause
2,866
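A brief usage sketch for the cd context manager and create_dir helper defined in Util.py above; the import path and directory names are assumptions made for illustration.

import os

from util.Util import cd, create_dir   # module path assumed from src/util/Util.py

create_dir('scratch/output')            # no-op if the directory already exists
with cd('scratch/output'):
    print(os.getcwd())                  # now inside scratch/output
print(os.getcwd())                      # restored to the original working directory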
""" mbed SDK Copyright (c) 2016 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from os.path import join, abspath, dirname #ROOT = abspath(join(dirname(__file__), ".")) ############################################################################## # Build System Settings ############################################################################## #BUILD_DIR = abspath(join(ROOT, "build")) # ARM #ARM_PATH = "C:/Program Files/ARM" # GCC ARM #GCC_ARM_PATH = "" # GCC CodeRed #GCC_CR_PATH = "C:/code_red/RedSuite_4.2.0_349/redsuite/Tools/bin" # IAR #IAR_PATH = "C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.0/arm" # Goanna static analyser. Please overload it in private_settings.py #GOANNA_PATH = "c:/Program Files (x86)/RedLizards/Goanna Central 3.2.3/bin" #BUILD_OPTIONS = [] # mbed.org username #MBED_ORG_USER = ""
fahhem/mbed-os
tools/default_settings.py
Python
apache-2.0
1,329
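Every build-system setting in default_settings.py above is a commented-out default, and the Goanna comment names private_settings.py as the place to overload them. A hypothetical local override, with placeholder paths, might look like this:

# private_settings.py -- hypothetical local override; all paths are placeholders
from os.path import join, abspath, dirname

ROOT = abspath(join(dirname(__file__), "."))
BUILD_DIR = abspath(join(ROOT, "build"))
GCC_ARM_PATH = "C:/gcc-arm-none-eabi/bin"
GOANNA_PATH = "c:/Program Files (x86)/RedLizards/Goanna Central 3.2.3/bin"
MBED_ORG_USER = "your-mbed-username"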
# -*- coding: utf-8 -*-
from openfisca_france.model.base import *


class chomeur_longue_duree(Variable):
    cerfa_field = {
        0: u"1AI",
        1: u"1BI",
        2: u"1CI",
        3: u"1DI",
        4: u"1EI",
        }
    value_type = bool
    entity = Individu
    label = u"Demandeur d'emploi inscrit depuis plus d'un an"
    definition_period = YEAR
    # Pour toutes les variables de ce type, les pac3 ne sont plus proposés après 2007


class chomage_brut(Variable):
    value_type = float
    entity = Individu
    label = u"Chômage brut"
    definition_period = MONTH
    set_input = set_input_divide_by_period
    calculate_output = calculate_output_add


class indemnites_chomage_partiel(Variable):
    value_type = float
    entity = Individu
    label = u"Indemnités de chômage partiel"
    definition_period = MONTH
    set_input = set_input_divide_by_period
antoinearnoud/openfisca-france
openfisca_france/model/revenus/remplacement/chomage.py
Python
agpl-3.0
889
# -*- coding: utf-8 -*- """ Base settings file, common to all environments. These settings can be overridden in local.py. """ import datetime import os import json import hashlib import logging from datetime import timedelta from collections import OrderedDict os_env = os.environ def parent_dir(path): '''Return the parent of a directory.''' return os.path.abspath(os.path.join(path, os.pardir)) HERE = os.path.dirname(os.path.abspath(__file__)) BASE_PATH = parent_dir(HERE) # website/ directory APP_PATH = parent_dir(BASE_PATH) ADDON_PATH = os.path.join(APP_PATH, 'addons') STATIC_FOLDER = os.path.join(BASE_PATH, 'static') STATIC_URL_PATH = '/static' ASSET_HASH_PATH = os.path.join(APP_PATH, 'webpack-assets.json') ROOT = os.path.join(BASE_PATH, '..') BCRYPT_LOG_ROUNDS = 12 # Logging level to use when DEBUG is False LOG_LEVEL = logging.INFO with open(os.path.join(APP_PATH, 'package.json'), 'r') as fobj: VERSION = json.load(fobj)['version'] # Expiration time for verification key EXPIRATION_TIME_DICT = { 'password': 24 * 60, # 24 hours in minutes for forgot and reset password 'confirm': 24 * 60, # 24 hours in minutes for confirm account and email 'claim': 30 * 24 * 60 # 30 days in minutes for claim contributor-ship } CITATION_STYLES_PATH = os.path.join(BASE_PATH, 'static', 'vendor', 'bower_components', 'styles') # Minimum seconds between forgot password email attempts SEND_EMAIL_THROTTLE = 30 # Seconds that must elapse before updating a user's date_last_login field DATE_LAST_LOGIN_THROTTLE = 60 # Hours before pending embargo/retraction/registration automatically becomes active RETRACTION_PENDING_TIME = datetime.timedelta(days=2) EMBARGO_PENDING_TIME = datetime.timedelta(days=2) EMBARGO_TERMINATION_PENDING_TIME = datetime.timedelta(days=2) REGISTRATION_APPROVAL_TIME = datetime.timedelta(days=2) # Date range for embargo periods EMBARGO_END_DATE_MIN = datetime.timedelta(days=2) EMBARGO_END_DATE_MAX = datetime.timedelta(days=1460) # Four years # Question titles to be reomved for anonymized VOL ANONYMIZED_TITLES = ['Authors'] LOAD_BALANCER = False PROXY_ADDRS = [] USE_POSTGRES = True # May set these to True in local.py for development DEV_MODE = False DEBUG_MODE = False SECURE_MODE = not DEBUG_MODE # Set secure cookie PROTOCOL = 'https://' if SECURE_MODE else 'http://' DOMAIN = PROTOCOL + 'localhost:5000/' INTERNAL_DOMAIN = DOMAIN API_DOMAIN = PROTOCOL + 'localhost:8000/' PREPRINT_PROVIDER_DOMAINS = { 'enabled': False, 'prefix': PROTOCOL, 'suffix': '/' } # External Ember App Local Development USE_EXTERNAL_EMBER = False EXTERNAL_EMBER_APPS = {} LOG_PATH = os.path.join(APP_PATH, 'logs') TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates') ANALYTICS_PATH = os.path.join(BASE_PATH, 'analytics') # User management & registration CONFIRM_REGISTRATIONS_BY_EMAIL = True ALLOW_REGISTRATION = True ALLOW_LOGIN = True SEARCH_ENGINE = 'elastic' # Can be 'elastic', or None ELASTIC_URI = 'localhost:9200' ELASTIC_TIMEOUT = 10 ELASTIC_INDEX = 'website' # Sessions COOKIE_NAME = 'osf' # TODO: Override OSF_COOKIE_DOMAIN in local.py in production OSF_COOKIE_DOMAIN = None # server-side verification timeout OSF_SESSION_TIMEOUT = 30 * 24 * 60 * 60 # 30 days in seconds # TODO: Override SECRET_KEY in local.py in production SECRET_KEY = 'CHANGEME' SESSION_COOKIE_SECURE = SECURE_MODE SESSION_COOKIE_HTTPONLY = True # local path to private key and cert for local development using https, overwrite in local.py OSF_SERVER_KEY = None OSF_SERVER_CERT = None # Change if using `scripts/cron.py` to manage crontab CRON_USER = None # External 
services USE_CDN_FOR_CLIENT_LIBS = True USE_EMAIL = True FROM_EMAIL = 'openscienceframework-noreply@osf.io' SUPPORT_EMAIL = 'support@osf.io' # SMTP Settings MAIL_SERVER = 'smtp.sendgrid.net' MAIL_USERNAME = 'osf-smtp' MAIL_PASSWORD = '' # Set this in local.py # OR, if using Sendgrid's API SENDGRID_API_KEY = None # Mailchimp MAILCHIMP_API_KEY = None MAILCHIMP_WEBHOOK_SECRET_KEY = 'CHANGEME' # OSF secret key to ensure webhook is secure ENABLE_EMAIL_SUBSCRIPTIONS = True MAILCHIMP_GENERAL_LIST = 'Open Science Framework General' #Triggered emails OSF_HELP_LIST = 'Open Science Framework Help' WAIT_BETWEEN_MAILS = timedelta(days=7) NO_ADDON_WAIT_TIME = timedelta(weeks=8) NO_LOGIN_WAIT_TIME = timedelta(weeks=4) WELCOME_OSF4M_WAIT_TIME = timedelta(weeks=2) NO_LOGIN_OSF4M_WAIT_TIME = timedelta(weeks=6) NEW_PUBLIC_PROJECT_WAIT_TIME = timedelta(hours=24) WELCOME_OSF4M_WAIT_TIME_GRACE = timedelta(days=12) # TODO: Override in local.py MAILGUN_API_KEY = None # Use Celery for file rendering USE_CELERY = True # File rendering timeout (in ms) MFR_TIMEOUT = 30000 # TODO: Override in local.py in production DB_HOST = 'localhost' DB_PORT = os_env.get('OSF_DB_PORT', 27017) DB_NAME = 'osf20130903' DB_USER = None DB_PASS = None # Cache settings SESSION_HISTORY_LENGTH = 5 SESSION_HISTORY_IGNORE_RULES = [ lambda url: '/static/' in url, lambda url: 'favicon' in url, lambda url: url.startswith('/api/'), ] # TODO: Configuration should not change between deploys - this should be dynamic. CANONICAL_DOMAIN = 'openscienceframework.org' COOKIE_DOMAIN = '.openscienceframework.org' # Beaker SHORT_DOMAIN = 'osf.io' # TODO: Combine Python and JavaScript config COMMENT_MAXLENGTH = 500 # Profile image options PROFILE_IMAGE_LARGE = 70 PROFILE_IMAGE_MEDIUM = 40 PROFILE_IMAGE_SMALL = 20 # Conference options CONFERENCE_MIN_COUNT = 5 WIKI_WHITELIST = { 'tags': [ 'a', 'abbr', 'acronym', 'b', 'bdo', 'big', 'blockquote', 'br', 'center', 'cite', 'code', 'dd', 'del', 'dfn', 'div', 'dl', 'dt', 'em', 'embed', 'font', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li', 'object', 'ol', 'param', 'pre', 'p', 'q', 's', 'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'th', 'thead', 'tr', 'tt', 'ul', 'u', 'var', 'wbr', ], 'attributes': [ 'align', 'alt', 'border', 'cite', 'class', 'dir', 'height', 'href', 'id', 'src', 'style', 'title', 'type', 'width', 'face', 'size', # font tags 'salign', 'align', 'wmode', 'target', ], # Styles currently used in Reproducibility Project wiki pages 'styles' : [ 'top', 'left', 'width', 'height', 'position', 'background', 'font-size', 'text-align', 'z-index', 'list-style', ] } # Maps category identifier => Human-readable representation for use in # titles, menus, etc. 
# Use an OrderedDict so that menu items show in the correct order NODE_CATEGORY_MAP = OrderedDict([ ('analysis', 'Analysis'), ('communication', 'Communication'), ('data', 'Data'), ('hypothesis', 'Hypothesis'), ('instrumentation', 'Instrumentation'), ('methods and measures', 'Methods and Measures'), ('procedure', 'Procedure'), ('project', 'Project'), ('software', 'Software'), ('other', 'Other'), ('', 'Uncategorized') ]) # Add-ons # Load addons from addons.json with open(os.path.join(ROOT, 'addons.json')) as fp: addon_settings = json.load(fp) ADDONS_REQUESTED = addon_settings['addons'] ADDONS_ARCHIVABLE = addon_settings['addons_archivable'] ADDONS_COMMENTABLE = addon_settings['addons_commentable'] ADDONS_BASED_ON_IDS = addon_settings['addons_based_on_ids'] ADDONS_DESCRIPTION = addon_settings['addons_description'] ADDONS_URL = addon_settings['addons_url'] ADDON_CATEGORIES = [ 'documentation', 'storage', 'bibliography', 'other', 'security', 'citations', ] SYSTEM_ADDED_ADDONS = { 'user': [], 'node': [], } KEEN = { 'public': { 'project_id': None, 'master_key': 'changeme', 'write_key': '', 'read_key': '', }, 'private': { 'project_id': '', 'write_key': '', 'read_key': '', }, } SENTRY_DSN = None SENTRY_DSN_JS = None MISSING_FILE_NAME = 'untitled' # Project Organizer ALL_MY_PROJECTS_ID = '-amp' ALL_MY_REGISTRATIONS_ID = '-amr' ALL_MY_PROJECTS_NAME = 'All my projects' ALL_MY_REGISTRATIONS_NAME = 'All my registrations' # Most Popular and New and Noteworthy Nodes POPULAR_LINKS_NODE = None # TODO Override in local.py in production. POPULAR_LINKS_REGISTRATIONS = None # TODO Override in local.py in production. NEW_AND_NOTEWORTHY_LINKS_NODE = None # TODO Override in local.py in production. MAX_POPULAR_PROJECTS = 10 NEW_AND_NOTEWORTHY_CONTRIBUTOR_BLACKLIST = [] # TODO Override in local.py in production. # FOR EMERGENCIES ONLY: Setting this to True will disable forks, registrations, # and uploads in order to save disk space. DISK_SAVING_MODE = False # Seconds before another notification email can be sent to a contributor when added to a project CONTRIBUTOR_ADDED_EMAIL_THROTTLE = 24 * 3600 # Google Analytics GOOGLE_ANALYTICS_ID = None GOOGLE_SITE_VERIFICATION = None DEFAULT_HMAC_SECRET = 'changeme' DEFAULT_HMAC_ALGORITHM = hashlib.sha256 WATERBUTLER_URL = 'http://localhost:7777' WATERBUTLER_INTERNAL_URL = WATERBUTLER_URL WATERBUTLER_ADDRS = ['127.0.0.1'] # Test identifier namespaces DOI_NAMESPACE = 'doi:10.5072/FK2' ARK_NAMESPACE = 'ark:99999/fk4' # For creating DOIs and ARKs through the EZID service EZID_USERNAME = None EZID_PASSWORD = None # Format for DOIs and ARKs EZID_FORMAT = '{namespace}osf.io/{guid}' SHARE_REGISTRATION_URL = '' SHARE_URL = None SHARE_API_TOKEN = None # Required to send project updates to SHARE CAS_SERVER_URL = 'http://localhost:8080' MFR_SERVER_URL = 'http://localhost:7778' ###### ARCHIVER ########### ARCHIVE_PROVIDER = 'osfstorage' MAX_ARCHIVE_SIZE = 5 * 1024 ** 3 # == math.pow(1024, 3) == 1 GB MAX_FILE_SIZE = MAX_ARCHIVE_SIZE # TODO limit file size? 
ARCHIVE_TIMEOUT_TIMEDELTA = timedelta(1) # 24 hours ENABLE_ARCHIVER = True JWT_SECRET = 'changeme' JWT_ALGORITHM = 'HS256' ##### CELERY ##### DEFAULT_QUEUE = 'celery' LOW_QUEUE = 'low' MED_QUEUE = 'med' HIGH_QUEUE = 'high' # Seconds, not an actual celery setting CELERY_RETRY_BACKOFF_BASE = 5 LOW_PRI_MODULES = { 'framework.analytics.tasks', 'framework.celery_tasks', 'scripts.osfstorage.usage_audit', 'scripts.stuck_registration_audit', 'scripts.osfstorage.glacier_inventory', 'scripts.analytics.tasks', 'scripts.osfstorage.files_audit', 'scripts.osfstorage.glacier_audit', 'scripts.populate_new_and_noteworthy_projects', 'scripts.populate_popular_projects_and_registrations', 'website.search.elastic_search', } MED_PRI_MODULES = { 'framework.email.tasks', 'scripts.send_queued_mails', 'scripts.triggered_mails', 'website.mailchimp_utils', 'website.notifications.tasks', } HIGH_PRI_MODULES = { 'scripts.approve_embargo_terminations', 'scripts.approve_registrations', 'scripts.embargo_registrations', 'scripts.refresh_addon_tokens', 'scripts.retract_registrations', 'website.archiver.tasks', } try: from kombu import Queue, Exchange except ImportError: pass else: CELERY_QUEUES = ( Queue(LOW_QUEUE, Exchange(LOW_QUEUE), routing_key=LOW_QUEUE, consumer_arguments={'x-priority': -1}), Queue(DEFAULT_QUEUE, Exchange(DEFAULT_QUEUE), routing_key=DEFAULT_QUEUE, consumer_arguments={'x-priority': 0}), Queue(MED_QUEUE, Exchange(MED_QUEUE), routing_key=MED_QUEUE, consumer_arguments={'x-priority': 1}), Queue(HIGH_QUEUE, Exchange(HIGH_QUEUE), routing_key=HIGH_QUEUE, consumer_arguments={'x-priority': 10}), ) CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_ROUTES = ('framework.celery_tasks.routers.CeleryRouter', ) CELERY_IGNORE_RESULT = True CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True # Default RabbitMQ broker BROKER_URL = 'amqp://' # Default RabbitMQ backend CELERY_RESULT_BACKEND = 'amqp://' # Modules to import when celery launches CELERY_IMPORTS = ( 'framework.celery_tasks', 'framework.email.tasks', 'website.mailchimp_utils', 'website.notifications.tasks', 'website.archiver.tasks', 'website.search.search', 'website.project.tasks', 'scripts.populate_new_and_noteworthy_projects', 'scripts.populate_popular_projects_and_registrations', 'scripts.refresh_addon_tokens', 'scripts.retract_registrations', 'scripts.embargo_registrations', 'scripts.approve_registrations', 'scripts.approve_embargo_terminations', 'scripts.triggered_mails', 'scripts.send_queued_mails', 'scripts.analytics.run_keen_summaries', 'scripts.analytics.run_keen_snapshots', 'scripts.analytics.run_keen_events', 'scripts.generate_sitemap', ) # Modules that need metrics and release requirements # CELERY_IMPORTS += ( # 'scripts.osfstorage.glacier_inventory', # 'scripts.osfstorage.glacier_audit', # 'scripts.osfstorage.usage_audit', # 'scripts.stuck_registration_audit', # 'scripts.osfstorage.files_audit', # 'scripts.analytics.tasks', # 'scripts.analytics.upload', # ) # celery.schedule will not be installed when running invoke requirements the first time. 
try: from celery.schedules import crontab except ImportError: pass else: # Setting up a scheduler, essentially replaces an independent cron job CELERYBEAT_SCHEDULE = { '5-minute-emails': { 'task': 'website.notifications.tasks.send_users_email', 'schedule': crontab(minute='*/5'), 'args': ('email_transactional',), }, 'daily-emails': { 'task': 'website.notifications.tasks.send_users_email', 'schedule': crontab(minute=0, hour=0), 'args': ('email_digest',), }, 'refresh_addons': { 'task': 'scripts.refresh_addon_tokens', 'schedule': crontab(minute=0, hour= 2), # Daily 2:00 a.m 'kwargs': {'dry_run': False, 'addons': { 'box': 60, # https://docs.box.com/docs/oauth-20#section-6-using-the-access-and-refresh-tokens 'googledrive': 14, # https://developers.google.com/identity/protocols/OAuth2#expiration 'mendeley': 14 # http://dev.mendeley.com/reference/topics/authorization_overview.html }}, }, 'retract_registrations': { 'task': 'scripts.retract_registrations', 'schedule': crontab(minute=0, hour=0), # Daily 12 a.m 'kwargs': {'dry_run': False}, }, 'embargo_registrations': { 'task': 'scripts.embargo_registrations', 'schedule': crontab(minute=0, hour=0), # Daily 12 a.m 'kwargs': {'dry_run': False}, }, 'approve_registrations': { 'task': 'scripts.approve_registrations', 'schedule': crontab(minute=0, hour=0), # Daily 12 a.m 'kwargs': {'dry_run': False}, }, 'approve_embargo_terminations': { 'task': 'scripts.approve_embargo_terminations', 'schedule': crontab(minute=0, hour=0), # Daily 12 a.m 'kwargs': {'dry_run': False}, }, 'triggered_mails': { 'task': 'scripts.triggered_mails', 'schedule': crontab(minute=0, hour=0), # Daily 12 a.m 'kwargs': {'dry_run': False}, }, 'send_queued_mails': { 'task': 'scripts.send_queued_mails', 'schedule': crontab(minute=0, hour=12), # Daily 12 p.m. 'kwargs': {'dry_run': False}, }, 'new-and-noteworthy': { 'task': 'scripts.populate_new_and_noteworthy_projects', 'schedule': crontab(minute=0, hour=2, day_of_week=6), # Saturday 2:00 a.m. 'kwargs': {'dry_run': False} }, 'update_popular_nodes': { 'task': 'scripts.populate_popular_projects_and_registrations', 'schedule': crontab(minute=0, hour=2), # Daily 2:00 a.m. 'kwargs': {'dry_run': False} }, 'run_keen_summaries': { 'task': 'scripts.analytics.run_keen_summaries', 'schedule': crontab(minute=00, hour=1), # Daily 1:00 a.m. 'kwargs': {'yesterday': True} }, 'run_keen_snapshots': { 'task': 'scripts.analytics.run_keen_snapshots', 'schedule': crontab(minute=0, hour=3), # Daily 3:00 a.m. }, 'run_keen_events': { 'task': 'scripts.analytics.run_keen_events', 'schedule': crontab(minute=0, hour=4), # Daily 4:00 a.m. 'kwargs': {'yesterday': True} }, 'generate_sitemap': { 'task': 'scripts.generate_sitemap', 'schedule': crontab(minute=0, hour=0), # Daily 12:00 a.m. } } # Tasks that need metrics and release requirements # CELERYBEAT_SCHEDULE.update({ # 'usage_audit': { # 'task': 'scripts.osfstorage.usage_audit', # 'schedule': crontab(minute=0, hour=0), # Daily 12 a.m # 'kwargs': {'send_mail': True}, # }, # 'stuck_registration_audit': { # 'task': 'scripts.stuck_registration_audit', # 'schedule': crontab(minute=0, hour=6), # Daily 6 a.m # 'kwargs': {}, # }, # 'glacier_inventory': { # 'task': 'scripts.osfstorage.glacier_inventory', # 'schedule': crontab(minute=0, hour= 0, day_of_week=0), # Sunday 12:00 a.m. # 'args': (), # }, # 'glacier_audit': { # 'task': 'scripts.osfstorage.glacier_audit', # 'schedule': crontab(minute=0, hour=6, day_of_week=0), # Sunday 6:00 a.m. 
# 'kwargs': {'dry_run': False}, # }, # 'files_audit_0': { # 'task': 'scripts.osfstorage.files_audit.0', # 'schedule': crontab(minute=0, hour=2, day_of_week=0), # Sunday 2:00 a.m. # 'kwargs': {'num_of_workers': 4, 'dry_run': False}, # }, # 'files_audit_1': { # 'task': 'scripts.osfstorage.files_audit.1', # 'schedule': crontab(minute=0, hour=2, day_of_week=0), # Sunday 2:00 a.m. # 'kwargs': {'num_of_workers': 4, 'dry_run': False}, # }, # 'files_audit_2': { # 'task': 'scripts.osfstorage.files_audit.2', # 'schedule': crontab(minute=0, hour=2, day_of_week=0), # Sunday 2:00 a.m. # 'kwargs': {'num_of_workers': 4, 'dry_run': False}, # }, # 'files_audit_3': { # 'task': 'scripts.osfstorage.files_audit.3', # 'schedule': crontab(minute=0, hour=2, day_of_week=0), # Sunday 2:00 a.m. # 'kwargs': {'num_of_workers': 4, 'dry_run': False}, # }, # }) WATERBUTLER_JWE_SALT = 'yusaltydough' WATERBUTLER_JWE_SECRET = 'CirclesAre4Squares' WATERBUTLER_JWT_SECRET = 'ILiekTrianglesALot' WATERBUTLER_JWT_ALGORITHM = 'HS256' WATERBUTLER_JWT_EXPIRATION = 15 SENSITIVE_DATA_SALT = 'yusaltydough' SENSITIVE_DATA_SECRET = 'TrainglesAre5Squares' DRAFT_REGISTRATION_APPROVAL_PERIOD = datetime.timedelta(days=10) assert (DRAFT_REGISTRATION_APPROVAL_PERIOD > EMBARGO_END_DATE_MIN), 'The draft registration approval period should be more than the minimum embargo end date.' PREREG_ADMIN_TAG = "prereg_admin" # TODO: Remove references to this flag ENABLE_INSTITUTIONS = True ENABLE_VARNISH = False ENABLE_ESI = False VARNISH_SERVERS = [] # This should be set in local.py or cache invalidation won't work ESI_MEDIA_TYPES = {'application/vnd.api+json', 'application/json'} # Used for gathering meta information about the current build GITHUB_API_TOKEN = None # switch for disabling things that shouldn't happen during # the modm to django migration RUNNING_MIGRATION = False # External Identity Provider EXTERNAL_IDENTITY_PROFILE = { 'OrcidProfile': 'ORCID', } # Source: https://github.com/maxd/fake_email_validator/blob/master/config/fake_domains.list BLACKLISTED_DOMAINS = [ '0-mail.com', '0815.ru', '0815.su', '0clickemail.com', '0wnd.net', '0wnd.org', '10mail.org', '10minut.com.pl', '10minutemail.cf', '10minutemail.co.uk', '10minutemail.co.za', '10minutemail.com', '10minutemail.de', '10minutemail.eu', '10minutemail.ga', '10minutemail.gq', '10minutemail.info', '10minutemail.ml', '10minutemail.net', '10minutemail.org', '10minutemail.ru', '10minutemail.us', '10minutesmail.co.uk', '10minutesmail.com', '10minutesmail.eu', '10minutesmail.net', '10minutesmail.org', '10minutesmail.ru', '10minutesmail.us', '123-m.com', '15qm-mail.red', '15qm.com', '1chuan.com', '1mail.ml', '1pad.de', '1usemail.com', '1zhuan.com', '20mail.in', '20mail.it', '20minutemail.com', '2prong.com', '30minutemail.com', '30minutesmail.com', '33mail.com', '3d-painting.com', '3mail.ga', '4mail.cf', '4mail.ga', '4warding.com', '4warding.net', '4warding.org', '5mail.cf', '5mail.ga', '60minutemail.com', '675hosting.com', '675hosting.net', '675hosting.org', '6ip.us', '6mail.cf', '6mail.ga', '6mail.ml', '6paq.com', '6url.com', '75hosting.com', '75hosting.net', '75hosting.org', '7mail.ga', '7mail.ml', '7mail7.com', '7tags.com', '8mail.cf', '8mail.ga', '8mail.ml', '99experts.com', '9mail.cf', '9ox.net', 'a-bc.net', 'a45.in', 'abcmail.email', 'abusemail.de', 'abyssmail.com', 'acentri.com', 'advantimo.com', 'afrobacon.com', 'agedmail.com', 'ajaxapp.net', 'alivance.com', 'ama-trade.de', 'amail.com', 'amail4.me', 'amilegit.com', 'amiri.net', 'amiriindustries.com', 'anappthat.com', 'ano-mail.net', 
'anobox.ru', 'anonbox.net', 'anonmails.de', 'anonymail.dk', 'anonymbox.com', 'antichef.com', 'antichef.net', 'antireg.ru', 'antispam.de', 'antispammail.de', 'appixie.com', 'armyspy.com', 'artman-conception.com', 'asdasd.ru', 'azmeil.tk', 'baxomale.ht.cx', 'beddly.com', 'beefmilk.com', 'beerolympics.se', 'bestemailaddress.net', 'bigprofessor.so', 'bigstring.com', 'binkmail.com', 'bio-muesli.net', 'bladesmail.net', 'bloatbox.com', 'bobmail.info', 'bodhi.lawlita.com', 'bofthew.com', 'bootybay.de', 'bossmail.de', 'boun.cr', 'bouncr.com', 'boxformail.in', 'boximail.com', 'boxtemp.com.br', 'breakthru.com', 'brefmail.com', 'brennendesreich.de', 'broadbandninja.com', 'bsnow.net', 'bspamfree.org', 'buffemail.com', 'bugmenot.com', 'bumpymail.com', 'bund.us', 'bundes-li.ga', 'burnthespam.info', 'burstmail.info', 'buymoreplays.com', 'buyusedlibrarybooks.org', 'byom.de', 'c2.hu', 'cachedot.net', 'card.zp.ua', 'casualdx.com', 'cbair.com', 'cdnqa.com', 'cek.pm', 'cellurl.com', 'cem.net', 'centermail.com', 'centermail.net', 'chammy.info', 'cheatmail.de', 'chewiemail.com', 'childsavetrust.org', 'chogmail.com', 'choicemail1.com', 'chong-mail.com', 'chong-mail.net', 'chong-mail.org', 'clixser.com', 'clrmail.com', 'cmail.net', 'cmail.org', 'coldemail.info', 'consumerriot.com', 'cool.fr.nf', 'correo.blogos.net', 'cosmorph.com', 'courriel.fr.nf', 'courrieltemporaire.com', 'crapmail.org', 'crazymailing.com', 'cubiclink.com', 'curryworld.de', 'cust.in', 'cuvox.de', 'd3p.dk', 'dacoolest.com', 'daintly.com', 'dandikmail.com', 'dayrep.com', 'dbunker.com', 'dcemail.com', 'deadaddress.com', 'deadfake.cf', 'deadfake.ga', 'deadfake.ml', 'deadfake.tk', 'deadspam.com', 'deagot.com', 'dealja.com', 'delikkt.de', 'despam.it', 'despammed.com', 'devnullmail.com', 'dfgh.net', 'digitalsanctuary.com', 'dingbone.com', 'dingfone.com', 'discard.cf', 'discard.email', 'discard.ga', 'discard.gq', 'discard.ml', 'discard.tk', 'discardmail.com', 'discardmail.de', 'dispomail.eu', 'disposable-email.ml', 'disposable.cf', 'disposable.ga', 'disposable.ml', 'disposableaddress.com', 'disposableemailaddresses.com', 'disposableinbox.com', 'dispose.it', 'disposeamail.com', 'disposemail.com', 'dispostable.com', 'divermail.com', 'dodgeit.com', 'dodgemail.de', 'dodgit.com', 'dodgit.org', 'dodsi.com', 'doiea.com', 'domozmail.com', 'donemail.ru', 'dontmail.net', 'dontreg.com', 'dontsendmespam.de', 'dotmsg.com', 'drdrb.com', 'drdrb.net', 'droplar.com', 'dropmail.me', 'duam.net', 'dudmail.com', 'dump-email.info', 'dumpandjunk.com', 'dumpmail.de', 'dumpyemail.com', 'duskmail.com', 'e-mail.com', 'e-mail.org', 'e4ward.com', 'easytrashmail.com', 'ee1.pl', 'ee2.pl', 'eelmail.com', 'einmalmail.de', 'einrot.com', 'einrot.de', 'eintagsmail.de', 'email-fake.cf', 'email-fake.com', 'email-fake.ga', 'email-fake.gq', 'email-fake.ml', 'email-fake.tk', 'email60.com', 'email64.com', 'emailage.cf', 'emailage.ga', 'emailage.gq', 'emailage.ml', 'emailage.tk', 'emaildienst.de', 'emailgo.de', 'emailias.com', 'emailigo.de', 'emailinfive.com', 'emaillime.com', 'emailmiser.com', 'emailproxsy.com', 'emails.ga', 'emailsensei.com', 'emailspam.cf', 'emailspam.ga', 'emailspam.gq', 'emailspam.ml', 'emailspam.tk', 'emailtemporanea.com', 'emailtemporanea.net', 'emailtemporar.ro', 'emailtemporario.com.br', 'emailthe.net', 'emailtmp.com', 'emailto.de', 'emailwarden.com', 'emailx.at.hm', 'emailxfer.com', 'emailz.cf', 'emailz.ga', 'emailz.gq', 'emailz.ml', 'emeil.in', 'emeil.ir', 'emeraldwebmail.com', 'emil.com', 'emkei.cf', 'emkei.ga', 'emkei.gq', 'emkei.ml', 'emkei.tk', 'emz.net', 
'enterto.com', 'ephemail.net', 'ero-tube.org', 'etranquil.com', 'etranquil.net', 'etranquil.org', 'evopo.com', 'example.com', 'explodemail.com', 'express.net.ua', 'eyepaste.com', 'facebook-email.cf', 'facebook-email.ga', 'facebook-email.ml', 'facebookmail.gq', 'facebookmail.ml', 'fake-box.com', 'fake-mail.cf', 'fake-mail.ga', 'fake-mail.ml', 'fakeinbox.cf', 'fakeinbox.com', 'fakeinbox.ga', 'fakeinbox.ml', 'fakeinbox.tk', 'fakeinformation.com', 'fakemail.fr', 'fakemailgenerator.com', 'fakemailz.com', 'fammix.com', 'fansworldwide.de', 'fantasymail.de', 'fastacura.com', 'fastchevy.com', 'fastchrysler.com', 'fastkawasaki.com', 'fastmazda.com', 'fastmitsubishi.com', 'fastnissan.com', 'fastsubaru.com', 'fastsuzuki.com', 'fasttoyota.com', 'fastyamaha.com', 'fatflap.com', 'fdfdsfds.com', 'fightallspam.com', 'fiifke.de', 'filzmail.com', 'fivemail.de', 'fixmail.tk', 'fizmail.com', 'fleckens.hu', 'flurre.com', 'flurred.com', 'flurred.ru', 'flyspam.com', 'footard.com', 'forgetmail.com', 'forward.cat', 'fr33mail.info', 'frapmail.com', 'free-email.cf', 'free-email.ga', 'freemails.cf', 'freemails.ga', 'freemails.ml', 'freundin.ru', 'friendlymail.co.uk', 'front14.org', 'fuckingduh.com', 'fudgerub.com', 'fux0ringduh.com', 'fyii.de', 'garliclife.com', 'gehensiemirnichtaufdensack.de', 'gelitik.in', 'germanmails.biz', 'get-mail.cf', 'get-mail.ga', 'get-mail.ml', 'get-mail.tk', 'get1mail.com', 'get2mail.fr', 'getairmail.cf', 'getairmail.com', 'getairmail.ga', 'getairmail.gq', 'getairmail.ml', 'getairmail.tk', 'getmails.eu', 'getonemail.com', 'getonemail.net', 'gfcom.com', 'ghosttexter.de', 'giantmail.de', 'girlsundertheinfluence.com', 'gishpuppy.com', 'gmial.com', 'goemailgo.com', 'gorillaswithdirtyarmpits.com', 'gotmail.com', 'gotmail.net', 'gotmail.org', 'gowikibooks.com', 'gowikicampus.com', 'gowikicars.com', 'gowikifilms.com', 'gowikigames.com', 'gowikimusic.com', 'gowikinetwork.com', 'gowikitravel.com', 'gowikitv.com', 'grandmamail.com', 'grandmasmail.com', 'great-host.in', 'greensloth.com', 'grr.la', 'gsrv.co.uk', 'guerillamail.biz', 'guerillamail.com', 'guerillamail.de', 'guerillamail.net', 'guerillamail.org', 'guerillamailblock.com', 'guerrillamail.biz', 'guerrillamail.com', 'guerrillamail.de', 'guerrillamail.info', 'guerrillamail.net', 'guerrillamail.org', 'guerrillamailblock.com', 'gustr.com', 'h8s.org', 'hacccc.com', 'haltospam.com', 'haqed.com', 'harakirimail.com', 'hartbot.de', 'hat-geld.de', 'hatespam.org', 'headstrong.de', 'hellodream.mobi', 'herp.in', 'hidemail.de', 'hideme.be', 'hidzz.com', 'hiru-dea.com', 'hmamail.com', 'hochsitze.com', 'hopemail.biz', 'hot-mail.cf', 'hot-mail.ga', 'hot-mail.gq', 'hot-mail.ml', 'hot-mail.tk', 'hotpop.com', 'hulapla.de', 'hushmail.com', 'ieatspam.eu', 'ieatspam.info', 'ieh-mail.de', 'ihateyoualot.info', 'iheartspam.org', 'ikbenspamvrij.nl', 'imails.info', 'imgof.com', 'imgv.de', 'imstations.com', 'inbax.tk', 'inbox.si', 'inboxalias.com', 'inboxclean.com', 'inboxclean.org', 'inboxproxy.com', 'incognitomail.com', 'incognitomail.net', 'incognitomail.org', 'ineec.net', 'infocom.zp.ua', 'inoutmail.de', 'inoutmail.eu', 'inoutmail.info', 'inoutmail.net', 'insorg-mail.info', 'instant-mail.de', 'instantemailaddress.com', 'instantlyemail.com', 'ip6.li', 'ipoo.org', 'irish2me.com', 'iwi.net', 'jetable.com', 'jetable.fr.nf', 'jetable.net', 'jetable.org', 'jnxjn.com', 'jourrapide.com', 'junk1e.com', 'junkmail.com', 'junkmail.ga', 'junkmail.gq', 'jupimail.com', 'kasmail.com', 'kaspop.com', 'keepmymail.com', 'killmail.com', 'killmail.net', 'kimsdisk.com', 'kingsq.ga', 
'kiois.com', 'kir.ch.tc', 'klassmaster.com', 'klassmaster.net', 'klzlk.com', 'kook.ml', 'koszmail.pl', 'kulturbetrieb.info', 'kurzepost.de', 'l33r.eu', 'labetteraverouge.at', 'lackmail.net', 'lags.us', 'landmail.co', 'lastmail.co', 'lawlita.com', 'lazyinbox.com', 'legitmail.club', 'letthemeatspam.com', 'lhsdv.com', 'libox.fr', 'lifebyfood.com', 'link2mail.net', 'litedrop.com', 'loadby.us', 'login-email.cf', 'login-email.ga', 'login-email.ml', 'login-email.tk', 'lol.ovpn.to', 'lolfreak.net', 'lookugly.com', 'lopl.co.cc', 'lortemail.dk', 'lovemeleaveme.com', 'lr78.com', 'lroid.com', 'lukop.dk', 'm21.cc', 'm4ilweb.info', 'maboard.com', 'mail-filter.com', 'mail-temporaire.fr', 'mail.by', 'mail.mezimages.net', 'mail.zp.ua', 'mail114.net', 'mail1a.de', 'mail21.cc', 'mail2rss.org', 'mail333.com', 'mail4trash.com', 'mailbidon.com', 'mailbiz.biz', 'mailblocks.com', 'mailblog.biz', 'mailbucket.org', 'mailcat.biz', 'mailcatch.com', 'mailde.de', 'mailde.info', 'maildrop.cc', 'maildrop.cf', 'maildrop.ga', 'maildrop.gq', 'maildrop.ml', 'maildu.de', 'maildx.com', 'maileater.com', 'mailed.ro', 'maileimer.de', 'mailexpire.com', 'mailfa.tk', 'mailforspam.com', 'mailfree.ga', 'mailfree.gq', 'mailfree.ml', 'mailfreeonline.com', 'mailfs.com', 'mailguard.me', 'mailhazard.com', 'mailhazard.us', 'mailhz.me', 'mailimate.com', 'mailin8r.com', 'mailinater.com', 'mailinator.com', 'mailinator.gq', 'mailinator.net', 'mailinator.org', 'mailinator.us', 'mailinator2.com', 'mailinator2.net', 'mailincubator.com', 'mailismagic.com', 'mailjunk.cf', 'mailjunk.ga', 'mailjunk.gq', 'mailjunk.ml', 'mailjunk.tk', 'mailmate.com', 'mailme.gq', 'mailme.ir', 'mailme.lv', 'mailme24.com', 'mailmetrash.com', 'mailmoat.com', 'mailms.com', 'mailnator.com', 'mailnesia.com', 'mailnull.com', 'mailorg.org', 'mailpick.biz', 'mailproxsy.com', 'mailquack.com', 'mailrock.biz', 'mailscrap.com', 'mailshell.com', 'mailsiphon.com', 'mailslapping.com', 'mailslite.com', 'mailspeed.ru', 'mailtemp.info', 'mailtome.de', 'mailtothis.com', 'mailtrash.net', 'mailtv.net', 'mailtv.tv', 'mailzilla.com', 'mailzilla.org', 'mailzilla.orgmbx.cc', 'makemetheking.com', 'mallinator.com', 'manifestgenerator.com', 'manybrain.com', 'mbx.cc', 'mciek.com', 'mega.zik.dj', 'meinspamschutz.de', 'meltmail.com', 'messagebeamer.de', 'mezimages.net', 'mfsa.ru', 'mierdamail.com', 'migmail.pl', 'migumail.com', 'mindless.com', 'ministry-of-silly-walks.de', 'mintemail.com', 'misterpinball.de', 'mjukglass.nu', 'moakt.com', 'mobi.web.id', 'mobileninja.co.uk', 'moburl.com', 'mohmal.com', 'moncourrier.fr.nf', 'monemail.fr.nf', 'monmail.fr.nf', 'monumentmail.com', 'msa.minsmail.com', 'mt2009.com', 'mt2014.com', 'mt2015.com', 'mx0.wwwnew.eu', 'my10minutemail.com', 'myalias.pw', 'mycard.net.ua', 'mycleaninbox.net', 'myemailboxy.com', 'mymail-in.net', 'mymailoasis.com', 'mynetstore.de', 'mypacks.net', 'mypartyclip.de', 'myphantomemail.com', 'mysamp.de', 'myspaceinc.com', 'myspaceinc.net', 'myspaceinc.org', 'myspacepimpedup.com', 'myspamless.com', 'mytemp.email', 'mytempemail.com', 'mytempmail.com', 'mytrashmail.com', 'nabuma.com', 'neomailbox.com', 'nepwk.com', 'nervmich.net', 'nervtmich.net', 'netmails.com', 'netmails.net', 'netzidiot.de', 'neverbox.com', 'nice-4u.com', 'nincsmail.com', 'nincsmail.hu', 'nmail.cf', 'nnh.com', 'no-spam.ws', 'noblepioneer.com', 'nobulk.com', 'noclickemail.com', 'nogmailspam.info', 'nomail.pw', 'nomail.xl.cx', 'nomail2me.com', 'nomorespamemails.com', 'nonspam.eu', 'nonspammer.de', 'noref.in', 'nospam.ze.tc', 'nospam4.us', 'nospamfor.us', 'nospammail.net', 
'nospamthanks.info', 'notmailinator.com', 'notsharingmy.info', 'nowhere.org', 'nowmymail.com', 'nurfuerspam.de', 'nwldx.com', 'objectmail.com', 'obobbo.com', 'odaymail.com', 'odnorazovoe.ru', 'one-time.email', 'oneoffemail.com', 'oneoffmail.com', 'onewaymail.com', 'onlatedotcom.info', 'online.ms', 'oopi.org', 'opayq.com', 'opentrash.com', 'ordinaryamerican.net', 'otherinbox.com', 'ourklips.com', 'outlawspam.com', 'ovpn.to', 'owlpic.com', 'pancakemail.com', 'paplease.com', 'pepbot.com', 'pfui.ru', 'pimpedupmyspace.com', 'pjjkp.com', 'plexolan.de', 'poczta.onet.pl', 'politikerclub.de', 'poofy.org', 'pookmail.com', 'pop3.xyz', 'postalmail.biz', 'privacy.net', 'privatdemail.net', 'privy-mail.com', 'privymail.de', 'proxymail.eu', 'prtnx.com', 'prtz.eu', 'pubmail.io', 'punkass.com', 'putthisinyourspamdatabase.com', 'pwrby.com', 'q314.net', 'qisdo.com', 'qisoa.com', 'qoika.com', 'qq.com', 'quickinbox.com', 'quickmail.nl', 'rainmail.biz', 'rcpt.at', 're-gister.com', 'reallymymail.com', 'realtyalerts.ca', 'recode.me', 'reconmail.com', 'recursor.net', 'recyclemail.dk', 'regbypass.com', 'regbypass.comsafe-mail.net', 'rejectmail.com', 'reliable-mail.com', 'remail.cf', 'remail.ga', 'renraku.in', 'rhyta.com', 'rklips.com', 'rmqkr.net', 'royal.net', 'rppkn.com', 'rtrtr.com', 's0ny.net', 'safe-mail.net', 'safersignup.de', 'safetymail.info', 'safetypost.de', 'sandelf.de', 'sayawaka-dea.info', 'saynotospams.com', 'scatmail.com', 'schafmail.de', 'schrott-email.de', 'secretemail.de', 'secure-mail.biz', 'secure-mail.cc', 'selfdestructingmail.com', 'selfdestructingmail.org', 'sendspamhere.com', 'senseless-entertainment.com', 'services391.com', 'sharedmailbox.org', 'sharklasers.com', 'shieldedmail.com', 'shieldemail.com', 'shiftmail.com', 'shitmail.me', 'shitmail.org', 'shitware.nl', 'shmeriously.com', 'shortmail.net', 'showslow.de', 'sibmail.com', 'sinnlos-mail.de', 'siteposter.net', 'skeefmail.com', 'slapsfromlastnight.com', 'slaskpost.se', 'slipry.net', 'slopsbox.com', 'slowslow.de', 'slushmail.com', 'smashmail.de', 'smellfear.com', 'smellrear.com', 'smoug.net', 'snakemail.com', 'sneakemail.com', 'sneakmail.de', 'snkmail.com', 'sofimail.com', 'sofort-mail.de', 'softpls.asia', 'sogetthis.com', 'soisz.com', 'solvemail.info', 'soodonims.com', 'spam.la', 'spam.su', 'spam4.me', 'spamail.de', 'spamarrest.com', 'spamavert.com', 'spambob.com', 'spambob.net', 'spambob.org', 'spambog.com', 'spambog.de', 'spambog.net', 'spambog.ru', 'spambooger.com', 'spambox.info', 'spambox.irishspringrealty.com', 'spambox.us', 'spambpg.com', 'spamcannon.com', 'spamcannon.net', 'spamcero.com', 'spamcon.org', 'spamcorptastic.com', 'spamcowboy.com', 'spamcowboy.net', 'spamcowboy.org', 'spamday.com', 'spamex.com', 'spamfighter.cf', 'spamfighter.ga', 'spamfighter.gq', 'spamfighter.ml', 'spamfighter.tk', 'spamfree.eu', 'spamfree24.com', 'spamfree24.de', 'spamfree24.eu', 'spamfree24.info', 'spamfree24.net', 'spamfree24.org', 'spamgoes.in', 'spamgourmet.com', 'spamgourmet.net', 'spamgourmet.org', 'spamherelots.com', 'spamhereplease.com', 'spamhole.com', 'spamify.com', 'spaminator.de', 'spamkill.info', 'spaml.com', 'spaml.de', 'spammotel.com', 'spamobox.com', 'spamoff.de', 'spamsalad.in', 'spamslicer.com', 'spamsphere.com', 'spamspot.com', 'spamstack.net', 'spamthis.co.uk', 'spamthisplease.com', 'spamtrail.com', 'spamtroll.net', 'speed.1s.fr', 'spikio.com', 'spoofmail.de', 'spybox.de', 'squizzy.de', 'ssoia.com', 'startkeys.com', 'stexsy.com', 'stinkefinger.net', 'stop-my-spam.cf', 'stop-my-spam.com', 'stop-my-spam.ga', 'stop-my-spam.ml', 
'stop-my-spam.tk', 'streetwisemail.com', 'stuffmail.de', 'super-auswahl.de', 'supergreatmail.com', 'supermailer.jp', 'superrito.com', 'superstachel.de', 'suremail.info', 'sute.jp', 'svk.jp', 'sweetxxx.de', 'tafmail.com', 'tagyourself.com', 'talkinator.com', 'tapchicuoihoi.com', 'teewars.org', 'teleworm.com', 'teleworm.us', 'temp-mail.com', 'temp-mail.net', 'temp-mail.org', 'temp-mail.ru', 'temp15qm.com', 'tempail.com', 'tempalias.com', 'tempe-mail.com', 'tempemail.biz', 'tempemail.co.za', 'tempemail.com', 'tempemail.net', 'tempemail.org', 'tempinbox.co.uk', 'tempinbox.com', 'tempmail.de', 'tempmail.eu', 'tempmail.it', 'tempmail2.com', 'tempmaildemo.com', 'tempmailer.com', 'tempmailer.de', 'tempomail.fr', 'temporarily.de', 'temporarioemail.com.br', 'temporaryemail.net', 'temporaryemail.us', 'temporaryforwarding.com', 'temporaryinbox.com', 'temporarymailaddress.com', 'tempsky.com', 'tempthe.net', 'tempymail.com', 'test.com', 'thanksnospam.info', 'thankyou2010.com', 'thc.st', 'thecloudindex.com', 'thisisnotmyrealemail.com', 'thismail.net', 'thismail.ru', 'throam.com', 'throwam.com', 'throwawayemailaddress.com', 'throwawaymail.com', 'tilien.com', 'tittbit.in', 'tizi.com', 'tmail.ws', 'tmailinator.com', 'tmpeml.info', 'toiea.com', 'tokenmail.de', 'toomail.biz', 'topranklist.de', 'tormail.net', 'tormail.org', 'tradermail.info', 'trash-amil.com', 'trash-mail.at', 'trash-mail.cf', 'trash-mail.com', 'trash-mail.de', 'trash-mail.ga', 'trash-mail.gq', 'trash-mail.ml', 'trash-mail.tk', 'trash-me.com', 'trash2009.com', 'trash2010.com', 'trash2011.com', 'trashdevil.com', 'trashdevil.de', 'trashemail.de', 'trashmail.at', 'trashmail.com', 'trashmail.de', 'trashmail.me', 'trashmail.net', 'trashmail.org', 'trashmail.ws', 'trashmailer.com', 'trashymail.com', 'trashymail.net', 'trayna.com', 'trbvm.com', 'trialmail.de', 'trickmail.net', 'trillianpro.com', 'tryalert.com', 'turual.com', 'twinmail.de', 'twoweirdtricks.com', 'tyldd.com', 'ubismail.net', 'uggsrock.com', 'umail.net', 'unlimit.com', 'unmail.ru', 'upliftnow.com', 'uplipht.com', 'uroid.com', 'us.af', 'valemail.net', 'venompen.com', 'vermutlich.net', 'veryrealemail.com', 'vidchart.com', 'viditag.com', 'viewcastmedia.com', 'viewcastmedia.net', 'viewcastmedia.org', 'viralplays.com', 'vmail.me', 'voidbay.com', 'vomoto.com', 'vpn.st', 'vsimcard.com', 'vubby.com', 'w3internet.co.uk', 'walala.org', 'walkmail.net', 'watchever.biz', 'webemail.me', 'webm4il.info', 'webuser.in', 'wee.my', 'weg-werf-email.de', 'wegwerf-email-addressen.de', 'wegwerf-email.at', 'wegwerf-emails.de', 'wegwerfadresse.de', 'wegwerfemail.com', 'wegwerfemail.de', 'wegwerfmail.de', 'wegwerfmail.info', 'wegwerfmail.net', 'wegwerfmail.org', 'wem.com', 'wetrainbayarea.com', 'wetrainbayarea.org', 'wh4f.org', 'whatiaas.com', 'whatpaas.com', 'whatsaas.com', 'whopy.com', 'whyspam.me', 'wickmail.net', 'wilemail.com', 'willhackforfood.biz', 'willselfdestruct.com', 'winemaven.info', 'wmail.cf', 'writeme.com', 'wronghead.com', 'wuzup.net', 'wuzupmail.net', 'wwwnew.eu', 'wzukltd.com', 'xagloo.com', 'xemaps.com', 'xents.com', 'xmaily.com', 'xoxy.net', 'xww.ro', 'xyzfree.net', 'yapped.net', 'yep.it', 'yogamaven.com', 'yomail.info', 'yopmail.com', 'yopmail.fr', 'yopmail.gq', 'yopmail.net', 'yopmail.org', 'yoru-dea.com', 'you-spam.com', 'youmail.ga', 'yourdomain.com', 'ypmail.webarnak.fr.eu.org', 'yuurok.com', 'yyhmail.com', 'z1p.biz', 'za.com', 'zebins.com', 'zebins.eu', 'zehnminuten.de', 'zehnminutenmail.de', 'zetmail.com', 'zippymail.info', 'zoaxe.com', 'zoemail.com', 'zoemail.net', 'zoemail.org', 
'zomg.info', 'zxcv.com', 'zxcvbnm.com', 'zzz.com', ] # reCAPTCHA API RECAPTCHA_SITE_KEY = None RECAPTCHA_SECRET_KEY = None RECAPTCHA_VERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify' # akismet spam check AKISMET_APIKEY = None SPAM_CHECK_ENABLED = False SPAM_CHECK_PUBLIC_ONLY = True SPAM_ACCOUNT_SUSPENSION_ENABLED = False SPAM_ACCOUNT_SUSPENSION_THRESHOLD = timedelta(hours=24) SPAM_FLAGGED_MAKE_NODE_PRIVATE = False SPAM_FLAGGED_REMOVE_FROM_SEARCH = False SHARE_API_TOKEN = None # number of nodes that need to be affiliated with an institution before the institution logo is shown on the dashboard INSTITUTION_DISPLAY_NODE_THRESHOLD = 5 # refresh campaign every 5 minutes CAMPAIGN_REFRESH_THRESHOLD = 5 * 60 # 5 minutes in seconds AWS_ACCESS_KEY_ID = None AWS_SECRET_ACCESS_KEY = None # sitemap default settings SITEMAP_TO_S3 = False SITEMAP_AWS_BUCKET = None SITEMAP_URL_MAX = 25000 SITEMAP_INDEX_MAX = 50000 SITEMAP_STATIC_URLS = [ OrderedDict([('loc', ''), ('changefreq', 'yearly'), ('priority', '0.5')]), OrderedDict([('loc', 'preprints'), ('changefreq', 'yearly'), ('priority', '0.5')]), OrderedDict([('loc', 'prereg'), ('changefreq', 'yearly'), ('priority', '0.5')]), OrderedDict([('loc', 'meetings'), ('changefreq', 'yearly'), ('priority', '0.5')]), OrderedDict([('loc', 'registries'), ('changefreq', 'yearly'), ('priority', '0.5')]), OrderedDict([('loc', 'explore/activity'), ('changefreq', 'weekly'), ('priority', '0.5')]), OrderedDict([('loc', 'support'), ('changefreq', 'yearly'), ('priority', '0.5')]), OrderedDict([('loc', 'faq'), ('changefreq', 'yearly'), ('priority', '0.5')]), ] SITEMAP_USER_CONFIG = OrderedDict([('loc', ''), ('changefreq', 'yearly'), ('priority', '0.5')]) SITEMAP_NODE_CONFIG = OrderedDict([('loc', ''), ('lastmod', ''), ('changefreq', 'monthly'), ('priority', '0.5')]) SITEMAP_REGISTRATION_CONFIG = OrderedDict([('loc', ''), ('lastmod', ''), ('changefreq', 'never'), ('priority', '0.5')]) SITEMAP_PREPRINT_CONFIG = OrderedDict([('loc', ''), ('lastmod', ''), ('changefreq', 'yearly'), ('priority', '0.5')]) SITEMAP_PREPRINT_FILE_CONFIG = OrderedDict([('loc', ''), ('lastmod', ''), ('changefreq', 'yearly'), ('priority', '0.5')]) CUSTOM_CITATIONS = { 'bluebook-law-review': 'bluebook', 'bluebook2': 'bluebook', 'bluebook-inline': 'bluebook' } PREPRINTS_ASSETS = '/static/img/preprints_assets/'
caneruguz/osf.io
website/settings/defaults.py
Python
apache-2.0
46,262
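The module docstring of defaults.py above notes that these settings can be overridden in local.py. A minimal, hypothetical development override keyed to names defined in that file could look like the following; the import style and every value shown are assumptions, not the project's actual local configuration.

# website/settings/local.py -- hypothetical development override
from .defaults import *  # noqa  (import style assumed, not confirmed here)

DEV_MODE = True
DEBUG_MODE = True
SECRET_KEY = 'replace-this-development-secret'
SEARCH_ENGINE = None          # skip Elasticsearch locally
USE_EMAIL = False
SENTRY_DSN = None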
from sklearn2sql_heroku.tests.classification import generic as class_gen


class_gen.test_model("GradientBoostingClassifier", "FourClass_10", "oracle")
antoinecarme/sklearn2sql_heroku
tests/classification/FourClass_10/ws_FourClass_10_GradientBoostingClassifier_oracle_code_gen.py
Python
bsd-3-clause
154
from __future__ import annotations import pytest_mock from xia2.cli.delta_cc_half import run from xia2.Modules.DeltaCcHalf import DeltaCcHalf def test_from_experiments_reflections(dials_data, tmpdir, capsys, mocker): data_dir = dials_data("l_cysteine_4_sweeps_scaled") input_files = data_dir.listdir("scaled_*.refl") + data_dir.listdir("scaled_*.expt") input_files = sorted(f.strpath for f in input_files) mocker.spy(DeltaCcHalf, "get_table") with tmpdir.as_cwd(): run(input_files) if getattr(pytest_mock, "version", "").startswith("1."): rv = DeltaCcHalf.get_table.return_value else: rv = DeltaCcHalf.get_table.spy_return assert rv == [ ["Dataset", "Batches", "CC½", "ΔCC½", "σ", "Compl. (%)"], ["0", "8 to 1795", " 0.995", " 0.000", "-1.11", "94.4"], ["3", "5 to 1694", " 0.995", " 0.000", "-0.59", "93.1"], ["2", "4 to 1696", " 0.994", " 0.001", " 0.84", "92.3"], ["1", "5 to 1694", " 0.994", " 0.001", " 0.85", "94.8"], ] assert tmpdir.join("delta_cc_hist.png").check() assert tmpdir.join("normalised_scores.png").check() def test_image_groups_from_unmerged_mtz(dials_data, tmpdir, capsys, mocker): data_dir = dials_data("x4wide_processed") mocker.spy(DeltaCcHalf, "get_table") with tmpdir.as_cwd(): run( [ data_dir.join("AUTOMATIC_DEFAULT_scaled_unmerged.mtz").strpath, "group_size=10", ] ) if getattr(pytest_mock, "version", "").startswith("1."): rv = DeltaCcHalf.get_table.return_value else: rv = DeltaCcHalf.get_table.spy_return assert rv == [ ["Dataset", "Batches", "CC½", "ΔCC½", "σ", "Compl. (%)"], ["0", "11 to 20", " 0.922", " 0.007", "-0.95", "99.4"], ["0", "31 to 40", " 0.922", " 0.007", "-0.84", "99.8"], ["0", "1 to 10", " 0.921", " 0.007", "-0.67", "99.3"], ["0", "21 to 30", " 0.921", " 0.007", "-0.59", "99.8"], ["0", "81 to 90", " 0.921", " 0.007", "-0.48", "100.0"], ["0", "61 to 70", " 0.920", " 0.008", " 0.17", "99.3"], ["0", "71 to 80", " 0.920", " 0.008", " 0.51", "99.9"], ["0", "41 to 50", " 0.920", " 0.009", " 0.72", "99.9"], ["0", "51 to 60", " 0.918", " 0.010", " 2.13", "99.4"], ] assert tmpdir.join("delta_cc_hist.png").check() assert tmpdir.join("normalised_scores.png").check()
xia2/xia2
tests/regression/test_delta_cc_half.py
Python
bsd-3-clause
2,567
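Both tests above repeat the same branch on pytest_mock's version string to pick return_value (pytest-mock 1.x) or spy_return (later releases). A small helper that factors out that branch, under the same assumption about which attribute each series exposes, might look like this:

import pytest_mock


def spy_result(spy):
    """Return the spied call's result across pytest-mock 1.x and newer releases."""
    if getattr(pytest_mock, "version", "").startswith("1."):
        return spy.return_value
    return spy.spy_return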
import fileinput

for line in fileinput.input(inplace=True, backup='.bak'):
    if fileinput.isfirstline():
        do_print = True
        dash_num = 0
    if do_print:
        print(line, end='')
    if line.startswith("---"):
        dash_num += 1
        if dash_num == 2:
            do_print = False
casutton/casutton.github.io
_scripts/before-dash.py
Python
mit
299
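The fileinput loop in before-dash.py above rewrites each file in place, keeping everything up to and including the second '---' line (the Jekyll front matter) and dropping the body, with the original saved as a .bak file. An illustrative invocation and its effect, with placeholder file contents:

# Illustrative invocation (file name is a placeholder):
#   python before-dash.py _posts/2020-01-01-example.md
#
# Input file:                    Rewritten file (.bak keeps the original):
#   ---                            ---
#   title: Example post            title: Example post
#   ---                            ---
#   Body text is removed.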
# -*- coding: utf-8 -*- ''' Little script meant to analyse elemental statistical stuff about the sequence of dice numbers rolled during a TEG game. The primary goal is to find out if different playes tend to score differently. I start by making a histogram of each player's performance. Part of the code in the function savehistfigure is adapted from the code posted as an answer in Stack Overflow. The lines that were remixed from the original code have been properly indicated.The question was made by user bioslime on Aug 2 '12 at 9:39. The code was originally posted in the answer by user imsc on Aug 2 '12 at 10:37. Question page: http://stackoverflow.com/questions/11774822/matplotlib-histogram-with-errorbars User bioslime profile page: http://stackoverflow.com/users/1565662/bioslime User imsc profile page: http://stackoverflow.com/users/302369/imsc @author: sarroyo ''' import numpy as np import matplotlib.pyplot as plt # Data, dice score secuence for each player Lo=np.array([2, 3, 4, 3, 5, 4, 4, 2, 2, 2, 3, 4, 4, 1, 5, 5, 4, 3, 2, 1, 2, 3, 6, 3, 4, 4, 4, 3, 4, 2, 3, 1, 1, 4, 5, 3, 6, 5, 1, 2, 5, 3, 6, 5, 1, 2, 5, 3, 5, 1, 1, 3, 5, 1, 6, 6, 3, 2, 2, 1, 2, 2, 4, 4, 1, 1, 2, 5, 4, 4, 1, 2, 2, 1]) Lu=np.array([2, 1, 2, 1, 6, 6, 1, 4, 3, 2, 6, 2, 2, 2, 2, 4, 6, 5, 4, 6, 4, 6, 6, 4, 6, 1, 6, 4, 6, 4, 5, 6, 6, 5, 3, 3, 6, 5, 5, 6, 1, 4, 6, 3, 2, 4, 4, 1, 5, 5, 1, 6, 3, 5, 4, 2, 6, 4, 1, 6, 6, 2, 2, 5, 4]) Go=np.array([1, 5, 4, 1, 6, 2, 6, 1, 4, 1, 4, 5, 6, 3, 3, 4, 4, 3, 5, 1, 4, 6, 4, 4, 6, 2, 6, 4, 6, 1, 5, 1, 5, 4, 1, 1, 4, 3, 3, 4, 3, 2, 6, 6, 6, 5, 5, 3, 3, 3, 4, 2, 4, 1]) Se=np.array([4, 1, 1, 6, 6, 3, 2, 1, 3, 4, 5, 2, 1, 4, 3, 2, 5, 4, 6, 6, 6, 1, 5, 1, 2, 5, 3, 1, 6, 1, 5, 5, 2, 6, 2, 6, 5, 5, 5, 5, 3, 6, 6, 4, 2, 6, 6, 2, 2, 3]) # list of data vectors (not array because of different length) Dat=[[Lo,Lu],[Go,Se]] # players names Jugadores=[['Lore','Lu'],['Gon','Seba']] def savehistfigure(Dat,Jugadores): ''' Function that plots the histogram for each player. ''' # compute number of throws of each player tiradas=[[np.size(Lo),np.size(Lu)],[np.size(Go),np.size(Se)]] # average score promedio=[[np.average(Lo),np.average(Lu)], [np.average(Go),np.average(Se)]] xlabels=['', 1, 2, 3, 4, 5, 6, '']# x axis labels fig, axis = plt.subplots(2,2)# create four plots bins=np.arange(0.5,7.5,1) # bins edges to compute frequency prob=1.0/6.0 # probability if ideal # loop for i in range(2): for j in range(2): print 'plotting %d %d'%(i,j) # leyend to be displayed on graph txt='%d tiradas\npromedio %1.2f'%(tiradas[i][j],promedio[i][j]) axis[i,j].set_yticks([0,1/6.0]) axis[i,j].set_yticklabels(['0','1/6']) axis[i,j].set_ylim([0,0.3]) axis[i,j].set_yticks([]) axis[i,j].set_xticklabels(xlabels) axis[i,j].text(0.3, 0.23, txt) axis[i,j].set_title(Jugadores[i][j]) axis[i,j].plot([0.5,6.5],[prob, prob]) ### Code attributed to user imsc in an answer to user bioslime's ### question in Stack Overflow, starts on this line until the line ### appropiately indicated. # compute histogram y,bin_edges=np.histogram( Dat[i][j], bins=bins, normed=False) # compute center of bins bin_centers = 0.5*(bin_edges[1:] + bin_edges[:-1]) # now plot bars with error bars axis[i,j].bar( bin_centers, y*1.0/tiradas[i][j], yerr = y**0.5/tiradas[i][j]) # marker = '.') # drawstyle = 'steps-mid-') ### End of the code attributed to user imsc in an answer to user ### bioslime's question in Stack Overflow. 
# axis[i,j].hist( # Dat[i][j], # bins=bins, # normed=True, # histtype='bar', # align='mid', # rwidth=0.9) print 'plotting %d %d'%(i,j) # once constructed all four histograms, save image plt.savefig('histogramas2.png') plt.show() def comparemodels01(Dat,Jugadores): ''' I propose two models for the probability of each dice face. Model 0 consists of equal constant probability for each face (1/6). Model 1 describes the probability of each face as a linear function of the number of the face (to test if some people have greater/lower probability of getting higher scores). This function returns the ratio P(Mod1|Data)/P(Mod0|Data) and also graphs the probability distribution of the slope of the linear function. ''' freq=np.ndarray([2,2,6]) # variable to save feqs atics=200 # Number of intervals to plot slope a_range=np.linspace(-1.0/15,1.0/15,atics) # range of posible slopes prob_a=np.ndarray([2,2,atics]) # to save prob dist of a p6=1.0/6 # probability if uniform # calculate prob dist of data given slope for each player #do with respect to prob of data given model 0 to take results #of each player to same level. They differ in number of throws. for i in range(2): for j in range(2): # get frequency of appearance of each score freq[i,j]=np.histogram(Dat[i][j],6,(0.5,6.5))[0] prob_model0=1.0/(6**np.sum(freq[i,j])) # evaluate probability of data given slope # function comes from ... prob_a[i,j]=[ np.prod( [ ((l-3.5)*a_range[k]+p6) for l in range(1,7)] **freq[i][j] ) for k in range(atics)] / prob_model0#[i,j] #don't know how to do the normalization more #efficiently... # prob_model0=1.0/(6**np.sum(freq,2)) # (1/6)**(Number of rolls) #probability of data given model 1 (integrated over slope values) #with respect to probability of data given Model 0. d_a=1.0/15/atics # columns width, useful for integration mod1_mod0=np.sum(prob_a,2)*d_a#/ prob_model0 print mod1_mod0# , prob_model0 # now plot fig, axis = plt.subplots(2,2)# create four plots probmax = np.max(np.max(np.max(prob_a))) # limit of y axis for i in range(2): for j in range(2): # leyend to be displayed on graph # axis[i,j].set_yticklabels([]) # axis[i,j].set_ylim(0,probmax) # axis[i,j].set_yscale('log') # axis[i,j].set_yticks([]) # axis[i,j].set_xticklabels(xlabels) # axis[i,j].text(0.3, 0.23, txt) axis[i,j].set_title(Jugadores[i][j]) # axis[i,j].plot([0.5,6.5],[prob, prob]) axis[i,j].plot(a_range,prob_a[i][j]) # once constructed all four histograms, save image # plt.savefig('aProbdist.png') plt.show() # return the relative probability of the models for each pplayer return mod1_mod0 savehistfigure(Dat,Jugadores) #comparemodels01(Dat,Jugadores) # auxiliar space to check formula #Iter_l= ((l-3.5)*a_range[k]+p6) #Iter_k=np.prod( [ Iter_l for l in range(1,7)]**freq[i][j] ) #prob_a[i,j]=[ Iter_k for k in range(atics)] #[ np.prod( [ ((l-3.5)*a_range[k]+p6) for l in range(1,7)]**freq[i][j] ) for k in range(atics)]
sebalander/tegstatistics
tegstatistics.py
Python
gpl-3.0
6,985
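The script above is Python 2 era (print statements, numpy's since-removed normed keyword). A minimal stand-alone sketch of the same histogram-with-Poisson-error-bars idea on current numpy/matplotlib, using an invented roll sequence:

import numpy as np
import matplotlib.pyplot as plt

rolls = np.array([2, 3, 4, 3, 5, 4, 6, 1, 2, 5])  # illustrative roll sequence
bins = np.arange(0.5, 7.5, 1)
counts, edges = np.histogram(rolls, bins=bins)
centers = 0.5 * (edges[1:] + edges[:-1])
n = float(rolls.size)

fig, ax = plt.subplots()
ax.bar(centers, counts / n, yerr=np.sqrt(counts) / n)  # Poisson error bars
ax.axhline(1.0 / 6, linestyle='--')  # expected frequency for a fair die
ax.set_title('example player')
plt.show()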
# -*- coding: utf-8 -*- from setuptools import setup, find_packages with open('VERSION', 'r') as version_file: version = version_file.read().rstrip() with open('README.rst', 'r') as readme_file: long_desc = readme_file.read() requires = ['Sphinx >= 1.0'] setup( name='sphinxcontrib-cheader', version=version, url='http://bitbucket.org/birkenfeld/sphinx-contrib', download_url='http://pypi.python.org/pypi/sphinxcontrib-cheader', license='Public Domain', author='Arto Bendiken', author_email='arto@bendiken.net', description='Sphinx c:header directive', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: Public Domain', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Documentation', 'Topic :: Utilities', ], platforms='any', packages=find_packages(), include_package_data=True, install_requires=requires, namespace_packages=['sphinxcontrib'], )
Lemma1/MAC-POSTS
doc_builder/sphinx-contrib/cheader/setup.py
Python
mit
1,194
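Once installed, a project would enable the extension from its Sphinx conf.py. A sketch, assuming the module is importable as sphinxcontrib.cheader (inferred from the namespace_packages declaration, not stated in this setup.py):

# conf.py (sketch)
extensions = ['sphinxcontrib.cheader']  # assumed module path under the sphinxcontrib namespace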
""" Models used by the block structure framework. """ from __future__ import absolute_import import errno from contextlib import contextmanager from datetime import datetime from logging import getLogger import six from six.moves import map from django.conf import settings from django.core.exceptions import SuspiciousOperation from django.core.files.base import ContentFile from django.db import models, transaction from django.utils.encoding import python_2_unicode_compatible from model_utils.models import TimeStampedModel from openedx.core.djangoapps.xmodule_django.models import UsageKeyWithRunField from openedx.core.storage import get_storage from . import config from .exceptions import BlockStructureNotFound log = getLogger(__name__) def _create_path(directory, filename): """ Returns the full path for the given directory and filename. """ return '{}/{}'.format(directory, filename) def _directory_name(data_usage_key): """ Returns the directory name for the given data_usage_key. """ # replace any '/' in the usage key so they aren't interpreted # as folder separators. encoded_usage_key = six.text_type(data_usage_key).replace('/', '_') return '{}{}'.format( settings.BLOCK_STRUCTURES_SETTINGS.get('DIRECTORY_PREFIX', ''), encoded_usage_key, ) def _path_name(bs_model, _filename): """ Returns path name to use for the given BlockStructureModel instance. """ filename = datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S-%f') return _create_path( _directory_name(bs_model.data_usage_key), filename, ) def _bs_model_storage(): """ Get django Storage object for BlockStructureModel. """ return get_storage( settings.BLOCK_STRUCTURES_SETTINGS.get('STORAGE_CLASS'), **settings.BLOCK_STRUCTURES_SETTINGS.get('STORAGE_KWARGS', {}) ) class CustomizableFileField(models.FileField): """ Subclass of FileField that allows custom settings to not be serialized (hard-coded) in migrations. Otherwise, migrations include optional settings for storage (such as the storage class and bucket name); we don't want to create new migration files for each configuration change. """ def __init__(self, *args, **kwargs): kwargs.update(dict( upload_to=_path_name, storage=_bs_model_storage(), max_length=500, # allocate enough for base path + prefix + usage_key + timestamp in filepath )) super(CustomizableFileField, self).__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super(CustomizableFileField, self).deconstruct() del kwargs['upload_to'] del kwargs['storage'] del kwargs['max_length'] return name, path, args, kwargs @contextmanager def _storage_error_handling(bs_model, operation, is_read_operation=False): """ Helpful context manager that handles various errors from the backend storage. Typical errors at read time on configuration changes: IOError: - File not found (S3 or FS) - Bucket name changed (S3) SuspiciousOperation - Path mismatches when changing backends Other known errors: OSError - Access issues in creating files (FS) S3ResponseError - Incorrect credentials with 403 status (S3) - Non-existent bucket with 404 status (S3) """ try: yield except Exception as error: # pylint: disable=broad-except log.exception(u'BlockStructure: Exception %s on store %s; %s.', error.__class__, operation, bs_model) if isinstance(error, OSError) and error.errno in (errno.EACCES, errno.EPERM): # pylint: disable=no-member raise elif is_read_operation and isinstance(error, (IOError, SuspiciousOperation)): # May have been caused by one of the possible error # situations listed above. 
            # Raise BlockStructureNotFound
            # so the block structure can be regenerated and restored.
            raise BlockStructureNotFound(bs_model.data_usage_key)
        else:
            raise


@python_2_unicode_compatible
class BlockStructureModel(TimeStampedModel):
    """
    Model for storing Block Structure information.

    .. no_pii:
    """
    VERSION_FIELDS = [
        u'data_version',
        u'data_edit_timestamp',
        u'transformers_schema_version',
        u'block_structure_schema_version',
    ]
    UNIQUENESS_FIELDS = [u'data_usage_key'] + VERSION_FIELDS

    class Meta(object):
        db_table = 'block_structure'

    data_usage_key = UsageKeyWithRunField(
        u'Identifier of the data being collected.',
        blank=False,
        max_length=255,
        unique=True,
    )
    data_version = models.CharField(
        u'Version of the data at the time of collection.',
        blank=True,
        null=True,
        max_length=255,
    )
    data_edit_timestamp = models.DateTimeField(
        u'Edit timestamp of the data at the time of collection.',
        blank=True,
        null=True,
    )
    transformers_schema_version = models.CharField(
        u'Representation of the schema version of the transformers used during collection.',
        blank=False,
        max_length=255,
    )
    block_structure_schema_version = models.CharField(
        u'Version of the block structure schema at the time of collection.',
        blank=False,
        max_length=255,
    )
    data = CustomizableFileField()

    def get_serialized_data(self):
        """
        Returns the collected data for this instance.
        """
        operation = u'Read'
        with _storage_error_handling(self, operation, is_read_operation=True):
            serialized_data = self.data.read()

        self._log(self, operation, serialized_data)
        return serialized_data

    @classmethod
    def get(cls, data_usage_key):
        """
        Returns the entry associated with the given data_usage_key.
        Raises:
            BlockStructureNotFound if an entry for data_usage_key is not found.
        """
        try:
            return cls.objects.get(data_usage_key=data_usage_key)
        except cls.DoesNotExist:
            log.info(u'BlockStructure: Not found in table; %s.', data_usage_key)
            raise BlockStructureNotFound(data_usage_key)

    @classmethod
    def update_or_create(cls, serialized_data, data_usage_key, **kwargs):
        """
        Updates or creates the BlockStructureModel entry
        for the given data_usage_key in the kwargs,
        uploading serialized_data as the content data.
        """
        # Use an atomic transaction so the model isn't updated
        # unless the file is successfully persisted.
        with transaction.atomic():
            bs_model, created = cls.objects.update_or_create(defaults=kwargs, data_usage_key=data_usage_key)
            operation = u'Created' if created else u'Updated'
            with _storage_error_handling(bs_model, operation):
                bs_model.data.save('', ContentFile(serialized_data))
            cls._log(bs_model, operation, serialized_data)

        if not created:
            cls._prune_files(data_usage_key)

        return bs_model, created

    def __str__(self):
        """
        Returns a string representation of this model.
        """
        return u', '.join(
            u'{}: {}'.format(field_name, six.text_type(getattr(self, field_name)))
            for field_name in self.UNIQUENESS_FIELDS
        )

    @classmethod
    def _prune_files(cls, data_usage_key, num_to_keep=None):
        """
        Deletes previous file versions for data_usage_key.
""" if not settings.BLOCK_STRUCTURES_SETTINGS.get('PRUNING_ACTIVE', False): return if num_to_keep is None: num_to_keep = config.num_versions_to_keep() try: all_files_by_date = sorted(cls._get_all_files(data_usage_key)) files_to_delete = all_files_by_date[:-num_to_keep] if num_to_keep > 0 else all_files_by_date cls._delete_files(files_to_delete) log.info( u'BlockStructure: Deleted %d out of total %d files in store; data_usage_key: %s, num_to_keep: %d.', len(files_to_delete), len(all_files_by_date), data_usage_key, num_to_keep, ) except Exception: # pylint: disable=broad-except log.exception(u'BlockStructure: Exception when deleting old files; data_usage_key: %s.', data_usage_key) @classmethod def _delete_files(cls, files): """ Deletes the given files from storage. """ storage = _bs_model_storage() list(map(storage.delete, files)) @classmethod def _get_all_files(cls, data_usage_key): """ Returns all filenames that exist for the given key. """ directory = _directory_name(data_usage_key) _, filenames = _bs_model_storage().listdir(directory) return [ _create_path(directory, filename) for filename in filenames if filename and not filename.startswith('.') ] @classmethod def _log(cls, bs_model, operation, serialized_data): """ Writes log information for the given values. """ log.info( u'BlockStructure: %s in store %s at %s%s; %s, size: %d', operation, bs_model.data.storage.__class__, getattr(bs_model.data.storage, 'bucket_name', ''), getattr(bs_model.data.storage, 'location', ''), bs_model, len(serialized_data), )
ESOedX/edx-platform
openedx/core/djangoapps/content/block_structure/models.py
Python
agpl-3.0
9,770
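A rough round trip through the model above; usage_key stands in for a real UsageKey value and the version strings are free-form (this is a sketch, not code from edx-platform):

bs_model, created = BlockStructureModel.update_or_create(
    b'{"blocks": []}',                     # serialized payload to persist
    data_usage_key=usage_key,              # hypothetical UsageKey value
    data_version='v1',
    transformers_schema_version='1',
    block_structure_schema_version='1',
)
model = BlockStructureModel.get(usage_key)  # raises BlockStructureNotFound if absent
payload = model.get_serialized_data()       # reads back through the storage backend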
from django import forms
from django.core.exceptions import ImproperlyConfigured

from payment.forms import SimplePayShipForm
from payment.modules.purchaseorder.models import PurchaseOrder
from satchmo_utils import app_enabled
from signals_ahoy import signals


class PurchaseorderPayShipForm(SimplePayShipForm):
    po_number = forms.CharField(max_length=20, required=False)

    def __init__(self, *args, **kwargs):
        if not app_enabled('purchaseorder'):
            raise ImproperlyConfigured('To use Purchase Order payment methods, you must have payment.modules.purchaseorder in your INSTALLED_APPS')
        super(PurchaseorderPayShipForm, self).__init__(*args, **kwargs)

    def save(self, request, cart, contact, payment_module):
        """Save the order and the po information for this orderpayment"""
        signals.form_presave.send(PurchaseorderPayShipForm, form=self)
        super(PurchaseorderPayShipForm, self).save(request, cart, contact, payment_module)
        data = self.cleaned_data
        po = PurchaseOrder(po_number=data.get('po_number', ''), order=self.order)
        po.save()
        self.purchaseorder = po
        signals.form_postsave.send(PurchaseorderPayShipForm, form=self)
russellmayhew/satchmo
satchmo/apps/payment/modules/purchaseorder/forms.py
Python
bsd-3-clause
1,253
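The form above emits signals_ahoy's form_presave/form_postsave notifications, so listeners attach with the usual Django signal API. An illustrative receiver (the logging behaviour is invented for the example):

from signals_ahoy import signals

def log_po_saved(sender, form=None, **kwargs):
    # runs after PurchaseorderPayShipForm.save() has stored the PurchaseOrder
    print('purchase order saved: %s' % form.purchaseorder.po_number)

signals.form_postsave.connect(log_po_saved, sender=PurchaseorderPayShipForm)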
import argparse import os import subprocess from platform import python_version from py3status.version import version def parse_cli_args(): """ Parse the command line arguments """ # get config paths home_path = os.path.expanduser("~") xdg_home_path = os.environ.get("XDG_CONFIG_HOME", "{}/.config".format(home_path)) xdg_dirs_path = os.environ.get("XDG_CONFIG_DIRS", "/etc/xdg") # get i3status path try: with open(os.devnull, "w") as devnull: command = ["which", "i3status"] i3status_path = ( subprocess.check_output(command, stderr=devnull).decode().strip() ) except subprocess.CalledProcessError: i3status_path = None # get window manager with open(os.devnull, "w") as devnull: if subprocess.call(["pgrep", "i3"], stdout=devnull) == 0: wm = "i3" else: wm = "sway" # i3status config file default detection # respect i3status' file detection order wrt issue #43 i3status_config_file_candidates = [ "{}/py3status/config".format(xdg_home_path), "{}/i3status/config".format(xdg_home_path), "{}/i3/i3status.conf".format(xdg_home_path), # custom "{}/.i3status.conf".format(home_path), "{}/.i3/i3status.conf".format(home_path), # custom "{}/i3status/config".format(xdg_dirs_path), "/etc/i3status.conf", ] for fn in i3status_config_file_candidates: if os.path.isfile(fn): i3status_config_file_default = fn break else: # if files does not exists, defaults to ~/.i3/i3status.conf i3status_config_file_default = i3status_config_file_candidates[3] class Parser(argparse.ArgumentParser): # print usages and exit on errors def error(self, message): print("\x1b[1;31merror: \x1b[0m{}".format(message)) self.print_help() self.exit(1) # hide choices on errors def _check_value(self, action, value): if action.choices is not None and value not in action.choices: raise argparse.ArgumentError( action, "invalid choice: '{}'".format(value) ) class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter): def _format_action_invocation(self, action): metavar = self._format_args(action, action.dest.upper()) return "{} {}".format(", ".join(action.option_strings), metavar) # command line options parser = Parser( description="The agile, python-powered, i3status wrapper", formatter_class=HelpFormatter, ) parser.add_argument( "-b", "--dbus-notify", action="store_true", dest="dbus_notify", help="send notifications via dbus instead of i3-nagbar", ) parser.add_argument( "-c", "--config", action="store", default=i3status_config_file_default, dest="i3status_config_path", help="load config", metavar="FILE", type=str, ) parser.add_argument( "-d", "--debug", action="store_true", help="enable debug logging in syslog and --log-file", ) parser.add_argument( "-g", "--gevent", action="store_true", dest="gevent", help="enable gevent monkey patching", ) parser.add_argument( "-i", "--include", action="append", dest="include_paths", help="append additional user-defined module paths", metavar="PATH", ) parser.add_argument( "-l", "--log-file", action="store", dest="log_file", help="enable logging to FILE", metavar="FILE", type=str, ) parser.add_argument( "-s", "--standalone", action="store_true", dest="standalone", help="run py3status without i3status", ) parser.add_argument( "-t", "--timeout", action="store", default=60, dest="cache_timeout", help="default module cache timeout in seconds", metavar="INT", type=int, ) parser.add_argument( "-m", "--disable-click-events", action="store_true", dest="disable_click_events", help="disable all click events", ) parser.add_argument( "-u", "--i3status", action="store", default=i3status_path, dest="i3status_path", help="specify 
i3status path", metavar="PATH", type=str, ) parser.add_argument( "-v", "--version", action="store_true", dest="print_version", help="show py3status version and exit", ) parser.add_argument( "--wm", action="store", # add comment to preserve formatting dest="wm", metavar="WINDOW_MANAGER", default=wm, choices=["i3", "sway"], help="specify window manager i3 or sway", ) # deprecations parser.add_argument("-n", "--interval", help=argparse.SUPPRESS) # parse options, command, etc options = parser.parse_args() # make versions options.python_version = python_version() options.version = version if options.print_version: msg = "py3status version {version} (python {python_version}) on {wm}" print(msg.format(**vars(options))) parser.exit() # get wm options.wm_name = options.wm options.wm = { "i3": {"msg": "i3-msg", "nag": "i3-nagbar"}, "sway": {"msg": "swaymsg", "nag": "swaynag"}, }[options.wm] # make it i3status if None if not options.i3status_path: options.i3status_path = "i3status" # make include path to search for user modules if None if not options.include_paths: options.include_paths = [ "{}/py3status/modules".format(xdg_home_path), "{}/i3status/py3status".format(xdg_home_path), "{}/i3/py3status".format(xdg_home_path), "{}/.i3/py3status".format(home_path), ] include_paths = [] for path in options.include_paths: path = os.path.abspath(path) if os.path.isdir(path) and os.listdir(path): include_paths.append(path) options.include_paths = include_paths # defaults del options.interval del options.print_version options.minimum_interval = 0.1 # minimum module update interval options.click_events = not options.__dict__.pop("disable_click_events") # all done return options
Andrwe/py3status
py3status/argparsers.py
Python
bsd-3-clause
6,668
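parse_cli_args() is self-contained and returns an argparse.Namespace with the derived fields already filled in; a minimal usage sketch:

from py3status.argparsers import parse_cli_args

options = parse_cli_args()
print(options.wm_name)               # 'i3' or 'sway'
print(options.i3status_config_path)  # first existing candidate config file
print(options.include_paths)         # only existing, non-empty directories survive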
import os import pathlib # RELEASE-UPDATE APP_DIR = pathlib.Path(os.path.realpath(__file__)).parent.parent ROOT_DIR = APP_DIR.parent DEFAULT_DB_PATH = '/instance/storage' PROJECT_NAME = 'Zordon' PROJECT_VERSION = '4.0.0' PROJECT_FULL_NAME = '{} v{}'.format(PROJECT_NAME, PROJECT_VERSION)
KrusnikViers/Zordon
app/core/info.py
Python
mit
290
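Usage is plain attribute access, for instance:

from app.core.info import APP_DIR, PROJECT_FULL_NAME

print(PROJECT_FULL_NAME)  # -> 'Zordon v4.0.0'
print(APP_DIR)            # package directory resolved from __file__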
'''
Close the Bone/Minibone isolation valves, open Bone to Turbo for 15
seconds, then reopen Bone to Minibone, trapping gas in the bone pipette.
'''
def main():
    info('trap in pipette')
    close(description='Bone to Minibone')
    close(description='Minibone to Bone')
    sleep(2)

    open(description='Bone to Turbo')
    sleep(15)
    close(description='Bone to Turbo')
    sleep(2)

    open(description='Bone to Minibone')
UManPychron/pychron
docs/user_guide/operation/scripts/examples/helix/extraction/felix/TrapInBonePipette.py
Python
apache-2.0
291
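In pychron pyscripts, info/open/close/sleep are injected by the script runner rather than imported. A rough stand-alone stub for dry-running the valve sequence outside pychron (names and behaviour assumed for illustration):

import time

def info(msg):
    print('INFO: %s' % msg)

def close(description=''):
    print('close valve: %s' % description)

def open(description=''):  # shadows the builtin, mirroring the DSL command
    print('open valve: %s' % description)

sleep = time.sleep  # or a no-op for an instant dry run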
# Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ ROS Service Description Language Spec Implements http://ros.org/wiki/srv """ import os import sys from . names import is_legal_resource_name, is_legal_resource_base_name, package_resource_name, resource_name class SrvSpec(object): def __init__(self, request, response, text, full_name = '', short_name = '', package = ''): alt_package, alt_short_name = package_resource_name(full_name) if not package: package = alt_package if not short_name: short_name = alt_short_name self.request = request self.response = response self.text = text self.full_name = full_name self.short_name = short_name self.package = package def __eq__(self, other): if not other or not isinstance(other, SrvSpec): return False return self.request == other.request and \ self.response == other.response and \ self.text == other.text and \ self.full_name == other.full_name and \ self.short_name == other.short_name and \ self.package == other.package def __ne__(self, other): if not other or not isinstance(other, SrvSpec): return True return not self.__eq__(other) def __repr__(self): return "SrvSpec[%s, %s]"%(repr(self.request), repr(self.response))
Marilynmontu/final-competiton
mk/VRBRAIN/Tools/genmsg/src/genmsg/srvs.py
Python
gpl-3.0
3,017
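An illustrative construction of a SrvSpec; req_spec and resp_spec stand in for parsed message specs (this usage is a sketch, not taken from the genmsg sources):

spec = SrvSpec(req_spec, resp_spec,
               text='int64 a\nint64 b\n---\nint64 sum',
               full_name='my_pkg/AddTwoInts')
print(spec.package, spec.short_name)  # 'my_pkg', 'AddTwoInts', derived from full_name
other = SrvSpec(req_spec, resp_spec,
                text='int64 a\nint64 b\n---\nint64 sum',
                full_name='my_pkg/AddTwoInts')
print(spec == other)  # True: equality is field-wise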
"""empty message Revision ID: 01356afcc714 Revises: 356add9f6b39 Create Date: 2017-09-25 18:36:08.427994 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '01356afcc714' down_revision = '356add9f6b39' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('firebase_name', sa.String(length=50), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('user', 'firebase_name') # ### end Alembic commands ###
ahoarfrost/metaseek
server/migrations/versions/01356afcc714_.py
Python
mit
670
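Applying this revision programmatically, equivalent to `alembic upgrade 01356afcc714` on the command line (a sketch; the alembic.ini location is assumed):

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')             # assumed path to the project config
command.upgrade(cfg, '01356afcc714')    # adds user.firebase_name
# command.downgrade(cfg, '356add9f6b39')  # would drop it again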
# Tweepy # Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. import httplib import urllib import time import re from tweepy.error import TweepError from tweepy.utils import convert_to_utf8_str from tweepy.models import Model re_path_template = re.compile('{\w+}') def bind_api(**config): class APIMethod(object): path = config['path'] payload_type = config.get('payload_type', None) payload_list = config.get('payload_list', False) allowed_param = config.get('allowed_param', []) method = config.get('method', 'GET') require_auth = config.get('require_auth', False) search_api = config.get('search_api', False) use_cache = config.get('use_cache', True) def __init__(self, api, args, kargs): # If authentication is required and no credentials # are provided, throw an error. if self.require_auth and not api.auth: raise TweepError('Authentication required!') self.api = api self.post_data = kargs.pop('post_data', None) self.retry_count = kargs.pop('retry_count', api.retry_count) self.retry_delay = kargs.pop('retry_delay', api.retry_delay) self.retry_errors = kargs.pop('retry_errors', api.retry_errors) self.headers = kargs.pop('headers', {}) self.build_parameters(args, kargs) # Pick correct URL root to use if self.search_api: self.api_root = api.search_root else: self.api_root = api.api_root # Perform any path variable substitution self.build_path() if api.secure: self.scheme = 'https://' else: self.scheme = 'http://' if self.search_api: self.host = api.search_host else: self.host = api.host # Manually set Host header to fix an issue in python 2.5 # or older where Host is set including the 443 port. # This causes Twitter to issue 301 redirect. # See Issue https://github.com/tweepy/tweepy/issues/12 self.headers['Host'] = self.host def build_parameters(self, args, kargs): self.parameters = {} for idx, arg in enumerate(args): if arg is None: continue try: self.parameters[self.allowed_param[idx]] = convert_to_utf8_str(arg) except IndexError: raise TweepError('Too many parameters supplied!') for k, arg in kargs.items(): if arg is None: continue if k in self.parameters: raise TweepError('Multiple values for parameter %s supplied!' % k) self.parameters[k] = convert_to_utf8_str(arg) def build_path(self): for variable in re_path_template.findall(self.path): name = variable.strip('{}') if name == 'user' and 'user' not in self.parameters and self.api.auth: # No 'user' parameter provided, fetch it from Auth instead. value = self.api.auth.get_username() else: try: value = urllib.quote(self.parameters[name]) except KeyError: raise TweepError('No parameter value found for path variable: %s' % name) del self.parameters[name] self.path = self.path.replace(variable, value) def execute(self): # Build the request URL url = self.api_root + self.path if len(self.parameters): url = '%s?%s' % (url, urllib.urlencode(self.parameters)) # Query the cache if one is available # and this request uses a GET method. if self.use_cache and self.api.cache and self.method == 'GET': cache_result = self.api.cache.get(url) # if cache result found and not expired, return it if cache_result: # must restore api reference if isinstance(cache_result, list): for result in cache_result: if isinstance(result, Model): result._api = self.api else: if isinstance(cache_result, Model): cache_result._api = self.api return cache_result # Continue attempting request until successful # or maximum number of retries is reached. 
retries_performed = 0 while retries_performed < self.retry_count + 1: # Open connection # FIXME: add timeout if self.api.secure: conn = httplib.HTTPSConnection(self.host) else: conn = httplib.HTTPConnection(self.host) # Apply authentication if self.api.auth: self.api.auth.apply_auth( self.scheme + self.host + url, self.method, self.headers, self.parameters ) # Execute request try: conn.request(self.method, url, headers=self.headers, body=self.post_data) resp = conn.getresponse() except Exception, e: raise TweepError('Failed to send request: %s' % e) # Exit request loop if non-retry error code if self.retry_errors: if resp.status not in self.retry_errors: break else: if resp.status == 200: break # Sleep before retrying request again time.sleep(self.retry_delay) retries_performed += 1 # If an error was returned, throw an exception self.api.last_response = resp if resp.status != 200: try: error_msg = self.api.parser.parse_error(resp.read()) except Exception: error_msg = "Twitter error response: status code = %s" % resp.status raise TweepError(error_msg, resp) # Parse the response payload result = self.api.parser.parse(self, resp.read()) conn.close() # Store result into cache if one is available. if self.use_cache and self.api.cache and self.method == 'GET' and result: self.api.cache.store(url, result) return result def _call(api, *args, **kargs): method = APIMethod(api, args, kargs) return method.execute() # Set pagination mode if 'cursor' in APIMethod.allowed_param: _call.pagination_mode = 'cursor' elif 'page' in APIMethod.allowed_param: _call.pagination_mode = 'page' return _call
olemoudi/tweetdigest
tweepy/tweepy/binder.py
Python
apache-2.0
7,174
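bind_api is consumed by tweepy's API class, where each endpoint is declared once and called with positional or keyword arguments matched against allowed_param. A sketch mirroring the era's real definitions (the `api` object with host/auth/parser attributes is assumed):

get_status = bind_api(
    path='/statuses/show.json',
    payload_type='status',
    allowed_param=['id'],
)

status = get_status(api, id=123)  # parameters are urlencoded into the query string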
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ Various utilities for WebJournal, e.g. config parser, etc. """ import time import datetime import calendar import re import os import cPickle import math import urllib from MySQLdb import OperationalError from xml.dom import minidom from urlparse import urlparse from invenio.config import \ CFG_ETCDIR, \ CFG_SITE_URL, \ CFG_CACHEDIR, \ CFG_SITE_LANG, \ CFG_ACCESS_CONTROL_LEVEL_SITE, \ CFG_SITE_SUPPORT_EMAIL, \ CFG_DEVEL_SITE, \ CFG_CERN_SITE from invenio.dbquery import run_sql from invenio.bibformat_engine import BibFormatObject from invenio.search_engine import search_pattern, record_exists from invenio.messages import gettext_set_language from invenio.errorlib import register_exception from invenio.urlutils import make_invenio_opener WEBJOURNAL_OPENER = make_invenio_opener('WebJournal') ########################### REGULAR EXPRESSIONS ###################### header_pattern = re.compile('<p\s*(align=justify)??>\s*<strong>(?P<header>.*?)</strong>\s*</p>') header_pattern2 = re.compile('<p\s*(class="articleHeader").*?>(?P<header>.*?)</p>') para_pattern = re.compile('<p.*?>(?P<paragraph>.+?)</p>', re.DOTALL) img_pattern = re.compile('<img.*?src=("|\')?(?P<image>\S+?)("|\'|\s).*?/>', re.DOTALL) image_pattern = re.compile(r''' (<a\s*href=["']?(?P<hyperlink>\S*)["']?>)?# get the link location for the image \s*# after each tag we can have arbitrary whitespaces <center># the image is always centered \s* <img\s*(class=["']imageScale["'])*?\s*src=(?P<image>\S*)\s*border=1\s*(/)?># getting the image itself \s* </center> \s* (</a>)? (<br />|<br />|<br/>)*# the caption can be separated by any nr of line breaks ( <b> \s* <i> \s* <center>(?P<caption>.*?)</center># getting the caption \s* </i> \s* </b> )?''', re.DOTALL | re.VERBOSE | re.IGNORECASE ) #' ############################## FEATURED RECORDS ###################### def get_featured_records(journal_name): """ Returns the 'featured' records i.e. records chosen to be displayed with an image on the main page, in the widgets section, for the given journal. parameter: journal_name - (str) the name of the journal for which we want to get the featured records returns: list of tuples (recid, img_url) """ try: feature_file = open('%s/webjournal/%s/featured_record' % \ (CFG_ETCDIR, journal_name)) except: return [] records = feature_file.readlines() return [(record.split('---', 1)[0], record.split('---', 1)[1]) \ for record in records if "---" in record] def add_featured_record(journal_name, recid, img_url): """ Adds the given record to the list of featured records of the given journal. parameters: journal_name - (str) the name of the journal to which the record should be added. recid - (int) the record id of the record to be featured. 
img_url - (str) a url to an image icon displayed along the featured record. returns: 0 if everything went ok 1 if record is already in the list 2 if other problems """ # Check that record is not already there featured_records = get_featured_records(journal_name) for featured_recid, featured_img in featured_records: if featured_recid == str(recid): return 1 try: fptr = open('%s/webjournal/%s/featured_record' % (CFG_ETCDIR, journal_name), "a") fptr.write(str(recid) + '---' + img_url + '\n') fptr.close() except: return 2 return 0 def remove_featured_record(journal_name, recid): """ Removes the given record from the list of featured records of the given journal. parameters: journal_name - (str) the name of the journal to which the record should be added. recid - (int) the record id of the record to be featured. """ featured_records = get_featured_records(journal_name) try: fptr = open('%s/webjournal/%s/featured_record' % (CFG_ETCDIR, journal_name), "w") for featured_recid, featured_img in featured_records: if str(featured_recid) != str(recid): fptr.write(str(featured_recid) + '---' + featured_img + \ '\n') fptr.close() except: return 1 return 0 ############################ ARTICLES RELATED ######################## def get_order_dict_from_recid_list(recids, journal_name, issue_number, newest_first=False, newest_only=False): """ Returns the ordered list of input recids, for given 'issue_number'. Since there might be several articles at the same position, the returned structure is a dictionary with keys being order number indicated in record metadata, and values being list of recids for this order number (recids for one position are ordered from highest to lowest recid). Eg: {'1': [2390, 2386, 2385], '3': [2388], '2': [2389], '4': [2387]} Parameters: recids - a list of all recid's that should be brought into order journal_name - the name of the journal issue_number - *str* the issue_number for which we are deriving the order newest_first - *bool* if True, new articles should be placed at beginning of the list. If so, their position/order will be negative integers newest_only - *bool* if only new articles should be returned Returns: ordered_records: a dictionary with the recids ordered by keys """ ordered_records = {} ordered_new_records = {} records_without_defined_order = [] new_records_without_defined_order = [] for record in recids: temp_rec = BibFormatObject(record) articles_info = temp_rec.fields('773__') for article_info in articles_info: if article_info.get('n', '') == issue_number or \ '0' + article_info.get('n', '') == issue_number: if article_info.has_key('c') and \ article_info['c'].isdigit(): order_number = int(article_info.get('c', '')) if (newest_first or newest_only) and \ is_new_article(journal_name, issue_number, record): if ordered_new_records.has_key(order_number): ordered_new_records[order_number].append(record) else: ordered_new_records[order_number] = [record] elif not newest_only: if ordered_records.has_key(order_number): ordered_records[order_number].append(record) else: ordered_records[order_number] = [record] else: # No order? No problem! Append it at the end. 
if newest_first and is_new_article(journal_name, issue_number, record): new_records_without_defined_order.append(record) elif not newest_only: records_without_defined_order.append(record) # Append records without order at the end of the list if records_without_defined_order: if ordered_records: ordered_records[max(ordered_records.keys()) + 1] = records_without_defined_order else: ordered_records[1] = records_without_defined_order # Append new records without order at the end of the list of new # records if new_records_without_defined_order: if ordered_new_records: ordered_new_records[max(ordered_new_records.keys()) + 1] = new_records_without_defined_order else: ordered_new_records[1] = new_records_without_defined_order # Append new records at the beginning of the list of 'old' # records. To do so, use negative integers if ordered_new_records: highest_new_record_order = max(ordered_new_records.keys()) for order, new_records in ordered_new_records.iteritems(): ordered_records[- highest_new_record_order + order - 1] = new_records for (order, records) in ordered_records.iteritems(): # Reverse so that if there are several articles at same # positon, newest appear first records.reverse() return ordered_records def get_journal_articles(journal_name, issue, category, newest_first=False, newest_only=False): """ Returns the recids in given category and journal, for given issue number. The returned recids are grouped according to their 773__c field. Example of returned value: {'1': [2390, 2386, 2385], '3': [2388], '2': [2389], '4': [2387]} Parameters: journal_name - *str* the name of the journal (as used in URLs) issue - *str* the issue. Eg: "08/2007" category - *str* the name of the category newest_first - *bool* if True, new articles should be placed at beginning of the list. If so, their position/order will be negative integers newest_only - *bool* if only new articles should be returned """ use_cache = True current_issue = get_current_issue(CFG_SITE_LANG, journal_name) if issue_is_later_than(issue, current_issue): # If we are working on unreleased issue, do not use caching # mechanism use_cache = False if use_cache: cached_articles = _get_cached_journal_articles(journal_name, issue, category) if cached_articles is not None: ordered_articles = get_order_dict_from_recid_list(cached_articles, journal_name, issue, newest_first, newest_only) return ordered_articles # Retrieve the list of rules that map Category -> Search Pattern. # Keep only the rule matching our category config_strings = get_xml_from_config(["record/rule"], journal_name) category_to_search_pattern_rules = config_strings["record/rule"] try: matching_rule = [rule.split(',', 1) for rule in \ category_to_search_pattern_rules \ if rule.split(',')[0] == category] except: return [] recids_issue = search_pattern(p='773__n:%s -980:DELETED' % issue) recids_rule = search_pattern(p=matching_rule[0][1]) if issue[0] == '0': # search for 09/ and 9/ recids_issue.union_update(search_pattern(p='773__n:%s -980:DELETED' % issue.lstrip('0'))) recids_rule.intersection_update(recids_issue) recids = [recid for recid in recids_rule if record_exists(recid) == 1] if use_cache: _cache_journal_articles(journal_name, issue, category, recids) ordered_articles = get_order_dict_from_recid_list(recids, journal_name, issue, newest_first, newest_only) return ordered_articles def _cache_journal_articles(journal_name, issue, category, articles): """ Caches given articles IDs. 
""" journal_cache_path = get_journal_article_cache_path(journal_name, issue) try: journal_cache_file = open(journal_cache_path, 'r') journal_info = cPickle.load(journal_cache_file) journal_cache_file.close() except cPickle.PickleError, e: journal_info = {} except IOError: journal_info = {} except EOFError: journal_info = {} except ValueError: journal_info = {} if not journal_info.has_key('journal_articles'): journal_info['journal_articles'] = {} journal_info['journal_articles'][category] = articles # Create cache directory if it does not exist journal_cache_dir = os.path.dirname(journal_cache_path) if not os.path.exists(journal_cache_dir): try: os.makedirs(journal_cache_dir) except: return False journal_cache_file = open(journal_cache_path, 'w') cPickle.dump(journal_info, journal_cache_file) journal_cache_file.close() return True def _get_cached_journal_articles(journal_name, issue, category): """ Retrieve the articles IDs cached for this journal. Returns None if cache does not exist or more than 5 minutes old """ # Check if our cache is more or less up-to-date (not more than 5 # minutes old) try: journal_cache_path = get_journal_article_cache_path(journal_name, issue) last_update = os.path.getctime(journal_cache_path) except Exception, e : return None now = time.time() if (last_update + 5*60) < now: return None # Get from cache try: journal_cache_file = open(journal_cache_path, 'r') journal_info = cPickle.load(journal_cache_file) journal_articles = journal_info.get('journal_articles', {}).get(category, None) journal_cache_file.close() except cPickle.PickleError, e: journal_articles = None except IOError: journal_articles = None except EOFError: journal_articles = None except ValueError: journal_articles = None return journal_articles def is_new_article(journal_name, issue, recid): """ Check if given article should be considered as new or not. New articles are articles that have never appeared in older issues than given one. """ article_found_in_older_issue = False temp_rec = BibFormatObject(recid) publication_blocks = temp_rec.fields('773__') for publication_block in publication_blocks: this_issue_number, this_issue_year = issue.split('/') issue_number, issue_year = publication_block.get('n', '/').split('/', 1) if int(issue_year) < int(this_issue_year): # Found an older issue article_found_in_older_issue = True break elif int(issue_year) == int(this_issue_year) and \ int(issue_number) < int(this_issue_number): # Found an older issue article_found_in_older_issue = True break return not article_found_in_older_issue ############################ CATEGORIES RELATED ###################### def get_journal_categories(journal_name, issue=None): """ List the categories for the given journal and issue. Returns categories in same order as in config file. Parameters: journal_name - *str* the name of the journal (as used in URLs) issue - *str* the issue. Eg:'08/2007'. 
If None, consider all categories defined in journal config """ categories = [] current_issue = get_current_issue(CFG_SITE_LANG, journal_name) config_strings = get_xml_from_config(["record/rule"], journal_name) all_categories = [rule.split(',')[0] for rule in \ config_strings["record/rule"]] if issue is None: return all_categories for category in all_categories: recids = get_journal_articles(journal_name, issue, category) if len(recids.keys()) > 0: categories.append(category) return categories def get_category_query(journal_name, category): """ Returns the category definition for the given category and journal name Parameters: journal_name - *str* the name of the journal (as used in URLs) categoy - *str* a category name, as found in the XML config """ config_strings = get_xml_from_config(["record/rule"], journal_name) category_to_search_pattern_rules = config_strings["record/rule"] try: matching_rule = [rule.split(',', 1)[1].strip() for rule in \ category_to_search_pattern_rules \ if rule.split(',')[0] == category] except: return None return matching_rule[0] ######################### JOURNAL CONFIG VARS ###################### cached_parsed_xml_config = {} def get_xml_from_config(nodes, journal_name): """ Returns values from the journal configuration file. The needed values can be specified by node name, or by a hierarchy of nodes names using '/' as character to mean 'descendant of'. Eg. 'record/rule' to get all the values of 'rule' tags inside the 'record' node Returns a dictionary with a key for each query and a list of strings (innerXml) results for each key. Has a special field "config_fetching_error" that returns an error when something has gone wrong. """ # Get and open the config file results = {} if cached_parsed_xml_config.has_key(journal_name): config_file = cached_parsed_xml_config[journal_name] else: config_path = '%s/webjournal/%s/%s-config.xml' % \ (CFG_ETCDIR, journal_name, journal_name) config_file = minidom.Document try: config_file = minidom.parse("%s" % config_path) except: # todo: raise exception "error: no config file found" results["config_fetching_error"] = "could not find config file" return results else: cached_parsed_xml_config[journal_name] = config_file for node_path in nodes: node = config_file for node_path_component in node_path.split('/'): # pylint: disable=E1103 # The node variable can be rewritten in the loop and therefore # its type can change. if node != config_file and node.length > 0: # We have a NodeList object: consider only first child node = node.item(0) # pylint: enable=E1103 try: node = node.getElementsByTagName(node_path_component) except: # WARNING, config did not have such value node = [] break results[node_path] = [] for result in node: try: result_string = result.firstChild.toxml(encoding="utf-8") except: # WARNING, config did not have such value continue results[node_path].append(result_string) return results def get_journal_issue_field(journal_name): """ Returns the MARC field in which this journal expects to find the issue number. Read this from the journal config file Parameters: journal_name - *str* the name of the journal (as used in URLs) """ config_strings = get_xml_from_config(["issue_number"], journal_name) try: issue_field = config_strings["issue_number"][0] except: issue_field = '773__n' return issue_field def get_journal_css_url(journal_name, type='screen'): """ Returns URL to this journal's CSS. 
Parameters: journal_name - *str* the name of the journal (as used in URLs) type - *str* 'screen' or 'print', depending on the kind of CSS """ config_strings = get_xml_from_config([type], journal_name) css_path = '' try: css_path = config_strings["screen"][0] except Exception: register_exception(req=None, suffix="No css file for journal %s. Is this right?" % \ journal_name) return CFG_SITE_URL + '/' + css_path def get_journal_submission_params(journal_name): """ Returns the (doctype, identifier element, identifier field) for the submission of articles in this journal, so that it is possible to build direct submission links. Parameter: journal_name - *str* the name of the journal (as used in URLs) """ doctype = '' identifier_field = '' identifier_element = '' config_strings = get_xml_from_config(["submission/doctype"], journal_name) if config_strings.get('submission/doctype', ''): doctype = config_strings['submission/doctype'][0] config_strings = get_xml_from_config(["submission/identifier_element"], journal_name) if config_strings.get('submission/identifier_element', ''): identifier_element = config_strings['submission/identifier_element'][0] config_strings = get_xml_from_config(["submission/identifier_field"], journal_name) if config_strings.get('submission/identifier_field', ''): identifier_field = config_strings['submission/identifier_field'][0] else: identifier_field = '037__a' return (doctype, identifier_element, identifier_field) def get_journal_draft_keyword_to_remove(journal_name): """ Returns the keyword that should be removed from the article metadata in order to move the article from Draft to Ready """ config_strings = get_xml_from_config(["draft_keyword"], journal_name) if config_strings.get('draft_keyword', ''): return config_strings['draft_keyword'][0] return '' def get_journal_alert_sender_email(journal_name): """ Returns the email address that should be used as send of the alert email. If not specified, use CFG_SITE_SUPPORT_EMAIL """ config_strings = get_xml_from_config(["alert_sender"], journal_name) if config_strings.get('alert_sender', ''): return config_strings['alert_sender'][0] return CFG_SITE_SUPPORT_EMAIL def get_journal_alert_recipient_email(journal_name): """ Returns the default email address of the recipients of the email Return a string of comma-separated emails. """ if CFG_DEVEL_SITE: # To be on the safe side, do not return the default alert recipients. return '' config_strings = get_xml_from_config(["alert_recipients"], journal_name) if config_strings.get('alert_recipients', ''): return config_strings['alert_recipients'][0] return '' def get_journal_collection_to_refresh_on_release(journal_name): """ Returns the list of collection to update (WebColl) upon release of an issue. """ from invenio.search_engine import collection_reclist_cache config_strings = get_xml_from_config(["update_on_release/collection"], journal_name) return [coll for coll in config_strings.get('update_on_release/collection', []) if \ collection_reclist_cache.cache.has_key(coll)] def get_journal_index_to_refresh_on_release(journal_name): """ Returns the list of indexed to update (BibIndex) upon release of an issue. 
""" from invenio.bibindex_engine import get_index_id_from_index_name config_strings = get_xml_from_config(["update_on_release/index"], journal_name) return [index for index in config_strings.get('update_on_release/index', []) if \ get_index_id_from_index_name(index) != ''] def get_journal_template(template, journal_name, ln=CFG_SITE_LANG): """ Returns the journal templates name for the given template type Raise an exception if template cannot be found. """ from invenio.webjournal_config import \ InvenioWebJournalTemplateNotFoundError config_strings = get_xml_from_config([template], journal_name) try: index_page_template = 'webjournal' + os.sep + \ config_strings[template][0] except: raise InvenioWebJournalTemplateNotFoundError(ln, journal_name, template) return index_page_template def get_journal_name_intl(journal_name, ln=CFG_SITE_LANG): """ Returns the nice name of the journal, translated if possible """ _ = gettext_set_language(ln) config_strings = get_xml_from_config(["niceName"], journal_name) if config_strings.get('niceName', ''): return _(config_strings['niceName'][0]) return '' def get_journal_languages(journal_name): """ Returns the list of languages defined for this journal """ config_strings = get_xml_from_config(["languages"], journal_name) if config_strings.get('languages', ''): return [ln.strip() for ln in \ config_strings['languages'][0].split(',')] return [] def get_journal_issue_grouping(journal_name): """ Returns the number of issue that are typically released at the same time. This is used if every two weeks you release an issue that should contains issue of next 2 weeks (eg. at week 16, you relase an issue named '16-17/2009') This number should help in the admin interface to guess how to release the next issue (can be overidden by user). """ config_strings = get_xml_from_config(["issue_grouping"], journal_name) if config_strings.get('issue_grouping', ''): issue_grouping = config_strings['issue_grouping'][0] if issue_grouping.isdigit() and int(issue_grouping) > 0: return int(issue_grouping) return 1 def get_journal_nb_issues_per_year(journal_name): """ Returns the default number of issues per year for this journal. This number should help in the admin interface to guess the next issue number (can be overidden by user). """ config_strings = get_xml_from_config(["issues_per_year"], journal_name) if config_strings.get('issues_per_year', ''): issues_per_year = config_strings['issues_per_year'][0] if issues_per_year.isdigit() and int(issues_per_year) > 0: return int(issues_per_year) return 52 def get_journal_preferred_language(journal_name, ln): """ Returns the most adequate language to display the journal, given a language. """ languages = get_journal_languages(journal_name) if ln in languages: return ln elif CFG_SITE_LANG in languages: return CFG_SITE_LANG elif languages: return languages else: return CFG_SITE_LANG def get_unreleased_issue_hiding_mode(journal_name): """ Returns how unreleased issue should be treated. Can be one of the following string values: 'future' - only future unreleased issues are hidden. 
Past unreleased one can be viewed 'all' - any unreleased issue (past and future) have to be hidden - 'none' - no unreleased issue is hidden """ config_strings = get_xml_from_config(["hide_unreleased_issues"], journal_name) if config_strings.get('hide_unreleased_issues', ''): hide_unreleased_issues = config_strings['hide_unreleased_issues'][0] if hide_unreleased_issues in ['future', 'all', 'none']: return hide_unreleased_issues return 'all' def get_first_issue_from_config(journal_name): """ Returns the first issue as defined from config. This should only be useful when no issue have been released. If not specified, returns the issue made of current week number and year. """ config_strings = get_xml_from_config(["first_issue"], journal_name) if config_strings.has_key('first_issue'): return config_strings['first_issue'][0] return time.strftime("%W/%Y", time.localtime()) ######################## TIME / ISSUE FUNCTIONS ###################### def get_current_issue(ln, journal_name): """ Returns the current issue of a journal as a string. Current issue is the latest released issue. """ journal_id = get_journal_id(journal_name, ln) try: current_issue = run_sql("""SELECT issue_number FROM jrnISSUE WHERE date_released <= NOW() AND id_jrnJOURNAL=%s ORDER BY date_released DESC LIMIT 1""", (journal_id,))[0][0] except: # start the first journal ever current_issue = get_first_issue_from_config(journal_name) run_sql("""INSERT INTO jrnISSUE (id_jrnJOURNAL, issue_number, issue_display) VALUES(%s, %s, %s)""", (journal_id, current_issue, current_issue)) return current_issue def get_all_released_issues(journal_name): """ Returns the list of released issue, ordered by release date Note that it only includes the issues that are considered as released in the DB: it will not for example include articles that have been imported in the system but not been released """ journal_id = get_journal_id(journal_name) res = run_sql("""SELECT issue_number FROM jrnISSUE WHERE id_jrnJOURNAL = %s AND UNIX_TIMESTAMP(date_released) != 0 ORDER BY date_released DESC""", (journal_id,)) if res: return [row[0] for row in res] else: return [] def get_next_journal_issues(current_issue_number, journal_name, n=2): """ This function suggests the 'n' next issue numbers """ number, year = current_issue_number.split('/', 1) number = int(number) year = int(year) number_issues_per_year = get_journal_nb_issues_per_year(journal_name) next_issues = [make_issue_number(journal_name, ((number - 1 + i) % (number_issues_per_year)) + 1, year + ((number - 1 + i) / number_issues_per_year)) \ for i in range(1, n + 1)] return next_issues def get_grouped_issues(journal_name, issue_number): """ Returns all the issues grouped with a given one. Issues are sorted from the oldest to newest one. """ grouped_issues = [] journal_id = get_journal_id(journal_name, CFG_SITE_LANG) issue_display = get_issue_number_display(issue_number, journal_name) res = run_sql("""SELECT issue_number FROM jrnISSUE WHERE id_jrnJOURNAL=%s AND issue_display=%s""", (journal_id, issue_display)) if res: grouped_issues = [row[0] for row in res] grouped_issues.sort(compare_issues) return grouped_issues def compare_issues(issue1, issue2): """ Comparison function for issues. 
Returns: -1 if issue1 is older than issue2 0 if issues are equal 1 if issue1 is newer than issue2 """ issue1_number, issue1_year = issue1.split('/', 1) issue2_number, issue2_year = issue2.split('/', 1) if int(issue1_year) == int(issue2_year): return cmp(int(issue1_number), int(issue2_number)) else: return cmp(int(issue1_year), int(issue2_year)) def issue_is_later_than(issue1, issue2): """ Returns true if issue1 is later than issue2 """ issue_number1, issue_year1 = issue1.split('/', 1) issue_number2, issue_year2 = issue2.split('/', 1) if int(issue_year1) > int(issue_year2): return True elif int(issue_year1) == int(issue_year2): return int(issue_number1) > int(issue_number2) else: return False def get_issue_number_display(issue_number, journal_name, ln=CFG_SITE_LANG): """ Returns the display string for a given issue number. """ journal_id = get_journal_id(journal_name, ln) issue_display = run_sql("""SELECT issue_display FROM jrnISSUE WHERE issue_number=%s AND id_jrnJOURNAL=%s""", (issue_number, journal_id)) if issue_display: return issue_display[0][0] else: # Not yet released... return issue_number def make_issue_number(journal_name, number, year, for_url_p=False): """ Creates a normalized issue number representation with given issue number (as int or str) and year (as int or str). Reverse the year and number if for_url_p is True """ number_issues_per_year = get_journal_nb_issues_per_year(journal_name) precision = len(str(number_issues_per_year)) number = int(str(number)) year = int(str(year)) if for_url_p: return ("%i/%0" + str(precision) + "i") % \ (year, number) else: return ("%0" + str(precision) + "i/%i") % \ (number, year) def get_release_datetime(issue, journal_name, ln=CFG_SITE_LANG): """ Gets the date at which an issue was released from the DB. Returns None if issue has not yet been released. See issue_to_datetime() to get the *theoretical* release time of an issue. """ journal_id = get_journal_id(journal_name, ln) try: release_date = run_sql("""SELECT date_released FROM jrnISSUE WHERE issue_number=%s AND id_jrnJOURNAL=%s""", (issue, journal_id))[0][0] except: return None if release_date: return release_date else: return None def get_announcement_datetime(issue, journal_name, ln=CFG_SITE_LANG): """ Get the date at which an issue was announced through the alert system. Return None if not announced """ journal_id = get_journal_id(journal_name, ln) try: announce_date = run_sql("""SELECT date_announced FROM jrnISSUE WHERE issue_number=%s AND id_jrnJOURNAL=%s""", (issue, journal_id))[0][0] except: return None if announce_date: return announce_date else: return None def datetime_to_issue(issue_datetime, journal_name): """ Returns the issue corresponding to the given datetime object. If issue_datetime is too far in the future or in the past, gives the best possible matching issue, or None, if it does not seem to exist. #If issue_datetime is too far in the future, return the latest #released issue. 
#If issue_datetime is too far in the past, return None Parameters: issue_datetime - *datetime* date of the issue to be retrieved journal_name - *str* the name of the journal (as used in URLs) """ issue_number = None journal_id = get_journal_id(journal_name) # Try to discover how much days an issue is valid nb_issues_per_year = get_journal_nb_issues_per_year(journal_name) this_year_number_of_days = 365 if calendar.isleap(issue_datetime.year): this_year_number_of_days = 366 issue_day_lifetime = math.ceil(float(this_year_number_of_days)/nb_issues_per_year) res = run_sql("""SELECT issue_number, date_released FROM jrnISSUE WHERE date_released < %s AND id_jrnJOURNAL = %s ORDER BY date_released DESC LIMIT 1""", (issue_datetime, journal_id)) if res and res[0][1]: issue_number = res[0][0] issue_release_date = res[0][1] # Check that the result is not too far in the future: if issue_release_date + datetime.timedelta(issue_day_lifetime) < issue_datetime: # In principle, the latest issue will no longer be valid # at that time return None else: # Mmh, are we too far in the past? This can happen in the case # of articles that have been imported in the system but never # considered as 'released' in the database. So we should still # try to approximate/match an issue: if round(issue_day_lifetime) in [6, 7, 8]: # Weekly issues. We can use this information to better # match the issue number issue_nb = int(issue_datetime.strftime('%W')) # = week number else: # Compute the number of days since beginning of year, and # divide by the lifetime of an issue: we get the # approximate issue_number issue_nb = math.ceil((int(issue_datetime.strftime('%j')) / issue_day_lifetime)) issue_number = ("%0" + str(len(str(nb_issues_per_year)))+ "i/%i") % (issue_nb, issue_datetime.year) # Now check if this issue exists in the system for this # journal if not get_journal_categories(journal_name, issue_number): # This issue did not exist return None return issue_number DAILY = 1 WEEKLY = 2 MONTHLY = 3 def issue_to_datetime(issue_number, journal_name, granularity=None): """ Returns the *theoretical* date of release for given issue: useful if you release on Friday, but the issue date of the journal should correspond to the next Monday. This will correspond to the next day/week/month, depending on the number of issues per year (or the 'granularity' if specified) and the release time (if close to the end of a period defined by the granularity, consider next period since release is made a bit in advance). See get_release_datetime() for the *real* release time of an issue THIS FUNCTION SHOULD ONLY BE USED FOR INFORMATIVE DISPLAY PURPOSE, AS IT GIVES APPROXIMATIVE RESULTS. Do not use it to make decisions. Parameters: issue_number - *str* issue number to consider journal_name - *str* the name of the journal (as used in URLs) granularity - *int* the granularity to consider """ # If we have released, we can use this information. Otherwise we # have to approximate. 
issue_date = get_release_datetime(issue_number, journal_name) if not issue_date: # Approximate release date number, year = issue_number.split('/') number = int(number) year = int(year) nb_issues_per_year = get_journal_nb_issues_per_year(journal_name) this_year_number_of_days = 365 if calendar.isleap(year): this_year_number_of_days = 366 issue_day_lifetime = float(this_year_number_of_days)/nb_issues_per_year # Compute from beginning of the year issue_date = datetime.datetime(year, 1, 1) + \ datetime.timedelta(days=int(round((number - 1) * issue_day_lifetime))) # Okay, but if last release is not too far in the past, better # compute from the release. current_issue = get_current_issue(CFG_SITE_LANG, journal_name) current_issue_time = get_release_datetime(current_issue, journal_name) if current_issue_time.year == issue_date.year: current_issue_number, current_issue_year = current_issue.split('/') current_issue_number = int(current_issue_number) # Compute from last release issue_date = current_issue_time + \ datetime.timedelta(days=int((number - current_issue_number) * issue_day_lifetime)) # If granularity is not specifed, deduce from config if granularity is None: nb_issues_per_year = get_journal_nb_issues_per_year(journal_name) if nb_issues_per_year > 250: granularity = DAILY elif nb_issues_per_year > 40: granularity = WEEKLY else: granularity = MONTHLY # Now we can adapt the date to match the granularity if granularity == DAILY: if issue_date.hour >= 15: # If released after 3pm, consider it is the issue of the next # day issue_date = issue_date + datetime.timedelta(days=1) elif granularity == WEEKLY: (year, week_nb, day_nb) = issue_date.isocalendar() if day_nb > 4: # If released on Fri, Sat or Sun, consider that it is next # week's issue. issue_date = issue_date + datetime.timedelta(weeks=1) # Get first day of the week issue_date = issue_date - datetime.timedelta(days=issue_date.weekday()) else: if issue_date.day > 22: # If released last week of the month, consider release for # next month issue_date = issue_date.replace(month=issue_date.month+1) date_string = issue_date.strftime("%Y %m 1") issue_date = datetime.datetime(*(time.strptime(date_string, "%Y %m %d")[0:6])) return issue_date def get_number_of_articles_for_issue(issue, journal_name, ln=CFG_SITE_LANG): """ Function that returns a dictionary with all categories and number of articles in each category. """ all_articles = {} categories = get_journal_categories(journal_name, issue) for category in categories: all_articles[category] = len(get_journal_articles(journal_name, issue, category)) return all_articles ########################## JOURNAL RELATED ########################### def get_journal_info_path(journal_name): """ Returns the path to the info file of the given journal. The info file should be used to get information about a journal when database is not available. 
Returns None if path cannot be determined """ # We must make sure we don't try to read outside of webjournal # cache dir info_path = os.path.abspath("%s/webjournal/%s/info.dat" % \ (CFG_CACHEDIR, journal_name)) if info_path.startswith(CFG_CACHEDIR + '/webjournal/'): return info_path else: return None def get_journal_article_cache_path(journal_name, issue): """ Returns the path to cache file of the articles of a given issue Returns None if path cannot be determined """ # We must make sure we don't try to read outside of webjournal # cache dir issue_number, year = issue.replace('/', '_').split('_', 1) cache_path = os.path.abspath("%s/webjournal/%s/%s/%s/articles_cache.dat" % \ (CFG_CACHEDIR, journal_name, year, issue_number)) if cache_path.startswith(CFG_CACHEDIR + '/webjournal/'): return cache_path else: return None def get_journal_id(journal_name, ln=CFG_SITE_LANG): """ Get the id for this journal from the DB. If DB is down, try to get from cache. """ journal_id = None from invenio.webjournal_config import InvenioWebJournalJournalIdNotFoundDBError if CFG_ACCESS_CONTROL_LEVEL_SITE == 2: # do not connect to the database as the site is closed for # maintenance: journal_info_path = get_journal_info_path(journal_name) try: journal_info_file = open(journal_info_path, 'r') journal_info = cPickle.load(journal_info_file) journal_id = journal_info.get('journal_id', None) except cPickle.PickleError, e: journal_id = None except IOError: journal_id = None except ValueError: journal_id = None else: try: res = run_sql("SELECT id FROM jrnJOURNAL WHERE name=%s", (journal_name,)) if len(res) > 0: journal_id = res[0][0] except OperationalError, e: # Cannot connect to database. Try to read from cache journal_info_path = get_journal_info_path(journal_name) try: journal_info_file = open(journal_info_path, 'r') journal_info = cPickle.load(journal_info_file) journal_id = journal_info['journal_id'] except cPickle.PickleError, e: journal_id = None except IOError: journal_id = None except ValueError: journal_id = None if journal_id is None: raise InvenioWebJournalJournalIdNotFoundDBError(ln, journal_name) return journal_id def guess_journal_name(ln, journal_name=None): """ Tries to take a guess what a user was looking for on the server if not providing a name for the journal, or if given journal name does not match case of original journal. """ from invenio.webjournal_config import InvenioWebJournalNoJournalOnServerError from invenio.webjournal_config import InvenioWebJournalNoNameError journals_id_and_names = get_journals_ids_and_names() if len(journals_id_and_names) == 0: raise InvenioWebJournalNoJournalOnServerError(ln) elif not journal_name and \ journals_id_and_names[0].has_key('journal_name'): return journals_id_and_names[0]['journal_name'] elif len(journals_id_and_names) > 0: possible_journal_names = [journal_id_and_name['journal_name'] for journal_id_and_name \ in journals_id_and_names \ if journal_id_and_name.get('journal_name', '').lower() == journal_name.lower()] if possible_journal_names: return possible_journal_names[0] else: raise InvenioWebJournalNoNameError(ln) else: raise InvenioWebJournalNoNameError(ln) def get_journals_ids_and_names(): """ Returns the list of existing journals IDs and names. Try to read from the DB, or from cache if DB is not accessible. 
""" journals = [] if CFG_ACCESS_CONTROL_LEVEL_SITE == 2: # do not connect to the database as the site is closed for # maintenance: files = os.listdir("%s/webjournal" % CFG_CACHEDIR) info_files = [path + os.sep + 'info.dat' for path in files if \ os.path.isdir(path) and \ os.path.exists(path + os.sep + 'info.dat')] for info_file in info_files: try: journal_info_file = open(info_file, 'r') journal_info = cPickle.load(journal_info_file) journal_id = journal_info.get('journal_id', None) journal_name = journal_info.get('journal_name', None) current_issue = journal_info.get('current_issue', None) if journal_id is not None and \ journal_name is not None: journals.append({'journal_id': journal_id, 'journal_name': journal_name, 'current_issue': current_issue}) except cPickle.PickleError, e: # Well, can't do anything... continue except IOError: # Well, can't do anything... continue except ValueError: continue else: try: res = run_sql("SELECT id, name FROM jrnJOURNAL ORDER BY id") for journal_id, journal_name in res: journals.append({'journal_id': journal_id, 'journal_name': journal_name}) except OperationalError, e: # Cannot connect to database. Try to read from cache files = os.listdir("%s/webjournal" % CFG_CACHEDIR) info_files = [path + os.sep + 'info.dat' for path in files if \ os.path.isdir(path) and \ os.path.exists(path + os.sep + 'info.dat')] for info_file in info_files: try: journal_info_file = open(info_file, 'r') journal_info = cPickle.load(journal_info_file) journal_id = journal_info.get('journal_id', None) journal_name = journal_info.get('journal_name', None) current_issue = journal_info.get('current_issue', None) if journal_id is not None and \ journal_name is not None: journals.append({'journal_id': journal_id, 'journal_name': journal_name, 'current_issue': current_issue}) except cPickle.PickleError, e: # Well, can't do anything... continue except IOError: # Well, can't do anything... continue except ValueError: continue return journals def parse_url_string(uri): """ Centralized function to parse any url string given in webjournal. Useful to retrieve current category, journal, etc. from within format elements The webjournal interface handler should already have cleaned the URI beforehand, so that journal name exist, issue number is correct, etc. The only remaining problem might be due to the capitalization of journal name in contact, search and popup pages, so clean the journal name. Note that language is also as returned from the URL, which might need to be filtered to match available languages (WebJournal elements can rely in bfo.lang to retrieve washed language) returns: args: all arguments in dict form """ args = {'journal_name' : '', 'issue_year' : '', 'issue_number' : None, 'issue' : None, 'category' : '', 'recid' : -1, 'verbose' : 0, 'ln' : CFG_SITE_LANG, 'archive_year' : None, 'archive_search': ''} if not uri.startswith('/journal'): # Mmh, incorrect context. 
Still, keep language if available url_params = urlparse(uri)[4] args['ln'] = dict([part.split('=') for part in url_params.split('&') \ if len(part.split('=')) == 2]).get('ln', CFG_SITE_LANG) return args # Take everything after journal and before first question mark splitted_uri = uri.split('journal', 1) second_part = splitted_uri[1] splitted_uri = second_part.split('?') uri_middle_part = splitted_uri[0] uri_arguments = '' if len(splitted_uri) > 1: uri_arguments = splitted_uri[1] arg_list = uri_arguments.split("&") args['ln'] = CFG_SITE_LANG args['verbose'] = 0 for arg_pair in arg_list: arg_and_value = arg_pair.split('=') if len(arg_and_value) == 2: if arg_and_value[0] == 'ln': args['ln'] = arg_and_value[1] elif arg_and_value[0] == 'verbose' and \ arg_and_value[1].isdigit(): args['verbose'] = int(arg_and_value[1]) elif arg_and_value[0] == 'archive_year' and \ arg_and_value[1].isdigit(): args['archive_year'] = int(arg_and_value[1]) elif arg_and_value[0] == 'archive_search': args['archive_search'] = arg_and_value[1] elif arg_and_value[0] == 'name': args['journal_name'] = guess_journal_name(args['ln'], arg_and_value[1]) arg_list = uri_middle_part.split("/") if len(arg_list) > 1 and arg_list[1] not in ['search', 'contact', 'popup']: args['journal_name'] = urllib.unquote(arg_list[1]) elif arg_list[1] not in ['search', 'contact', 'popup']: args['journal_name'] = guess_journal_name(args['ln'], args['journal_name']) cur_issue = get_current_issue(args['ln'], args['journal_name']) if len(arg_list) > 2: try: args['issue_year'] = int(urllib.unquote(arg_list[2])) except: args['issue_year'] = int(cur_issue.split('/')[1]) else: args['issue'] = cur_issue args['issue_year'] = int(cur_issue.split('/')[1]) args['issue_number'] = int(cur_issue.split('/')[0]) if len(arg_list) > 3: try: args['issue_number'] = int(urllib.unquote(arg_list[3])) except: args['issue_number'] = int(cur_issue.split('/')[0]) args['issue'] = make_issue_number(args['journal_name'], args['issue_number'], args['issue_year']) if len(arg_list) > 4: args['category'] = urllib.unquote(arg_list[4]) if len(arg_list) > 5: try: args['recid'] = int(urllib.unquote(arg_list[5])) except: pass args['ln'] = get_journal_preferred_language(args['journal_name'], args['ln']) # FIXME : wash arguments? 
return args def make_journal_url(current_uri, custom_parameters=None): """ Create a URL, using the current URI and overriding values with the given custom_parameters Parameters: current_uri - *str* the current full URI custom_parameters - *dict* a dictionary of parameters that should override those of curent_uri """ if not custom_parameters: custom_parameters = {} default_params = parse_url_string(current_uri) for key, value in custom_parameters.iteritems(): # Override default params with custom params default_params[key] = str(value) uri = CFG_SITE_URL + '/journal/' if default_params['journal_name']: uri += urllib.quote(default_params['journal_name']) + '/' if default_params['issue_year'] and default_params['issue_number']: uri += make_issue_number(default_params['journal_name'], default_params['issue_number'], default_params['issue_year'], for_url_p=True) + '/' if default_params['category']: uri += urllib.quote(default_params['category']) if default_params['recid'] and \ default_params['recid'] != -1: uri += '/' + str(default_params['recid']) printed_question_mark = False if default_params['ln']: uri += '?ln=' + default_params['ln'] printed_question_mark = True if default_params['verbose'] != 0: if printed_question_mark: uri += '&amp;verbose=' + str(default_params['verbose']) else: uri += '?verbose=' + str(default_params['verbose']) return uri ############################ HTML CACHING FUNCTIONS ############################ def cache_index_page(html, journal_name, category, issue, ln): """ Caches the index page main area of a Bulletin (right hand menu cannot be cached) @return: tuple (path to cache file (or None), message) """ issue = issue.replace("/", "_") issue_number, year = issue.split("_", 1) category = category.replace(" ", "") cache_path = os.path.abspath('%s/webjournal/%s/%s/%s/index_%s_%s.html' % \ (CFG_CACHEDIR, journal_name, year, issue_number, category, ln)) if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'): # Mmh, not accessing correct path. Stop caching return (None, 'Trying to cache at wrong location: %s' % cache_path) cache_path_dir = os.path.dirname(cache_path) try: if not os.path.isdir(cache_path_dir): os.makedirs(cache_path_dir) cached_file = open(cache_path, "w") cached_file.write(html) cached_file.close() except Exception, e: register_exception(req=None, prefix="Could not store index page cache", alert_admin=True) return (None, e) return (cache_path, '') def get_index_page_from_cache(journal_name, category, issue, ln): """ Function to get an index page from the cache. False if not in cache. """ issue = issue.replace("/", "_") issue_number, year = issue.split("_", 1) category = category.replace(" ", "") cache_path = os.path.abspath('%s/webjournal/%s/%s/%s/index_%s_%s.html' % \ (CFG_CACHEDIR, journal_name, year, issue_number, category, ln)) if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'): # Mmh, not accessing correct path. Stop reading cache return False try: cached_file = open(cache_path).read() except: return False return cached_file def cache_article_page(html, journal_name, category, recid, issue, ln): """ Caches an article view of a journal. If cache cannot be written, a warning is reported to the admin. 
@return: tuple (path to cache file (or None), message) """ issue = issue.replace("/", "_") issue_number, year = issue.split("_", 1) category = category.replace(" ", "") cache_path = os.path.abspath('%s/webjournal/%s/%s/%s/article_%s_%s_%s.html' % \ (CFG_CACHEDIR, journal_name, year, issue_number, category, recid, ln)) if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'): # Mmh, not accessing correct path. Stop caching return (None, 'Trying to cache at wrong location: %s' % cache_path) cache_path_dir = os.path.dirname(cache_path) try: if not os.path.isdir(cache_path_dir): os.makedirs(cache_path_dir) cached_file = open(cache_path, "w") cached_file.write(html) cached_file.close() except Exception, e: register_exception(req=None, prefix="Could not store article cache", alert_admin=True) return (None, e) return (cache_path_dir, '') NOT_FOR_ALERT_COMMENTS_RE = re.compile('<!--\s*START_NOT_FOR_ALERT\s*-->.*?<!--\s*END_NOT_FOR_ALERT\s*-->', re.IGNORECASE | re.DOTALL) def get_article_page_from_cache(journal_name, category, recid, issue, ln, bfo=None): """ Gets an article view of a journal from cache. False if not in cache. """ issue = issue.replace("/", "_") issue_number, year = issue.split("_", 1) category = category.replace(" ", "") cache_path = os.path.abspath('%s/webjournal/%s/%s/%s/article_%s_%s_%s.html' % \ (CFG_CACHEDIR, journal_name, year, issue_number, category, recid, ln)) if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'): # Mmh, not accessing correct path. Stop reading cache return False try: cached_file = open(cache_path).read() except: return False if CFG_CERN_SITE and bfo: try: from invenio.bibformat_elements import bfe_webjournal_cern_toolbar cached_file = NOT_FOR_ALERT_COMMENTS_RE.sub(bfe_webjournal_cern_toolbar.format_element(bfo), cached_file, 1) except ImportError, e: pass return cached_file def clear_cache_for_article(journal_name, category, recid, issue): """ Resets the cache for an article (e.g. after an article has been modified) """ issue = issue.replace("/", "_") issue_number, year = issue.split("_", 1) category = category.replace(" ", "") cache_path = os.path.abspath('%s/webjournal/%s/' % (CFG_CACHEDIR, journal_name)) if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'): # Mmh, not accessing correct path. Stop deleting cache return False # try to delete the article cached file try: os.remove('%s/webjournal/%s/%s/%s/article_%s_%s_en.html' % (CFG_CACHEDIR, journal_name, year, issue_number, category, recid)) except: pass try: os.remove('%s/webjournal/%s/%s/%s/article_%s_%s_fr.html' % (CFG_CACHEDIR, journal_name, year, issue_number, category, recid)) except: pass # delete the index page for the category try: os.remove('%s/webjournal/%s/%s/%s/index_%s_en.html' % (CFG_CACHEDIR, journal_name, year, issue_number, category)) except: pass try: os.remove('%s/webjournal/%s/%s/%s/index_%s_fr.html' % (CFG_CACHEDIR, journal_name, year, issue_number, category)) except: pass try: path = get_journal_article_cache_path(journal_name, issue) os.remove(path) except: pass return True def clear_cache_for_issue(journal_name, issue): """ clears the cache of a whole issue. """ issue = issue.replace("/", "_") issue_number, year = issue.split("_", 1) cache_path_dir = os.path.abspath('%s/webjournal/%s/%s/%s/' % \ (CFG_CACHEDIR, journal_name, year, issue_number)) if not cache_path_dir.startswith(CFG_CACHEDIR + '/webjournal'): # Mmh, not accessing correct path. 
Stop deleting cache return False all_cached_files = os.listdir(cache_path_dir) for cached_file in all_cached_files: try: os.remove(cache_path_dir + '/' + cached_file) except: return False return True ######################### CERN SPECIFIC FUNCTIONS ################# def get_recid_from_legacy_number(issue_number, category, number): """ Returns the recid based on the issue number, category and 'number'. This is used to support URLs using the now deprecated 'number' argument. The function tries to reproduce the behaviour of the old way of doing, even keeping some of its 'problems' (so that we reach the same article as before with a given number).. Returns the recid as int, or -1 if not found """ recids = [] if issue_number[0] == "0": alternative_issue_number = issue_number[1:] recids = list(search_pattern(p='65017a:"%s" and 773__n:%s' % (category, issue_number))) recids.extend(list(search_pattern(p='65017a:"%s" and 773__n:%s' % (category, alternative_issue_number)))) else: recids = list(search_pattern(p='65017:"%s" and 773__n:%s' % (category, issue_number))) # Now must order the records and pick the one at index 'number'. # But we have to take into account that there can be multiple # records at position 1, and that these additional records should # be numbered with negative numbers: # 1, 1, 1, 2, 3 -> 1, -1, -2, 2, 3... negative_index_records = {} positive_index_records = {} # Fill in 'negative_index_records' and 'positive_index_records' # lists with the following loop for recid in recids: bfo = BibFormatObject(recid) order = [subfield['c'] for subfield in bfo.fields('773__') if \ issue_number in subfield.get('n', '')] if len(order) > 0: # If several orders are defined for the same article and # the same issue, keep the first one order = order[0] if order.isdigit(): # Order must be an int. Otherwise skip order = int(order) if order == 1 and positive_index_records.has_key(1): # This is then a negative number for this record index = (len(negative_index_records.keys()) > 0 and \ min(negative_index_records.keys()) -1) or 0 negative_index_records[index] = recid else: # Positive number for this record if not positive_index_records.has_key(order): positive_index_records[order] = recid else: # We make the assumption that we cannot have # twice the same position for two # articles. Previous WebJournal module was not # clear about that. Just drop this record # (better than crashing or looping forever..) pass recid_to_return = -1 # Ok, we can finally pick the recid corresponding to 'number' if number <= 0: negative_indexes = negative_index_records.keys() negative_indexes.sort() negative_indexes.reverse() if len(negative_indexes) > abs(number): recid_to_return = negative_index_records[negative_indexes[abs(number)]] else: if positive_index_records.has_key(number): recid_to_return = positive_index_records[number] return recid_to_return def is_recid_in_released_issue(recid): """ Returns True if recid is part of the latest issue of the given journal. WARNING: the function does not check that the article does not belong to the draft collection of the record. 
This is wanted, in order to workaround the time needed for a record to go from the draft collection to the final collection """ bfo = BibFormatObject(recid) journal_name = '' journal_names = [journal_name for journal_name in bfo.fields('773__t') if journal_name] if journal_names: journal_name = journal_names[0] else: return False existing_journal_names = [o['journal_name'] for o in get_journals_ids_and_names()] if not journal_name in existing_journal_names: # Try to remove whitespace journal_name = journal_name.replace(' ', '') if not journal_name in existing_journal_names: # Journal name unknown from WebJournal return False config_strings = get_xml_from_config(["draft_image_access_policy"], journal_name) if config_strings['draft_image_access_policy'] and \ config_strings['draft_image_access_policy'][0] != 'allow': # The journal does not want to optimize access to images return False article_issues = bfo.fields('773__n') current_issue = get_current_issue(CFG_SITE_LANG, journal_name) for article_issue in article_issues: # Check each issue until a released one is found if get_release_datetime(article_issue, journal_name): # Release date exists, issue has been released return True else: # Unreleased issue. Do we still allow based on journal config? unreleased_issues_mode = get_unreleased_issue_hiding_mode(journal_name) if (unreleased_issues_mode == 'none' or \ (unreleased_issues_mode == 'future' and \ not issue_is_later_than(article_issue, current_issue))): return True return False
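# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- NOT part of the original Invenio module.
# The caching helpers above (get_journal_info_path, cache_index_page,
# cache_article_page, ...) all normalise the target path with
# os.path.abspath() and then require it to stay under the webjournal cache
# directory, so crafted journal or issue names containing '..' cannot escape
# it.  The stand-alone check below mirrors that idea with a made-up cache
# root and made-up file names.
if __name__ == '__main__':
    import os  # already imported by the module; repeated so the sketch is self-contained

    _CACHE_ROOT = '/tmp/webjournal-cache'  # stand-in for CFG_CACHEDIR + '/webjournal'

    def _is_inside_cache(path):
        """Return True only if the normalised path stays under the cache root."""
        return os.path.abspath(path).startswith(_CACHE_ROOT + os.sep)

    assert _is_inside_cache(_CACHE_ROOT + '/AtlantisTimes/2009/03/index_News_en.html')
    assert not _is_inside_cache(_CACHE_ROOT + '/../../etc/passwd')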
GRArmstrong/invenio-inspire-ops
modules/webjournal/lib/webjournal_utils.py
Python
gpl-2.0
69,087
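The webjournal utilities above identify issues with zero-padded "NN/YYYY" strings (or "YYYY/NN" in URLs) and order them by year first, then by number. The following minimal, self-contained sketch (an editor's addition, not code from the module) illustrates that convention with the issues-per-year value hard-coded instead of read from the journal configuration and the database:

def pad_issue(number, year, issues_per_year=50, for_url_p=False):
    # Same formatting rule as make_issue_number(): pad the issue number
    # to the width of the configured issues-per-year count.
    precision = len(str(issues_per_year))
    if for_url_p:
        return ("%i/%0" + str(precision) + "i") % (int(year), int(number))
    return ("%0" + str(precision) + "i/%i") % (int(number), int(year))

def issue_key(issue):
    # Issues compare by year first, then by number, as in compare_issues()
    # and issue_is_later_than().
    number, year = issue.split('/', 1)
    return (int(year), int(number))

assert pad_issue(3, 2009) == '03/2009'
assert pad_issue(3, 2009, for_url_p=True) == '2009/03'
assert issue_key('49/2008') < issue_key('02/2009') < issue_key('10/2009')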
import logging from alvi.tests.test_client.base import TestContainer import alvi.tests.pages as pages logger = logging.getLogger(__name__) class TestScenes(TestContainer): def test_check_scenes(self): home_page = pages.Home(self._browser.driver) home_page.goto() scene_links = home_page.scene_links self.assertEqual(len(self._client.scenes), len(scene_links), "not all client processes (scenes) were successfully connected")
alviproject/alvi
alvi/tests/test_client/test_scenes.py
Python
mit
515
""" """ from bs4 import BeautifulSoup import requests from framework import fields from website.addons.base import AddonNodeSettingsBase API_URL = 'https://api.zotero.org/groups/{zid}/items' params = { 'order': 'dateAdded', 'sort': 'desc', 'limit': 5, } class AddonZoteroNodeSettings(AddonNodeSettingsBase): zotero_id = fields.StringField() def _fetch_references(self): url = API_URL.format( zid=self.zotero_id, ) xml = requests.get(url, params=params) return xml.content def _summarize_references(self): xml = self._fetch_references() parsed = BeautifulSoup(xml) titles = parsed.select('entry title') if titles: return ''' <ul> {lis} </ul> '''.format( lis=''.join([ '<li>{}</li>'.format(title.string) for title in titles ]) ) def to_json(self, user): rv = super(AddonZoteroNodeSettings, self).to_json(user) rv.update({ 'zotero_id': self.zotero_id or '', }) return rv
AndrewSallans/osf.io
website/addons/zotero/model.py
Python
apache-2.0
1,191