repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
nicolargo/intellij-community
python/lib/Lib/site-packages/django/contrib/comments/views/utils.py
161
1777
""" A few bits of helper functions for comment views. """ import urllib import textwrap from django.http import HttpResponseRedirect from django.core import urlresolvers from django.shortcuts import render_to_response from django.template import RequestContext from django.core.exceptions import ObjectDoesNotExist from django.contrib import comments def next_redirect(data, default, default_view, **get_kwargs): """ Handle the "where should I go next?" part of comment views. The next value could be a kwarg to the function (``default``), or a ``?next=...`` GET arg, or the URL of a given view (``default_view``). See the view modules for examples. Returns an ``HttpResponseRedirect``. """ next = data.get("next", default) if next is None: next = urlresolvers.reverse(default_view) if get_kwargs: joiner = ('?' in next) and '&' or '?' next += joiner + urllib.urlencode(get_kwargs) return HttpResponseRedirect(next) def confirmation_view(template, doc="Display a confirmation view."): """ Confirmation view generator for the "comment was posted/flagged/deleted/approved" views. """ def confirmed(request): comment = None if 'c' in request.GET: try: comment = comments.get_model().objects.get(pk=request.GET['c']) except (ObjectDoesNotExist, ValueError): pass return render_to_response(template, {'comment': comment}, context_instance=RequestContext(request) ) confirmed.__doc__ = textwrap.dedent("""\ %s Templates: `%s`` Context: comment The posted comment """ % (doc, template) ) return confirmed
apache-2.0
waynenilsen/statsmodels
examples/python/robust_models_0.py
33
2992
## Robust Linear Models from __future__ import print_function import numpy as np import statsmodels.api as sm import matplotlib.pyplot as plt from statsmodels.sandbox.regression.predstd import wls_prediction_std # ## Estimation # # Load data: data = sm.datasets.stackloss.load() data.exog = sm.add_constant(data.exog) # Huber's T norm with the (default) median absolute deviation scaling huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT()) hub_results = huber_t.fit() print(hub_results.params) print(hub_results.bse) print(hub_results.summary(yname='y', xname=['var_%d' % i for i in range(len(hub_results.params))])) # Huber's T norm with 'H2' covariance matrix hub_results2 = huber_t.fit(cov="H2") print(hub_results2.params) print(hub_results2.bse) # Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave()) andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3") print('Parameters: ', andrew_results.params) # See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options # # ## Comparing OLS and RLM # # Artificial data with outliers: nsample = 50 x1 = np.linspace(0, 20, nsample) X = np.column_stack((x1, (x1-5)**2)) X = sm.add_constant(X) sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger beta = [5, 0.5, -0.0] y_true2 = np.dot(X, beta) y2 = y_true2 + sig*1. * np.random.normal(size=nsample) y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample) # ### Example 1: quadratic function with linear truth # # Note that the quadratic term in OLS regression will capture outlier effects. 
res = sm.OLS(y2, X).fit() print(res.params) print(res.bse) print(res.predict()) # Estimate RLM: resrlm = sm.RLM(y2, X).fit() print(resrlm.params) print(resrlm.bse) # Draw a plot to compare OLS estimates to the robust estimates: fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(x1, y2, 'o',label="data") ax.plot(x1, y_true2, 'b-', label="True") prstd, iv_l, iv_u = wls_prediction_std(res) ax.plot(x1, res.fittedvalues, 'r-', label="OLS") ax.plot(x1, iv_u, 'r--') ax.plot(x1, iv_l, 'r--') ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM") ax.legend(loc="best") # ### Example 2: linear function with linear truth # # Fit a new OLS model using only the linear term and the constant: X2 = X[:,[0,1]] res2 = sm.OLS(y2, X2).fit() print(res2.params) print(res2.bse) # Estimate RLM: resrlm2 = sm.RLM(y2, X2).fit() print(resrlm2.params) print(resrlm2.bse) # Draw a plot to compare OLS estimates to the robust estimates: prstd, iv_l, iv_u = wls_prediction_std(res2) fig, ax = plt.subplots() ax.plot(x1, y2, 'o', label="data") ax.plot(x1, y_true2, 'b-', label="True") ax.plot(x1, res2.fittedvalues, 'r-', label="OLS") ax.plot(x1, iv_u, 'r--') ax.plot(x1, iv_l, 'r--') ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM") ax.legend(loc="best")
bsd-3-clause
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_ddos_custom_policies_operations.py
1
20462
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class DdosCustomPoliciesOperations: """DdosCustomPoliciesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_03_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _delete_initial( self, resource_group_name: str, ddos_custom_policy_name: str, **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-03-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore async def begin_delete( 
self, resource_group_name: str, ddos_custom_policy_name: str, **kwargs ) -> AsyncLROPoller[None]: """Deletes the specified DDoS custom policy. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param ddos_custom_policy_name: The name of the DDoS custom policy. :type ddos_custom_policy_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, ddos_custom_policy_name=ddos_custom_policy_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': 
self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore async def get( self, resource_group_name: str, ddos_custom_policy_name: str, **kwargs ) -> "_models.DdosCustomPolicy": """Gets information about the specified DDoS custom policy. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param ddos_custom_policy_name: The name of the DDoS custom policy. 
:type ddos_custom_policy_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: DdosCustomPolicy, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_03_01.models.DdosCustomPolicy :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-03-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('DdosCustomPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, ddos_custom_policy_name: str, parameters: "_models.DdosCustomPolicy", **kwargs ) -> "_models.DdosCustomPolicy": cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-03-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'DdosCustomPolicy') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('DdosCustomPolicy', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('DdosCustomPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, ddos_custom_policy_name: str, parameters: "_models.DdosCustomPolicy", **kwargs ) -> AsyncLROPoller["_models.DdosCustomPolicy"]: """Creates or updates a DDoS custom policy. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param ddos_custom_policy_name: The name of the DDoS custom policy. :type ddos_custom_policy_name: str :param parameters: Parameters supplied to the create or update operation. :type parameters: ~azure.mgmt.network.v2020_03_01.models.DdosCustomPolicy :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.DdosCustomPolicy] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, ddos_custom_policy_name=ddos_custom_policy_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('DdosCustomPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore async def update_tags( self, resource_group_name: str, ddos_custom_policy_name: str, parameters: "_models.TagsObject", **kwargs ) -> "_models.DdosCustomPolicy": """Update a DDoS custom policy tags. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param ddos_custom_policy_name: The name of the DDoS custom policy. :type ddos_custom_policy_name: str :param parameters: Parameters supplied to update DDoS custom policy resource tags. :type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject :keyword callable cls: A custom type or function that will be passed the direct response :return: DdosCustomPolicy, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_03_01.models.DdosCustomPolicy :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-03-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update_tags.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('DdosCustomPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
mit
florentx/OpenUpgrade
addons/stock_dropshipping/__init__.py
61
1045
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
gloaec/bamboo
bamboo/alembic/ddl/postgresql.py
18
1442
import re from sqlalchemy import types as sqltypes from .base import compiles, alter_table, format_table_name, RenameTable from .impl import DefaultImpl class PostgresqlImpl(DefaultImpl): __dialect__ = 'postgresql' transactional_ddl = True def compare_server_default(self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default): # don't do defaults for SERIAL columns if metadata_column.primary_key and \ metadata_column is metadata_column.table._autoincrement_column: return False conn_col_default = rendered_inspector_default if None in (conn_col_default, rendered_metadata_default): return conn_col_default != rendered_metadata_default if metadata_column.type._type_affinity is not sqltypes.String: rendered_metadata_default = re.sub(r"^'|'$", "", rendered_metadata_default) return not self.connection.scalar( "SELECT %s = %s" % ( conn_col_default, rendered_metadata_default ) ) @compiles(RenameTable, "postgresql") def visit_rename_table(element, compiler, **kw): return "%s RENAME TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, None) )
gpl-3.0
NaohiroTamura/ironic
ironic/tests/unit/db/test_chassis.py
11
3335
# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Chassis via the DB API""" from oslo_utils import uuidutils import six from ironic.common import exception from ironic.tests.unit.db import base from ironic.tests.unit.db import utils class DbChassisTestCase(base.DbTestCase): def setUp(self): super(DbChassisTestCase, self).setUp() self.chassis = utils.create_test_chassis() def test_get_chassis_list(self): uuids = [self.chassis.uuid] for i in range(1, 6): ch = utils.create_test_chassis(uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(ch.uuid)) res = self.dbapi.get_chassis_list() res_uuids = [r.uuid for r in res] six.assertCountEqual(self, uuids, res_uuids) def test_get_chassis_by_id(self): chassis = self.dbapi.get_chassis_by_id(self.chassis.id) self.assertEqual(self.chassis.uuid, chassis.uuid) def test_get_chassis_by_uuid(self): chassis = self.dbapi.get_chassis_by_uuid(self.chassis.uuid) self.assertEqual(self.chassis.id, chassis.id) def test_get_chassis_that_does_not_exist(self): self.assertRaises(exception.ChassisNotFound, self.dbapi.get_chassis_by_id, 666) def test_update_chassis(self): res = self.dbapi.update_chassis(self.chassis.id, {'description': 'hello'}) self.assertEqual('hello', res.description) def test_update_chassis_that_does_not_exist(self): self.assertRaises(exception.ChassisNotFound, self.dbapi.update_chassis, 666, {'description': ''}) def 
test_update_chassis_uuid(self): self.assertRaises(exception.InvalidParameterValue, self.dbapi.update_chassis, self.chassis.id, {'uuid': 'hello'}) def test_destroy_chassis(self): self.dbapi.destroy_chassis(self.chassis.id) self.assertRaises(exception.ChassisNotFound, self.dbapi.get_chassis_by_id, self.chassis.id) def test_destroy_chassis_that_does_not_exist(self): self.assertRaises(exception.ChassisNotFound, self.dbapi.destroy_chassis, 666) def test_destroy_chassis_with_nodes(self): utils.create_test_node(chassis_id=self.chassis.id) self.assertRaises(exception.ChassisNotEmpty, self.dbapi.destroy_chassis, self.chassis.id) def test_create_chassis_already_exists(self): self.assertRaises(exception.ChassisAlreadyExists, utils.create_test_chassis, uuid=self.chassis.uuid)
apache-2.0
Haynie-Research-and-Development/jarvis
deps/lib/python3.4/site-packages/oauth2client/_pkce.py
10
2119
# Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility functions for implementing Proof Key for Code Exchange (PKCE) by OAuth Public Clients See RFC7636. """ import base64 import hashlib import os def code_verifier(n_bytes=64): """ Generates a 'code_verifier' as described in section 4.1 of RFC 7636. This is a 'high-entropy cryptographic random string' that will be impractical for an attacker to guess. Args: n_bytes: integer between 31 and 96, inclusive. default: 64 number of bytes of entropy to include in verifier. Returns: Bytestring, representing urlsafe base64-encoded random data. """ verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)) # https://tools.ietf.org/html/rfc7636#section-4.1 # minimum length of 43 characters and a maximum length of 128 characters. if len(verifier) < 43: raise ValueError("Verifier too short. n_bytes must be > 30.") elif len(verifier) > 128: raise ValueError("Verifier too long. n_bytes must be < 97.") else: return verifier def code_challenge(verifier): """ Creates a 'code_challenge' as described in section 4.2 of RFC 7636 by taking the sha256 hash of the verifier and then urlsafe base64-encoding it. Args: verifier: bytestring, representing a code_verifier as generated by code_verifier(). Returns: Bytestring, representing a urlsafe base64-encoded sha256 hash digest. """ return base64.urlsafe_b64encode(hashlib.sha256(verifier).digest())
gpl-2.0
taichatha/youtube-dl
youtube_dl/extractor/xtube.py
87
4621
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urllib_request, compat_urllib_parse_unquote, ) from ..utils import ( parse_duration, str_to_int, ) class XTubeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<id>[^/?&#]+))' _TEST = { 'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_', 'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab', 'info_dict': { 'id': 'kVTUy_G222_', 'ext': 'mp4', 'title': 'strange erotica', 'description': 'contains:an ET kind of thing', 'uploader': 'greenshowers', 'duration': 450, 'age_limit': 18, } } def _real_extract(self, url): video_id = self._match_id(url) req = compat_urllib_request.Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) video_title = self._html_search_regex( r'<p class="title">([^<]+)', webpage, 'title') video_uploader = self._html_search_regex( [r"var\s+contentOwnerId\s*=\s*'([^']+)", r'By:\s*<a href="/community/profile\.php\?user=([^"]+)'], webpage, 'uploader', fatal=False) video_description = self._html_search_regex( r'<p class="fieldsDesc">([^<]+)', webpage, 'description', fatal=False) duration = parse_duration(self._html_search_regex( r'<span class="bold">Runtime:</span> ([^<]+)</p>', webpage, 'duration', fatal=False)) view_count = str_to_int(self._html_search_regex( r'<span class="bold">Views:</span> ([\d,\.]+)</p>', webpage, 'view count', fatal=False)) comment_count = str_to_int(self._html_search_regex( r'<div id="commentBar">([\d,\.]+) Comments</div>', webpage, 'comment count', fatal=False)) formats = [] for format_id, video_url in re.findall( r'flashvars\.quality_(.+?)\s*=\s*"([^"]+)"', webpage): fmt = { 'url': compat_urllib_parse_unquote(video_url), 'format_id': format_id, } m = re.search(r'^(?P<height>\d+)[pP]', format_id) if m: fmt['height'] = int(m.group('height')) formats.append(fmt) if not formats: video_url = compat_urllib_parse_unquote(self._search_regex( 
r'flashvars\.video_url\s*=\s*"([^"]+)"', webpage, 'video URL')) formats.append({'url': video_url}) self._sort_formats(formats) return { 'id': video_id, 'title': video_title, 'uploader': video_uploader, 'description': video_description, 'duration': duration, 'view_count': view_count, 'comment_count': comment_count, 'formats': formats, 'age_limit': 18, } class XTubeUserIE(InfoExtractor): IE_DESC = 'XTube user profile' _VALID_URL = r'https?://(?:www\.)?xtube\.com/community/profile\.php\?(.*?)user=(?P<username>[^&#]+)(?:$|[&#])' _TEST = { 'url': 'http://www.xtube.com/community/profile.php?user=greenshowers', 'info_dict': { 'id': 'greenshowers', 'age_limit': 18, }, 'playlist_mincount': 155, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) username = mobj.group('username') profile_page = self._download_webpage( url, username, note='Retrieving profile page') video_count = int(self._search_regex( r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page, 'video count')) PAGE_SIZE = 25 urls = [] page_count = (video_count + PAGE_SIZE + 1) // PAGE_SIZE for n in range(1, page_count + 1): lpage_url = 'http://www.xtube.com/user_videos.php?page=%d&u=%s' % (n, username) lpage = self._download_webpage( lpage_url, username, note='Downloading page %d/%d' % (n, page_count)) urls.extend( re.findall(r'addthis:url="([^"]+)"', lpage)) return { '_type': 'playlist', 'id': username, 'age_limit': 18, 'entries': [{ '_type': 'url', 'url': eurl, 'ie_key': 'XTube', } for eurl in urls] }
unlicense
theyaa/Impala
tests/query_test/test_insert_permutation.py
15
1597
# Copyright (c) 2012 Cloudera, Inc. All rights reserved. # Targeted Impala insert tests # import logging import pytest from tests.common.test_vector import * from tests.common.impala_test_suite import * from tests.common.test_dimensions import create_exec_option_dimension class TestInsertQueriesWithPermutation(ImpalaTestSuite): """ Tests for the column permutation feature of INSERT statements """ @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestInsertQueriesWithPermutation, cls).add_test_dimensions() # Fix the exec_option vector to have a single value. This is needed should we decide # to run the insert tests in parallel (otherwise there will be two tests inserting # into the same table at the same time for the same file format). # TODO: When we do decide to run these tests in parallel we could create unique temp # tables for each test case to resolve the concurrency problems. # TODO: do we need to run with multiple file formats? This seems to be really # targeting FE behavior. cls.TestMatrix.add_dimension(create_exec_option_dimension( cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0])) cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload())) def test_insert_permutation(self, vector): map(self.cleanup_db, ["insert_permutation_test"]) self.run_test_case('QueryTest/insert_permutation', vector) def teardown_method(self, method): map(self.cleanup_db, ["insert_permutation_test"])
apache-2.0
kjagoo/wger_stark
wger/__init__.py
1
1027
#!/usr/bin/env python # -*- coding: utf-8 -*- """ :copyright: 2011, 2012 by OpenSlides team, see AUTHORS. :license: GNU GPL, see LICENSE for more details. """ VERSION = (1, 8, 0, 'alpha', 3) RELEASE = False def get_version(version=None, release=None): """Derives a PEP386-compliant version number from VERSION.""" if version is None: version = VERSION if release is None: release = RELEASE assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') # Now build the two parts of the version number: # main = X.Y[.Z] # sub = .devN - for pre-alpha releases # | {a|b|c}N - for alpha, beta and rc releases main_parts = 2 if version[2] == 0 else 3 main = '.'.join(str(x) for x in version[:main_parts]) if version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} sub = mapping[version[3]] + str(version[4]) else: sub = '' if not release: sub += '-dev' return main + sub
agpl-3.0
ChrisAntaki/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
124
3148
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest2 as unittest from webkitpy.layout_tests.models.test_failures import * class TestFailuresTest(unittest.TestCase): def assert_loads(self, cls): failure_obj = cls() s = failure_obj.dumps() new_failure_obj = TestFailure.loads(s) self.assertIsInstance(new_failure_obj, cls) self.assertEqual(failure_obj, new_failure_obj) # Also test that != is implemented. 
self.assertFalse(failure_obj != new_failure_obj) def test_unknown_failure_type(self): class UnknownFailure(TestFailure): def message(self): return '' failure_obj = UnknownFailure() self.assertRaises(ValueError, determine_result_type, [failure_obj]) def test_message_is_virtual(self): failure_obj = TestFailure() self.assertRaises(NotImplementedError, failure_obj.message) def test_loads(self): for c in ALL_FAILURE_CLASSES: self.assert_loads(c) def test_equals(self): self.assertEqual(FailureCrash(), FailureCrash()) self.assertNotEqual(FailureCrash(), FailureTimeout()) crash_set = set([FailureCrash(), FailureCrash()]) self.assertEqual(len(crash_set), 1) # The hash happens to be the name of the class, but sets still work: crash_set = set([FailureCrash(), "FailureCrash"]) self.assertEqual(len(crash_set), 2) def test_crashes(self): self.assertEqual(FailureCrash().message(), 'DumpRenderTree crashed') self.assertEqual(FailureCrash(process_name='foo', pid=1234).message(), 'foo crashed [pid=1234]')
bsd-3-clause
jonathan-beard/edx-platform
cms/djangoapps/contentstore/views/entrance_exam.py
77
10703
""" Entrance Exams view module -- handles all requests related to entrance exam management via Studio Intended to be utilized as an AJAX callback handler, versus a proper view/screen """ from functools import wraps import json import logging from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import ensure_csrf_cookie from django.http import HttpResponse, HttpResponseBadRequest from contentstore.views.helpers import create_xblock, remove_entrance_exam_graders from contentstore.views.item import delete_item from models.settings.course_metadata import CourseMetadata from opaque_keys.edx.keys import CourseKey, UsageKey from opaque_keys import InvalidKeyError from student.auth import has_course_author_access from util import milestones_helpers from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError from django.conf import settings from django.utils.translation import ugettext as _ __all__ = ['entrance_exam', ] log = logging.getLogger(__name__) # pylint: disable=invalid-name def _get_default_entrance_exam_minimum_pct(): """ Helper method to return the default value from configuration Converts integer values to decimals, since that what we use internally """ entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT) if entrance_exam_minimum_score_pct.is_integer(): entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100 return entrance_exam_minimum_score_pct # pylint: disable=missing-docstring def check_feature_enabled(feature_name): """ Ensure the specified feature is turned on. Return an HTTP 400 code if not. 
""" def _check_feature_enabled(view_func): def _decorator(request, *args, **kwargs): # Deny access if the entrance exam feature is disabled if not settings.FEATURES.get(feature_name, False): return HttpResponseBadRequest() return view_func(request, *args, **kwargs) return wraps(view_func)(_decorator) return _check_feature_enabled @login_required @ensure_csrf_cookie @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def entrance_exam(request, course_key_string): """ The restful handler for entrance exams. It allows retrieval of all the assets (as an HTML page), as well as uploading new assets, deleting assets, and changing the "locked" state of an asset. GET Retrieves the entrance exam module (metadata) for the specified course POST Adds an entrance exam module to the specified course. DELETE Removes the entrance exam from the course """ course_key = CourseKey.from_string(course_key_string) # Deny access if the user is valid, but they lack the proper object access privileges if not has_course_author_access(request.user, course_key): return HttpResponse(status=403) # Retrieve the entrance exam module for the specified course (returns 404 if none found) if request.method == 'GET': return _get_entrance_exam(request, course_key) # Create a new entrance exam for the specified course (returns 201 if created) elif request.method == 'POST': response_format = request.REQUEST.get('format', 'html') http_accept = request.META.get('http_accept') if response_format == 'json' or 'application/json' in http_accept: ee_min_score = request.POST.get('entrance_exam_minimum_score_pct', None) # if request contains empty value or none then save the default one. 
entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct() if ee_min_score != '' and ee_min_score is not None: entrance_exam_minimum_score_pct = float(ee_min_score) return create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct) return HttpResponse(status=400) # Remove the entrance exam module for the specified course (returns 204 regardless of existence) elif request.method == 'DELETE': return delete_entrance_exam(request, course_key) # No other HTTP verbs/methods are supported at this time else: return HttpResponse(status=405) @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct): """ api method to create an entrance exam. First clean out any old entrance exams. """ _delete_entrance_exam(request, course_key) return _create_entrance_exam( request=request, course_key=course_key, entrance_exam_minimum_score_pct=entrance_exam_minimum_score_pct ) def _create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct=None): """ Internal workflow operation to create an entrance exam """ # Provide a default value for the minimum score percent if nothing specified if entrance_exam_minimum_score_pct is None: entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct() # Confirm the course exists course = modulestore().get_course(course_key) if course is None: return HttpResponse(status=400) # Create the entrance exam item (currently it's just a chapter) payload = { 'category': "chapter", 'display_name': _("Entrance Exam"), 'parent_locator': unicode(course.location), 'is_entrance_exam': True, 'in_entrance_exam': True, } parent_locator = unicode(course.location) created_block = create_xblock( parent_locator=parent_locator, user=request.user, category='chapter', display_name=_('Entrance Exam'), is_entrance_exam=True ) # Set the entrance exam metadata flags for this course # Reload the course so we don't overwrite the new child reference course 
= modulestore().get_course(course_key) metadata = { 'entrance_exam_enabled': True, 'entrance_exam_minimum_score_pct': unicode(entrance_exam_minimum_score_pct), 'entrance_exam_id': unicode(created_block.location), } CourseMetadata.update_from_dict(metadata, course, request.user) # Create the entrance exam section item. create_xblock( parent_locator=unicode(created_block.location), user=request.user, category='sequential', display_name=_('Entrance Exam - Subsection') ) # Add an entrance exam milestone if one does not already exist namespace_choices = milestones_helpers.get_namespace_choices() milestone_namespace = milestones_helpers.generate_milestone_namespace( namespace_choices.get('ENTRANCE_EXAM'), course_key ) milestones = milestones_helpers.get_milestones(milestone_namespace) if len(milestones): milestone = milestones[0] else: description = 'Autogenerated during {} entrance exam creation.'.format(unicode(course.id)) milestone = milestones_helpers.add_milestone({ 'name': _('Completed Course Entrance Exam'), 'namespace': milestone_namespace, 'description': description }) relationship_types = milestones_helpers.get_milestone_relationship_types() milestones_helpers.add_course_milestone( unicode(course.id), relationship_types['REQUIRES'], milestone ) milestones_helpers.add_course_content_milestone( unicode(course.id), unicode(created_block.location), relationship_types['FULFILLS'], milestone ) return HttpResponse(status=201) def _get_entrance_exam(request, course_key): # pylint: disable=W0613 """ Internal workflow operation to retrieve an entrance exam """ course = modulestore().get_course(course_key) if course is None: return HttpResponse(status=400) if not getattr(course, 'entrance_exam_id'): return HttpResponse(status=404) try: exam_key = UsageKey.from_string(course.entrance_exam_id) except InvalidKeyError: return HttpResponse(status=404) try: exam_descriptor = modulestore().get_item(exam_key) return HttpResponse( _serialize_entrance_exam(exam_descriptor), 
status=200, mimetype='application/json') except ItemNotFoundError: return HttpResponse(status=404) @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def update_entrance_exam(request, course_key, exam_data): """ Operation to update course fields pertaining to entrance exams The update operation is not currently exposed directly via the API Because the operation is not exposed directly, we do not return a 200 response But we do return a 400 in the error case because the workflow is executed in a request context """ course = modulestore().get_course(course_key) if course: metadata = exam_data CourseMetadata.update_from_dict(metadata, course, request.user) @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def delete_entrance_exam(request, course_key): """ api method to delete an entrance exam """ return _delete_entrance_exam(request=request, course_key=course_key) def _delete_entrance_exam(request, course_key): """ Internal workflow operation to remove an entrance exam """ store = modulestore() course = store.get_course(course_key) if course is None: return HttpResponse(status=400) course_children = store.get_items( course_key, qualifiers={'category': 'chapter'} ) for course_child in course_children: if course_child.is_entrance_exam: delete_item(request, course_child.scope_ids.usage_id) milestones_helpers.remove_content_references(unicode(course_child.scope_ids.usage_id)) # Reset the entrance exam flags on the course # Reload the course so we have the latest state course = store.get_course(course_key) if getattr(course, 'entrance_exam_id'): metadata = { 'entrance_exam_enabled': False, 'entrance_exam_minimum_score_pct': None, 'entrance_exam_id': None, } CourseMetadata.update_from_dict(metadata, course, request.user) # Clean up any pre-existing entrance exam graders remove_entrance_exam_graders(course_key, request.user) return HttpResponse(status=204) def _serialize_entrance_exam(entrance_exam_module): """ Internal helper to convert an entrance exam module/object 
into JSON """ return json.dumps({ 'locator': unicode(entrance_exam_module.location) })
agpl-3.0
philippze/django-cms
cms/management/commands/subcommands/copy_lang.py
48
3158
# -*- coding: utf-8 -*- from django.conf import settings from django.core.management.base import BaseCommand, CommandError from cms.api import copy_plugins_to_language from cms.models import Page, StaticPlaceholder, EmptyTitle from cms.utils.copy_plugins import copy_plugins_to from cms.utils.i18n import get_language_list class CopyLangCommand(BaseCommand): args = '<language_from language_to>' help = u'duplicate the cms content from one lang to another (to boot a new lang) using draft pages' def handle(self, *args, **kwargs): verbose = 'verbose' in args only_empty = 'force-copy' not in args site = [arg.split("=")[1] for arg in args if arg.startswith("site")] if site: site = site.pop() else: site = settings.SITE_ID #test both langs try: assert len(args) >= 2 from_lang = args[0] to_lang = args[1] assert from_lang != to_lang except AssertionError: raise CommandError("Error: bad arguments -- Usage: manage.py cms copy-lang <lang_from> <lang_to>") try: assert from_lang in get_language_list(site) assert to_lang in get_language_list(site) except AssertionError: raise CommandError("Both languages have to be present in settings.LANGUAGES and settings.CMS_LANGUAGES") for page in Page.objects.on_site(site).drafts(): # copy title if from_lang in page.get_languages(): title = page.get_title_obj(to_lang, fallback=False) if isinstance(title, EmptyTitle): title = page.get_title_obj(from_lang) if verbose: self.stdout.write('copying title %s from language %s\n' % (title.title, from_lang)) title.id = None title.publisher_public_id = None title.publisher_state = 0 title.language = to_lang title.save() # copy plugins using API if verbose: self.stdout.write('copying plugins for %s from %s\n' % (page.get_page_title(from_lang), from_lang)) copy_plugins_to_language(page, from_lang, to_lang, only_empty) else: if verbose: self.stdout.write('Skipping page %s, language %s not defined\n' % (page, from_lang)) for static_placeholder in StaticPlaceholder.objects.all(): plugin_list = [] for plugin in 
static_placeholder.draft.get_plugins(): if plugin.language == from_lang: plugin_list.append(plugin) if plugin_list: if verbose: self.stdout.write("copying plugins from static_placeholder '%s' in '%s' to '%s'\n" % (static_placeholder.name, from_lang, to_lang)) copy_plugins_to(plugin_list, static_placeholder.draft, to_lang) self.stdout.write(u"all done")
bsd-3-clause
wang1352083/pythontool
python-2.7.12-lib/test/test_tempfile.py
6
35623
# tempfile.py unit tests. import tempfile import errno import io import os import signal import shutil import sys import re import warnings import contextlib import unittest from test import test_support as support warnings.filterwarnings("ignore", category=RuntimeWarning, message="mktemp", module=__name__) if hasattr(os, 'stat'): import stat has_stat = 1 else: has_stat = 0 has_textmode = (tempfile._text_openflags != tempfile._bin_openflags) has_spawnl = hasattr(os, 'spawnl') # TEST_FILES may need to be tweaked for systems depending on the maximum # number of files that can be opened at one time (see ulimit -n) if sys.platform in ('openbsd3', 'openbsd4'): TEST_FILES = 48 else: TEST_FILES = 100 # This is organized as one test for each chunk of code in tempfile.py, # in order of their appearance in the file. Testing which requires # threads is not done here. # Common functionality. class TC(unittest.TestCase): str_check = re.compile(r"[a-zA-Z0-9_-]{6}$") def failOnException(self, what, ei=None): if ei is None: ei = sys.exc_info() self.fail("%s raised %s: %s" % (what, ei[0], ei[1])) def nameCheck(self, name, dir, pre, suf): (ndir, nbase) = os.path.split(name) npre = nbase[:len(pre)] nsuf = nbase[len(nbase)-len(suf):] # check for equality of the absolute paths! 
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir), "file '%s' not in directory '%s'" % (name, dir)) self.assertEqual(npre, pre, "file '%s' does not begin with '%s'" % (nbase, pre)) self.assertEqual(nsuf, suf, "file '%s' does not end with '%s'" % (nbase, suf)) nbase = nbase[len(pre):len(nbase)-len(suf)] self.assertTrue(self.str_check.match(nbase), "random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/" % nbase) test_classes = [] class test_exports(TC): def test_exports(self): # There are no surprising symbols in the tempfile module dict = tempfile.__dict__ expected = { "NamedTemporaryFile" : 1, "TemporaryFile" : 1, "mkstemp" : 1, "mkdtemp" : 1, "mktemp" : 1, "TMP_MAX" : 1, "gettempprefix" : 1, "gettempdir" : 1, "tempdir" : 1, "template" : 1, "SpooledTemporaryFile" : 1 } unexp = [] for key in dict: if key[0] != '_' and key not in expected: unexp.append(key) self.assertTrue(len(unexp) == 0, "unexpected keys: %s" % unexp) test_classes.append(test_exports) class test__RandomNameSequence(TC): """Test the internal iterator object _RandomNameSequence.""" def setUp(self): self.r = tempfile._RandomNameSequence() def test_get_six_char_str(self): # _RandomNameSequence returns a six-character string s = self.r.next() self.nameCheck(s, '', '', '') def test_many(self): # _RandomNameSequence returns no duplicate strings (stochastic) dict = {} r = self.r for i in xrange(TEST_FILES): s = r.next() self.nameCheck(s, '', '', '') self.assertNotIn(s, dict) dict[s] = 1 def test_supports_iter(self): # _RandomNameSequence supports the iterator protocol i = 0 r = self.r try: for s in r: i += 1 if i == 20: break except: self.failOnException("iteration") @unittest.skipUnless(hasattr(os, 'fork'), "os.fork is required for this test") def test_process_awareness(self): # ensure that the random source differs between # child and parent. 
read_fd, write_fd = os.pipe() pid = None try: pid = os.fork() if not pid: os.close(read_fd) os.write(write_fd, next(self.r).encode("ascii")) os.close(write_fd) # bypass the normal exit handlers- leave those to # the parent. os._exit(0) parent_value = next(self.r) child_value = os.read(read_fd, len(parent_value)).decode("ascii") finally: if pid: # best effort to ensure the process can't bleed out # via any bugs above try: os.kill(pid, signal.SIGKILL) except EnvironmentError: pass os.close(read_fd) os.close(write_fd) self.assertNotEqual(child_value, parent_value) test_classes.append(test__RandomNameSequence) class test__candidate_tempdir_list(TC): """Test the internal function _candidate_tempdir_list.""" def test_nonempty_list(self): # _candidate_tempdir_list returns a nonempty list of strings cand = tempfile._candidate_tempdir_list() self.assertFalse(len(cand) == 0) for c in cand: self.assertIsInstance(c, basestring) def test_wanted_dirs(self): # _candidate_tempdir_list contains the expected directories # Make sure the interesting environment variables are all set. with support.EnvironmentVarGuard() as env: for envname in 'TMPDIR', 'TEMP', 'TMP': dirname = os.getenv(envname) if not dirname: env[envname] = os.path.abspath(envname) cand = tempfile._candidate_tempdir_list() for envname in 'TMPDIR', 'TEMP', 'TMP': dirname = os.getenv(envname) if not dirname: raise ValueError self.assertIn(dirname, cand) try: dirname = os.getcwd() except (AttributeError, os.error): dirname = os.curdir self.assertIn(dirname, cand) # Not practical to try to verify the presence of OS-specific # paths in this list. test_classes.append(test__candidate_tempdir_list) # We test _get_default_tempdir some more by testing gettempdir. 
class TestGetDefaultTempdir(TC): """Test _get_default_tempdir().""" def test_no_files_left_behind(self): # use a private empty directory our_temp_directory = tempfile.mkdtemp() try: # force _get_default_tempdir() to consider our empty directory def our_candidate_list(): return [our_temp_directory] with support.swap_attr(tempfile, "_candidate_tempdir_list", our_candidate_list): # verify our directory is empty after _get_default_tempdir() tempfile._get_default_tempdir() self.assertEqual(os.listdir(our_temp_directory), []) def raise_OSError(*args, **kwargs): raise OSError(-1) with support.swap_attr(io, "open", raise_OSError): # test again with failing io.open() with self.assertRaises(IOError) as cm: tempfile._get_default_tempdir() self.assertEqual(cm.exception.errno, errno.ENOENT) self.assertEqual(os.listdir(our_temp_directory), []) open = io.open def bad_writer(*args, **kwargs): fp = open(*args, **kwargs) fp.write = raise_OSError return fp with support.swap_attr(io, "open", bad_writer): # test again with failing write() with self.assertRaises(IOError) as cm: tempfile._get_default_tempdir() self.assertEqual(cm.exception.errno, errno.ENOENT) self.assertEqual(os.listdir(our_temp_directory), []) finally: shutil.rmtree(our_temp_directory) test_classes.append(TestGetDefaultTempdir) class test__get_candidate_names(TC): """Test the internal function _get_candidate_names.""" def test_retval(self): # _get_candidate_names returns a _RandomNameSequence object obj = tempfile._get_candidate_names() self.assertIsInstance(obj, tempfile._RandomNameSequence) def test_same_thing(self): # _get_candidate_names always returns the same object a = tempfile._get_candidate_names() b = tempfile._get_candidate_names() self.assertTrue(a is b) test_classes.append(test__get_candidate_names) @contextlib.contextmanager def _inside_empty_temp_dir(): dir = tempfile.mkdtemp() try: with support.swap_attr(tempfile, 'tempdir', dir): yield finally: support.rmtree(dir) def _mock_candidate_names(*names): 
return support.swap_attr(tempfile, '_get_candidate_names', lambda: iter(names)) class TestBadTempdir: def test_read_only_directory(self): with _inside_empty_temp_dir(): oldmode = mode = os.stat(tempfile.tempdir).st_mode mode &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH) os.chmod(tempfile.tempdir, mode) try: if os.access(tempfile.tempdir, os.W_OK): self.skipTest("can't set the directory read-only") with self.assertRaises(OSError) as cm: self.make_temp() self.assertIn(cm.exception.errno, (errno.EPERM, errno.EACCES)) self.assertEqual(os.listdir(tempfile.tempdir), []) finally: os.chmod(tempfile.tempdir, oldmode) def test_nonexisting_directory(self): with _inside_empty_temp_dir(): tempdir = os.path.join(tempfile.tempdir, 'nonexistent') with support.swap_attr(tempfile, 'tempdir', tempdir): with self.assertRaises(OSError) as cm: self.make_temp() self.assertEqual(cm.exception.errno, errno.ENOENT) def test_non_directory(self): with _inside_empty_temp_dir(): tempdir = os.path.join(tempfile.tempdir, 'file') open(tempdir, 'wb').close() with support.swap_attr(tempfile, 'tempdir', tempdir): with self.assertRaises(OSError) as cm: self.make_temp() self.assertIn(cm.exception.errno, (errno.ENOTDIR, errno.ENOENT)) class test__mkstemp_inner(TestBadTempdir, TC): """Test the internal function _mkstemp_inner.""" class mkstemped: _bflags = tempfile._bin_openflags _tflags = tempfile._text_openflags _close = os.close _unlink = os.unlink def __init__(self, dir, pre, suf, bin): if bin: flags = self._bflags else: flags = self._tflags (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags) def write(self, str): os.write(self.fd, str) def __del__(self): self._close(self.fd) self._unlink(self.name) def do_create(self, dir=None, pre="", suf="", bin=1): if dir is None: dir = tempfile.gettempdir() try: file = self.mkstemped(dir, pre, suf, bin) except: self.failOnException("_mkstemp_inner") self.nameCheck(file.name, dir, pre, suf) return file def test_basic(self): # _mkstemp_inner can 
create files self.do_create().write("blat") self.do_create(pre="a").write("blat") self.do_create(suf="b").write("blat") self.do_create(pre="a", suf="b").write("blat") self.do_create(pre="aa", suf=".txt").write("blat") def test_basic_many(self): # _mkstemp_inner can create many files (stochastic) extant = range(TEST_FILES) for i in extant: extant[i] = self.do_create(pre="aa") def test_choose_directory(self): # _mkstemp_inner can create files in a user-selected directory dir = tempfile.mkdtemp() try: self.do_create(dir=dir).write("blat") finally: os.rmdir(dir) @unittest.skipUnless(has_stat, 'os.stat not available') def test_file_mode(self): # _mkstemp_inner creates files with the proper mode file = self.do_create() mode = stat.S_IMODE(os.stat(file.name).st_mode) expected = 0600 if sys.platform in ('win32', 'os2emx'): # There's no distinction among 'user', 'group' and 'world'; # replicate the 'user' bits. user = expected >> 6 expected = user * (1 + 8 + 64) self.assertEqual(mode, expected) @unittest.skipUnless(has_spawnl, 'os.spawnl not available') def test_noinherit(self): # _mkstemp_inner file handles are not inherited by child processes if support.verbose: v="v" else: v="q" file = self.do_create() fd = "%d" % file.fd try: me = __file__ except NameError: me = sys.argv[0] # We have to exec something, so that FD_CLOEXEC will take # effect. The core of this test is therefore in # tf_inherit_check.py, which see. 
tester = os.path.join(os.path.dirname(os.path.abspath(me)), "tf_inherit_check.py") # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted, # but an arg with embedded spaces should be decorated with double # quotes on each end if sys.platform in ('win32',): decorated = '"%s"' % sys.executable tester = '"%s"' % tester else: decorated = sys.executable retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd) self.assertFalse(retval < 0, "child process caught fatal signal %d" % -retval) self.assertFalse(retval > 0, "child process reports failure %d"%retval) @unittest.skipUnless(has_textmode, "text mode not available") def test_textmode(self): # _mkstemp_inner can create files in text mode self.do_create(bin=0).write("blat\n") # XXX should test that the file really is a text file def make_temp(self): return tempfile._mkstemp_inner(tempfile.gettempdir(), tempfile.template, '', tempfile._bin_openflags) def test_collision_with_existing_file(self): # _mkstemp_inner tries another name when a file with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): (fd1, name1) = self.make_temp() os.close(fd1) self.assertTrue(name1.endswith('aaa')) (fd2, name2) = self.make_temp() os.close(fd2) self.assertTrue(name2.endswith('bbb')) def test_collision_with_existing_directory(self): # _mkstemp_inner tries another name when a directory with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): dir = tempfile.mkdtemp() self.assertTrue(dir.endswith('aaa')) (fd, name) = self.make_temp() os.close(fd) self.assertTrue(name.endswith('bbb')) test_classes.append(test__mkstemp_inner) class test_gettempprefix(TC): """Test gettempprefix().""" def test_sane_template(self): # gettempprefix returns a nonempty prefix string p = tempfile.gettempprefix() self.assertIsInstance(p, basestring) self.assertTrue(len(p) > 0) def test_usable_template(self): # gettempprefix 
returns a usable prefix string # Create a temp directory, avoiding use of the prefix. # Then attempt to create a file whose name is # prefix + 'xxxxxx.xxx' in that directory. p = tempfile.gettempprefix() + "xxxxxx.xxx" d = tempfile.mkdtemp(prefix="") try: p = os.path.join(d, p) try: fd = os.open(p, os.O_RDWR | os.O_CREAT) except: self.failOnException("os.open") os.close(fd) os.unlink(p) finally: os.rmdir(d) test_classes.append(test_gettempprefix) class test_gettempdir(TC): """Test gettempdir().""" def test_directory_exists(self): # gettempdir returns a directory which exists dir = tempfile.gettempdir() self.assertTrue(os.path.isabs(dir) or dir == os.curdir, "%s is not an absolute path" % dir) self.assertTrue(os.path.isdir(dir), "%s is not a directory" % dir) def test_directory_writable(self): # gettempdir returns a directory writable by the user # sneaky: just instantiate a NamedTemporaryFile, which # defaults to writing into the directory returned by # gettempdir. try: file = tempfile.NamedTemporaryFile() file.write("blat") file.close() except: self.failOnException("create file in %s" % tempfile.gettempdir()) def test_same_thing(self): # gettempdir always returns the same object a = tempfile.gettempdir() b = tempfile.gettempdir() self.assertTrue(a is b) test_classes.append(test_gettempdir) class test_mkstemp(TC): """Test mkstemp().""" def do_create(self, dir=None, pre="", suf=""): if dir is None: dir = tempfile.gettempdir() try: (fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf) (ndir, nbase) = os.path.split(name) adir = os.path.abspath(dir) self.assertEqual(adir, ndir, "Directory '%s' incorrectly returned as '%s'" % (adir, ndir)) except: self.failOnException("mkstemp") try: self.nameCheck(name, dir, pre, suf) finally: os.close(fd) os.unlink(name) def test_basic(self): # mkstemp can create files self.do_create() self.do_create(pre="a") self.do_create(suf="b") self.do_create(pre="a", suf="b") self.do_create(pre="aa", suf=".txt") self.do_create(dir=".") 
def test_choose_directory(self): # mkstemp can create directories in a user-selected directory dir = tempfile.mkdtemp() try: self.do_create(dir=dir) finally: os.rmdir(dir) test_classes.append(test_mkstemp) class test_mkdtemp(TestBadTempdir, TC): """Test mkdtemp().""" def make_temp(self): return tempfile.mkdtemp() def do_create(self, dir=None, pre="", suf=""): if dir is None: dir = tempfile.gettempdir() try: name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf) except: self.failOnException("mkdtemp") try: self.nameCheck(name, dir, pre, suf) return name except: os.rmdir(name) raise def test_basic(self): # mkdtemp can create directories os.rmdir(self.do_create()) os.rmdir(self.do_create(pre="a")) os.rmdir(self.do_create(suf="b")) os.rmdir(self.do_create(pre="a", suf="b")) os.rmdir(self.do_create(pre="aa", suf=".txt")) def test_basic_many(self): # mkdtemp can create many directories (stochastic) extant = range(TEST_FILES) try: for i in extant: extant[i] = self.do_create(pre="aa") finally: for i in extant: if(isinstance(i, basestring)): os.rmdir(i) def test_choose_directory(self): # mkdtemp can create directories in a user-selected directory dir = tempfile.mkdtemp() try: os.rmdir(self.do_create(dir=dir)) finally: os.rmdir(dir) @unittest.skipUnless(has_stat, 'os.stat not available') def test_mode(self): # mkdtemp creates directories with the proper mode dir = self.do_create() try: mode = stat.S_IMODE(os.stat(dir).st_mode) mode &= 0777 # Mask off sticky bits inherited from /tmp expected = 0700 if sys.platform in ('win32', 'os2emx'): # There's no distinction among 'user', 'group' and 'world'; # replicate the 'user' bits. 
user = expected >> 6 expected = user * (1 + 8 + 64) self.assertEqual(mode, expected) finally: os.rmdir(dir) def test_collision_with_existing_file(self): # mkdtemp tries another name when a file with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): file = tempfile.NamedTemporaryFile(delete=False) file.close() self.assertTrue(file.name.endswith('aaa')) dir = tempfile.mkdtemp() self.assertTrue(dir.endswith('bbb')) def test_collision_with_existing_directory(self): # mkdtemp tries another name when a directory with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): dir1 = tempfile.mkdtemp() self.assertTrue(dir1.endswith('aaa')) dir2 = tempfile.mkdtemp() self.assertTrue(dir2.endswith('bbb')) test_classes.append(test_mkdtemp) class test_mktemp(TC): """Test mktemp().""" # For safety, all use of mktemp must occur in a private directory. # We must also suppress the RuntimeWarning it generates. def setUp(self): self.dir = tempfile.mkdtemp() def tearDown(self): if self.dir: os.rmdir(self.dir) self.dir = None class mktemped: _unlink = os.unlink _bflags = tempfile._bin_openflags def __init__(self, dir, pre, suf): self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf) # Create the file. This will raise an exception if it's # mysteriously appeared in the meanwhile. 
os.close(os.open(self.name, self._bflags, 0600)) def __del__(self): self._unlink(self.name) def do_create(self, pre="", suf=""): try: file = self.mktemped(self.dir, pre, suf) except: self.failOnException("mktemp") self.nameCheck(file.name, self.dir, pre, suf) return file def test_basic(self): # mktemp can choose usable file names self.do_create() self.do_create(pre="a") self.do_create(suf="b") self.do_create(pre="a", suf="b") self.do_create(pre="aa", suf=".txt") def test_many(self): # mktemp can choose many usable file names (stochastic) extant = range(TEST_FILES) for i in extant: extant[i] = self.do_create(pre="aa") ## def test_warning(self): ## # mktemp issues a warning when used ## warnings.filterwarnings("error", ## category=RuntimeWarning, ## message="mktemp") ## self.assertRaises(RuntimeWarning, ## tempfile.mktemp, dir=self.dir) test_classes.append(test_mktemp) # We test _TemporaryFileWrapper by testing NamedTemporaryFile. class test_NamedTemporaryFile(TC): """Test NamedTemporaryFile().""" def do_create(self, dir=None, pre="", suf="", delete=True): if dir is None: dir = tempfile.gettempdir() try: file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf, delete=delete) except: self.failOnException("NamedTemporaryFile") self.nameCheck(file.name, dir, pre, suf) return file def test_basic(self): # NamedTemporaryFile can create files self.do_create() self.do_create(pre="a") self.do_create(suf="b") self.do_create(pre="a", suf="b") self.do_create(pre="aa", suf=".txt") def test_creates_named(self): # NamedTemporaryFile creates files with names f = tempfile.NamedTemporaryFile() self.assertTrue(os.path.exists(f.name), "NamedTemporaryFile %s does not exist" % f.name) def test_del_on_close(self): # A NamedTemporaryFile is deleted when closed dir = tempfile.mkdtemp() try: f = tempfile.NamedTemporaryFile(dir=dir) f.write('blat') f.close() self.assertFalse(os.path.exists(f.name), "NamedTemporaryFile %s exists after close" % f.name) finally: os.rmdir(dir) def 
test_dis_del_on_close(self): # Tests that delete-on-close can be disabled dir = tempfile.mkdtemp() tmp = None try: f = tempfile.NamedTemporaryFile(dir=dir, delete=False) tmp = f.name f.write('blat') f.close() self.assertTrue(os.path.exists(f.name), "NamedTemporaryFile %s missing after close" % f.name) finally: if tmp is not None: os.unlink(tmp) os.rmdir(dir) def test_multiple_close(self): # A NamedTemporaryFile can be closed many times without error f = tempfile.NamedTemporaryFile() f.write('abc\n') f.close() try: f.close() f.close() except: self.failOnException("close") def test_context_manager(self): # A NamedTemporaryFile can be used as a context manager with tempfile.NamedTemporaryFile() as f: self.assertTrue(os.path.exists(f.name)) self.assertFalse(os.path.exists(f.name)) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) def test_no_leak_fd(self): # Issue #21058: don't leak file descriptor when fdopen() fails old_close = os.close old_fdopen = os.fdopen closed = [] def close(fd): closed.append(fd) def fdopen(*args): raise ValueError() os.close = close os.fdopen = fdopen try: self.assertRaises(ValueError, tempfile.NamedTemporaryFile) self.assertEqual(len(closed), 1) finally: os.close = old_close os.fdopen = old_fdopen def test_bad_mode(self): dir = tempfile.mkdtemp() self.addCleanup(support.rmtree, dir) with self.assertRaises(TypeError): tempfile.NamedTemporaryFile(mode=(), dir=dir) self.assertEqual(os.listdir(dir), []) # How to test the mode and bufsize parameters? 
test_classes.append(test_NamedTemporaryFile) class test_SpooledTemporaryFile(TC): """Test SpooledTemporaryFile().""" def do_create(self, max_size=0, dir=None, pre="", suf=""): if dir is None: dir = tempfile.gettempdir() try: file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf) except: self.failOnException("SpooledTemporaryFile") return file def test_basic(self): # SpooledTemporaryFile can create files f = self.do_create() self.assertFalse(f._rolled) f = self.do_create(max_size=100, pre="a", suf=".txt") self.assertFalse(f._rolled) def test_del_on_close(self): # A SpooledTemporaryFile is deleted when closed dir = tempfile.mkdtemp() try: f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir) self.assertFalse(f._rolled) f.write('blat ' * 5) self.assertTrue(f._rolled) filename = f.name f.close() self.assertFalse(os.path.exists(filename), "SpooledTemporaryFile %s exists after close" % filename) finally: os.rmdir(dir) def test_rewrite_small(self): # A SpooledTemporaryFile can be written to multiple within the max_size f = self.do_create(max_size=30) self.assertFalse(f._rolled) for i in range(5): f.seek(0, 0) f.write('x' * 20) self.assertFalse(f._rolled) def test_write_sequential(self): # A SpooledTemporaryFile should hold exactly max_size bytes, and roll # over afterward f = self.do_create(max_size=30) self.assertFalse(f._rolled) f.write('x' * 20) self.assertFalse(f._rolled) f.write('x' * 10) self.assertFalse(f._rolled) f.write('x') self.assertTrue(f._rolled) def test_writelines(self): # Verify writelines with a SpooledTemporaryFile f = self.do_create() f.writelines((b'x', b'y', b'z')) f.seek(0) buf = f.read() self.assertEqual(buf, b'xyz') def test_writelines_sequential(self): # A SpooledTemporaryFile should hold exactly max_size bytes, and roll # over afterward f = self.do_create(max_size=35) f.writelines((b'x' * 20, b'x' * 10, b'x' * 5)) self.assertFalse(f._rolled) f.write(b'x') self.assertTrue(f._rolled) def test_xreadlines(self): f 
= self.do_create(max_size=20) f.write(b'abc\n' * 5) f.seek(0) self.assertFalse(f._rolled) self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5) f.write(b'x\ny') self.assertTrue(f._rolled) f.seek(0) self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5 + [b'x\n', b'y']) def test_sparse(self): # A SpooledTemporaryFile that is written late in the file will extend # when that occurs f = self.do_create(max_size=30) self.assertFalse(f._rolled) f.seek(100, 0) self.assertFalse(f._rolled) f.write('x') self.assertTrue(f._rolled) def test_fileno(self): # A SpooledTemporaryFile should roll over to a real file on fileno() f = self.do_create(max_size=30) self.assertFalse(f._rolled) self.assertTrue(f.fileno() > 0) self.assertTrue(f._rolled) def test_multiple_close_before_rollover(self): # A SpooledTemporaryFile can be closed many times without error f = tempfile.SpooledTemporaryFile() f.write('abc\n') self.assertFalse(f._rolled) f.close() try: f.close() f.close() except: self.failOnException("close") def test_multiple_close_after_rollover(self): # A SpooledTemporaryFile can be closed many times without error f = tempfile.SpooledTemporaryFile(max_size=1) f.write('abc\n') self.assertTrue(f._rolled) f.close() try: f.close() f.close() except: self.failOnException("close") def test_bound_methods(self): # It should be OK to steal a bound method from a SpooledTemporaryFile # and use it independently; when the file rolls over, those bound # methods should continue to function f = self.do_create(max_size=30) read = f.read write = f.write seek = f.seek write("a" * 35) write("b" * 35) seek(0, 0) self.assertTrue(read(70) == 'a'*35 + 'b'*35) def test_properties(self): f = tempfile.SpooledTemporaryFile(max_size=10) f.write(b'x' * 10) self.assertFalse(f._rolled) self.assertEqual(f.mode, 'w+b') self.assertIsNone(f.name) with self.assertRaises(AttributeError): f.newlines with self.assertRaises(AttributeError): f.encoding f.write(b'x') self.assertTrue(f._rolled) self.assertEqual(f.mode, 'w+b') 
self.assertIsNotNone(f.name) with self.assertRaises(AttributeError): f.newlines with self.assertRaises(AttributeError): f.encoding def test_context_manager_before_rollover(self): # A SpooledTemporaryFile can be used as a context manager with tempfile.SpooledTemporaryFile(max_size=1) as f: self.assertFalse(f._rolled) self.assertFalse(f.closed) self.assertTrue(f.closed) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) def test_context_manager_during_rollover(self): # A SpooledTemporaryFile can be used as a context manager with tempfile.SpooledTemporaryFile(max_size=1) as f: self.assertFalse(f._rolled) f.write('abc\n') f.flush() self.assertTrue(f._rolled) self.assertFalse(f.closed) self.assertTrue(f.closed) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) def test_context_manager_after_rollover(self): # A SpooledTemporaryFile can be used as a context manager f = tempfile.SpooledTemporaryFile(max_size=1) f.write('abc\n') f.flush() self.assertTrue(f._rolled) with f: self.assertFalse(f.closed) self.assertTrue(f.closed) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) test_classes.append(test_SpooledTemporaryFile) class test_TemporaryFile(TC): """Test TemporaryFile().""" def test_basic(self): # TemporaryFile can create files # No point in testing the name params - the file has no name. try: tempfile.TemporaryFile() except: self.failOnException("TemporaryFile") def test_has_no_name(self): # TemporaryFile creates files with no names (on this system) dir = tempfile.mkdtemp() f = tempfile.TemporaryFile(dir=dir) f.write('blat') # Sneaky: because this file has no name, it should not prevent # us from removing the directory it was created in. 
try: os.rmdir(dir) except: ei = sys.exc_info() # cleanup f.close() os.rmdir(dir) self.failOnException("rmdir", ei) def test_multiple_close(self): # A TemporaryFile can be closed many times without error f = tempfile.TemporaryFile() f.write('abc\n') f.close() try: f.close() f.close() except: self.failOnException("close") # How to test the mode and bufsize parameters? if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile: test_classes.append(test_TemporaryFile) def test_main(): support.run_unittest(*test_classes) if __name__ == "__main__": test_main()
mit
irmen/Pyro5
tests/test_echoserver.py
1
2734
""" Tests for the built-in test echo server. Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net). """ import time import pytest from threading import Thread, Event import Pyro5.client import Pyro5.errors import Pyro5.utils.echoserver as echoserver from Pyro5 import config class EchoServerThread(Thread): def __init__(self): super(EchoServerThread, self).__init__() self.setDaemon(True) self.started = Event() self.echodaemon = self.echoserver = self.uri = None def run(self): self.echodaemon, self.echoserver, self.uri = echoserver.main(args=["-q"], returnWithoutLooping=True) self.started.set() self.echodaemon.requestLoop(loopCondition=lambda: not self.echoserver._must_shutdown) class TestEchoserver: def setup_method(self): self.echoserverthread = EchoServerThread() self.echoserverthread.start() self.echoserverthread.started.wait() self.uri = self.echoserverthread.uri def teardown_method(self): self.echoserverthread.echodaemon.shutdown() time.sleep(0.02) self.echoserverthread.join() config.SERVERTYPE = "thread" def testExposed(self): e = Pyro5.utils.echoserver.EchoServer() assert hasattr(e, "_pyroExposed") def testEcho(self): with Pyro5.client.Proxy(self.uri) as echo: try: assert echo.echo("hello") == "hello" assert echo.echo(None) is None assert echo.echo([1,2,3]) == [1,2,3] finally: echo.shutdown() def testError(self): with Pyro5.client.Proxy(self.uri) as echo: with pytest.raises(Exception) as x: echo.error() tb = "".join(Pyro5.errors.get_pyro_traceback(x.type, x.value, x.tb)) assert "Remote traceback" in tb assert "ValueError" in tb assert str(x.value) == "this is the generated error from echoserver echo() method" with pytest.raises(Exception) as x: echo.error_with_text() tb = "".join(Pyro5.errors.get_pyro_traceback(x.type, x.value, x.tb)) assert "Remote traceback" in tb assert "ValueError" in tb assert str(x.value) == "the message of the error" def testGenerator(self): with Pyro5.client.Proxy(self.uri) as echo: remotegenerator = 
echo.generator() assert isinstance(remotegenerator, Pyro5.client._StreamResultIterator) next(remotegenerator) next(remotegenerator) next(remotegenerator) with pytest.raises(StopIteration): next(remotegenerator)
mit
CunningLogic/vivo-2.6.35
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python """ Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com> Licensed under the terms of the GNU GPL License version 2 This script parses a trace provided by the function tracer in kernel/trace/trace_functions.c The resulted trace is processed into a tree to produce a more human view of the call stack by drawing textual but hierarchical tree of calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER # mount -t debugfs nodev /sys/kernel/debug # echo function > /sys/kernel/debug/tracing/current_tracer $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ import sys, re class CallTree: """ This class provides a tree representation of the functions call stack. If a function has no parent in the kernel (interrupt, syscall, kernel thread...) then it is attached to a virtual parent called ROOT. """ ROOT = None def __init__(self, func, time = None, parent = None): self._func = func self._time = time if parent is None: self._parent = CallTree.ROOT else: self._parent = parent self._children = [] def calls(self, func, calltime): """ If a function calls another one, call this method to insert it into the tree at the appropriate place. @return: A reference to the newly created child node. """ child = CallTree(func, calltime, self) self._children.append(child) return child def getParent(self, func): """ Retrieve the last parent of the current node that has the name given by func. If this function is not on a parent, then create it as new child of root @return: A reference to the parent. 
""" tree = self while tree != CallTree.ROOT and tree._func != func: tree = tree._parent if tree == CallTree.ROOT: child = CallTree.ROOT.calls(func, None) return child return tree def __repr__(self): return self.__toString("", True) def __toString(self, branch, lastChild): if self._time is not None: s = "%s----%s (%s)\n" % (branch, self._func, self._time) else: s = "%s----%s\n" % (branch, self._func) i = 0 if lastChild: branch = branch[:-1] + " " while i < len(self._children): if i != len(self._children) - 1: s += "%s" % self._children[i].__toString(branch +\ " |", False) else: s += "%s" % self._children[i].__toString(branch +\ " |", True) i += 1 return s class BrokenLineException(Exception): """If the last line is not complete because of the pipe breakage, we want to stop the processing and ignore this line. """ pass class CommentLineException(Exception): """ If the line is a comment (as in the beginning of the trace file), just ignore it. """ pass def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException return (m.group(1), m.group(2), m.group(3)) def main(): CallTree.ROOT = CallTree("Root (Nowhere)", None, None) tree = CallTree.ROOT for line in sys.stdin: try: calltime, callee, caller = parseLine(line) except BrokenLineException: break except CommentLineException: continue tree = tree.getParent(caller) tree = tree.calls(callee, calltime) print CallTree.ROOT if __name__ == "__main__": main()
gpl-2.0
normanmaurer/autobahntestsuite-maven-plugin
src/main/resources/twisted/trial/runner.py
1
26192
# -*- test-case-name: twisted.trial.test.test_runner -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ A miscellany of code used to run Trial tests. Maintainer: Jonathan Lange """ __all__ = [ 'TestSuite', 'DestructiveTestSuite', 'DryRunVisitor', 'ErrorHolder', 'LoggedSuite', 'TestHolder', 'TestLoader', 'TrialRunner', 'TrialSuite', 'filenameToModule', 'isPackage', 'isPackageDirectory', 'isTestCase', 'name', 'samefile', 'NOT_IN_TEST', ] import os, types, warnings, sys, inspect, imp import doctest, time from twisted.python import reflect, log, failure, modules, filepath from twisted.python.deprecate import deprecatedModuleAttribute from twisted.python.versions import Version from twisted.internet import defer from twisted.trial import util, unittest from twisted.trial.itrial import ITestCase from twisted.trial.reporter import UncleanWarningsReporterWrapper # These are imported so that they remain in the public API for t.trial.runner from twisted.trial.unittest import TestSuite from zope.interface import implements pyunit = __import__('unittest') def isPackage(module): """Given an object return True if the object looks like a package""" if not isinstance(module, types.ModuleType): return False basename = os.path.splitext(os.path.basename(module.__file__))[0] return basename == '__init__' def isPackageDirectory(dirname): """Is the directory at path 'dirname' a Python package directory? Returns the name of the __init__ file (it may have a weird extension) if dirname is a package directory. Otherwise, returns False""" for ext in zip(*imp.get_suffixes())[0]: initFile = '__init__' + ext if os.path.exists(os.path.join(dirname, initFile)): return initFile return False def samefile(filename1, filename2): """ A hacky implementation of C{os.path.samefile}. Used by L{filenameToModule} when the platform doesn't provide C{os.path.samefile}. Do not use this. 
""" return os.path.abspath(filename1) == os.path.abspath(filename2) def filenameToModule(fn): """ Given a filename, do whatever possible to return a module object matching that file. If the file in question is a module in Python path, properly import and return that module. Otherwise, load the source manually. @param fn: A filename. @return: A module object. @raise ValueError: If C{fn} does not exist. """ if not os.path.exists(fn): raise ValueError("%r doesn't exist" % (fn,)) try: ret = reflect.namedAny(reflect.filenameToModuleName(fn)) except (ValueError, AttributeError): # Couldn't find module. The file 'fn' is not in PYTHONPATH return _importFromFile(fn) # ensure that the loaded module matches the file retFile = os.path.splitext(ret.__file__)[0] + '.py' # not all platforms (e.g. win32) have os.path.samefile same = getattr(os.path, 'samefile', samefile) if os.path.isfile(fn) and not same(fn, retFile): del sys.modules[ret.__name__] ret = _importFromFile(fn) return ret def _importFromFile(fn, moduleName=None): fn = _resolveDirectory(fn) if not moduleName: moduleName = os.path.splitext(os.path.split(fn)[-1])[0] if moduleName in sys.modules: return sys.modules[moduleName] fd = open(fn, 'r') try: module = imp.load_source(moduleName, fn, fd) finally: fd.close() return module def _resolveDirectory(fn): if os.path.isdir(fn): initFile = isPackageDirectory(fn) if initFile: fn = os.path.join(fn, initFile) else: raise ValueError('%r is not a package directory' % (fn,)) return fn def _getMethodNameInClass(method): """ Find the attribute name on the method's class which refers to the method. 
For some methods, notably decorators which have not had __name__ set correctly: getattr(method.im_class, method.__name__) != method """ if getattr(method.im_class, method.__name__, object()) != method: for alias in dir(method.im_class): if getattr(method.im_class, alias, object()) == method: return alias return method.__name__ class DestructiveTestSuite(TestSuite): """ A test suite which remove the tests once run, to minimize memory usage. """ def run(self, result): """ Almost the same as L{TestSuite.run}, but with C{self._tests} being empty at the end. """ while self._tests: if result.shouldStop: break test = self._tests.pop(0) test(result) return result # When an error occurs outside of any test, the user will see this string # in place of a test's name. NOT_IN_TEST = "<not in test>" class LoggedSuite(TestSuite): """ Any errors logged in this suite will be reported to the L{TestResult} object. """ def run(self, result): """ Run the suite, storing all errors in C{result}. If an error is logged while no tests are running, then it will be added as an error to C{result}. @param result: A L{TestResult} object. """ observer = unittest._logObserver observer._add() super(LoggedSuite, self).run(result) observer._remove() for error in observer.getErrors(): result.addError(TestHolder(NOT_IN_TEST), error) observer.flushErrors() class TrialSuite(TestSuite): """ Suite to wrap around every single test in a C{trial} run. Used internally by Trial to set up things necessary for Trial tests to work, regardless of what context they are run in. 
""" def __init__(self, tests=(), forceGarbageCollection=False): if forceGarbageCollection: newTests = [] for test in tests: test = unittest.decorate( test, unittest._ForceGarbageCollectionDecorator) newTests.append(test) tests = newTests suite = LoggedSuite(tests) super(TrialSuite, self).__init__([suite]) def _bail(self): from twisted.internet import reactor d = defer.Deferred() reactor.addSystemEventTrigger('after', 'shutdown', lambda: d.callback(None)) reactor.fireSystemEvent('shutdown') # radix's suggestion # As long as TestCase does crap stuff with the reactor we need to # manually shutdown the reactor here, and that requires util.wait # :( # so that the shutdown event completes unittest.TestCase('mktemp')._wait(d) def run(self, result): try: TestSuite.run(self, result) finally: self._bail() def name(thing): """ @param thing: an object from modules (instance of PythonModule, PythonAttribute), a TestCase subclass, or an instance of a TestCase. """ if isTestCase(thing): # TestCase subclass theName = reflect.qual(thing) else: # thing from trial, or thing from modules. # this monstrosity exists so that modules' objects do not have to # implement id(). -jml try: theName = thing.id() except AttributeError: theName = thing.name return theName def isTestCase(obj): """ @return: C{True} if C{obj} is a class that contains test cases, C{False} otherwise. Used to find all the tests in a module. """ try: return issubclass(obj, pyunit.TestCase) except TypeError: return False class TestHolder(object): """ Placeholder for a L{TestCase} inside a reporter. As far as a L{TestResult} is concerned, this looks exactly like a unit test. """ implements(ITestCase) failureException = None def __init__(self, description): """ @param description: A string to be displayed L{TestResult}. 
""" self.description = description def __call__(self, result): return self.run(result) def id(self): return self.description def countTestCases(self): return 0 def run(self, result): """ This test is just a placeholder. Run the test successfully. @param result: The C{TestResult} to store the results in. @type result: L{twisted.trial.itrial.IReporter}. """ result.startTest(self) result.addSuccess(self) result.stopTest(self) def shortDescription(self): return self.description class ErrorHolder(TestHolder): """ Used to insert arbitrary errors into a test suite run. Provides enough methods to look like a C{TestCase}, however, when it is run, it simply adds an error to the C{TestResult}. The most common use-case is for when a module fails to import. """ def __init__(self, description, error): """ @param description: A string used by C{TestResult}s to identify this error. Generally, this is the name of a module that failed to import. @param error: The error to be added to the result. Can be an `exc_info` tuple or a L{twisted.python.failure.Failure}. """ super(ErrorHolder, self).__init__(description) self.error = util.excInfoOrFailureToExcInfo(error) def __repr__(self): return "<ErrorHolder description=%r error=%s%s>" % ( # Format the exception type and arguments explicitly, as exception # objects do not have nice looking string formats on Python 2.4. self.description, self.error[0].__name__, self.error[1].args) def run(self, result): """ Run the test, reporting the error. @param result: The C{TestResult} to store the results in. @type result: L{twisted.trial.itrial.IReporter}. """ result.startTest(self) result.addError(self, self.error) result.stopTest(self) class TestLoader(object): """ I find tests inside function, modules, files -- whatever -- then return them wrapped inside a Test (either a L{TestSuite} or a L{TestCase}). @ivar methodPrefix: A string prefix. C{TestLoader} will assume that all the methods in a class that begin with C{methodPrefix} are test cases. 
@ivar modulePrefix: A string prefix. Every module in a package that begins with C{modulePrefix} is considered a module full of tests. @ivar forceGarbageCollection: A flag applied to each C{TestCase} loaded. See L{unittest.TestCase} for more information. @ivar sorter: A key function used to sort C{TestCase}s, test classes, modules and packages. @ivar suiteFactory: A callable which is passed a list of tests (which themselves may be suites of tests). Must return a test suite. """ methodPrefix = 'test' modulePrefix = 'test_' def __init__(self): self.suiteFactory = TestSuite self.sorter = name self._importErrors = [] def sort(self, xs): """ Sort the given things using L{sorter}. @param xs: A list of test cases, class or modules. """ return sorted(xs, key=self.sorter) def findTestClasses(self, module): """Given a module, return all Trial test classes""" classes = [] for name, val in inspect.getmembers(module): if isTestCase(val): classes.append(val) return self.sort(classes) def findByName(self, name): """ Return a Python object given a string describing it. @param name: a string which may be either a filename or a fully-qualified Python name. @return: If C{name} is a filename, return the module. If C{name} is a fully-qualified Python name, return the object it refers to. """ if os.path.exists(name): return filenameToModule(name) return reflect.namedAny(name) def loadModule(self, module): """ Return a test suite with all the tests from a module. Included are TestCase subclasses and doctests listed in the module's __doctests__ module. If that's not good for you, put a function named either C{testSuite} or C{test_suite} in your module that returns a TestSuite, and I'll use the results of that instead. If C{testSuite} and C{test_suite} are both present, then I'll use C{testSuite}. """ ## XXX - should I add an optional parameter to disable the check for ## a custom suite. 
## OR, should I add another method if not isinstance(module, types.ModuleType): raise TypeError("%r is not a module" % (module,)) if hasattr(module, 'testSuite'): return module.testSuite() elif hasattr(module, 'test_suite'): return module.test_suite() suite = self.suiteFactory() for testClass in self.findTestClasses(module): suite.addTest(self.loadClass(testClass)) if not hasattr(module, '__doctests__'): return suite docSuite = self.suiteFactory() for doctest in module.__doctests__: docSuite.addTest(self.loadDoctests(doctest)) return self.suiteFactory([suite, docSuite]) loadTestsFromModule = loadModule def loadClass(self, klass): """ Given a class which contains test cases, return a sorted list of C{TestCase} instances. """ if not (isinstance(klass, type) or isinstance(klass, types.ClassType)): raise TypeError("%r is not a class" % (klass,)) if not isTestCase(klass): raise ValueError("%r is not a test case" % (klass,)) names = self.getTestCaseNames(klass) tests = self.sort([self._makeCase(klass, self.methodPrefix+name) for name in names]) return self.suiteFactory(tests) loadTestsFromTestCase = loadClass def getTestCaseNames(self, klass): """ Given a class that contains C{TestCase}s, return a list of names of methods that probably contain tests. """ return reflect.prefixedMethodNames(klass, self.methodPrefix) def loadMethod(self, method): """ Given a method of a C{TestCase} that represents a test, return a C{TestCase} instance for that test. """ if not isinstance(method, types.MethodType): raise TypeError("%r not a method" % (method,)) return self._makeCase(method.im_class, _getMethodNameInClass(method)) def _makeCase(self, klass, methodName): return klass(methodName) def loadPackage(self, package, recurse=False): """ Load tests from a module object representing a package, and return a TestSuite containing those tests. Tests are only loaded from modules whose name begins with 'test_' (or whatever C{modulePrefix} is set to). 
@param package: a types.ModuleType object (or reasonable facsimilie obtained by importing) which may contain tests. @param recurse: A boolean. If True, inspect modules within packages within the given package (and so on), otherwise, only inspect modules in the package itself. @raise: TypeError if 'package' is not a package. @return: a TestSuite created with my suiteFactory, containing all the tests. """ if not isPackage(package): raise TypeError("%r is not a package" % (package,)) pkgobj = modules.getModule(package.__name__) if recurse: discovery = pkgobj.walkModules() else: discovery = pkgobj.iterModules() discovered = [] for disco in discovery: if disco.name.split(".")[-1].startswith(self.modulePrefix): discovered.append(disco) suite = self.suiteFactory() for modinfo in self.sort(discovered): try: module = modinfo.load() except: thingToAdd = ErrorHolder(modinfo.name, failure.Failure()) else: thingToAdd = self.loadModule(module) suite.addTest(thingToAdd) return suite def loadDoctests(self, module): """ Return a suite of tests for all the doctests defined in C{module}. @param module: A module object or a module name. """ if isinstance(module, str): try: module = reflect.namedAny(module) except: return ErrorHolder(module, failure.Failure()) if not inspect.ismodule(module): warnings.warn("trial only supports doctesting modules") return extraArgs = {} if sys.version_info > (2, 4): # Work around Python issue2604: DocTestCase.tearDown clobbers globs def saveGlobals(test): """ Save C{test.globs} and replace it with a copy so that if necessary, the original will be available for the next test run. """ test._savedGlobals = getattr(test, '_savedGlobals', test.globs) test.globs = test._savedGlobals.copy() extraArgs['setUp'] = saveGlobals return doctest.DocTestSuite(module, **extraArgs) def loadAnything(self, thing, recurse=False): """ Given a Python object, return whatever tests that are in it. Whatever 'in' might mean. @param thing: A Python object. 
A module, method, class or package. @param recurse: Whether or not to look in subpackages of packages. Defaults to False. @return: A C{TestCase} or C{TestSuite}. """ if isinstance(thing, types.ModuleType): if isPackage(thing): return self.loadPackage(thing, recurse) return self.loadModule(thing) elif isinstance(thing, types.ClassType): return self.loadClass(thing) elif isinstance(thing, type): return self.loadClass(thing) elif isinstance(thing, types.MethodType): return self.loadMethod(thing) raise TypeError("No loader for %r. Unrecognized type" % (thing,)) def loadByName(self, name, recurse=False): """ Given a string representing a Python object, return whatever tests are in that object. If C{name} is somehow inaccessible (e.g. the module can't be imported, there is no Python object with that name etc) then return an L{ErrorHolder}. @param name: The fully-qualified name of a Python object. """ try: thing = self.findByName(name) except: return ErrorHolder(name, failure.Failure()) return self.loadAnything(thing, recurse) loadTestsFromName = loadByName def loadByNames(self, names, recurse=False): """ Construct a TestSuite containing all the tests found in 'names', where names is a list of fully qualified python names and/or filenames. The suite returned will have no duplicate tests, even if the same object is named twice. """ things = [] errors = [] for name in names: try: things.append(self.findByName(name)) except: errors.append(ErrorHolder(name, failure.Failure())) suites = [self.loadAnything(thing, recurse) for thing in self._uniqueTests(things)] suites.extend(errors) return self.suiteFactory(suites) def _uniqueTests(self, things): """ Gather unique suite objects from loaded things. This will guarantee uniqueness of inherited methods on TestCases which would otherwise hash to same value and collapse to one test unexpectedly if using simpler means: e.g. set(). 
""" seen = set() for thing in things: if isinstance(thing, types.MethodType): thing = (thing, thing.im_class) else: thing = (thing,) if thing not in seen: yield thing[0] seen.add(thing) class DryRunVisitor(object): """ A visitor that makes a reporter think that every test visited has run successfully. """ deprecatedModuleAttribute( Version("Twisted", 13, 0, 0), "Trial no longer has support for visitors", "twisted.trial.runner", "DryRunVisitor") def __init__(self, reporter): """ @param reporter: A C{TestResult} object. """ self.reporter = reporter def markSuccessful(self, testCase): """ Convince the reporter that this test has been run successfully. """ self.reporter.startTest(testCase) self.reporter.addSuccess(testCase) self.reporter.stopTest(testCase) class TrialRunner(object): """ A specialised runner that the trial front end uses. """ DEBUG = 'debug' DRY_RUN = 'dry-run' def _setUpTestdir(self): self._tearDownLogFile() currentDir = os.getcwd() base = filepath.FilePath(self.workingDirectory) testdir, self._testDirLock = util._unusedTestDirectory(base) os.chdir(testdir.path) return currentDir def _tearDownTestdir(self, oldDir): os.chdir(oldDir) self._testDirLock.unlock() _log = log def _makeResult(self): reporter = self.reporterFactory(self.stream, self.tbformat, self.rterrors, self._log) if self.uncleanWarnings: reporter = UncleanWarningsReporterWrapper(reporter) return reporter def __init__(self, reporterFactory, mode=None, logfile='test.log', stream=sys.stdout, profile=False, tracebackFormat='default', realTimeErrors=False, uncleanWarnings=False, workingDirectory=None, forceGarbageCollection=False, debugger=None): self.reporterFactory = reporterFactory self.logfile = logfile self.mode = mode self.stream = stream self.tbformat = tracebackFormat self.rterrors = realTimeErrors self.uncleanWarnings = uncleanWarnings self._result = None self.workingDirectory = workingDirectory or '_trial_temp' self._logFileObserver = None self._logFileObject = None 
self._forceGarbageCollection = forceGarbageCollection self.debugger = debugger if profile: self.run = util.profiled(self.run, 'profile.data') def _tearDownLogFile(self): if self._logFileObserver is not None: log.removeObserver(self._logFileObserver.emit) self._logFileObserver = None if self._logFileObject is not None: self._logFileObject.close() self._logFileObject = None def _setUpLogFile(self): self._tearDownLogFile() if self.logfile == '-': logFile = sys.stdout else: logFile = file(self.logfile, 'a') self._logFileObject = logFile self._logFileObserver = log.FileLogObserver(logFile) log.startLoggingWithObserver(self._logFileObserver.emit, 0) def run(self, test): """ Run the test or suite and return a result object. """ test = unittest.decorate(test, ITestCase) return self._runWithoutDecoration(test, self._forceGarbageCollection) def _runWithoutDecoration(self, test, forceGarbageCollection=False): """ Private helper that runs the given test but doesn't decorate it. """ result = self._makeResult() # decorate the suite with reactor cleanup and log starting # This should move out of the runner and be presumed to be # present suite = TrialSuite([test], forceGarbageCollection) startTime = time.time() if self.mode == self.DRY_RUN: for single in unittest._iterateTests(suite): result.startTest(single) result.addSuccess(single) result.stopTest(single) else: if self.mode == self.DEBUG: run = lambda: self.debugger.runcall(suite.run, result) else: run = lambda: suite.run(result) oldDir = self._setUpTestdir() try: self._setUpLogFile() run() finally: self._tearDownLogFile() self._tearDownTestdir(oldDir) endTime = time.time() done = getattr(result, 'done', None) if done is None: warnings.warn( "%s should implement done() but doesn't. Falling back to " "printErrors() and friends." 
% reflect.qual(result.__class__), category=DeprecationWarning, stacklevel=3) result.printErrors() result.writeln(result.separator) result.writeln('Ran %d tests in %.3fs', result.testsRun, endTime - startTime) result.write('\n') result.printSummary() else: result.done() return result def runUntilFailure(self, test): """ Repeatedly run C{test} until it fails. """ count = 0 while True: count += 1 self.stream.write("Test Pass %d\n" % (count,)) if count == 1: result = self.run(test) else: result = self._runWithoutDecoration(test) if result.testsRun == 0: break if not result.wasSuccessful(): break return result
apache-2.0
akatsoulas/feedthefox
feedthefox/devices/models.py
2
2447
import os
import uuid

from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.encoding import python_2_unicode_compatible

from stdnum import imei

# Fallback device category used when none is supplied for a Device row.
DEFAULT_DEVICE_TYPE = 'smartphone'


def _get_upload_file_name(instance, filename):
    # Store uploads under USER_PHOTOS_DIR with a random UUID name so that
    # user-supplied filenames never reach the filesystem.
    # NOTE(review): the extension is always forced to '.jpg' regardless of the
    # uploaded file's real type -- presumably images are converted to JPEG
    # elsewhere; confirm before relying on it.
    return os.path.join(settings.USER_PHOTOS_DIR, str(uuid.uuid4()) + '.jpg')


def validate_imei(imei_number):
    # Blank or whitespace-only values pass validation (the model field is
    # declared blank=True); only non-empty values must be valid IMEI numbers
    # according to python-stdnum's checksum rules.
    if imei_number.strip() and not imei.is_valid(imei_number):
        raise ValidationError('Please enter a valid IMEI number.')
    return imei_number


@python_2_unicode_compatible
class Build(models.Model):
    """Model for FxOS builds."""
    name = models.CharField(max_length=120)
    date = models.DateField(auto_now_add=True)
    link = models.URLField(blank=True, default='')
    is_foxfooding = models.BooleanField(default=False)
    comment = models.TextField(blank=True, default='')

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Device(models.Model):
    """Model for FxOS devices."""
    model = models.CharField(max_length=120)
    manufacturer = models.CharField(max_length=120)
    image = models.ImageField(upload_to=_get_upload_file_name, blank=True,
                              null=True)
    comment = models.TextField(blank=True, default='')
    builds = models.ManyToManyField(Build, blank=True)
    # Users are linked through the DeviceInfo join table, which carries the
    # per-user data (IMEI, foxfooding flag) for each owned device.
    users = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='devices',
                                   through='DeviceInfo', blank=True)
    type = models.CharField(max_length=120, default=DEFAULT_DEVICE_TYPE)
    link = models.URLField(blank=True, default='')
    codename = models.CharField(max_length=120, blank=True, default='')

    def __str__(self):
        return u'{0} - {1}'.format(self.manufacturer, self.model)

    class Meta:
        ordering = ('manufacturer', 'model',)
        # A device is uniquely identified by its manufacturer/model pair.
        unique_together = ('manufacturer', 'model',)


@python_2_unicode_compatible
class DeviceInfo(models.Model):
    """Device Info model: per-user ownership record for a Device."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='devices_info')
    device = models.ForeignKey(Device)
    # IMEI is optional; when set, validate_imei checks it (max 17 chars).
    imei = models.CharField(max_length=17, blank=True, default='',
                            validators=[validate_imei])
    is_foxfooding = models.BooleanField(default=False)

    def __str__(self):
        return u'{0} {1}'.format(self.user, self.device)

    class Meta:
        verbose_name_plural = 'Devices Info'
mpl-2.0
SNAPPETITE/backend
flask/lib/python2.7/site-packages/coverage/report.py
27
2872
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Reporter foundation for coverage.py."""

import os

from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import CoverageException, NoSource, NotPython, isolate_module

# Rebind ``os`` to an isolated copy so that measured code monkey-patching the
# real ``os`` module cannot interfere with report generation.
os = isolate_module(os)


class Reporter(object):
    """A base class for all reporters."""

    def __init__(self, coverage, config):
        """Create a reporter.

        `coverage` is the coverage instance. `config` is an instance  of
        CoverageConfig, for controlling all sorts of behavior.

        """
        self.coverage = coverage
        self.config = config

        # The FileReporters to report on. Set by find_file_reporters.
        self.file_reporters = []

        # The directory into which to place the report, used by some derived
        # classes.
        self.directory = None

    def find_file_reporters(self, morfs):
        """Find the FileReporters we'll report on.

        `morfs` is a list of modules or file names.

        Populates ``self.file_reporters`` (sorted) after applying the
        ``include`` and ``omit`` filename patterns from the configuration.

        """
        reporters = self.coverage._get_file_reporters(morfs)

        # Apply ``include`` first: keep only files matching an include pattern.
        if self.config.include:
            matcher = FnmatchMatcher(prep_patterns(self.config.include))
            reporters = [fr for fr in reporters if matcher.match(fr.filename)]

        # Then drop any file matching an ``omit`` pattern.
        if self.config.omit:
            matcher = FnmatchMatcher(prep_patterns(self.config.omit))
            reporters = [fr for fr in reporters if not matcher.match(fr.filename)]

        self.file_reporters = sorted(reporters)

    def report_files(self, report_fn, morfs, directory=None):
        """Run a reporting function on a number of morfs.

        `report_fn` is called for each relative morf in `morfs`.  It is called
        as::

            report_fn(file_reporter, analysis)

        where `file_reporter` is the `FileReporter` for the morf, and
        `analysis` is the `Analysis` for the morf.

        Raises CoverageException when there is no data to report.

        """
        self.find_file_reporters(morfs)

        if not self.file_reporters:
            raise CoverageException("No data to report.")

        self.directory = directory
        # Create the output directory on demand so callers need not pre-create it.
        if self.directory and not os.path.exists(self.directory):
            os.makedirs(self.directory)

        for fr in self.file_reporters:
            try:
                report_fn(fr, self.coverage._analyze(fr))
            except NoSource:
                # Missing source is ignored when ignore_errors is set.
                if not self.config.ignore_errors:
                    raise
            except NotPython:
                # Only report errors for .py files, and only if we didn't
                # explicitly suppress those errors.
                # NotPython is only raised by PythonFileReporter, which has a
                # should_be_python() method.
                if fr.should_be_python() and not self.config.ignore_errors:
                    raise
mit
colinligertwood/odoo
addons/website_certification/__init__.py
385
1030
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import certification import controllers
agpl-3.0
rvbelefonte/Rockfish2
rockfish2/extensions/cps/model.py
1
3390
""" Tools for working with Computer Programs in Seismology velocity models """ import os import numpy as np import datetime import pandas as pd from scipy.interpolate import interp1d import matplotlib.pyplot as plt from rockfish2 import logging from rockfish2.models.profile import Profile class CPSModel1d(Profile): def __init__(self, *args, **kwargs): self.NAME = kwargs.pop('name', '1D model') self.UNITS = kwargs.pop('units', 'KGS') self.ISOTROPY = kwargs.pop('isotropy', 'ISOTROPIC') self.SHAPE = kwargs.pop('shape', 'FLAT EARTH') self.DIM = kwargs.pop('dim', '1-D') Profile.__init__(self, *args, **kwargs) def __str__(self): return self.write() def write(self, path_or_buf=None, float_format='%10.6f', **kwargs): """ Write profile to the Computer Programs in Seismology model format Parameters ---------- path_or_buf : string or file handle, default None File path or object, if None is provided the result is returned as a string. """ model = self.model.copy() col = ['hr'] + [k for k in model if k != 'hr'] model['hr'] = np.concatenate((np.diff(np.asarray(model.index)), [0.0])) model.index = np.arange(len(model)) #model = model[0:len(model) - 1] sng = "MODEL\n" sng += "{:}\n".format(self.NAME) sng += "{:}\n".format(self.ISOTROPY) sng += "{:}\n".format(self.UNITS) sng += "{:}\n".format(self.SHAPE) sng += "{:}\n".format(self.DIM) sng += "CONSTANT VELOCITY\n" sng += "#\n" sng += "Created by: {:}{:}\n"\ .format(self.__module__, self.__class__.__name__) sng += "Created on: {:}\n".format(datetime.datetime.now()) sng += "#\n" sng += model[col].to_csv(sep='\t', index=False, float_format=float_format, **kwargs) if path_or_buf is None: return sng if hasattr(path_or_buf, 'write'): path_or_buf.write(sng) else: f = open(path_or_buf, 'w') f.write(sng) def read(self, filename, sep='\t'): """ Write profile from the Computer Programs in Seismology model format """ f = open(filename, 'rb') kind = f.readline().replace('\n', '') assert kind.startswith('MODEL'),\ 'File does not appear to be 
CPS format' self.NAME = f.readline().replace('\n', '') self.ISOTROPY = f.readline().replace('\n', '') self.UNITS = f.readline().replace('\n', '') self.SHAPE = f.readline().replace('\n', '') self.DIM = f.readline().replace('\n', '') _ = f.readline().replace('\n', '') _ = f.readline().replace('\n', '') _ = f.readline().replace('\n', '') _ = f.readline().replace('\n', '') _ = f.readline().replace('\n', '') cols = f.readline().replace('\n', '').split() self.model = pd.read_csv(filename, sep=sep, skiprows=11, index_col=0) try: dz = self.model.index[:] z = np.cumsum(np.asarray(dz)) - dz[0] if z[-1] == 0: z[-1] = dz[-2] self.model.index = z self.model.index.name = 'depth' except: pass
gpl-2.0
drjova/zenodo
zenodo/modules/accessrequests/receivers.py
2
4872
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.

"""Signal receivers."""

from __future__ import absolute_import

from datetime import timedelta

from flask import render_template, url_for, current_app
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.ext.email import send_email
from invenio.modules.records.api import get_record

from .errors import RecordNotFound
from .signals import request_created, request_confirmed, request_accepted, \
    request_rejected
from .tokens import EmailConfirmationSerializer


def connect_receivers():
    """Connect receivers to signals."""
    request_created.connect(send_email_validation)
    request_confirmed.connect(send_confirmed_notifications)
    request_rejected.connect(send_reject_notification)
    # Order is important: the secret link must be created before the
    # acceptance notification (which embeds the link URL) is rendered.
    request_accepted.connect(create_secret_link)
    request_accepted.connect(send_accept_notification)


def create_secret_link(request, message=None, expires_at=None):
    """Receiver for request-accepted signal.

    Creates a secret access link on the request for the record it refers to.
    Raises RecordNotFound when the record cannot be retrieved.
    """
    record = get_record(request.recid)
    if not record:
        raise RecordNotFound(request.recid)

    description = render_template(
        "accessrequests/link_description.tpl",
        request=request,
        record=record,
        expires_at=expires_at,
        message=message,
    )

    request.create_secret_link(
        record["title"],
        description=description,
        expires_at=expires_at
    )


def send_accept_notification(request, message=None, expires_at=None):
    """Receiver for request-accepted signal to send email notification.

    Assumes create_secret_link has already run, so ``request.link`` exists.
    """
    _send_notification(
        request.sender_email,
        _("Access request accepted"),
        "accessrequests/emails/accepted.tpl",
        request=request,
        record=get_record(request.recid),
        record_link=request.link.get_absolute_url('record.metadata'),
        message=message,
        expires_at=expires_at,
    )


def send_confirmed_notifications(request):
    """Receiver for request-confirmed signal to send email notification.

    Notifies both the record owner (new request) and the sender
    (confirmation). Logs and returns early if the record is missing.
    """
    record = get_record(request.recid)
    if record is None:
        current_app.logger.error("Cannot retrieve record %s. Emails not sent"
                                 % request.recid)
        return

    title = _("Access request: %(record)s", record=record["title"])

    _send_notification(
        request.receiver.email,
        title,
        "accessrequests/emails/new_request.tpl",
        request=request,
        record=record,
    )

    _send_notification(
        request.sender_email,
        title,
        "accessrequests/emails/confirmation.tpl",
        request=request,
        record=record,
    )


def send_email_validation(request):
    """Receiver for request-created signal to send email notification.

    Sends the sender a time-limited confirmation link containing a signed
    token bound to the request id and sender e-mail address.
    """
    token = EmailConfirmationSerializer().create_token(
        request.id, dict(email=request.sender_email)
    )

    _send_notification(
        request.sender_email,
        _("Access request verification"),
        "accessrequests/emails/validate_email.tpl",
        request=request,
        record=get_record(request.recid),
        days=timedelta(
            seconds=cfg["ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN"]).days,
        confirm_link=url_for(
            "zenodo_accessrequests.confirm",
            recid=request.recid,
            token=token,
            _external=True,
            _scheme="https",
        )
    )


def send_reject_notification(request, message=None):
    """Receiver for request-rejected signal to send email notification."""
    _send_notification(
        request.sender_email,
        _("Access request rejected"),
        "accessrequests/emails/rejected.tpl",
        request=request,
        record=get_record(request.recid),
        message=message,
    )


def _send_notification(to, subject, template, **ctx):
    """Render a template and send as email.

    ``ctx`` is passed straight through to the template as its context.
    """
    send_email(
        cfg.get('CFG_SITE_SUPPORT_EMAIL'),
        to,
        subject,
        render_template(template, **ctx),
    )
gpl-3.0
40423127/2017springcd_hw
plugin/liquid_tags/youtube.py
284
1674
""" Youtube Tag --------- This implements a Liquid-style youtube tag for Pelican, based on the jekyll / octopress youtube tag [1]_ Syntax ------ {% youtube id [width height] %} Example ------- {% youtube dQw4w9WgXcQ 640 480 %} Output ------ <iframe width="640" height="480" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" webkitAllowFullScreen mozallowfullscreen allowFullScreen> </iframe> [1] https://gist.github.com/jamieowen/2063748 """ import re from .mdx_liquid_tags import LiquidTags SYNTAX = "{% youtube id [width height] %}" YOUTUBE = re.compile(r'([\S]+)(\s+(\d+)\s(\d+))?') @LiquidTags.register('youtube') def youtube(preprocessor, tag, markup): width = 640 height = 390 youtube_id = None match = YOUTUBE.search(markup) if match: groups = match.groups() youtube_id = groups[0] width = groups[2] or width height = groups[3] or height if youtube_id: youtube_out = """ <div class="videobox"> <iframe width="{width}" height="{height}" src='https://www.youtube.com/embed/{youtube_id}' frameborder='0' webkitAllowFullScreen mozallowfullscreen allowFullScreen> </iframe> </div> """.format(width=width, height=height, youtube_id=youtube_id).strip() else: raise ValueError("Error processing input, " "expected syntax: {0}".format(SYNTAX)) return youtube_out # --------------------------------------------------- # This import allows image tag to be a Pelican plugin from liquid_tags import register # noqa
agpl-3.0
anbangleo/NlsdeWeb
Python-3.6.0/Lib/test/list_tests.py
2
18761
""" Tests common to list and UserList.UserList """ import sys import os from functools import cmp_to_key from test import support, seq_tests class CommonTest(seq_tests.CommonTest): def test_init(self): # Iterable arg is optional self.assertEqual(self.type2test([]), self.type2test()) # Init clears previous values a = self.type2test([1, 2, 3]) a.__init__() self.assertEqual(a, self.type2test([])) # Init overwrites previous values a = self.type2test([1, 2, 3]) a.__init__([4, 5, 6]) self.assertEqual(a, self.type2test([4, 5, 6])) # Mutables always return a new object b = self.type2test(a) self.assertNotEqual(id(a), id(b)) self.assertEqual(a, b) def test_getitem_error(self): msg = "list indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): a = [] a['a'] = "python" def test_repr(self): l0 = [] l2 = [0, 1, 2] a0 = self.type2test(l0) a2 = self.type2test(l2) self.assertEqual(str(a0), str(l0)) self.assertEqual(repr(a0), repr(l0)) self.assertEqual(repr(a2), repr(l2)) self.assertEqual(str(a2), "[0, 1, 2]") self.assertEqual(repr(a2), "[0, 1, 2]") a2.append(a2) a2.append(3) self.assertEqual(str(a2), "[0, 1, 2, [...], 3]") self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]") l0 = [] for i in range(sys.getrecursionlimit() + 100): l0 = [l0] self.assertRaises(RecursionError, repr, l0) def test_print(self): d = self.type2test(range(200)) d.append(d) d.extend(range(200,400)) d.append(d) d.append(400) try: with open(support.TESTFN, "w") as fo: fo.write(str(d)) with open(support.TESTFN, "r") as fo: self.assertEqual(fo.read(), repr(d)) finally: os.remove(support.TESTFN) def test_set_subscript(self): a = self.type2test(range(20)) self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3]) self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1) self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2]) self.assertRaises(TypeError, a.__getitem__, 'x', 1) a[slice(2,10,3)] = [1,2,3] self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3, 
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])) def test_reversed(self): a = self.type2test(range(20)) r = reversed(a) self.assertEqual(list(r), self.type2test(range(19, -1, -1))) self.assertRaises(StopIteration, next, r) self.assertEqual(list(reversed(self.type2test())), self.type2test()) # Bug 3689: make sure list-reversed-iterator doesn't have __len__ self.assertRaises(TypeError, len, reversed([1,2,3])) def test_setitem(self): a = self.type2test([0, 1]) a[0] = 0 a[1] = 100 self.assertEqual(a, self.type2test([0, 100])) a[-1] = 200 self.assertEqual(a, self.type2test([0, 200])) a[-2] = 100 self.assertEqual(a, self.type2test([100, 200])) self.assertRaises(IndexError, a.__setitem__, -3, 200) self.assertRaises(IndexError, a.__setitem__, 2, 200) a = self.type2test([]) self.assertRaises(IndexError, a.__setitem__, 0, 200) self.assertRaises(IndexError, a.__setitem__, -1, 200) self.assertRaises(TypeError, a.__setitem__) a = self.type2test([0,1,2,3,4]) a[0] = 1 a[1] = 2 a[2] = 3 self.assertEqual(a, self.type2test([1,2,3,3,4])) a[0] = 5 a[1] = 6 a[2] = 7 self.assertEqual(a, self.type2test([5,6,7,3,4])) a[-2] = 88 a[-1] = 99 self.assertEqual(a, self.type2test([5,6,7,88,99])) a[-2] = 8 a[-1] = 9 self.assertEqual(a, self.type2test([5,6,7,8,9])) msg = "list indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): a['a'] = "python" def test_delitem(self): a = self.type2test([0, 1]) del a[1] self.assertEqual(a, [0]) del a[0] self.assertEqual(a, []) a = self.type2test([0, 1]) del a[-2] self.assertEqual(a, [1]) del a[-1] self.assertEqual(a, []) a = self.type2test([0, 1]) self.assertRaises(IndexError, a.__delitem__, -3) self.assertRaises(IndexError, a.__delitem__, 2) a = self.type2test([]) self.assertRaises(IndexError, a.__delitem__, 0) self.assertRaises(TypeError, a.__delitem__) def test_setslice(self): l = [0, 1] a = self.type2test(l) for i in range(-3, 4): a[:i] = l[:i] self.assertEqual(a, l) a2 = a[:] a2[:i] = a[:i] self.assertEqual(a2, a) a[i:] = l[i:] 
self.assertEqual(a, l) a2 = a[:] a2[i:] = a[i:] self.assertEqual(a2, a) for j in range(-3, 4): a[i:j] = l[i:j] self.assertEqual(a, l) a2 = a[:] a2[i:j] = a[i:j] self.assertEqual(a2, a) aa2 = a2[:] aa2[:0] = [-2, -1] self.assertEqual(aa2, [-2, -1, 0, 1]) aa2[0:] = [] self.assertEqual(aa2, []) a = self.type2test([1, 2, 3, 4, 5]) a[:-1] = a self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5])) a = self.type2test([1, 2, 3, 4, 5]) a[1:] = a self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5])) a = self.type2test([1, 2, 3, 4, 5]) a[1:-1] = a self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5])) a = self.type2test([]) a[:] = tuple(range(10)) self.assertEqual(a, self.type2test(range(10))) self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5)) self.assertRaises(TypeError, a.__setitem__) def test_delslice(self): a = self.type2test([0, 1]) del a[1:2] del a[0:1] self.assertEqual(a, self.type2test([])) a = self.type2test([0, 1]) del a[1:2] del a[0:1] self.assertEqual(a, self.type2test([])) a = self.type2test([0, 1]) del a[-2:-1] self.assertEqual(a, self.type2test([1])) a = self.type2test([0, 1]) del a[-2:-1] self.assertEqual(a, self.type2test([1])) a = self.type2test([0, 1]) del a[1:] del a[:1] self.assertEqual(a, self.type2test([])) a = self.type2test([0, 1]) del a[1:] del a[:1] self.assertEqual(a, self.type2test([])) a = self.type2test([0, 1]) del a[-1:] self.assertEqual(a, self.type2test([0])) a = self.type2test([0, 1]) del a[-1:] self.assertEqual(a, self.type2test([0])) a = self.type2test([0, 1]) del a[:] self.assertEqual(a, self.type2test([])) def test_append(self): a = self.type2test([]) a.append(0) a.append(1) a.append(2) self.assertEqual(a, self.type2test([0, 1, 2])) self.assertRaises(TypeError, a.append) def test_extend(self): a1 = self.type2test([0]) a2 = self.type2test((0, 1)) a = a1[:] a.extend(a2) self.assertEqual(a, a1 + a2) a.extend(self.type2test([])) self.assertEqual(a, a1 + a2) a.extend(a) self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1])) a 
= self.type2test("spam") a.extend("eggs") self.assertEqual(a, list("spameggs")) self.assertRaises(TypeError, a.extend, None) self.assertRaises(TypeError, a.extend) # overflow test. issue1621 class CustomIter: def __iter__(self): return self def __next__(self): raise StopIteration def __length_hint__(self): return sys.maxsize a = self.type2test([1,2,3,4]) a.extend(CustomIter()) self.assertEqual(a, [1,2,3,4]) def test_insert(self): a = self.type2test([0, 1, 2]) a.insert(0, -2) a.insert(1, -1) a.insert(2, 0) self.assertEqual(a, [-2, -1, 0, 0, 1, 2]) b = a[:] b.insert(-2, "foo") b.insert(-200, "left") b.insert(200, "right") self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"])) self.assertRaises(TypeError, a.insert) def test_pop(self): a = self.type2test([-1, 0, 1]) a.pop() self.assertEqual(a, [-1, 0]) a.pop(0) self.assertEqual(a, [0]) self.assertRaises(IndexError, a.pop, 5) a.pop(0) self.assertEqual(a, []) self.assertRaises(IndexError, a.pop) self.assertRaises(TypeError, a.pop, 42, 42) a = self.type2test([0, 10, 20, 30, 40]) def test_remove(self): a = self.type2test([0, 0, 1]) a.remove(1) self.assertEqual(a, [0, 0]) a.remove(0) self.assertEqual(a, [0]) a.remove(0) self.assertEqual(a, []) self.assertRaises(ValueError, a.remove, 0) self.assertRaises(TypeError, a.remove) class BadExc(Exception): pass class BadCmp: def __eq__(self, other): if other == 2: raise BadExc() return False a = self.type2test([0, 1, 2, 3]) self.assertRaises(BadExc, a.remove, BadCmp()) class BadCmp2: def __eq__(self, other): raise BadExc() d = self.type2test('abcdefghcij') d.remove('c') self.assertEqual(d, self.type2test('abdefghcij')) d.remove('c') self.assertEqual(d, self.type2test('abdefghij')) self.assertRaises(ValueError, d.remove, 'c') self.assertEqual(d, self.type2test('abdefghij')) # Handle comparison errors d = self.type2test(['a', 'b', BadCmp2(), 'c']) e = self.type2test(d) self.assertRaises(BadExc, d.remove, 'c') for x, y in zip(d, e): # verify that original order and 
values are retained. self.assertIs(x, y) def test_count(self): a = self.type2test([0, 1, 2])*3 self.assertEqual(a.count(0), 3) self.assertEqual(a.count(1), 3) self.assertEqual(a.count(3), 0) self.assertRaises(TypeError, a.count) class BadExc(Exception): pass class BadCmp: def __eq__(self, other): if other == 2: raise BadExc() return False self.assertRaises(BadExc, a.count, BadCmp()) def test_index(self): u = self.type2test([0, 1]) self.assertEqual(u.index(0), 0) self.assertEqual(u.index(1), 1) self.assertRaises(ValueError, u.index, 2) u = self.type2test([-2, -1, 0, 0, 1, 2]) self.assertEqual(u.count(0), 2) self.assertEqual(u.index(0), 2) self.assertEqual(u.index(0, 2), 2) self.assertEqual(u.index(-2, -10), 0) self.assertEqual(u.index(0, 3), 3) self.assertEqual(u.index(0, 3, 4), 3) self.assertRaises(ValueError, u.index, 2, 0, -10) self.assertRaises(TypeError, u.index) class BadExc(Exception): pass class BadCmp: def __eq__(self, other): if other == 2: raise BadExc() return False a = self.type2test([0, 1, 2, 3]) self.assertRaises(BadExc, a.index, BadCmp()) a = self.type2test([-2, -1, 0, 0, 1, 2]) self.assertEqual(a.index(0), 2) self.assertEqual(a.index(0, 2), 2) self.assertEqual(a.index(0, -4), 2) self.assertEqual(a.index(-2, -10), 0) self.assertEqual(a.index(0, 3), 3) self.assertEqual(a.index(0, -3), 3) self.assertEqual(a.index(0, 3, 4), 3) self.assertEqual(a.index(0, -3, -2), 3) self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2) self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize) self.assertRaises(ValueError, a.index, 2, 0, -10) a.remove(0) self.assertRaises(ValueError, a.index, 2, 0, 4) self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2])) # Test modifying the list during index's iteration class EvilCmp: def __init__(self, victim): self.victim = victim def __eq__(self, other): del self.victim[:] return False a = self.type2test() a[:] = [EvilCmp(a) for _ in range(100)] # This used to seg fault before patch #1005778 
self.assertRaises(ValueError, a.index, None) def test_reverse(self): u = self.type2test([-2, -1, 0, 1, 2]) u2 = u[:] u.reverse() self.assertEqual(u, [2, 1, 0, -1, -2]) u.reverse() self.assertEqual(u, u2) self.assertRaises(TypeError, u.reverse, 42) def test_clear(self): u = self.type2test([2, 3, 4]) u.clear() self.assertEqual(u, []) u = self.type2test([]) u.clear() self.assertEqual(u, []) u = self.type2test([]) u.append(1) u.clear() u.append(2) self.assertEqual(u, [2]) self.assertRaises(TypeError, u.clear, None) def test_copy(self): u = self.type2test([1, 2, 3]) v = u.copy() self.assertEqual(v, [1, 2, 3]) u = self.type2test([]) v = u.copy() self.assertEqual(v, []) # test that it's indeed a copy and not a reference u = self.type2test(['a', 'b']) v = u.copy() v.append('i') self.assertEqual(u, ['a', 'b']) self.assertEqual(v, u + ['i']) # test that it's a shallow, not a deep copy u = self.type2test([1, 2, [3, 4], 5]) v = u.copy() self.assertEqual(u, v) self.assertIs(v[3], u[3]) self.assertRaises(TypeError, u.copy, None) def test_sort(self): u = self.type2test([1, 0]) u.sort() self.assertEqual(u, [0, 1]) u = self.type2test([2,1,0,-1,-2]) u.sort() self.assertEqual(u, self.type2test([-2,-1,0,1,2])) self.assertRaises(TypeError, u.sort, 42, 42) def revcmp(a, b): if a == b: return 0 elif a < b: return 1 else: # a > b return -1 u.sort(key=cmp_to_key(revcmp)) self.assertEqual(u, self.type2test([2,1,0,-1,-2])) # The following dumps core in unpatched Python 1.5: def myComparison(x,y): xmod, ymod = x%3, y%7 if xmod == ymod: return 0 elif xmod < ymod: return -1 else: # xmod > ymod return 1 z = self.type2test(range(12)) z.sort(key=cmp_to_key(myComparison)) self.assertRaises(TypeError, z.sort, 2) def selfmodifyingComparison(x,y): z.append(1) if x == y: return 0 elif x < y: return -1 else: # x > y return 1 self.assertRaises(ValueError, z.sort, key=cmp_to_key(selfmodifyingComparison)) self.assertRaises(TypeError, z.sort, 42, 42, 42, 42) def test_slice(self): u = self.type2test("spam") 
u[:2] = "h" self.assertEqual(u, list("ham")) def test_iadd(self): super().test_iadd() u = self.type2test([0, 1]) u2 = u u += [2, 3] self.assertIs(u, u2) u = self.type2test("spam") u += "eggs" self.assertEqual(u, self.type2test("spameggs")) self.assertRaises(TypeError, u.__iadd__, None) def test_imul(self): u = self.type2test([0, 1]) u *= 3 self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1])) u *= 0 self.assertEqual(u, self.type2test([])) s = self.type2test([]) oldid = id(s) s *= 10 self.assertEqual(id(s), oldid) def test_extendedslicing(self): # subscript a = self.type2test([0,1,2,3,4]) # deletion del a[::2] self.assertEqual(a, self.type2test([1,3])) a = self.type2test(range(5)) del a[1::2] self.assertEqual(a, self.type2test([0,2,4])) a = self.type2test(range(5)) del a[1::-2] self.assertEqual(a, self.type2test([0,2,3,4])) a = self.type2test(range(10)) del a[::1000] self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9])) # assignment a = self.type2test(range(10)) a[::2] = [-1]*5 self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9])) a = self.type2test(range(10)) a[::-4] = [10]*3 self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10])) a = self.type2test(range(4)) a[::-1] = a self.assertEqual(a, self.type2test([3, 2, 1, 0])) a = self.type2test(range(10)) b = a[:] c = a[:] a[2:3] = self.type2test(["two", "elements"]) b[slice(2,3)] = self.type2test(["two", "elements"]) c[2:3:] = self.type2test(["two", "elements"]) self.assertEqual(a, b) self.assertEqual(a, c) a = self.type2test(range(10)) a[::2] = tuple(range(5)) self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9])) # test issue7788 a = self.type2test(range(10)) del a[9::1<<333] def test_constructor_exception_handling(self): # Bug #1242657 class F(object): def __iter__(self): raise KeyboardInterrupt self.assertRaises(KeyboardInterrupt, list, F()) def test_exhausted_iterator(self): a = self.type2test([1, 2, 3]) exhit = iter(a) empit = iter(a) for x in exhit: # 
exhaust the iterator next(empit) # not exhausted a.append(9) self.assertEqual(list(exhit), []) self.assertEqual(list(empit), [9]) self.assertEqual(a, self.type2test([1, 2, 3, 9]))
mit
Metronote/metronotesd-alpha
lib/api.py
1
26200
#! /usr/bin/python3 import sys import os import threading import decimal import time import json import re import requests import collections import logging from logging import handlers as logging_handlers D = decimal.Decimal import apsw import flask from flask.ext.httpauth import HTTPBasicAuth from tornado.wsgi import WSGIContainer from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop import jsonrpc from jsonrpc import dispatcher from . import (config, bitcoin, exceptions, util) from . import (send, order, btcpay, issuance, broadcast, bet, dividend, burn, cancel, callback, rps, rpsresolve, publish) API_TABLES = ['balances', 'credits', 'debits', 'bets', 'bet_matches', 'broadcasts', 'btcpays', 'burns', 'callbacks', 'cancels', 'dividends', 'issuances', 'orders', 'order_matches', 'sends', 'bet_expirations', 'order_expirations', 'bet_match_expirations', 'order_match_expirations', 'bet_match_resolutions', 'rps', 'rpsresolves', 'rps_matches', 'rps_expirations', 'rps_match_expirations', 'mempool'] API_TRANSACTIONS = ['bet', 'broadcast', 'btcpay', 'burn', 'cancel', 'callback', 'dividend', 'issuance', 'order', 'send', 'rps', 'rpsresolve', 'publish'] COMMONS_ARGS = ['encoding', 'fee_per_kb', 'regular_dust_size', 'multisig_dust_size', 'op_return_value', 'pubkey', 'allow_unconfirmed_inputs', 'fee', 'fee_provided'] API_MAX_LOG_SIZE = 10 * 1024 * 1024 #max log size of 20 MB before rotation (make configurable later) API_MAX_LOG_COUNT = 10 current_api_status_code = None #is updated by the APIStatusPoller current_api_status_response_json = None #is updated by the APIStatusPoller # TODO: ALL queries EVERYWHERE should be done with these methods def db_query(db, statement, bindings=(), callback=None, **callback_args): cursor = db.cursor() if hasattr(callback, '__call__'): cursor.execute(statement, bindings) for row in cursor: callback(row, **callback_args) results = None else: results = list(cursor.execute(statement, bindings)) cursor.close() return results def 
get_rows(db, table, filters=[], filterop='AND', order_by=None, order_dir=None, start_block=None, end_block=None, status=None, limit=1000, offset=0, show_expired=True): """Filters results based on a filter data structure (as used by the API)""" def value_to_marker(value): # if value is an array place holder is (?,?,?,..) if isinstance(value, list): return '''({})'''.format(','.join(['?' for e in range(0,len(value))])) else: return '''?''' # TODO: Document that op can be anything that SQLite3 accepts. if not table or table.lower() not in API_TABLES: raise Exception('Unknown table') if filterop and filterop.upper() not in ['OR', 'AND']: raise Exception('Invalid filter operator (OR, AND)') if order_dir and order_dir.upper() not in ['ASC', 'DESC']: raise Exception('Invalid order direction (ASC, DESC)') if not isinstance(limit, int): raise Exception('Invalid limit') elif limit > 1000: raise Exception('Limit should be lower or equal to 1000') if not isinstance(offset, int): raise Exception('Invalid offset') # TODO: accept an object: {'field1':'ASC', 'field2': 'DESC'} if order_by and not re.compile('^[a-z0-9_]+$').match(order_by): raise Exception('Invalid order_by, must be a field name') if isinstance(filters, dict): #single filter entry, convert to a one entry list filters = [filters,] elif not isinstance(filters, list): filters = [] # TODO: Document this! (Each filter can be an ordered list.) 
new_filters = [] for filter_ in filters: if type(filter_) in (list, tuple) and len(filter_) in [3, 4]: new_filter = {'field': filter_[0], 'op': filter_[1], 'value': filter_[2]} if len(filter_) == 4: new_filter['case_sensitive'] = filter_[3] new_filters.append(new_filter) elif type(filter_) == dict: new_filters.append(filter_) else: raise Exception('Unknown filter type') filters = new_filters # validate filter(s) for filter_ in filters: for field in ['field', 'op', 'value']: #should have all fields if field not in filter_: raise Exception("A specified filter is missing the '%s' field" % field) if not isinstance(filter_['value'], (str, int, float, list)): raise Exception("Invalid value for the field '%s'" % filter_['field']) if isinstance(filter_['value'], list) and filter_['op'].upper() not in ['IN', 'NOT IN']: raise Exception("Invalid value for the field '%s'" % filter_['field']) if filter_['op'].upper() not in ['=', '==', '!=', '>', '<', '>=', '<=', 'IN', 'LIKE', 'NOT IN', 'NOT LIKE']: raise Exception("Invalid operator for the field '%s'" % filter_['field']) if 'case_sensitive' in filter_ and not isinstance(filter_['case_sensitive'], bool): raise Exception("case_sensitive must be a boolean") # SELECT statement = '''SELECT * FROM {}'''.format(table) # WHERE bindings = [] conditions = [] for filter_ in filters: case_sensitive = False if 'case_sensitive' not in filter_ else filter_['case_sensitive'] if filter_['op'] == 'LIKE' and case_sensitive == False: filter_['field'] = '''UPPER({})'''.format(filter_['field']) filter_['value'] = filter_['value'].upper() marker = value_to_marker(filter_['value']) conditions.append('''{} {} {}'''.format(filter_['field'], filter_['op'], marker)) if isinstance(filter_['value'], list): bindings += filter_['value'] else: bindings.append(filter_['value']) # AND filters more_conditions = [] if table not in ['balances', 'order_matches', 'bet_matches']: if start_block != None: more_conditions.append('''block_index >= ?''') 
bindings.append(start_block) if end_block != None: more_conditions.append('''block_index <= ?''') bindings.append(end_block) elif table in ['order_matches', 'bet_matches']: if start_block != None: more_conditions.append('''tx0_block_index >= ?''') bindings.append(start_block) if end_block != None: more_conditions.append('''tx1_block_index <= ?''') bindings.append(end_block) # status if isinstance(status, list) and len(status) > 0: more_conditions.append('''status IN {}'''.format(value_to_marker(status))) bindings += status elif isinstance(status, str) and status != '': more_conditions.append('''status == ?''') bindings.append(status) # legacy filters if not show_expired and table == 'orders': #Ignore BTC orders one block early. expire_index = util.last_block(db)['block_index'] + 1 more_conditions.append('''((give_asset == ? AND expire_index > ?) OR give_asset != ?)''') bindings += [config.BTC, expire_index, config.BTC] if (len(conditions) + len(more_conditions)) > 0: statement += ''' WHERE''' all_conditions = [] if len(conditions) > 0: all_conditions.append('''({})'''.format(''' {} '''.format(filterop.upper()).join(conditions))) if len(more_conditions) > 0: all_conditions.append('''({})'''.format(''' AND '''.join(more_conditions))) statement += ''' {}'''.format(''' AND '''.join(all_conditions)) # ORDER BY if order_by != None: statement += ''' ORDER BY {}'''.format(order_by) if order_dir != None: statement += ''' {}'''.format(order_dir.upper()) # LIMIT if limit: statement += ''' LIMIT {}'''.format(limit) if offset: statement += ''' OFFSET {}'''.format(offset) return db_query(db, statement, tuple(bindings)) def compose_transaction(db, name, params, encoding='auto', fee_per_kb=config.DEFAULT_FEE_PER_KB, regular_dust_size=config.DEFAULT_REGULAR_DUST_SIZE, multisig_dust_size=config.DEFAULT_MULTISIG_DUST_SIZE, op_return_value=config.DEFAULT_OP_RETURN_VALUE, pubkey=None, allow_unconfirmed_inputs=False, fee=None, fee_provided=0): tx_info = 
sys.modules['lib.{}'.format(name)].compose(db, **params) return bitcoin.transaction(tx_info, encoding=encoding, fee_per_kb=fee_per_kb, regular_dust_size=regular_dust_size, multisig_dust_size=multisig_dust_size, op_return_value=op_return_value, public_key_hex=pubkey, allow_unconfirmed_inputs=allow_unconfirmed_inputs, exact_fee=fee, fee_provided=fee_provided) def sign_transaction(unsigned_tx_hex, private_key_wif=None): return bitcoin.sign_tx(unsigned_tx_hex, private_key_wif=private_key_wif) def broadcast_transaction(signed_tx_hex): if not config.TESTNET and config.BROADCAST_TX_MAINNET in ['bci', 'bci-failover']: url = "https://blockchain.info/pushtx" params = {'tx': signed_tx_hex} response = requests.post(url, data=params) if response.text.lower() != 'transaction submitted' or response.status_code != 200: if config.BROADCAST_TX_MAINNET == 'bci-failover': return bitcoin.broadcast_tx(signed_tx_hex) else: raise Exception(response.text) return response.text else: return bitcoin.broadcast_tx(signed_tx_hex) def do_transaction(db, name, params, private_key_wif=None, **kwargs): unsigned_tx = compose_transaction(db, name, params, **kwargs) signed_tx = sign_transaction(unsigned_tx, private_key_wif=private_key_wif) return broadcast_transaction(signed_tx) class APIStatusPoller(threading.Thread): """Poll every few seconds for the length of time since the last version check, as well as the bitcoin status""" def __init__(self): self.last_version_check = 0 self.last_database_check = 0 threading.Thread.__init__(self) def run(self): global current_api_status_code, current_api_status_response_json db = util.connect_to_db(flags='SQLITE_OPEN_READONLY') while True: try: # Check version. if time.time() - self.last_version_check >= 10: # Four hours since last check. code = 10 util.version_check(db) self.last_version_check = time.time() # Check that bitcoind is running, communicable, and caught up with the blockchain. # Check that the database has caught up with bitcoind. 
if time.time() - self.last_database_check > 10 * 60: # Ten minutes since last check. code = 11 bitcoin.bitcoind_check(db) code = 12 util.database_check(db, bitcoin.get_block_count()) # TODO: If not reparse or rollback, once those use API. self.last_database_check = time.time() except Exception as e: exception_name = e.__class__.__name__ exception_text = str(e) jsonrpc_response = jsonrpc.exceptions.JSONRPCServerError(message=exception_name, data=exception_text) current_api_status_code = code current_api_status_response_json = jsonrpc_response.json.encode() else: current_api_status_code = None current_api_status_response_json = None time.sleep(2) class APIServer(threading.Thread): def __init__(self): threading.Thread.__init__(self) def run(self): db = util.connect_to_db(flags='SQLITE_OPEN_READONLY') app = flask.Flask(__name__) auth = HTTPBasicAuth() @auth.get_password def get_pw(username): if username == config.RPC_USER: return config.RPC_PASSWORD return None ###################### #READ API # Generate dynamically get_{table} methods def generate_get_method(table): def get_method(**kwargs): return get_rows(db, table=table, **kwargs) return get_method for table in API_TABLES: new_method = generate_get_method(table) new_method.__name__ = 'get_{}'.format(table) dispatcher.add_method(new_method) @dispatcher.add_method def sql(query, bindings=[]): return db_query(db, query, tuple(bindings)) ###################### #WRITE/ACTION API # Generate dynamically create_{transaction} and do_{transaction} methods def generate_create_method(transaction): def split_params(**kwargs): transaction_args = {} common_args = {} private_key_wif = None for key in kwargs: if key in COMMONS_ARGS: common_args[key] = kwargs[key] elif key == 'privkey': private_key_wif = kwargs[key] else: transaction_args[key] = kwargs[key] return transaction_args, common_args, private_key_wif def create_method(**kwargs): transaction_args, common_args, private_key_wif = split_params(**kwargs) return 
compose_transaction(db, name=transaction, params=transaction_args, **common_args) def do_method(**kwargs): transaction_args, common_args, private_key_wif = split_params(**kwargs) return do_transaction(db, name=transaction, params=transaction_args, private_key_wif=private_key_wif, **common_args) return create_method, do_method for transaction in API_TRANSACTIONS: create_method, do_method = generate_create_method(transaction) create_method.__name__ = 'create_{}'.format(transaction) do_method.__name__ = 'do_{}'.format(transaction) dispatcher.add_method(create_method) dispatcher.add_method(do_method) @dispatcher.add_method def sign_tx(unsigned_tx_hex, privkey=None): return sign_transaction(unsigned_tx_hex, private_key_wif=privkey) @dispatcher.add_method def broadcast_tx(signed_tx_hex): return broadcast_transaction(signed_tx_hex) @dispatcher.add_method def get_messages(block_index): if not isinstance(block_index, int): raise Exception("block_index must be an integer.") cursor = db.cursor() cursor.execute('select * from messages where block_index = ? order by message_index asc', (block_index,)) messages = cursor.fetchall() cursor.close() return messages @dispatcher.add_method def get_messages_by_index(message_indexes): """Get specific messages from the feed, based on the message_index. @param message_index: A single index, or a list of one or more message indexes to retrieve. 
""" if not isinstance(message_indexes, list): message_indexes = [message_indexes,] for idx in message_indexes: #make sure the data is clean if not isinstance(idx, int): raise Exception("All items in message_indexes are not integers") cursor = db.cursor() cursor.execute('SELECT * FROM messages WHERE message_index IN (%s) ORDER BY message_index ASC' % (','.join([str(x) for x in message_indexes]),)) messages = cursor.fetchall() cursor.close() return messages @dispatcher.add_method def get_xmn_supply(): return util.xmn_supply(db) @dispatcher.add_method def get_asset_info(assets): if not isinstance(assets, list): raise Exception("assets must be a list of asset names, even if it just contains one entry") assetsInfo = [] for asset in assets: # BTC and XMN. if asset in [config.BTC, config.XMN]: if asset == config.BTC: supply = bitcoin.get_btc_supply(normalize=False) else: supply = util.xmn_supply(db) assetsInfo.append({ 'asset': asset, 'owner': None, 'divisible': True, 'locked': False, 'supply': supply, 'callable': False, 'call_date': None, 'call_price': None, 'description': '', 'issuer': None }) continue # User‐created asset. cursor = db.cursor() issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) 
ORDER BY block_index ASC''', ('valid', asset))) cursor.close() if not issuances: break #asset not found, most likely else: last_issuance = issuances[-1] supply = 0 locked = False for e in issuances: if e['locked']: locked = True supply += e['quantity'] assetsInfo.append({ 'asset': asset, 'owner': last_issuance['issuer'], 'divisible': bool(last_issuance['divisible']), 'locked': locked, 'supply': supply, 'callable': bool(last_issuance['callable']), 'call_date': last_issuance['call_date'], 'call_price': last_issuance['call_price'], 'description': last_issuance['description'], 'issuer': last_issuance['issuer']}) return assetsInfo @dispatcher.add_method def get_block_info(block_index): assert isinstance(block_index, int) cursor = db.cursor() cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (block_index,)) try: blocks = list(cursor) assert len(blocks) == 1 block = blocks[0] except IndexError: raise exceptions.DatabaseError('No blocks found.') cursor.close() return block @dispatcher.add_method def get_blocks(block_indexes): """fetches block info and messages for the specified block indexes""" if not isinstance(block_indexes, (list, tuple)): raise Exception("block_indexes must be a list of integers.") if len(block_indexes) >= 250: raise Exception("can only specify up to 250 indexes at a time.") block_indexes_str = ','.join([str(x) for x in block_indexes]) cursor = db.cursor() cursor.execute('SELECT * FROM blocks WHERE block_index IN (%s) ORDER BY block_index ASC' % (block_indexes_str,)) blocks = cursor.fetchall() cursor.execute('SELECT * FROM messages WHERE block_index IN (%s) ORDER BY block_index ASC, message_index ASC' % (block_indexes_str,)) messages = collections.deque(cursor.fetchall()) for block in blocks: messages_in_block = [] block['_messages'] = [] while len(messages) and messages[0]['block_index'] == block['block_index']: block['_messages'].append(messages.popleft()) assert not len(messages) #should have been cleared out cursor.close() return 
blocks @dispatcher.add_method def get_running_info(): latestBlockIndex = bitcoin.get_block_count() try: util.database_check(db, latestBlockIndex) except exceptions.DatabaseError as e: caught_up = False else: caught_up = True try: last_block = util.last_block(db) except: last_block = {'block_index': None, 'block_hash': None, 'block_time': None} try: last_message = util.last_message(db) except: last_message = None return { 'db_caught_up': caught_up, 'bitcoin_block_count': latestBlockIndex, 'last_block': last_block, 'last_message_index': last_message['message_index'] if last_message else -1, 'running_testnet': config.TESTNET, 'running_testcoin': config.TESTCOIN, 'version_major': config.VERSION_MAJOR, 'version_minor': config.VERSION_MINOR, 'version_revision': config.VERSION_REVISION } @dispatcher.add_method def get_element_counts(): counts = {} cursor = db.cursor() for element in ['transactions', 'blocks', 'debits', 'credits', 'balances', 'sends', 'orders', 'order_matches', 'btcpays', 'issuances', 'broadcasts', 'bets', 'bet_matches', 'dividends', 'burns', 'cancels', 'callbacks', 'order_expirations', 'bet_expirations', 'order_match_expirations', 'bet_match_expirations', 'messages']: cursor.execute("SELECT COUNT(*) AS count FROM %s" % element) count_list = cursor.fetchall() assert len(count_list) == 1 counts[element] = count_list[0]['count'] cursor.close() return counts @dispatcher.add_method def get_asset_names(): cursor = db.cursor() names = [row['asset'] for row in cursor.execute("SELECT DISTINCT asset FROM issuances WHERE status = 'valid' ORDER BY asset ASC")] cursor.close() return names def _set_cors_headers(response): if config.RPC_ALLOW_CORS: response.headers['Access-Control-Allow-Origin'] = '*' response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS' response.headers['Access-Control-Allow-Headers'] = 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'; @app.route('/', methods=["OPTIONS",]) 
@app.route('/api/', methods=["OPTIONS",]) def handle_options(): response = flask.Response('', 204) _set_cors_headers(response) return response @app.route('/', methods=["POST",]) @app.route('/api/', methods=["POST",]) @auth.login_required def handle_post(): try: request_json = flask.request.get_data().decode('utf-8') request_data = json.loads(request_json) assert 'id' in request_data and request_data['jsonrpc'] == "2.0" and request_data['method'] # params may be omitted except: obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format") return flask.Response(obj_error.json.encode(), 200, mimetype='application/json') #only arguments passed as a dict are supported if request_data.get('params', None) and not isinstance(request_data['params'], dict): obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest( data='Arguments must be passed as a JSON object (list of unnamed arguments not supported)') return flask.Response(obj_error.json.encode(), 200, mimetype='application/json') #return an error if API fails checks if not config.FORCE and current_api_status_code: return flask.Response(current_api_status_response_json, 200, mimetype='application/json') jsonrpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher) response = flask.Response(jsonrpc_response.json.encode(), 200, mimetype='application/json') _set_cors_headers(response) return response if not config.UNITTEST: #skip setting up logs when for the test suite api_logger = logging.getLogger("tornado") h = logging_handlers.RotatingFileHandler(os.path.join(config.DATA_DIR, "api.access.log"), 'a', API_MAX_LOG_SIZE, API_MAX_LOG_COUNT) api_logger.setLevel(logging.INFO) api_logger.addHandler(h) api_logger.propagate = False http_server = HTTPServer(WSGIContainer(app), xheaders=True) try: http_server.listen(config.RPC_PORT, address=config.RPC_HOST) IOLoop.instance().start() except OSError: raise Exception("Cannot start the API subsystem. 
Is {} already running, or is something else listening on port {}?".format(config.XMN_CLIENT, config.RPC_PORT)) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
mit
PawarPawan/h2o-v3
py2/testdir_rapids/test_xl_seq.py
21
5328
import unittest, random, sys, time sys.path.extend(['.','..','../..','py']) import h2o2 as h2o import h2o_cmd, h2o_import as h2i, h2o_xl from h2o_xl import DF, Xbase, Key, KeyIndexed, Assign, Fcn from h2o_test import dump_json, verboseprint def checkAst(expected): ast = h2o_xl.Xbase.lastExecResult['ast'] assert ast==expected, "Actual: %s Expected: %s" % (ast, expected) print "----------------------------------------------------------------\n" print "Going to see if different xl coding styles yield same ast strings" class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): global SEED SEED = h2o.setup_random_seed() h2o.init() @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_xl_seq_A(self): # uses h2o_xl to do magic with Rapids # does this DFInit to rows=0 now? a = DF('a1') # knon_* key b = DF('b1') c = DF('c1') print "lastExecResult:", dump_json(h2o_xl.Xbase.lastExecResult) # look at our secret stash in the base class. Should see the DFInit? # DF does a kv store init. Key doesn't # DF inherits from Key. 
KeyIndexed inherits from Key assert isinstance(a, DF) assert isinstance(a, Key) assert isinstance(a, Xbase) assert not isinstance(a, KeyIndexed) assert not isinstance(a, Fcn) assert not isinstance(a, Assign) assert isinstance(a, Key) assert isinstance(b, Key) assert isinstance(c, Key) Assign(a, 0) checkAst("(= !a1 #0)") Assign(b, 0) checkAst("(= !b1 #0)") Assign(c, 0) checkAst("(= !c1 #0)") Assign(a, [0]) checkAst("(= !a1 (c {#0}))") Assign(b, [0,1]) checkAst("(= !b1 (c {#0;#1}))") Assign(c, [0,1,2]) checkAst("(= !c1 (c {#0;#1;#2}))") Assign(a, (0,)) # make sure it's a tuple with comma checkAst("(= !a1 (c {#0}))") Assign(b, (0,1)) checkAst("(= !b1 (c {#0;#1}))") Assign(c, (0,1,2)) checkAst("(= !c1 (c {#0;#1;#2}))") Assign(c, a[0] + b[1]) checkAst("(= !c1 (+ ([ %a1 #0 #0) ([ %b1 #1 #0)))") Assign(c[0], (a[0] + b[1])) checkAst("(= ([ %c1 #0 #0) (+ ([ %a1 #0 #0) ([ %b1 #1 #0)))") # print "\nDoes the keyWriteHistoryList work?" for k in Xbase.keyWriteHistoryList: print k h2o.check_sandbox_for_errors() def test_xl_seq_B(self): a = DF('a1') b = DF('b1') c = DF('c1') assert isinstance(a, Key) assert isinstance(b, Key) assert isinstance(c, Key) a <<= 0 checkAst("(= !a1 #0)") b <<= 0 checkAst("(= !b1 #0)") c <<= 0 checkAst("(= !c1 #0)") a <<= [0] checkAst("(= !a1 (c {#0}))") b <<= [0,1] checkAst("(= !b1 (c {#0;#1}))") c <<= [0,1,2] checkAst("(= !c1 (c {#0;#1;#2}))") a <<= (0,) # make sure it's a tuple with comma checkAst("(= !a1 (c {#0}))") b <<= (0,1) checkAst("(= !b1 (c {#0;#1}))") c <<= (0,1,2) checkAst("(= !c1 (c {#0;#1;#2}))") c <<= a[0] + b[1] checkAst("(= !c1 (+ ([ %a1 #0 #0) ([ %b1 #1 #0)))") c[0] <<= a[0] + b[1] checkAst("(= ([ %c1 #0 #0) (+ ([ %a1 #0 #0) ([ %b1 #1 #0)))") # print "\nDoes the keyWriteHistoryList work?" 
for k in Xbase.keyWriteHistoryList: print k h2o.check_sandbox_for_errors() def test_xl_seq_C(self): a = DF('a1') b = DF('b1') c = DF('c1') assert isinstance(a, Key) assert isinstance(b, Key) assert isinstance(c, Key) # this just overwrite the a/b/c with python datatypes if 1==0: a = 0 checkAst("(= !a1 #0)") b = 0 checkAst("(= !b1 #0)") c = 0 checkAst("(= !c1 #0)") a = [0] checkAst("(= !a1 (c {#0}))") b = [0,1] checkAst("(= !b1 (c {#0;#1}))") c = [0,1,2] checkAst("(= !c1 (c {#0;#1;#2}))") a = (0,) # make sure it's a tuple with comma checkAst("(= !a1 (c {#0}))") b = (0,1) checkAst("(= !b1 (c {#0;#1}))") c = (0,1,2) checkAst("(= !c1 (c {#0;#1;#2}))") # added to init the keys, to avoid AAIOBE at h2o a <<= [0] # comma isn't needed checkAst("(= !a1 (c {#0}))") b <<= [0,1] checkAst("(= !b1 (c {#0;#1}))") c <<= [0,1,2] checkAst("(= !c1 (c {#0;#1;#2}))") # these don't work if 1==0: c = a[0] + b[1] # no .do() needed because of types on rhs? or ? c.do() checkAst("(= !c1 (+ ([ %a1 #0 #0) ([ %b1 #1 #0)))") c[0] = a[0] + b[1] c.do() checkAst("(= ([ %c1 #0 #0) (+ ([ %a1 #0 #0) ([ %b1 #1 #0)))") # print "\nDoes the keyWriteHistoryList work?" for k in Xbase.keyWriteHistoryList: print k h2o.check_sandbox_for_errors() if __name__ == '__main__': h2o.unit_main()
apache-2.0
smart-developerr/my-first-blog
Lib/site-packages/django/contrib/gis/db/backends/spatialite/schema.py
518
6882
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor from django.db.utils import DatabaseError class SpatialiteSchemaEditor(DatabaseSchemaEditor): sql_add_geometry_column = ( "SELECT AddGeometryColumn(%(table)s, %(column)s, %(srid)s, " "%(geom_type)s, %(dim)s, %(null)s)" ) sql_add_spatial_index = "SELECT CreateSpatialIndex(%(table)s, %(column)s)" sql_drop_spatial_index = "DROP TABLE idx_%(table)s_%(column)s" sql_remove_geometry_metadata = "SELECT DiscardGeometryColumn(%(table)s, %(column)s)" sql_discard_geometry_columns = "DELETE FROM %(geom_table)s WHERE f_table_name = %(table)s" sql_update_geometry_columns = ( "UPDATE %(geom_table)s SET f_table_name = %(new_table)s " "WHERE f_table_name = %(old_table)s" ) geometry_tables = [ "geometry_columns", "geometry_columns_auth", "geometry_columns_time", "geometry_columns_statistics", ] def __init__(self, *args, **kwargs): super(SpatialiteSchemaEditor, self).__init__(*args, **kwargs) self.geometry_sql = [] def geo_quote_name(self, name): return self.connection.ops.geo_quote_name(name) def column_sql(self, model, field, include_default=False): from django.contrib.gis.db.models.fields import GeometryField if not isinstance(field, GeometryField): return super(SpatialiteSchemaEditor, self).column_sql(model, field, include_default) # Geometry columns are created by the `AddGeometryColumn` function self.geometry_sql.append( self.sql_add_geometry_column % { "table": self.geo_quote_name(model._meta.db_table), "column": self.geo_quote_name(field.column), "srid": field.srid, "geom_type": self.geo_quote_name(field.geom_type), "dim": field.dim, "null": int(not field.null), } ) if field.spatial_index: self.geometry_sql.append( self.sql_add_spatial_index % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } ) return None, None def remove_geometry_metadata(self, model, field): self.execute( self.sql_remove_geometry_metadata % { "table": self.quote_name(model._meta.db_table), "column": 
self.quote_name(field.column), } ) self.execute( self.sql_drop_spatial_index % { "table": model._meta.db_table, "column": field.column, } ) def create_model(self, model): super(SpatialiteSchemaEditor, self).create_model(model) # Create geometry columns for sql in self.geometry_sql: self.execute(sql) self.geometry_sql = [] def delete_model(self, model, **kwargs): from django.contrib.gis.db.models.fields import GeometryField # Drop spatial metadata (dropping the table does not automatically remove them) for field in model._meta.local_fields: if isinstance(field, GeometryField): self.remove_geometry_metadata(model, field) # Make sure all geom stuff is gone for geom_table in self.geometry_tables: try: self.execute( self.sql_discard_geometry_columns % { "geom_table": geom_table, "table": self.quote_name(model._meta.db_table), } ) except DatabaseError: pass super(SpatialiteSchemaEditor, self).delete_model(model, **kwargs) def add_field(self, model, field): from django.contrib.gis.db.models.fields import GeometryField if isinstance(field, GeometryField): # Populate self.geometry_sql self.column_sql(model, field) for sql in self.geometry_sql: self.execute(sql) self.geometry_sql = [] else: super(SpatialiteSchemaEditor, self).add_field(model, field) def remove_field(self, model, field): from django.contrib.gis.db.models.fields import GeometryField # NOTE: If the field is a geometry field, the table is just recreated, # the parent's remove_field can't be used cause it will skip the # recreation if the field does not have a database type. Geometry fields # do not have a db type cause they are added and removed via stored # procedures. 
if isinstance(field, GeometryField): self._remake_table(model, delete_fields=[field]) else: super(SpatialiteSchemaEditor, self).remove_field(model, field) def alter_db_table(self, model, old_db_table, new_db_table): from django.contrib.gis.db.models.fields import GeometryField # Remove geometry-ness from temp table for field in model._meta.local_fields: if isinstance(field, GeometryField): self.execute( self.sql_remove_geometry_metadata % { "table": self.quote_name(old_db_table), "column": self.quote_name(field.column), } ) # Alter table super(SpatialiteSchemaEditor, self).alter_db_table(model, old_db_table, new_db_table) # Repoint any straggler names for geom_table in self.geometry_tables: try: self.execute( self.sql_update_geometry_columns % { "geom_table": geom_table, "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), } ) except DatabaseError: pass # Re-add geometry-ness and rename spatial index tables for field in model._meta.local_fields: if isinstance(field, GeometryField): self.execute(self.sql_add_geometry_column % { "table": self.geo_quote_name(new_db_table), "column": self.geo_quote_name(field.column), "srid": field.srid, "geom_type": self.geo_quote_name(field.geom_type), "dim": field.dim, "null": int(not field.null), }) if getattr(field, 'spatial_index', False): self.execute(self.sql_rename_table % { "old_table": self.quote_name("idx_%s_%s" % (old_db_table, field.column)), "new_table": self.quote_name("idx_%s_%s" % (new_db_table, field.column)), })
gpl-3.0
madirish/hector
app/scripts/screenshot_scan/screenshot_scan.py
1
4015
#!/usr/bin/python """ This script is part of HECTOR. by Josh Bauer <joshbauer3@gmail.com> Modified by: Justin C. Klein Keane <jukeane@sas.upenn.edu> Last modified: 31 July, 2014 This script requires python 2.5 or higher. This script is a threaded screenshot scan using phantomjs to render screenshots for urls in Hector's url table. Files are stored in the "app/screenshots" directory. This script is called by "screenshot_scan.php". """ import Queue import threading import MySQLdb import time import ConfigParser import urllib2 import subprocess import logging import sys, os # appPath - for example /opt/hector/app appPath = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../") sys.path.append(appPath + "/lib/pylib") from pull_config import Configurator DEBUG = False # Credentials used for the database connection configr = Configurator() DB = configr.get_var('db') HOST = configr.get_var('db_host') USERNAME = configr.get_var('db_user') PASSWORD = configr.get_var('db_pass') PHANTOMJS = configr.get_var('phantomjs_exec_path') if PHANTOMJS == '/no/such/path' : raise Exception('phantomJS not configured, please update your config.ini with the proper path') #logging set up logger = logging.getLogger('screenshot scan') hdlr = logging.FileHandler(appPath + '/logs/message_log') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.WARNING) if DEBUG : logger.setLevel(logging.DEBUG) logger.info('screenshot_scan.py is starting in Python') class ScreenShotThread(threading.Thread): """Threaded Screenshot Grab""" def __init__(self, urls): threading.Thread.__init__(self) self.urls = urls def run(self): while True: #grabs url from queue self.url = self.urls.get() #url for database purposes self.full_url='' #url for phantomjs/urllib2 if not self.url.startswith('http'): self.full_url = 'http://' self.full_url += self.url try : response = urllib2.urlopen(self.full_url,timeout=10) 
response = response.getcode() except : response = 'failed' logger.debug(self.name + " " + self.full_url + ' gave response: ' + str(response)) if response != 'failed': self.take_snapshot() #signals to queue job is done self.urls.task_done() def take_snapshot(self): """calls phantomjs to capture screenshot and updates the database""" filter=['/','.',':',';'] filename = self.full_url for c in filter : filename=filename.replace(c, '_') filename += '_' + str(int(time.time())) + '.png' command = PHANTOMJS + ' /opt/hector/app/scripts/snapshot.js \'' + self.full_url +'\' \'' + filename +'\'' logger.debug(self.name + " command: " + command + " start") proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() logger.debug(self.name + " command: "+ command + "\n\toutput: " + out) if out.count('Status: success')>0 : conn = MySQLdb.connect(host=HOST, user=USERNAME, passwd=PASSWORD, db=DB) cursor = conn.cursor() cursor.execute('update url set url_screenshot=%s where url_url=%s',(filename,self.url)) conn.commit() conn = MySQLdb.connect(host=HOST, user=USERNAME, passwd=PASSWORD, db=DB) cursor = conn.cursor() cursor.execute('select url_url from url') results = cursor.fetchall() conn.close() urls=Queue.Queue() #initialize threads for i in range(10): t = ScreenShotThread(urls) t.setDaemon(True) t.start() #populate the queue for result in results: urls.put(result[0]) #wait for the queue to be emptied urls.join()
gpl-3.0
hhstore/learning-notes
python/src/exercise/py27/ex03_callable_usage.py
2
1777
#!/usr/bin/env python # -*- coding: utf-8 -*- """ 可调用对象: - 魔法方法: __call__(self, [args...]) - 允许类的一个实例, 像函数那样被调用 - 允许你自己类的对象, 表现得像是函数,然后你就可以“调用”它们,把它们传递到使用函数做参数的函数中 - 本质上这代表了 x() 和 x.__call__() 是相同的 - 注意 __call__ 可以有多个参数,可以像定义其他任何函数一样,定义 __call__ ,喜欢用多少参数就用多少. - __call__ 在某些需要经常改变状态的类的实例中显得特别有用. - “调用”这个实例来改变它的状态,是一种更加符合直觉,也更加优雅的方法. 参考: - http://pyzh.readthedocs.io/en/latest/python-magic-methods-guide.html#id20 """ class A(object): """表示一个实体的类. 调用它的实例, 可以更新实体的位置 """ def __init__(self, x, y): self.x, self.y = x, y print "<__init__() is called.> | x={}, y={}".format(self.x, self.y) def __call__(self, x, y): """改变实体的位置 :param x: :param y: :return: """ self.x, self.y = x, y print "<__call__() is called.> | x={}, y={}".format(self.x, self.y) class B(object): """表示一个实体的类. 调用它的实例, 可以更新实体的位置 """ def __init__(self, x, y): self.x, self.y = x, y print "<__init__() is called.> | x={}, y={}".format(self.x, self.y) def __call__(self, *args, **kwargs): self.x, self.y = self.y, self.x print "<__call__() is called.> | x={}, y={}".format(self.x, self.y) if __name__ == '__main__': m = A(2, 3) m(4, 8) n = B(1, 9) n()
mit
StanfordBioinformatics/xppf
client/loomengine/template_tag.py
2
7550
#!/usr/bin/env python import argparse import os import sys from loomengine import server from loomengine import verify_has_connection_settings, \ get_server_url, verify_server_is_running, get_token from loomengine_utils.connection import Connection from loomengine_utils.exceptions import LoomengineUtilsError class TemplateTagAdd(object): """Add a new template tags """ def __init__(self, args=None, silent=False): # Args may be given as an input argument for testing purposes # or from the main parser. # Otherwise get them from the parser. if args is None: args = self._get_args() self.args = args self.silent = silent verify_has_connection_settings() server_url = get_server_url() verify_server_is_running(url=server_url) self.connection = Connection(server_url, token=get_token()) def _get_args(self): self.parser = self.get_parser() return self.parser.parse_args() @classmethod def get_parser(cls, parser=None): # If called from main, use the subparser provided. # Otherwise create a top-level parser here. if parser is None: parser = argparse.ArgumentParser(__file__) parser.add_argument( 'target', metavar='TARGET', help='identifier for template to be tagged') parser.add_argument( 'tag', metavar='TAG', help='tag name to be added') return parser def run(self): try: templates = self.connection.get_template_index( min=1, max=1, query_string=self.args.target) except LoomengineUtilsError as e: raise SystemExit("ERROR! Failed to get template list: '%s'" % e) tag_data = {'tag': self.args.tag} try: tag = self.connection.post_template_tag( templates[0]['uuid'], tag_data) except LoomengineUtilsError as e: raise SystemExit("ERROR! 
Failed to create tag: '%s'" % e) print 'Target "%s@%s" has been tagged as "%s"' % \ (templates[0].get('name'), templates[0].get('uuid'), tag.get('tag')) class TemplateTagRemove(object): """Remove a template tag """ def __init__(self, args=None, silent=False): if args is None: args = self._get_args() self.args = args self.silent = silent verify_has_connection_settings() server_url = get_server_url() verify_server_is_running(url=server_url) self.connection = Connection(server_url, token=get_token()) def _get_args(self): self.parser = self.get_parser() return self.parser.parse_args() @classmethod def get_parser(cls, parser=None): # If called from main, use the subparser provided. # Otherwise create a top-level parser here. if parser is None: parser = argparse.ArgumentParser(__file__) parser.add_argument( 'target', metavar='TARGET', help='identifier for template to be untagged') parser.add_argument( 'tag', metavar='TAG', help='tag name to be removed') return parser def run(self): try: templates = self.connection.get_template_index( min=1, max=1, query_string=self.args.target) except LoomengineUtilsError as e: raise SystemExit("ERROR! Failed to get template list: '%s'" % e) tag_data = {'tag': self.args.tag} try: tag = self.connection.remove_template_tag( templates[0]['uuid'], tag_data) except LoomengineUtilsError as e: raise SystemExit("ERROR! 
Failed to remove tag: '%s'" % e) print 'Tag %s has been removed from template "%s@%s"' % \ (tag.get('tag'), templates[0].get('name'), templates[0].get('uuid')) class TemplateTagList(object): def __init__(self, args=None, silent=False): if args is None: args = self._get_args() self.args = args self.silent = silent verify_has_connection_settings() server_url = get_server_url() verify_server_is_running(url=server_url) self.connection = Connection(server_url, token=get_token()) def _get_args(self): self.parser = self.get_parser() return self.parser.parse_args() @classmethod def get_parser(cls, parser=None): # If called from main, use the subparser provided. # Otherwise create a top-level parser here. if parser is None: parser = argparse.ArgumentParser(__file__) parser.add_argument( 'target', metavar='TARGET', nargs='?', help='show tags only for the specified template') return parser def run(self): if self.args.target: try: templates = self.connection.get_template_index( min=1, max=1, query_string=self.args.target) except LoomengineUtilsError as e: raise SystemExit( "ERROR! Failed to get template list: '%s'" % e) try: tag_data = self.connection.list_template_tags( templates[0]['uuid']) except LoomengineUtilsError as e: raise SystemExit("ERROR! Failed to get tag list: '%s'" % e) tags = tag_data.get('tags', []) else: try: tag_list = self.connection.get_template_tag_index() except LoomengineUtilsError as e: raise SystemExit("ERROR! Failed to get tag list: '%s'" % e) tags = [item.get('tag') for item in tag_list] print '[showing %s tags]' % len(tags) for tag in tags: print tag class TemplateTag(object): """Configures and executes subcommands under "tag" on the parent parser. """ def __init__(self, args=None, silent=False): if args is None: args = self._get_args() self.args = args self.silent = silent def _get_args(self): parser = self.get_parser() return parser.parse_args() @classmethod def get_parser(cls, parser=None): # If called from main, a subparser should be provided. 
# Otherwise we create a top-level parser here. if parser is None: parser = argparse.ArgumentParser(__file__) subparsers = parser.add_subparsers() add_subparser = subparsers.add_parser( 'add', help='add a template tag') TemplateTagAdd.get_parser(add_subparser) add_subparser.set_defaults(SubSubSubcommandClass=TemplateTagAdd) remove_subparser = subparsers.add_parser( 'remove', help='remove a template tag') TemplateTagRemove.get_parser(remove_subparser) remove_subparser.set_defaults(SubSubSubcommandClass=TemplateTagRemove) list_subparser = subparsers.add_parser( 'list', help='list template tags') TemplateTagList.get_parser(list_subparser) list_subparser.set_defaults(SubSubSubcommandClass=TemplateTagList) return parser def run(self): return self.args.SubSubSubcommandClass( self.args, silent=self.silent).run() if __name__ == '__main__': response = TemplateTag().run()
agpl-3.0
burdell/CS4464-Final-Project
django/db/utils.py
151
6225
import inspect import os from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.utils.importlib import import_module DEFAULT_DB_ALIAS = 'default' # Define some exceptions that mirror the PEP249 interface. # We will rethrow any backend-specific errors using these # common wrappers class DatabaseError(Exception): pass class IntegrityError(DatabaseError): pass def load_backend(backend_name): try: module = import_module('.base', 'django.db.backends.%s' % backend_name) import warnings warnings.warn( "Short names for DATABASE_ENGINE are deprecated; prepend with 'django.db.backends.'", DeprecationWarning ) return module except ImportError, e: # Look for a fully qualified database backend name try: return import_module('.base', backend_name) except ImportError, e_user: # The database backend wasn't found. Display a helpful error message # listing all possible (built-in) database backends. backend_dir = os.path.join(os.path.dirname(__file__), 'backends') try: available_backends = [f for f in os.listdir(backend_dir) if os.path.isdir(os.path.join(backend_dir, f)) and not f.startswith('.')] except EnvironmentError: available_backends = [] if backend_name.startswith('django.db.backends.'): backend_name = backend_name[19:] # See #15621. if backend_name not in available_backends: error_msg = ("%r isn't an available database backend. \n" + "Try using django.db.backends.XXX, where XXX is one of:\n %s\n" + "Error was: %s") % \ (backend_name, ", ".join(map(repr, sorted(available_backends))), e_user) raise ImproperlyConfigured(error_msg) else: raise # If there's some other error, this must be an error in Django itself. class ConnectionDoesNotExist(Exception): pass class ConnectionHandler(object): def __init__(self, databases): self.databases = databases self._connections = {} def ensure_defaults(self, alias): """ Puts the defaults into the settings dictionary for a given connection where no settings is provided. 
""" try: conn = self.databases[alias] except KeyError: raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias) conn.setdefault('ENGINE', 'django.db.backends.dummy') if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']: conn['ENGINE'] = 'django.db.backends.dummy' conn.setdefault('OPTIONS', {}) conn.setdefault('TEST_CHARSET', None) conn.setdefault('TEST_COLLATION', None) conn.setdefault('TEST_NAME', None) conn.setdefault('TEST_MIRROR', None) conn.setdefault('TIME_ZONE', settings.TIME_ZONE) for setting in ('NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'): conn.setdefault(setting, '') def __getitem__(self, alias): if alias in self._connections: return self._connections[alias] self.ensure_defaults(alias) db = self.databases[alias] backend = load_backend(db['ENGINE']) conn = backend.DatabaseWrapper(db, alias) self._connections[alias] = conn return conn def __iter__(self): return iter(self.databases) def all(self): return [self[alias] for alias in self] class ConnectionRouter(object): def __init__(self, routers): self.routers = [] for r in routers: if isinstance(r, basestring): try: module_name, klass_name = r.rsplit('.', 1) module = import_module(module_name) except ImportError, e: raise ImproperlyConfigured('Error importing database router %s: "%s"' % (klass_name, e)) try: router_class = getattr(module, klass_name) except AttributeError: raise ImproperlyConfigured('Module "%s" does not define a database router name "%s"' % (module, klass_name)) else: router = router_class() else: router = r self.routers.append(router) def _router_func(action): def _route_db(self, model, **hints): chosen_db = None for router in self.routers: try: method = getattr(router, action) except AttributeError: # If the router doesn't have a method, skip to the next one. 
pass else: chosen_db = method(model, **hints) if chosen_db: return chosen_db try: return hints['instance']._state.db or DEFAULT_DB_ALIAS except KeyError: return DEFAULT_DB_ALIAS return _route_db db_for_read = _router_func('db_for_read') db_for_write = _router_func('db_for_write') def allow_relation(self, obj1, obj2, **hints): for router in self.routers: try: method = router.allow_relation except AttributeError: # If the router doesn't have a method, skip to the next one. pass else: allow = method(obj1, obj2, **hints) if allow is not None: return allow return obj1._state.db == obj2._state.db def allow_syncdb(self, db, model): for router in self.routers: try: method = router.allow_syncdb except AttributeError: # If the router doesn't have a method, skip to the next one. pass else: allow = method(db, model) if allow is not None: return allow return True
bsd-3-clause
bjolivot/ansible
lib/ansible/modules/packaging/os/homebrew_tap.py
42
7344
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Daniel Jaouen <dcj24@cornell.edu> # (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com> # # Based on homebrew (Andrew Dunham <andrew@du.nham.ca>) # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: homebrew_tap author: - "Indrajit Raychaudhuri (@indrajitr)" - "Daniel Jaouen (@danieljaouen)" short_description: Tap a Homebrew repository. description: - Tap external Homebrew repositories. version_added: "1.6" options: name: description: - The GitHub user/organization repository to tap. required: true aliases: ['tap'] url: description: - The optional git URL of the repository to tap. The URL is not assumed to be on GitHub, and the protocol doesn't have to be HTTP. Any location and protocol that git can handle is fine. - I(name) option may not be a list of multiple taps (but a single tap instead) when this option is provided. required: false version_added: "2.2" state: description: - state of the repository. 
choices: [ 'present', 'absent' ] required: false default: 'present' requirements: [ homebrew ] ''' EXAMPLES = ''' - homebrew_tap: name: homebrew/dupes - homebrew_tap: name: homebrew/dupes state: absent - homebrew_tap: name: homebrew/dupes,homebrew/science state: present - homebrew_tap: name: telemachus/brew url: 'https://bitbucket.org/telemachus/brew' ''' import re def a_valid_tap(tap): '''Returns True if the tap is valid.''' regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') return regex.match(tap) def already_tapped(module, brew_path, tap): '''Returns True if already tapped.''' rc, out, err = module.run_command([ brew_path, 'tap', ]) taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] tap_name = re.sub('homebrew-', '', tap.lower()) return tap_name in taps def add_tap(module, brew_path, tap, url=None): '''Adds a single tap.''' failed, changed, msg = False, False, '' if not a_valid_tap(tap): failed = True msg = 'not a valid tap: %s' % tap elif not already_tapped(module, brew_path, tap): if module.check_mode: module.exit_json(changed=True) rc, out, err = module.run_command([ brew_path, 'tap', tap, url, ]) if already_tapped(module, brew_path, tap): changed = True msg = 'successfully tapped: %s' % tap else: failed = True msg = 'failed to tap: %s' % tap else: msg = 'already tapped: %s' % tap return (failed, changed, msg) def add_taps(module, brew_path, taps): '''Adds one or more taps.''' failed, unchanged, added, msg = False, 0, 0, '' for tap in taps: (failed, changed, msg) = add_tap(module, brew_path, tap) if failed: break if changed: added += 1 else: unchanged += 1 if failed: msg = 'added: %d, unchanged: %d, error: ' + msg msg = msg % (added, unchanged) elif added: changed = True msg = 'added: %d, unchanged: %d' % (added, unchanged) else: msg = 'added: %d, unchanged: %d' % (added, unchanged) return (failed, changed, msg) def remove_tap(module, brew_path, tap): '''Removes a single tap.''' failed, changed, msg = False, False, '' if not 
a_valid_tap(tap): failed = True msg = 'not a valid tap: %s' % tap elif already_tapped(module, brew_path, tap): if module.check_mode: module.exit_json(changed=True) rc, out, err = module.run_command([ brew_path, 'untap', tap, ]) if not already_tapped(module, brew_path, tap): changed = True msg = 'successfully untapped: %s' % tap else: failed = True msg = 'failed to untap: %s' % tap else: msg = 'already untapped: %s' % tap return (failed, changed, msg) def remove_taps(module, brew_path, taps): '''Removes one or more taps.''' failed, unchanged, removed, msg = False, 0, 0, '' for tap in taps: (failed, changed, msg) = remove_tap(module, brew_path, tap) if failed: break if changed: removed += 1 else: unchanged += 1 if failed: msg = 'removed: %d, unchanged: %d, error: ' + msg msg = msg % (removed, unchanged) elif removed: changed = True msg = 'removed: %d, unchanged: %d' % (removed, unchanged) else: msg = 'removed: %d, unchanged: %d' % (removed, unchanged) return (failed, changed, msg) def main(): module = AnsibleModule( argument_spec=dict( name=dict(aliases=['tap'], type='list', required=True), url=dict(default=None, required=False), state=dict(default='present', choices=['present', 'absent']), ), supports_check_mode=True, ) brew_path = module.get_bin_path( 'brew', required=True, opt_dirs=['/usr/local/bin'] ) taps = module.params['name'] url = module.params['url'] if module.params['state'] == 'present': if url is None: # No tap URL provided explicitly, continue with bulk addition # of all the taps. failed, changed, msg = add_taps(module, brew_path, taps) else: # When an tap URL is provided explicitly, we allow adding # *single* tap only. Validate and proceed to add single tap. if len(taps) > 1: msg = "List of muliple taps may not be provided with 'url' option." 
module.fail_json(msg=msg) else: failed, changed, msg = add_tap(module, brew_path, taps[0], url) if failed: module.fail_json(msg=msg) else: module.exit_json(changed=changed, msg=msg) elif module.params['state'] == 'absent': failed, changed, msg = remove_taps(module, brew_path, taps) if failed: module.fail_json(msg=msg) else: module.exit_json(changed=changed, msg=msg) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
sssemil/cjdns
node_build/dependencies/libuv/build/gyp/test/win/gyptest-command-quote.py
296
1282
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Make sure the program in a command can be a called batch file, or an application in the path. Specifically, this means not quoting something like "call x.bat", lest the shell look for a program named "call x.bat", rather than calling "x.bat". """ import TestGyp import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['msvs', 'ninja']) CHDIR = 'command-quote' test.run_gyp('command-quote.gyp', chdir=CHDIR) test.build('command-quote.gyp', 'test_batch', chdir=CHDIR) test.build('command-quote.gyp', 'test_call_separate', chdir=CHDIR) test.build('command-quote.gyp', 'test_with_double_quotes', chdir=CHDIR) test.build('command-quote.gyp', 'test_with_single_quotes', chdir=CHDIR) # We confirm that this fails because other generators don't handle spaces in # inputs so it's preferable to not have it work here. test.build('command-quote.gyp', 'test_with_spaces', chdir=CHDIR, status=1) CHDIR = 'command-quote/subdir/and/another' test.run_gyp('in-subdir.gyp', chdir=CHDIR) test.build('in-subdir.gyp', 'test_batch_depth', chdir=CHDIR) test.pass_test()
gpl-3.0
neeasade/qutebrowser
tests/unit/completion/test_models.py
1
15355
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016 Ryan Roden-Corrent (rcorre) <ryan@rcorre.net> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Tests for completion models.""" import collections from datetime import datetime import pytest from PyQt5.QtCore import QUrl from PyQt5.QtWidgets import QTreeView from qutebrowser.completion.models import miscmodels, urlmodel, configmodel from qutebrowser.browser.webkit import history from qutebrowser.config import sections, value def _get_completions(model): """Collect all the completion entries of a model, organized by category. The result is a list of form: [ (CategoryName: [(name, desc, misc), ...]), (CategoryName: [(name, desc, misc), ...]), ... 
] """ completions = [] for i in range(0, model.rowCount()): category = model.item(i) entries = [] for j in range(0, category.rowCount()): name = category.child(j, 0) desc = category.child(j, 1) misc = category.child(j, 2) entries.append((name.text(), desc.text(), misc.text())) completions.append((category.text(), entries)) return completions def _patch_cmdutils(monkeypatch, stubs, symbol): """Patch the cmdutils module to provide fake commands.""" cmd_utils = stubs.FakeCmdUtils({ 'stop': stubs.FakeCommand(name='stop', desc='stop qutebrowser'), 'drop': stubs.FakeCommand(name='drop', desc='drop all user data'), 'roll': stubs.FakeCommand(name='roll', desc='never gonna give you up'), 'hide': stubs.FakeCommand(name='hide', hide=True), 'depr': stubs.FakeCommand(name='depr', deprecated=True), }) monkeypatch.setattr(symbol, cmd_utils) def _patch_configdata(monkeypatch, stubs, symbol): """Patch the configdata module to provide fake data.""" data = collections.OrderedDict([ ('general', sections.KeyValue( ('time', value.SettingValue(stubs.FakeConfigType('fast', 'slow'), default='slow'), 'Is an illusion.\n\nLunchtime doubly so.'), ('volume', value.SettingValue(stubs.FakeConfigType('0', '11'), default='11'), 'Goes to 11'))), ('ui', sections.KeyValue( ('gesture', value.SettingValue(stubs.FakeConfigType(('on', 'off')), default='off'), 'Waggle your hands to control qutebrowser'), ('mind', value.SettingValue(stubs.FakeConfigType(('on', 'off')), default='off'), 'Enable mind-control ui (experimental)'), ('voice', value.SettingValue(stubs.FakeConfigType(('on', 'off')), default='off'), 'Whether to respond to voice commands'))), ]) monkeypatch.setattr(symbol, data) def _patch_config_section_desc(monkeypatch, stubs, symbol): """Patch the configdata module to provide fake SECTION_DESC.""" section_desc = { 'general': 'General/miscellaneous options.', 'ui': 'General options related to the user interface.', } monkeypatch.setattr(symbol, section_desc) def _mock_view_index(model, category_idx, 
child_idx, qtbot): """Create a tree view from a model and set the current index. Args: model: model to create a fake view for. category_idx: index of the category to select. child_idx: index of the child item under that category to select. """ view = QTreeView() qtbot.add_widget(view) view.setModel(model) idx = model.indexFromItem(model.item(category_idx).child(child_idx)) view.setCurrentIndex(idx) return view @pytest.fixture def quickmarks(quickmark_manager_stub): """Pre-populate the quickmark-manager stub with some quickmarks.""" quickmark_manager_stub.marks = collections.OrderedDict([ ('aw', 'https://wiki.archlinux.org'), ('ddg', 'https://duckduckgo.com'), ('wiki', 'https://wikipedia.org'), ]) return quickmark_manager_stub @pytest.fixture def bookmarks(bookmark_manager_stub): """Pre-populate the bookmark-manager stub with some quickmarks.""" bookmark_manager_stub.marks = collections.OrderedDict([ ('https://github.com', 'GitHub'), ('https://python.org', 'Welcome to Python.org'), ('http://qutebrowser.org', 'qutebrowser | qutebrowser'), ]) return bookmark_manager_stub @pytest.fixture def web_history(stubs, web_history_stub): """Pre-populate the web-history stub with some history entries.""" web_history_stub.history_dict = collections.OrderedDict([ ('http://qutebrowser.org', history.Entry( datetime(2015, 9, 5).timestamp(), QUrl('http://qutebrowser.org'), 'qutebrowser | qutebrowser')), ('https://python.org', history.Entry( datetime(2016, 3, 8).timestamp(), QUrl('https://python.org'), 'Welcome to Python.org')), ('https://github.com', history.Entry( datetime(2016, 5, 1).timestamp(), QUrl('https://github.com'), 'GitHub')), ]) return web_history_stub def test_command_completion(monkeypatch, stubs, config_stub, key_config_stub): """Test the results of command completion. 
Validates that: - only non-hidden and non-deprecated commands are included - commands are sorted by name - the command description is shown in the desc column - the binding (if any) is shown in the misc column - aliases are included """ _patch_cmdutils(monkeypatch, stubs, 'qutebrowser.completion.models.miscmodels.cmdutils') config_stub.data['aliases'] = {'rock': 'roll'} key_config_stub.set_bindings_for('normal', {'s': 'stop', 'rr': 'roll'}) actual = _get_completions(miscmodels.CommandCompletionModel()) assert actual == [ ("Commands", [ ('drop', 'drop all user data', ''), ('rock', "Alias for 'roll'", ''), ('roll', 'never gonna give you up', 'rr'), ('stop', 'stop qutebrowser', 's') ]) ] def test_help_completion(monkeypatch, stubs): """Test the results of command completion. Validates that: - only non-hidden and non-deprecated commands are included - commands are sorted by name - the command description is shown in the desc column - the binding (if any) is shown in the misc column - aliases are included - only the first line of a multiline description is shown """ module = 'qutebrowser.completion.models.miscmodels' _patch_cmdutils(monkeypatch, stubs, module + '.cmdutils') _patch_configdata(monkeypatch, stubs, module + '.configdata.DATA') actual = _get_completions(miscmodels.HelpCompletionModel()) assert actual == [ ("Commands", [ (':drop', 'drop all user data', ''), (':roll', 'never gonna give you up', ''), (':stop', 'stop qutebrowser', '') ]), ("Settings", [ ('general->time', 'Is an illusion.', ''), ('general->volume', 'Goes to 11', ''), ('ui->gesture', 'Waggle your hands to control qutebrowser', ''), ('ui->mind', 'Enable mind-control ui (experimental)', ''), ('ui->voice', 'Whether to respond to voice commands', ''), ]) ] def test_quickmark_completion(quickmarks): """Test the results of quickmark completion.""" actual = _get_completions(miscmodels.QuickmarkCompletionModel()) assert actual == [ ("Quickmarks", [ ('aw', 'https://wiki.archlinux.org', ''), ('ddg', 
'https://duckduckgo.com', ''), ('wiki', 'https://wikipedia.org', ''), ]) ] def test_bookmark_completion(bookmarks): """Test the results of bookmark completion.""" actual = _get_completions(miscmodels.BookmarkCompletionModel()) assert actual == [ ("Bookmarks", [ ('https://github.com', 'GitHub', ''), ('https://python.org', 'Welcome to Python.org', ''), ('http://qutebrowser.org', 'qutebrowser | qutebrowser', ''), ]) ] def test_url_completion(config_stub, web_history, quickmarks, bookmarks): """Test the results of url completion. Verify that: - quickmarks, bookmarks, and urls are included - no more than 'web-history-max-items' history entries are included - the most recent entries are included """ config_stub.data['completion'] = {'timestamp-format': '%Y-%m-%d', 'web-history-max-items': 2} actual = _get_completions(urlmodel.UrlCompletionModel()) assert actual == [ ("Quickmarks", [ ('https://wiki.archlinux.org', 'aw', ''), ('https://duckduckgo.com', 'ddg', ''), ('https://wikipedia.org', 'wiki', ''), ]), ("Bookmarks", [ ('https://github.com', 'GitHub', ''), ('https://python.org', 'Welcome to Python.org', ''), ('http://qutebrowser.org', 'qutebrowser | qutebrowser', ''), ]), ("History", [ ('https://python.org', 'Welcome to Python.org', '2016-03-08'), ('https://github.com', 'GitHub', '2016-05-01'), ]), ] def test_url_completion_delete_bookmark(config_stub, web_history, quickmarks, bookmarks, qtbot): """Test deleting a bookmark from the url completion model.""" config_stub.data['completion'] = {'timestamp-format': '%Y-%m-%d', 'web-history-max-items': 2} model = urlmodel.UrlCompletionModel() # delete item (1, 0) -> (bookmarks, 'https://github.com' ) view = _mock_view_index(model, 1, 0, qtbot) model.delete_cur_item(view) assert 'https://github.com' not in bookmarks.marks assert 'https://python.org' in bookmarks.marks assert 'http://qutebrowser.org' in bookmarks.marks def test_url_completion_delete_quickmark(config_stub, web_history, quickmarks, bookmarks, qtbot): """Test 
deleting a bookmark from the url completion model.""" config_stub.data['completion'] = {'timestamp-format': '%Y-%m-%d', 'web-history-max-items': 2} model = urlmodel.UrlCompletionModel() # delete item (0, 1) -> (quickmarks, 'ddg' ) view = _mock_view_index(model, 0, 1, qtbot) model.delete_cur_item(view) assert 'aw' in quickmarks.marks assert 'ddg' not in quickmarks.marks assert 'wiki' in quickmarks.marks def test_session_completion(session_manager_stub): session_manager_stub.sessions = ['default', '1', '2'] actual = _get_completions(miscmodels.SessionCompletionModel()) assert actual == [ ("Sessions", [('default', '', ''), ('1', '', ''), ('2', '', '')]) ] def test_tab_completion(fake_web_tab, app_stub, win_registry, tabbed_browser_stubs): tabbed_browser_stubs[0].tabs = [ fake_web_tab(QUrl('https://github.com'), 'GitHub', 0), fake_web_tab(QUrl('https://wikipedia.org'), 'Wikipedia', 1), fake_web_tab(QUrl('https://duckduckgo.com'), 'DuckDuckGo', 2), ] tabbed_browser_stubs[1].tabs = [ fake_web_tab(QUrl('https://wiki.archlinux.org'), 'ArchWiki', 0), ] actual = _get_completions(miscmodels.TabCompletionModel()) assert actual == [ ('0', [ ('0/1', 'https://github.com', 'GitHub'), ('0/2', 'https://wikipedia.org', 'Wikipedia'), ('0/3', 'https://duckduckgo.com', 'DuckDuckGo') ]), ('1', [ ('1/1', 'https://wiki.archlinux.org', 'ArchWiki'), ]) ] def test_tab_completion_delete(fake_web_tab, qtbot, app_stub, win_registry, tabbed_browser_stubs): """Verify closing a tab by deleting it from the completion widget.""" tabbed_browser_stubs[0].tabs = [ fake_web_tab(QUrl('https://github.com'), 'GitHub', 0), fake_web_tab(QUrl('https://wikipedia.org'), 'Wikipedia', 1), fake_web_tab(QUrl('https://duckduckgo.com'), 'DuckDuckGo', 2) ] tabbed_browser_stubs[1].tabs = [ fake_web_tab(QUrl('https://wiki.archlinux.org'), 'ArchWiki', 0), ] model = miscmodels.TabCompletionModel() view = _mock_view_index(model, 0, 1, qtbot) qtbot.add_widget(view) model.delete_cur_item(view) actual = [tab.url() for tab in 
tabbed_browser_stubs[0].tabs] assert actual == [QUrl('https://github.com'), QUrl('https://duckduckgo.com')] def test_setting_section_completion(monkeypatch, stubs): module = 'qutebrowser.completion.models.configmodel' _patch_configdata(monkeypatch, stubs, module + '.configdata.DATA') _patch_config_section_desc(monkeypatch, stubs, module + '.configdata.SECTION_DESC') actual = _get_completions(configmodel.SettingSectionCompletionModel()) assert actual == [ ("Sections", [ ('general', 'General/miscellaneous options.', ''), ('ui', 'General options related to the user interface.', ''), ]) ] def test_setting_option_completion(monkeypatch, stubs, config_stub): module = 'qutebrowser.completion.models.configmodel' _patch_configdata(monkeypatch, stubs, module + '.configdata.DATA') config_stub.data = {'ui': {'gesture': 'off', 'mind': 'on', 'voice': 'sometimes'}} actual = _get_completions(configmodel.SettingOptionCompletionModel('ui')) assert actual == [ ("ui", [ ('gesture', 'Waggle your hands to control qutebrowser', 'off'), ('mind', 'Enable mind-control ui (experimental)', 'on'), ('voice', 'Whether to respond to voice commands', 'sometimes'), ]) ] def test_setting_value_completion(monkeypatch, stubs, config_stub): module = 'qutebrowser.completion.models.configmodel' _patch_configdata(monkeypatch, stubs, module + '.configdata.DATA') config_stub.data = {'general': {'volume': '0'}} model = configmodel.SettingValueCompletionModel('general', 'volume') actual = _get_completions(model) assert actual == [ ("Current/Default", [ ('0', 'Current value', ''), ('11', 'Default value', ''), ]), ("Completions", [ ('0', '', ''), ('11', '', ''), ]) ]
gpl-3.0
ecino/compassion-modules
partner_communication/__manifest__.py
2
2656
# -*- coding: utf-8 -*- ############################################################################## # # ______ Releasing children from poverty _ # / ____/___ ____ ___ ____ ____ ___________(_)___ ____ # / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \ # / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / / # \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/ # /_/ # in Jesus' name # # Copyright (C) 2016 Compassion CH (http://www.compassion.ch) # @author: Emanuel Cino <ecino@compassion.ch> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
# ############################################################################## # pylint: disable=C8101 { 'name': 'Partner Communication', 'version': '10.0.1.2.1', 'category': 'Other', 'author': 'Compassion CH', 'license': 'AGPL-3', 'website': 'http://www.compassion.ch', 'depends': ['crm_phone', 'base_report_to_printer', 'hr', 'contacts', 'compassion_dashboard', 'queue_job', 'utm', 'web_ckeditor4', 'report'], 'external_dependencies': { 'python': ['phonenumbers', 'pyPdf'] }, 'data': [ 'security/ir.model.access.csv', 'security/communication_job_security.xml', 'report/a4_no_margin.xml', 'views/partner_communication.xml', 'views/communication_job_view.xml', 'views/communication_config_view.xml', 'views/call_wizard_view.xml', 'views/res_partner_view.xml', 'views/change_text_wizard_view.xml', 'views/pdf_wizard_view.xml', 'views/generate_communication_wizard_view.xml', 'views/communication_dashboard.xml', 'views/crm_phone_view.xml', 'views/ir_attachment_view.xml', 'data/default_communication.xml' ], 'qweb': ["static/src/xml/communication_dashboard.xml"], 'demo': ["demo/demo_data.xml"], 'installable': True, 'auto_install': False }
agpl-3.0
TeamTwisted/leanKernel-shamu
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
benfinke/ns_python
build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslservice_args.py
3
1205
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # class sslservice_args : ur""" Provides additional arguments required for fetching the sslservice resource. """ def __init__(self) : self._cipherdetails = False @property def cipherdetails(self) : ur"""Display details of the individual ciphers bound to the SSL service. """ try : return self._cipherdetails except Exception as e: raise e @cipherdetails.setter def cipherdetails(self, cipherdetails) : ur"""Display details of the individual ciphers bound to the SSL service. """ try : self._cipherdetails = cipherdetails except Exception as e: raise e
apache-2.0
deeplook/bokeh
examples/glyphs/widgets_server.py
42
2426
from __future__ import print_function from datetime import date from random import randint from bokeh.browserlib import view from bokeh.document import Document from bokeh.models.glyphs import Line, Circle from bokeh.models import ( Plot, ColumnDataSource, DataRange1d, LinearAxis, DatetimeAxis, Grid, HoverTool ) from bokeh.session import Session from bokeh.models.widgets import ( VBox, Button, TableColumn, DataTable, DateEditor, DateFormatter, IntEditor) document = Document() session = Session() session.use_doc('widgets_server') session.load_document(document) def make_data(): n = randint(5, 10) return dict( dates=[ date(2014, 3, i+1) for i in range(n) ], downloads=[ randint(0, 100) for i in range(n) ], ) source = ColumnDataSource(make_data()) def make_plot(): xdr = DataRange1d() ydr = DataRange1d() plot = Plot(title="Product downloads", x_range=xdr, y_range=ydr, plot_width=400, plot_height=400) line = Line(x="dates", y="downloads", line_color="blue") plot.add_glyph(source, line) circle = Circle(x="dates", y="downloads", fill_color="red") plot.add_glyph(source, circle) xaxis = DatetimeAxis() plot.add_layout(xaxis, 'below') yaxis = LinearAxis() plot.add_layout(yaxis, 'left') plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker)) plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker)) plot.add_tools(HoverTool(tooltips=dict(downloads="@downloads"))) return plot, source def click_handler(): source.data = make_data() session.store_document(document) def make_layout(): plot, source = make_plot() columns = [ TableColumn(field="dates", title="Date", editor=DateEditor(), formatter=DateFormatter()), TableColumn(field="downloads", title="Downloads", editor=IntEditor()), ] data_table = DataTable(source=source, columns=columns, width=400, height=400, editable=True) button = Button(label="Randomize data", type="success") button.on_click(click_handler) buttons = VBox(children=[button]) vbox = VBox(children=[buttons, plot, data_table]) return vbox document.add(make_layout()) 
session.store_document(document) if __name__ == "__main__": link = session.object_link(document.context) print("Please visit %s to see the plots" % link) view(link) print("\npress ctrl-C to exit") session.poll_document(document)
bsd-3-clause
mariodebian/jclic-browser
jclic_downloader.py
1
14268
#!/usr/bin/python # -*- coding: UTF-8 -*- # # Por Mario Izquierdo Rodríguez # # JclicDownloader descarga las actividades que se le pasen como filtro # import sgmllib import sys import urllib import os from time import sleep import getopt import zipfile from xml.dom import minidom # si test es True se usan archivos descargados # sino se descarga de la web ( en una conexion lenta puede tardar años test=False debug=False # leemos los enlaces con este link install_url="http://clic.xtec.net/jnlp/jclic/install.jnlp" MAX=1000 # todas las actividades todas_url="http://clic.xtec.es/db/listact_es.jsp?lang=es&ordre=0&desc=1&from=1&area=*&idioma=*&nivell=*&text_titol=&text_aut=&text_desc=&num=1000" # tipo de url de cada actividad act_url="http://clic.xtec.es/db/act_es.jsp?id=" #http://clic.xtec.net/projects/sis2x2/jclic/sis2x2.jclic.inst proy_url="http://clic.xtec.net/projects" zips_dir="/var/lib/jclic_browser/zips" img_dirs="/var/lib/jclic_browser/imgs" class HTMLParser(sgmllib.SGMLParser): def __init__(self, mycod=None): sgmllib.SGMLParser.__init__(self) self.insideTag = 0 self.links = [] self.links_with_name = {} self.project_names = {} self.mycod=mycod def parse(self, data): self.feed(data) self.close() #return self.links def start_a(self, args): for key, value in args: if key.lower() == 'href': self.insideTag = 1 self.lastHref = value def handle_data(self, data): if self.insideTag: self.hrefText = data def end_a(self): #self.links.append( [self.lastHref, self.hrefText] ) self.links.append( self.lastHref ) self.insideTag = 0 cod_act=self.get_cod_act(self.lastHref) if cod_act != "": #print "cod_act=%s nombre=%s" %(cod_act, self.hrefText) self.links_with_name[ cod_act ]=self.hrefText if self.mycod != None: project=self.read_project(self.lastHref) if project != "": self.project_names[ self.mycod ] = project def get_cod_act(self, params): if not "id=" in params: return "" params=params.split("=") return params[1] def get_act_names(self): return self.links_with_name def 
get_act_name(self, id_act): for link in self.links_with_name: if link == id_act : return self.links_with_name[id_act] def read_project(self, params): if not "argument=" in params: return "" params=params.split("=") return params[1] def get_project_names(self, params): return self.project_names def get_project_name(self, id_act): #print "buscando id_act=%s" %(id_act) for project in self.project_names: if project == id_act: return self.project_names[id_act] def get_hyperlinks(self): return self.links class JclicDownloader: def __init__(self): if debug: print "__init()__" self.todas=None def get_todas(self): if debug: print "get_todas()" if not test: f=urllib.urlopen(todas_url) if test: f=open("lista.html","r") self.todas = f.read() f.close() return self.todas def get_proy_inst(self, id_act): #if not test: print "Leyendo proyecto num: %s" %(id_act) if not test: f=urllib.urlopen(act_url + id_act) if test: f=open("proy.html", "r") myparser2 = HTMLParser(id_act) myparser2.parse( f.read() ) enlaces =myparser2.get_hyperlinks() f.close return myparser2.get_project_name(id_act) def crea_directorio(self, dir_name): if os.path.isdir(dir_name): return path_completo="/" for path in dir_name.split("/"): if path != "": path_completo+=path + "/" if not os.path.isdir(path_completo): os.mkdir(path_completo) def get_proy_filelist(self, url): #check if file exists file_name=url.split('/')[-1] proy_dir=zips_dir + "/" + url.split('/')[-3] if os.path.isfile(proy_dir + "/" + file_name): #if debug: print "%s encontrado, no descargando de nuevo..." 
%(file_name) f=open(proy_dir + "/" + file_name, "r") else: if debug: print "Descargando %s" %(url) f=urllib.urlopen(url) # read file_src=[] folder=None name=None data=f.readlines() f.close for line in data: if "file src" in line: file_src.append( line.split('"')[1] ) if "folder" in line: folder=line.split('folder=')[1] folder=folder.split('"')[1] if "title" in line: name=line.split('title=')[1] name=name.split('"')[1] if not os.path.isfile(proy_dir + "/" + file_name): # save inst file proy_dir=zips_dir + "/" + folder if not os.path.isdir(proy_dir): print "Creando directorio %s" %(proy_dir) self.crea_directorio(proy_dir) #if debug: print "Guardando proyecto en:" + proy_dir + "/" + file_name f=open(proy_dir + "/" + file_name, "w") f.write("".join(data)) f.close() if folder!= None: # return data return [folder, name, file_src] def get_todas_id(self): if debug: print "get_todas_id()" self.get_todas() myparser = HTMLParser() if debug: print "parsing...%d" %len(self.todas) myparser.parse(self.todas) enlaces=myparser.get_hyperlinks() self.id_todas=myparser.get_act_names() self.actividades={} parametros={} cod_act=None counter=0 for enlace in enlaces: if counter > max_files: continue #print enlace cod_act=None if not "?" in enlace: continue enlace = enlace.split('?',1)[1] if not "id=" in enlace: continue enlace=enlace.split('=') self.actividades[enlace[1]]=[ ] counter+=1 if debug: print "Encontradas %d actividades." %len(self.actividades) return self.actividades def get_file_list(self): if debug: print "get_file_list()" if debug: print "Leyendo información de proyectos... 
(tarda un rato)" counter=0 for act in self.actividades: if counter > max_files: continue inst_file=self.get_proy_inst(act) self.actividades[act].append( inst_file ) if inst_file != None: tmp=self.get_proy_filelist(inst_file) folder=tmp[0] name=tmp[1] src=tmp[2] else: folder=None name=None src=[] self.actividades[act].append( folder ) self.actividades[act].append( name ) self.actividades[act].append( src ) counter+=1 #print self.actividades #sys.exit(1) def download_file(self, url, destino): #if debug: print "download_file(%s, %s)" %(url, destino) print ":::>>> Descargando %s" %(url.split("/")[-1]) basedir="/".join(destino.split("/")[:-1]) self.crea_directorio(basedir) f=urllib.urlopen(url) data=f.read() f.close f=open(destino, "w") f.write(data) f.close() def get_zips(self): if debug: print "get_zips()" counter=1 for id_act in self.actividades: if counter > max_files : continue if len(self.actividades[id_act]) == 0: continue #print self.actividades[id_act] if not self.actividades[id_act][1]: continue proy_dir=zips_dir + "/" + self.actividades[id_act][1] files=self.actividades[id_act][3] #check for files for _file in files: if not os.path.isfile(proy_dir + "/" + _file): #http://clic.xtec.net/projects/sis2x2/jclic/sis2x2.jclic.inst url=proy_url + "/" + self.actividades[id_act][1] + "/jclic/" + _file #print url self.download_file(url, proy_dir + "/" + _file) else: print "El archivo %s ya existe" %(_file) counter+=1 def read_jclic_xml(self, data): parsed={} import StringIO xmldoc = minidom.parse(StringIO.StringIO(str(data))) try: parsed["title"]=xmldoc.firstChild.childNodes[1].childNodes[1].firstChild.nodeValue except: pass try: parsed["revision_date"]=xmldoc.firstChild.childNodes[1].childNodes[3].getAttribute("date") except: pass try: parsed["revision_description"]=xmldoc.firstChild.childNodes[1].childNodes[3].getAttribute("description") except: pass try: parsed["author_mail"]=xmldoc.firstChild.childNodes[1].childNodes[7].getAttribute("mail") except: pass try: 
parsed["author_name"]=xmldoc.firstChild.childNodes[1].childNodes[7].getAttribute("name") except: pass try: parsed["language"]=xmldoc.firstChild.childNodes[1].childNodes[9].firstChild.nodeValue except: pass try: parsed["description"]=xmldoc.firstChild.childNodes[1].childNodes[11].childNodes[1].firstChild.toxml() except: pass try: parsed["descriptors"]=xmldoc.firstChild.childNodes[1].childNodes[13].firstChild.nodeValue except: pass try: parsed["descriptors_area"]=xmldoc.firstChild.childNodes[1].childNodes[13].getAttribute("area") except: pass try: parsed["descriptors_level"]= xmldoc.firstChild.childNodes[1].childNodes[13].getAttribute("level") except: pass return parsed def read_jclic_from_zip(self, zip_file): #print "Reading ZIP %s" %(zip_file) """ z = zipfile.ZipFile(zip_file, "r") for filename in z.namelist(): if filename.split(".")[-1] != "jclic" : continue print "Parseando %s" %filename bytes=z.read(filename) data=self.read_jclic_xml(bytes) print data return data """ try: z = zipfile.ZipFile(zip_file, "r") for filename in z.namelist(): if filename.split(".")[-1] != "jclic" : continue print "Parseando %s" %filename bytes=z.read(filename) data=self.read_jclic_xml(bytes) print data return data except: pass #print "Error reading ZIP file %s" %(zip_file.split("/")[-1] ) def read_zips(self): counter=0 self.zip_files={} for id_act in self.actividades: if counter > max_files: continue if len(self.actividades[id_act]) == 0: continue #print self.actividades[id_act] if not self.actividades[id_act][1]: continue proy_dir=zips_dir + "/" + self.actividades[id_act][1] files=self.actividades[id_act][3] for _file in files: if _file.split(".")[-1] == "zip": self.zip_files[id_act]=[zips_dir + "/" + self.actividades[id_act][1] + "/" + _file] self.read_jclic_from_zip(zips_dir + "/" + self.actividades[id_act][1] + "/" + _file) #print self.zip_files ########################################## def usage(): print "" print "jclic_downloader" print " Usage:" print " --help (this help)" 
print " --debug (show verbose text)" print "" print " --update-inst (update/download all inst files)" print " --update-zips (parse ints files and get jclic.zip files)" print " --update-imgs (get all image activities)" print "" print " --read-zips (read all zips info)" # parametros de arranque options=["help", "debug", "update-inst", "update-zips", "update-imgs", "max=", "read-zips"] try: opts, args = getopt.getopt(sys.argv[1:], ":hd", options) except getopt.error, msg: print msg print "for command line options use jclic_downloader --help" sys.exit(2) mode=0 max_files=MAX # process options for o, a in opts: #print o #print a #print "-----" if o in ("-d", "--debug"): print "DEBUG ACTIVE" debug = True if o == "--update-inst": mode=1 if o == "--update-zips": mode=2 if o == "--update-imgs": mode=3 if o == "--read-zips": mode=4 if o == "--max": max_files=int(a) if o in ("-h", "--help"): usage() sys.exit() ########################################## # self.actividades es un diccionario # * el key es el id de actividad # * el valor es una lista # lista[0] = fichero jclic.inst # lista[1] = directorio de descarga # lista[2] = Nombre de actividad (sacado del jclic.inst) # lista[3] = otra lista con los ficheros que contiene if __name__ == "__main__": if max_files != MAX: print "Límite de número de actividades=%d" %(max_files) app = JclicDownloader() app.get_todas_id() app.get_file_list() if mode == 2: app.get_zips() if mode == 3: app.get_imgs() if mode == 4: app.read_zips() sys.exit(0)
gpl-2.0
lscheinkman/nupic.core
bindings/py/tests/connections_test.py
9
18655
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2017, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """Unit tests for connections classes""" import numpy as np import unittest from nupic.bindings.math import Random, SparseMatrixConnections class ConnectionsTest(unittest.TestCase): def test_computeActivity(self): for (name, cells, inputs, activeInputs, initialPermanence, expected) in (("Basic test", [1, 2, 3], [42, 43, 44], [42, 44], 0.45, [2, 2, 2]), ("Small permanence", [1, 2, 3], [42, 43, 44], [42, 44], 0.01, [2, 2, 2]), ("No segments", [], [42, 43, 44], [42, 44], 0.45, []), ("No active inputs", [1, 2, 3], [42, 43, 44], [], 0.45, [0, 0, 0]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses(segments, inputs, initialPermanence) overlaps = connections.computeActivity(activeInputs) np.testing.assert_equal(overlaps[segments], expected, name) def test_computeActivity_thresholded(self): for (name, cells, inputs, activeInputs, initialPermanence, connectedPermanence, expected) in (("Accepted", [1, 2, 3], [42, 43, 44], 
[42, 44], 0.55, 0.5, [2, 2, 2]), ("Rejected", [1, 2, 3], [42, 43, 44], [42, 44], 0.55, 0.6, [0, 0, 0]), ("No segments", [], [42, 43, 44], [42, 44], 0.55, 0.5, []), ("No active inputs", [1, 2, 3], [42, 43, 44], [], 0.55, 0.5, [0, 0, 0]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses(segments, inputs, initialPermanence) overlaps = connections.computeActivity(activeInputs, connectedPermanence) np.testing.assert_equal(overlaps[segments], expected, name) def test_adjustSynapses(self): for (name, cells, inputs, adjustedSegments, activeInputs, initialPermanence, activeDelta, inactiveDelta, connectedPermanence, expected) in (("Basic test", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.45, 0.1, -0.1, 0.5, [2, 0, 2]), ("Reward inactive", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.45, -0.1, 0.1, 0.5, [1, 0, 1]), ("No segments", [1, 2, 3], [42, 43, 44], [], [42, 44], 0.45, 0.1, -0.1, 0.5, [0, 0, 0]), ("No active synapses", [1, 2, 3], [42, 43, 44], [0, 2], [], 0.45, 0.1, -0.1, 0.5, [0, 0, 0]), ("Delta of zero", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.55, 0.0, 0.0, 0.5, [3, 3, 3]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses(segments, inputs, initialPermanence) connections.adjustSynapses(segments[adjustedSegments], activeInputs, activeDelta, inactiveDelta) overlaps = connections.computeActivity(inputs, connectedPermanence) np.testing.assert_equal(overlaps[segments], expected, name) def test_adjustActiveSynapses(self): for (name, cells, inputs, adjustedSegments, activeInputs, initialPermanence, delta, connectedPermanence, expected) in (("Basic test", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.45, 0.1, 0.5, [2, 0, 2]), ("Negative increment", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.55, -0.1, 0.5, [1, 3, 1]), ("No segments", [1, 2, 3], [42, 43, 44], [], [42, 44], 0.45, 0.1, 0.5, [0, 0, 0]), ("No active synapses", [1, 
2, 3], [42, 43, 44], [0, 2], [], 0.45, 0.1, 0.5, [0, 0, 0]), ("Delta of zero", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.55, 0.0, 0.5, [3, 3, 3]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses(segments, inputs, initialPermanence) connections.adjustActiveSynapses(segments[adjustedSegments], activeInputs, delta) overlaps = connections.computeActivity(inputs, connectedPermanence) np.testing.assert_equal(overlaps[segments], expected, name) def test_adjustInactiveSynapses(self): for (name, cells, inputs, adjustedSegments, activeInputs, initialPermanence, delta, connectedPermanence, expected) in (("Basic test", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.45, 0.1, 0.5, [1, 0, 1]), ("Negative increment", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.55, -0.1, 0.5, [2, 3, 2]), ("No segments", [1, 2, 3], [42, 43, 44], [], [42, 44], 0.45, 0.1, 0.5, [0, 0, 0]), ("No active synapses", [1, 2, 3], [42, 43, 44], [0, 2], [], 0.45, 0.1, 0.5, [3, 0, 3]), ("Delta of zero", [1, 2, 3], [42, 43, 44], [0, 2], [42, 44], 0.55, 0.0, 0.5, [3, 3, 3]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses(segments, inputs, initialPermanence) connections.adjustInactiveSynapses(segments[adjustedSegments], activeInputs, delta) overlaps = connections.computeActivity(inputs, connectedPermanence) np.testing.assert_equal(overlaps[segments], expected, name) def test_whenPermanenceFallsBelowZero(self): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments([1, 2, 3]) connections.growSynapses(segments, [42, 43], 0.05) connections.adjustSynapses(segments, [42, 43], -0.06, 0.0) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments), [0, 0, 0]) connections.growSynapses(segments, [42, 43], 0.05) connections.adjustSynapses(segments, [], 0.0, -0.06) 
np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments), [0, 0, 0]) connections.growSynapses(segments, [42, 43], 0.05) connections.adjustActiveSynapses(segments, [42, 43], -0.06) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments), [0, 0, 0]) connections.growSynapses(segments, [42, 43], 0.05) connections.adjustInactiveSynapses(segments, [], -0.06) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments), [0, 0, 0]) def test_growSynapses(self): for (name, cells, growingSegments, presynapticInputs, activeInputs, initialPermanence, connectedPermanence, expected) in (("Basic test", [1, 2, 3], [0, 2], [42, 43, 44], [42, 43], 0.55, 0.5, [2, 0, 2]), ("No segments selected", [1, 2, 3], [], [42, 43, 44], [42, 43], 0.55, 0.5, [0, 0, 0]), ("No inputs selected", [1, 2, 3], [0, 2], [], [42, 43], 0.55, 0.5, [0, 0, 0]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses(segments[growingSegments], presynapticInputs, initialPermanence) overlaps = connections.computeActivity(activeInputs, connectedPermanence) np.testing.assert_equal(overlaps[segments], expected, name) def test_growSynapsesToSample_single(self): rng = Random() for (name, cells, growingSegments, initialConnectedInputs, presynapticInputs, activeInputs, initialPermanence, connectedPermanence, sampleSize, expected) in (("Basic test", [1, 2, 3], [0, 2], [], [42, 43, 44, 45], [42, 43, 44, 45], 0.55, 0.5, 2, [2, 0, 2]), ("One already connected", [1, 2, 3], [0, 2], [42], [42, 43, 44, 45], [42, 43, 44, 45], 0.55, 0.5, 2, [3, 0, 3]), ("Higher sample size than axon count", [1, 2, 3], [0, 2], [], [42, 43, 44, 45], [42, 43, 44, 45], 0.55, 0.5, 10, [4, 0, 4]), ("Higher sample size than available axon count", [1, 2, 3], [0, 2], [42, 43], [42, 43, 44, 45], [42, 43, 44, 45], 0.55, 0.5, 3, [4, 0, 4]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) 
connections.growSynapses( segments[growingSegments], initialConnectedInputs, initialPermanence) connections.growSynapsesToSample( segments[growingSegments], presynapticInputs, sampleSize, initialPermanence, rng) overlaps = connections.computeActivity(activeInputs, connectedPermanence) np.testing.assert_equal(overlaps[segments], expected, name) for (name, cells, growingSegments, initialConnectedInputs, presynapticInputs, activeInputs, initialPermanence, connectedPermanence, sampleSize) in (("Basic randomness test", [1, 2, 3], [0, 2], [], [42, 43, 44, 45], [42, 43], 0.55, 0.5, 2), ): # Activate a subset of the inputs. The resulting overlaps should # differ on various trials. firstResult = None differingResults = False for _ in xrange(20): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses( segments[growingSegments], initialConnectedInputs, initialPermanence) connections.growSynapsesToSample( segments[growingSegments], presynapticInputs, sampleSize, initialPermanence, rng) overlaps = connections.computeActivity(activeInputs, connectedPermanence) if firstResult is None: firstResult = overlaps[segments] else: differingResults = not np.array_equal(overlaps[segments], firstResult) if differingResults: break self.assertTrue(differingResults, name) def test_growSynapsesToSample_multi(self): rng = Random() for (name, cells, growingSegments, initialConnectedInputs, presynapticInputs, activeInputs, initialPermanence, connectedPermanence, sampleSizes, expected) in (("Basic test", [1, 2, 3], [0, 2], [], [42, 43, 44, 45], [42, 43, 44, 45], 0.55, 0.5, [2, 3], [2, 0, 3]), ("One already connected", [1, 2, 3], [0, 2], [42], [42, 43, 44, 45], [42, 43, 44, 45], 0.55, 0.5, [1, 2], [2, 0, 3]), ("Higher sample size than axon count", [1, 2, 3], [0, 2], [], [42, 43, 44, 45], [42, 43, 44, 45], 0.55, 0.5, [5, 10], [4, 0, 4]), ("Higher sample size than available axon count", [1, 2, 3], [0, 2], [42, 43], [42, 43, 44, 45], [42, 
43, 44, 45], 0.55, 0.5, [3, 3], [4, 0, 4]) ): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses( segments[growingSegments], initialConnectedInputs, initialPermanence) connections.growSynapsesToSample( segments[growingSegments], presynapticInputs, sampleSizes, initialPermanence, rng) overlaps = connections.computeActivity(activeInputs, connectedPermanence) np.testing.assert_equal(overlaps[segments], expected, name) for (name, cells, growingSegments, initialConnectedInputs, presynapticInputs, activeInputs, initialPermanence, connectedPermanence, sampleSizes) in (("Basic randomness test", [1, 2, 3], [0, 2], [], [42, 43, 44, 45], [42, 43], 0.55, 0.5, [2, 3]), ): # Activate a subset of the inputs. The resulting overlaps should # differ on various trials. firstResult = None differingResults = False for _ in xrange(20): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments(cells) connections.growSynapses( segments[growingSegments], initialConnectedInputs, initialPermanence) connections.growSynapsesToSample( segments[growingSegments], presynapticInputs, sampleSizes, initialPermanence, rng) overlaps = connections.computeActivity(activeInputs, connectedPermanence) if firstResult is None: firstResult = overlaps[segments] else: differingResults = not np.array_equal(overlaps[segments], firstResult) if differingResults: break self.assertTrue(differingResults, name) def test_clipPermanences(self): connections = SparseMatrixConnections(2048, 2048) # Destroy synapses with permanences <= 0.0 segments = connections.createSegments([1, 2, 3]) connections.growSynapses(segments, [42, 43, 44], 0.05) connections.growSynapses(segments, [45, 46], 0.1) connections.adjustInactiveSynapses(segments, [], -0.1) connections.clipPermanences(segments) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments), [0, 0, 0]) # Clip permanences to 1.0 connections.growSynapses(segments, 
[42, 43, 44], 0.95) connections.adjustInactiveSynapses(segments, [], 0.50) connections.clipPermanences(segments) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments), [3, 3, 3]) connections.adjustInactiveSynapses(segments, [], -0.5) overlaps1 = connections.computeActivity([42, 43, 44], 0.49) overlaps2 = connections.computeActivity([42, 43, 44], 0.51) np.testing.assert_equal(overlaps1, [3, 3, 3]) np.testing.assert_equal(overlaps2, [0, 0, 0]) def test_mapSegmentsToSynapseCounts(self): connections = SparseMatrixConnections(2048, 2048) segments = connections.createSegments([1, 2, 3]) connections.growSynapses(segments, [42, 43, 44], 0.5) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments), [3, 3, 3]) segments2 = connections.createSegments([4, 5]) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts(segments2), [0, 0]) np.testing.assert_equal(connections.mapSegmentsToSynapseCounts([]), [])
agpl-3.0
ultilix/catawampus
tr/cwmpbool.py
6
1433
# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Boolean handling for CWMP. TR-069 Amendment 3, Annex A says: Boolean, where the allowed values are "0", "1", "true", and "false". The values "1" and "true" are considered interchangeable, where both equivalently represent the logical value true. Similarly, the values "0" and "false" are considered interchangeable, where both equivalently represent the logical value false. """ __author__ = 'dgentry@google.com (Denton Gentry)' def format(arg): """Print a CWMP boolean object.""" return '1' if arg else '0' def parse(arg): lower = str(arg).lower() if lower == 'false' or lower == '0': return False elif lower == 'true' or lower == '1': return True else: raise ValueError('Invalid CWMP boolean') def valid(arg): # pylint: disable-msg=W0702 try: parse(arg) except: return False return True
apache-2.0
adamnew123456/myweb
myweb/frontend/cli.py
1
9534
""" A command-line based frontend for interacting with myweb. """ from myweb.backend import config, db, query, utils import argparse import os import re import sys import tempfile HELP = """myweb-cli - A command-line interface to myweb. Usage: myweb-cli <command> <args> Commands: search QUERY Searches the database using the given query. print URL Shows the content of the URL, including backlinks, to stdout. view [--no-backlinks] URL Dumps the content of the given URL to stdout. view-backlinks URL Dumps the list of backlinks for the given URL to stdout. view-tags URL Dumps the list of tags for the given URL to stdout. create URL [TAG...] Creates a new article with the given tag, and with the contents of the article coming from stdin. update URL Updates the content of the URL from the contents of stdin. edit URL Invokes $VISUAL (or $EDITOR) on the content of the given URL, and then saves the result back into the database. set-tags URL [TAG...] Updates the list of tags for the URL. delete Removes the given URL from the database. help Show a complete help page. """ def load_config(): """ Loads a configuration object. """ return config.load_config({}) def init_db(config_opts): """ Loads the database. """ db.load_database(config_opts['myweb']['db']) def main(): "Parses the command line and initializes the given action." 
arg_parser = argparse.ArgumentParser() sub_args = arg_parser.add_subparsers(help='Commands', dest='command') help_parser = sub_args.add_parser('help', help='Shows a complete help page') search_parser = sub_args.add_parser('search', help='Search the database, printing out a list of matching URLs') search_parser.add_argument('QUERY', help='A well-formed myweb query') print_parser = sub_args.add_parser('print', help='Prints the article for the URL, plus backlinks, to stdout.') print_parser.add_argument('URL', help='A URL which exists in the database') view_parser = sub_args.add_parser('view', help='Dump the article for the URL to stdout') view_parser.add_argument('URL', help='A URL which exists in the database') view_backlinks_parser = sub_args.add_parser('view-backlinks', help='Dumps the backlinks of the given article to stdout') view_backlinks_parser.add_argument('URL', help='A URL which exists in the database') view_tags_parser = sub_args.add_parser('view-tags', help='Dumps the tags of the given article to stdout') view_tags_parser.add_argument('URL', help='A URL which exists in the database') create_parser = sub_args.add_parser('create', help='Adds the article for the URL by reading stdin') create_parser.add_argument('URL', help='A URL which does not exist in the database') create_parser.add_argument('TAGS', nargs='+', help='The tags to give to the new article') update_parser = sub_args.add_parser('update', help='Replaces the article for the URL by reading stdin') update_parser.add_argument('URL', help='A URL which exists in the database') edit_parser = sub_args.add_parser('edit', help='Invokes $VISUAL (or $EDITOR) to edit an article') edit_parser.add_argument('URL', help='A URL which exists in the database') set_tags_parser = sub_args.add_parser('set-tags', help='Sets the list of tags on an article') set_tags_parser.add_argument('URL', help='A URL which exists in the database') set_tags_parser.add_argument('TAGS', nargs='+', help='The tags to give to the 
article') delete_parser = sub_args.add_parser('delete', help='Removes an article from the database') delete_parser.add_argument('URL', help='A URL which exists in the database') arg_context = arg_parser.parse_args(sys.argv[1:]) if arg_context.command is None: # We weren't provided with a command, so show the short help listing arg_parser.print_usage() return 1 elif arg_context.command == 'help': arg_parser.print_help() elif arg_context.command == 'search': config_opts = load_config() init_db(config_opts) try: parsed = query.parse_query(arg_context.QUERY) arg_contexts = db.execute_query(parsed) for arg_context in arg_contexts: print(arg_context) except (IndexError, SyntaxError) as ex: print('Invalid query string "{}"'.format(arg_context.QUERY), file=sys.stderr) print('\t' + str(ex), file=sys.stderr) return 1 elif arg_context.command == 'print': config_opts = load_config() init_db(config_opts) try: article = db.get_article(arg_context.URL) except KeyError: print('No article exists for', arg_context.URL, file=sys.stderr) return 1 print(article.content) print('\n----- Backlinks -----') for backlink in article.backlinks: print(' - ', backlink) print('\n----- Tags -----') for tag in article.tags: print(' - ', tag) elif arg_context.command == 'view': config_opts = load_config() init_db(config_opts) try: article = db.get_article(arg_context.URL) except KeyError: print('No article exists for', arg_context.URL, file=sys.stderr) return 1 print(article.content) elif arg_context.command == 'view-backlinks': config_opts = load_config() init_db(config_opts) try: article = db.get_article(arg_context.URL) except KeyError: print('No article exists for', arg_context.URL, file=sys.stderr) return 1 for backlink in article.backlinks: print(backlink) elif arg_context.command == 'view-tags': config_opts = load_config() init_db(config_opts) try: article = db.get_article(arg_context.URL) except KeyError: print('No article exists for', arg_context.URL, file=sys.stderr) return 1 for tag in 
article.tags: print(tag) elif arg_context.command == 'create': config_opts = load_config() init_db(config_opts) article = sys.stdin.read() tags = set(arg_context.TAGS) links = utils.get_links(article) try: db.create_article(arg_context.URL, article, links, tags) except KeyError: print('Article for', arg_context.URL, 'already exists', file=sys.stderr) return 1 elif arg_context.command == 'update': config_opts = load_config() init_db(config_opts) article = sys.stdin.read() try: old_article = db.get_article(arg_context.URL) links = utils.get_links(article) db.update_article(arg_context.URL, article, links, old_article.tags) except KeyError: print('Article for', arg_context.URL, 'does not exist', file=sys.stderr) return 1 elif arg_context.command == 'edit': config_opts = load_config() init_db(config_opts) if not os.environ.get('VISUAL', ''): if not os.environ.get('EDITOR', ''): print('No setting for $VISUAL or $EDITOR', file=sys.stderr) return 1 else: editor = os.environ['EDITOR'] else: editor = os.environ['VISUAL'] try: article = db.get_article(arg_context.URL) # Dump the article to a temp file, so that the editor has # something to edit (we *could* pass the text in via stdin, but # if the user screwed up, they would have no original copy to # work from - you can't run :e! in Vim on stdin, for example). 
with tempfile.NamedTemporaryFile(mode='w+') as article_file: article_file.write(article.content) article_file.flush() os.system(editor + ' ' + article_file.name) article_file.seek(0) new_article_text = article_file.read() links = utils.get_links(new_article_text) db.update_article(arg_context.URL, new_article_text, links, article.tags) except KeyError: print('Article for', arg_context.URL, 'does not exist', file=sys.stderr) return 1 elif arg_context.command == 'set-tags': config_opts = load_config() init_db(config_opts) try: old_article = db.get_article(arg_context.URL) tags = set(arg_context.TAGS) db.update_article(arg_context.URL, old_article.content, old_article.links, tags) except KeyError: print('Article for', arg_context.URL, 'does not exist', file=sys.stderr) return 1 elif arg_context.command == 'delete': config_opts = load_config() init_db(config_opts) db.delete_article(arg_context.URL)
bsd-2-clause
Foxfanmedium/python_training
OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/prompt_toolkit/renderer.py
7
19732
""" Renders the command line on the console. (Redraws parts of the input line that were changed.) """ from __future__ import unicode_literals from prompt_toolkit.filters import to_cli_filter from prompt_toolkit.layout.mouse_handlers import MouseHandlers from prompt_toolkit.layout.screen import Point, Screen, WritePosition from prompt_toolkit.output import Output from prompt_toolkit.styles import Style from prompt_toolkit.token import Token from prompt_toolkit.utils import is_windows from six.moves import range __all__ = ( 'Renderer', 'print_tokens', ) def _output_screen_diff(output, screen, current_pos, previous_screen=None, last_token=None, is_done=False, use_alternate_screen=False, attrs_for_token=None, size=None, previous_width=0): # XXX: drop is_done """ Render the diff between this screen and the previous screen. This takes two `Screen` instances. The one that represents the output like it was during the last rendering and one that represents the current output raster. Looking at these two `Screen` instances, this function will render the difference by calling the appropriate methods of the `Output` object that only paint the changes to the terminal. This is some performance-critical code which is heavily optimized. Don't change things without profiling first. :param current_pos: Current cursor position. :param last_token: `Token` instance that represents the output attributes of the last drawn character. (Color/attributes.) :param attrs_for_token: :class:`._TokenToAttrsCache` instance. :param width: The width of the terminal. :param prevous_width: The width of the terminal during the last rendering. """ width, height = size.columns, size.rows #: Remember the last printed character. last_token = [last_token] # nonlocal #: Variable for capturing the output. write = output.write write_raw = output.write_raw # Create locals for the most used output methods. # (Save expensive attribute lookups.) 
_output_set_attributes = output.set_attributes _output_reset_attributes = output.reset_attributes _output_cursor_forward = output.cursor_forward _output_cursor_up = output.cursor_up _output_cursor_backward = output.cursor_backward # Hide cursor before rendering. (Avoid flickering.) output.hide_cursor() def reset_attributes(): " Wrapper around Output.reset_attributes. " _output_reset_attributes() last_token[0] = None # Forget last char after resetting attributes. def move_cursor(new): " Move cursor to this `new` point. Returns the given Point. " current_x, current_y = current_pos.x, current_pos.y if new.y > current_y: # Use newlines instead of CURSOR_DOWN, because this meight add new lines. # CURSOR_DOWN will never create new lines at the bottom. # Also reset attributes, otherwise the newline could draw a # background color. reset_attributes() write('\r\n' * (new.y - current_y)) current_x = 0 _output_cursor_forward(new.x) return new elif new.y < current_y: _output_cursor_up(current_y - new.y) if current_x >= width - 1: write('\r') _output_cursor_forward(new.x) elif new.x < current_x or current_x >= width - 1: _output_cursor_backward(current_x - new.x) elif new.x > current_x: _output_cursor_forward(new.x - current_x) return new def output_char(char): """ Write the output of this character. """ # If the last printed character has the same token, it also has the # same style, so we don't output it. the_last_token = last_token[0] if the_last_token and the_last_token == char.token: write(char.char) else: _output_set_attributes(attrs_for_token[char.token]) write(char.char) last_token[0] = char.token # Render for the first time: reset styling. if not previous_screen: reset_attributes() # Disable autowrap. (When entering a the alternate screen, or anytime when # we have a prompt. - In the case of a REPL, like IPython, people can have # background threads, and it's hard for debugging if their output is not # wrapped.) 
if not previous_screen or not use_alternate_screen: output.disable_autowrap() # When the previous screen has a different size, redraw everything anyway. # Also when we are done. (We meight take up less rows, so clearing is important.) if is_done or not previous_screen or previous_width != width: # XXX: also consider height?? current_pos = move_cursor(Point(0, 0)) reset_attributes() output.erase_down() previous_screen = Screen() # Get height of the screen. # (height changes as we loop over data_buffer, so remember the current value.) # (Also make sure to clip the height to the size of the output.) current_height = min(screen.height, height) # Loop over the rows. row_count = min(max(screen.height, previous_screen.height), height) c = 0 # Column counter. for y in range(row_count): new_row = screen.data_buffer[y] previous_row = previous_screen.data_buffer[y] zero_width_escapes_row = screen.zero_width_escapes[y] new_max_line_len = min(width - 1, max(new_row.keys()) if new_row else 0) previous_max_line_len = min(width - 1, max(previous_row.keys()) if previous_row else 0) # Loop over the columns. c = 0 while c < new_max_line_len + 1: new_char = new_row[c] old_char = previous_row[c] char_width = (new_char.width or 1) # When the old and new character at this position are different, # draw the output. (Because of the performance, we don't call # `Char.__ne__`, but inline the same expression.) if new_char.char != old_char.char or new_char.token != old_char.token: current_pos = move_cursor(Point(y=y, x=c)) # Send injected escape sequences to output. if c in zero_width_escapes_row: write_raw(zero_width_escapes_row[c]) output_char(new_char) current_pos = current_pos._replace(x=current_pos.x + char_width) c += char_width # If the new line is shorter, trim it. 
if previous_screen and new_max_line_len < previous_max_line_len: current_pos = move_cursor(Point(y=y, x=new_max_line_len+1)) reset_attributes() output.erase_end_of_line() # Correctly reserve vertical space as required by the layout. # When this is a new screen (drawn for the first time), or for some reason # higher than the previous one. Move the cursor once to the bottom of the # output. That way, we're sure that the terminal scrolls up, even when the # lower lines of the canvas just contain whitespace. # The most obvious reason that we actually want this behaviour is the avoid # the artifact of the input scrolling when the completion menu is shown. # (If the scrolling is actually wanted, the layout can still be build in a # way to behave that way by setting a dynamic height.) if current_height > previous_screen.height: current_pos = move_cursor(Point(y=current_height - 1, x=0)) # Move cursor: if is_done: current_pos = move_cursor(Point(y=current_height, x=0)) output.erase_down() else: current_pos = move_cursor(screen.cursor_position) if is_done or not use_alternate_screen: output.enable_autowrap() # Always reset the color attributes. This is important because a background # thread could print data to stdout and we want that to be displayed in the # default colors. (Also, if a background color has been set, many terminals # give weird artifacs on resize events.) reset_attributes() if screen.show_cursor or is_done: output.show_cursor() return current_pos, last_token[0] class HeightIsUnknownError(Exception): " Information unavailable. Did not yet receive the CPR response. " class _TokenToAttrsCache(dict): """ A cache structure that maps Pygments Tokens to :class:`.Attr`. (This is an important speed up.) 
""" def __init__(self, get_style_for_token): self.get_style_for_token = get_style_for_token def __missing__(self, token): try: result = self.get_style_for_token(token) except KeyError: result = None self[token] = result return result class Renderer(object): """ Typical usage: :: output = Vt100_Output.from_pty(sys.stdout) r = Renderer(style, output) r.render(cli, layout=...) """ def __init__(self, style, output, use_alternate_screen=False, mouse_support=False): assert isinstance(style, Style) assert isinstance(output, Output) self.style = style self.output = output self.use_alternate_screen = use_alternate_screen self.mouse_support = to_cli_filter(mouse_support) self._in_alternate_screen = False self._mouse_support_enabled = False self._bracketed_paste_enabled = False # Waiting for CPR flag. True when we send the request, but didn't got a # response. self.waiting_for_cpr = False self.reset(_scroll=True) def reset(self, _scroll=False, leave_alternate_screen=True): # Reset position self._cursor_pos = Point(x=0, y=0) # Remember the last screen instance between renderers. This way, # we can create a `diff` between two screens and only output the # difference. It's also to remember the last height. (To show for # instance a toolbar at the bottom position.) self._last_screen = None self._last_size = None self._last_token = None # When the style hash changes, we have to do a full redraw as well as # clear the `_attrs_for_token` dictionary. self._last_style_hash = None self._attrs_for_token = None # Default MouseHandlers. (Just empty.) self.mouse_handlers = MouseHandlers() # Remember the last title. Only set the title when it changes. self._last_title = None #: Space from the top of the layout, until the bottom of the terminal. #: We don't know this until a `report_absolute_cursor_row` call. self._min_available_height = 0 # In case of Windown, also make sure to scroll to the current cursor # position. (Only when rendering the first time.) 
if is_windows() and _scroll: self.output.scroll_buffer_to_prompt() # Quit alternate screen. if self._in_alternate_screen and leave_alternate_screen: self.output.quit_alternate_screen() self._in_alternate_screen = False # Disable mouse support. if self._mouse_support_enabled: self.output.disable_mouse_support() self._mouse_support_enabled = False # Disable bracketed paste. if self._bracketed_paste_enabled: self.output.disable_bracketed_paste() self._bracketed_paste_enabled = False # Flush output. `disable_mouse_support` needs to write to stdout. self.output.flush() @property def height_is_known(self): """ True when the height from the cursor until the bottom of the terminal is known. (It's often nicer to draw bottom toolbars only if the height is known, in order to avoid flickering when the CPR response arrives.) """ return self.use_alternate_screen or self._min_available_height > 0 or \ is_windows() # On Windows, we don't have to wait for a CPR. @property def rows_above_layout(self): """ Return the number of rows visible in the terminal above the layout. """ if self._in_alternate_screen: return 0 elif self._min_available_height > 0: total_rows = self.output.get_size().rows last_screen_height = self._last_screen.height if self._last_screen else 0 return total_rows - max(self._min_available_height, last_screen_height) else: raise HeightIsUnknownError('Rows above layout is unknown.') def request_absolute_cursor_position(self): """ Get current cursor position. For vt100: Do CPR request. (answer will arrive later.) For win32: Do API call. (Answer comes immediately.) """ # Only do this request when the cursor is at the top row. (after a # clear or reset). We will rely on that in `report_absolute_cursor_row`. assert self._cursor_pos.y == 0 # For Win32, we have an API call to get the number of rows below the # cursor. 
if is_windows(): self._min_available_height = self.output.get_rows_below_cursor_position() else: if self.use_alternate_screen: self._min_available_height = self.output.get_size().rows else: # Asks for a cursor position report (CPR). self.waiting_for_cpr = True self.output.ask_for_cpr() def report_absolute_cursor_row(self, row): """ To be called when we know the absolute cursor position. (As an answer of a "Cursor Position Request" response.) """ # Calculate the amount of rows from the cursor position until the # bottom of the terminal. total_rows = self.output.get_size().rows rows_below_cursor = total_rows - row + 1 # Set the self._min_available_height = rows_below_cursor self.waiting_for_cpr = False def render(self, cli, layout, is_done=False): """ Render the current interface to the output. :param is_done: When True, put the cursor at the end of the interface. We won't print any changes to this part. """ output = self.output # Enter alternate screen. if self.use_alternate_screen and not self._in_alternate_screen: self._in_alternate_screen = True output.enter_alternate_screen() # Enable bracketed paste. if not self._bracketed_paste_enabled: self.output.enable_bracketed_paste() self._bracketed_paste_enabled = True # Enable/disable mouse support. needs_mouse_support = self.mouse_support(cli) if needs_mouse_support and not self._mouse_support_enabled: output.enable_mouse_support() self._mouse_support_enabled = True elif not needs_mouse_support and self._mouse_support_enabled: output.disable_mouse_support() self._mouse_support_enabled = False # Create screen and write layout to it. size = output.get_size() screen = Screen() screen.show_cursor = False # Hide cursor by default, unless one of the # containers decides to display it. mouse_handlers = MouseHandlers() if is_done: height = 0 # When we are done, we don't necessary want to fill up until the bottom. 
else: height = self._last_screen.height if self._last_screen else 0 height = max(self._min_available_height, height) # When te size changes, don't consider the previous screen. if self._last_size != size: self._last_screen = None # When we render using another style, do a full repaint. (Forget about # the previous rendered screen.) # (But note that we still use _last_screen to calculate the height.) if self.style.invalidation_hash() != self._last_style_hash: self._last_screen = None self._attrs_for_token = None if self._attrs_for_token is None: self._attrs_for_token = _TokenToAttrsCache(self.style.get_attrs_for_token) self._last_style_hash = self.style.invalidation_hash() layout.write_to_screen(cli, screen, mouse_handlers, WritePosition( xpos=0, ypos=0, width=size.columns, height=(size.rows if self.use_alternate_screen else height), extended_height=size.rows, )) # When grayed. Replace all tokens in the new screen. if cli.is_aborting or cli.is_exiting: screen.replace_all_tokens(Token.Aborted) # Process diff and write to output. self._cursor_pos, self._last_token = _output_screen_diff( output, screen, self._cursor_pos, self._last_screen, self._last_token, is_done, use_alternate_screen=self.use_alternate_screen, attrs_for_token=self._attrs_for_token, size=size, previous_width=(self._last_size.columns if self._last_size else 0)) self._last_screen = screen self._last_size = size self.mouse_handlers = mouse_handlers # Write title if it changed. new_title = cli.terminal_title if new_title != self._last_title: if new_title is None: self.output.clear_title() else: self.output.set_title(new_title) self._last_title = new_title output.flush() def erase(self, leave_alternate_screen=True, erase_title=True): """ Hide all output and put the cursor back at the first line. This is for instance used for running a system command (while hiding the CLI) and later resuming the same CLI.) 
:param leave_alternate_screen: When True, and when inside an alternate screen buffer, quit the alternate screen. :param erase_title: When True, clear the title from the title bar. """ output = self.output output.cursor_backward(self._cursor_pos.x) output.cursor_up(self._cursor_pos.y) output.erase_down() output.reset_attributes() output.enable_autowrap() output.flush() # Erase title. if self._last_title and erase_title: output.clear_title() self.reset(leave_alternate_screen=leave_alternate_screen) def clear(self): """ Clear screen and go to 0,0 """ # Erase current output first. self.erase() # Send "Erase Screen" command and go to (0, 0). output = self.output output.erase_screen() output.cursor_goto(0, 0) output.flush() self.request_absolute_cursor_position() def print_tokens(output, tokens, style): """ Print a list of (Token, text) tuples in the given style to the output. """ assert isinstance(output, Output) assert isinstance(style, Style) # Reset first. output.reset_attributes() output.enable_autowrap() # Print all (token, text) tuples. attrs_for_token = _TokenToAttrsCache(style.get_attrs_for_token) for token, text in tokens: attrs = attrs_for_token[token] if attrs: output.set_attributes(attrs) else: output.reset_attributes() output.write(text) # Reset again. output.reset_attributes() output.flush()
apache-2.0
Billups/TileStache
TileStache/Goodies/VecTiles/topojson.py
4
8259
from shapely.wkb import loads
import json

from ... import getTile
from ...Core import KnownUnknown

def get_tiles(names, config, coord):
    ''' Retrieve a list of named TopoJSON layer tiles from a TileStache config.

        Check integrity and compatibility of each, looking at known layers,
        correct JSON mime-types, "Topology" in the type attributes, and
        matching affine transformations.

        Raises KnownUnknown on any unknown layer name, non-JSON response,
        non-Topology payload, or mismatched transforms.
    '''
    unknown_layers = set(names) - set(config.layers.keys())

    if unknown_layers:
        raise KnownUnknown("%s.get_tiles didn't recognize %s when trying to load %s." % (__name__, ', '.join(unknown_layers), ', '.join(names)))

    layers = [config.layers[name] for name in names]
    mimes, bodies = zip(*[getTile(layer, coord, 'topojson') for layer in layers])

    # Every sub-layer must respond with a JSON mime-type.
    bad_mimes = [(name, mime) for (mime, name) in zip(mimes, names) if not mime.endswith('/json')]

    if bad_mimes:
        raise KnownUnknown('%s.get_tiles encountered a non-JSON mime-type in %s sub-layer: "%s"' % ((__name__, ) + bad_mimes[0]))

    topojsons = [json.loads(body.decode('utf8')) for body in bodies]

    # Every payload must be a TopoJSON "Topology" object.
    bad_types = [(name, topo['type']) for (topo, name) in zip(topojsons, names) if topo['type'] != 'Topology']

    if bad_types:
        raise KnownUnknown('%s.get_tiles encountered a non-Topology type in %s sub-layer: "%s"' % ((__name__, ) + bad_types[0]))

    # All tiles must share one affine transform, or their arcs can't be merged.
    transforms = [topo['transform'] for topo in topojsons]
    unique_xforms = set([tuple(xform['scale'] + xform['translate']) for xform in transforms])

    if len(unique_xforms) > 1:
        raise KnownUnknown('%s.get_tiles encountered incompatible transforms: %s' % (__name__, list(unique_xforms)))

    return topojsons

def update_arc_indexes(geometry, merged_arcs, old_arcs):
    ''' Updated geometry arc indexes, and add arcs to merged_arcs along the way.

        Arguments are modified in-place, and nothing is returned.
        Points carry no arcs, so Point/MultiPoint geometries pass through.
    '''
    if geometry['type'] in ('Point', 'MultiPoint'):
        return

    elif geometry['type'] == 'LineString':
        for (arc_index, old_arc) in enumerate(geometry['arcs']):
            geometry['arcs'][arc_index] = len(merged_arcs)
            merged_arcs.append(old_arcs[old_arc])

    elif geometry['type'] == 'Polygon':
        for ring in geometry['arcs']:
            for (arc_index, old_arc) in enumerate(ring):
                ring[arc_index] = len(merged_arcs)
                merged_arcs.append(old_arcs[old_arc])

    elif geometry['type'] == 'MultiLineString':
        for part in geometry['arcs']:
            for (arc_index, old_arc) in enumerate(part):
                part[arc_index] = len(merged_arcs)
                merged_arcs.append(old_arcs[old_arc])

    elif geometry['type'] == 'MultiPolygon':
        for part in geometry['arcs']:
            for ring in part:
                for (arc_index, old_arc) in enumerate(ring):
                    ring[arc_index] = len(merged_arcs)
                    merged_arcs.append(old_arcs[old_arc])

    else:
        raise NotImplementedError("Can't do %s geometries" % geometry['type'])

def get_transform(bounds, size=1024):
    ''' Return a TopoJSON transform dictionary and a point-transforming function.

        Size is the tile size in pixels and sets the implicit output resolution.
    '''
    tx, ty = bounds[0], bounds[1]
    sx, sy = (bounds[2] - bounds[0]) / size, (bounds[3] - bounds[1]) / size

    def forward(lon, lat):
        ''' Transform a longitude and latitude to TopoJSON integer space.
        '''
        return int(round((lon - tx) / sx)), int(round((lat - ty) / sy))

    return dict(translate=(tx, ty), scale=(sx, sy)), forward

def diff_encode(line, transform):
    ''' Differentially encode a shapely linestring or ring.

        The first coordinate is absolute; the rest are per-point deltas,
        with zero-length deltas dropped (per the TopoJSON spec).
    '''
    coords = [transform(x, y) for (x, y) in line.coords]

    pairs = zip(coords[:], coords[1:])
    diffs = [(x2 - x1, y2 - y1) for ((x1, y1), (x2, y2)) in pairs]

    return coords[:1] + [(x, y) for (x, y) in diffs if (x, y) != (0, 0)]

def decode(file):
    ''' Stub function to decode a TopoJSON file into a list of features.

        Not currently implemented, modeled on geojson.decode().
    '''
    raise NotImplementedError('topojson.decode() not yet written')

def encode(file, features, bounds, is_clipped):
    ''' Encode a list of (WKB, property dict) features into a TopoJSON stream.

        Also accept three-element tuples as features: (WKB, property dict, id).

        Geometries in the features list are assumed to be unprojected lon, lats.
        Bounds are given in geographic coordinates as (xmin, ymin, xmax, ymax).
    '''
    transform, forward = get_transform(bounds)
    geometries, arcs = list(), list()

    for feature in features:
        shape = loads(feature[0])
        geometry = dict(properties=feature[1])
        geometries.append(geometry)

        if is_clipped:
            geometry.update(dict(clipped=True))

        # ID is an optional *third* element in the feature tuple.
        # (Was ">= 2", which raised IndexError on plain two-element features.)
        if len(feature) >= 3:
            geometry.update(dict(id=feature[2]))

        if shape.type == 'GeometryCollection':
            # Empty or collection geometries carry nothing encodable; drop them.
            geometries.pop()
            continue

        elif shape.type == 'Point':
            geometry.update(dict(type='Point', coordinates=forward(shape.x, shape.y)))

        elif shape.type == 'LineString':
            geometry.update(dict(type='LineString', arcs=[len(arcs)]))
            arcs.append(diff_encode(shape, forward))

        elif shape.type == 'Polygon':
            geometry.update(dict(type='Polygon', arcs=[]))

            rings = [shape.exterior] + list(shape.interiors)

            for ring in rings:
                geometry['arcs'].append([len(arcs)])
                arcs.append(diff_encode(ring, forward))

        elif shape.type == 'MultiPoint':
            geometry.update(dict(type='MultiPoint', coordinates=[]))

            for point in shape.geoms:
                geometry['coordinates'].append(forward(point.x, point.y))

        elif shape.type == 'MultiLineString':
            geometry.update(dict(type='MultiLineString', arcs=[]))

            for line in shape.geoms:
                geometry['arcs'].append([len(arcs)])
                arcs.append(diff_encode(line, forward))

        elif shape.type == 'MultiPolygon':
            geometry.update(dict(type='MultiPolygon', arcs=[]))

            for polygon in shape.geoms:
                rings = [polygon.exterior] + list(polygon.interiors)
                polygon_arcs = []

                for ring in rings:
                    polygon_arcs.append([len(arcs)])
                    arcs.append(diff_encode(ring, forward))

                geometry['arcs'].append(polygon_arcs)

        else:
            raise NotImplementedError("Can't do %s geometries" % shape.type)

    result = {
        'type': 'Topology',
        'transform': transform,
        'objects': {
            'vectile': {
                'type': 'GeometryCollection',
                'geometries': geometries
                }
            },
        'arcs': arcs
        }

    file.write(json.dumps(result, separators=(',', ':')).encode('utf8'))

def merge(file, names, config, coord):
    ''' Retrieve a list of TopoJSON tile responses and merge them into one.

        get_tiles() retrieves data and performs basic integrity checks.
    '''
    inputs = get_tiles(names, config, coord)

    output = {
        'type': 'Topology',
        'transform': inputs[0]['transform'],
        'objects': dict(),
        'arcs': list()
        }

    for (name, input) in zip(names, inputs):
        for (index, object) in enumerate(input['objects'].values()):
            if len(input['objects']) > 1:
                # Disambiguate multi-object inputs with an indexed name.
                output['objects']['%(name)s-%(index)d' % locals()] = object
            else:
                output['objects'][name] = object

            for geometry in object['geometries']:
                update_arc_indexes(geometry, output['arcs'], input['arcs'])

    file.write(json.dumps(output, separators=(',', ':')).encode('utf8'))
bsd-3-clause
szymex/xbmc-finnish-tv
plugin.video.yleareena/win32/Crypto/SelfTest/__init__.py
111
3418
# -*- coding: utf-8 -*-
#
#  SelfTest/__init__.py: Self-test for PyCrypto
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""Self tests

These tests should perform quickly and can ideally be used every time an
application runs.
"""

__revision__ = "$Id$"

import sys
import unittest
from StringIO import StringIO

class SelfTestError(Exception):
    """Raised by run() when any self-test fails.

    Carries the human-readable message and the unittest result object.
    """
    def __init__(self, message, result):
        Exception.__init__(self, message, result)
        self.message = message
        self.result = result

def run(module=None, verbosity=0, stream=None, tests=None, config=None, **kwargs):
    """Execute self-tests.

    This raises SelfTestError if any test is unsuccessful.

    You may optionally pass in a sub-module of SelfTest if you only want to
    perform some of the tests.  For example, the following would test only
    the hash modules:

        Crypto.SelfTest.run(Crypto.SelfTest.Hash)

    When ``stream`` is None, test output is captured in memory and replayed
    on sys.stderr only if a test fails; otherwise output goes to ``stream``.
    """
    if config is None:
        config = {}
    suite = unittest.TestSuite()
    if module is None:
        if tests is None:
            tests = get_tests(config=config)
        suite.addTests(tests)
    else:
        if tests is None:
            suite.addTests(module.get_tests(config=config))
        else:
            raise ValueError("'module' and 'tests' arguments are mutually exclusive")
    if stream is None:
        # Buffer output so a fully-successful run stays silent.
        kwargs['stream'] = StringIO()
    else:
        # BUG FIX: the caller-supplied stream was previously ignored.
        kwargs['stream'] = stream
    runner = unittest.TextTestRunner(verbosity=verbosity, **kwargs)
    result = runner.run(suite)
    if not result.wasSuccessful():
        if stream is None:
            # BUG FIX: was stream.getvalue(), but stream is None in this
            # branch -- the captured output lives in kwargs['stream'].
            sys.stderr.write(kwargs['stream'].getvalue())
        raise SelfTestError("Self-test failed", result)
    return result

def get_tests(config={}):
    """Collect and return the test suites of every PyCrypto sub-package."""
    tests = []
    from Crypto.SelfTest import Cipher;     tests += Cipher.get_tests(config=config)
    from Crypto.SelfTest import Hash;       tests += Hash.get_tests(config=config)
    from Crypto.SelfTest import Protocol;   tests += Protocol.get_tests(config=config)
    from Crypto.SelfTest import PublicKey;  tests += PublicKey.get_tests(config=config)
    from Crypto.SelfTest import Random;     tests += Random.get_tests(config=config)
    from Crypto.SelfTest import Util;       tests += Util.get_tests(config=config)
    from Crypto.SelfTest import Signature;  tests += Signature.get_tests(config=config)
    return tests

if __name__ == '__main__':
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
gpl-3.0
reingart/vb2py
vb2py/vb/test3/test/frmImage.rsrc.py
2
3258
{'stack': {'backgrounds': [{'components': [{'label': 'Set Strech', 'name': 'Command1', 'position': (248, 48), 'size': (89, 25), 'type': 'VBButton'}, {'label': 'Load New', 'name': 'Command2', 'position': (248, 88), 'size': (89, 25), 'type': 'VBButton'}, {'label': 'Make Visible', 'name': 'Command4', 'position': (248, 16), 'size': (89, 25), 'type': 'VBButton'}, {'label': 'Move ', 'name': 'Command5', 'position': (344, 16), 'size': (89, 25), 'type': 'VBButton'}, {'label': 'Size ', 'name': 'Command6', 'position': (440, 16), 'size': (89, 25), 'type': 'VBButton'}, {'label': 'Enable', 'name': 'Command7', 'position': (536, 16), 'size': (97, 25), 'type': 'VBButton'}, {'Stretch': 0, 'name': 'Image1', 'position': (16, 16), 'size': (158, 92), 'type': 'VBBitmapCanvas'}, {'name': 'Label1', 'position': (344, 56), 'text': 'Label1', 'type': 'VBStaticText'}, {'name': 'Label2', 'position': (344, 96), 'text': 'Label2', 'type': 'VBStaticText'}], 'menubar': {'menus': [], 'type': 'MenuBar'}, 'name': 'frmImage', 'position': (4, 30), 'size': (770, 271), 'style': ['resizeable'], 'title': 'Image', 'type': 'Background'}], 'name': 'Template', 'type': 'Stack'}}
gpl-3.0
qiwsir/vincent
examples/map_examples.py
11
6721
# -*- coding: utf-8 -*- """ Vincent Map Examples """ #Build a map from scratch from vincent import * world_topo = r'world-countries.topo.json' state_topo = r'us_states.topo.json' lake_topo = r'lakes_50m.topo.json' county_geo = r'us_counties.geo.json' county_topo = r'us_counties.topo.json' or_topo = r'or_counties.topo.json' vis = Visualization(width=960, height=500) vis.data['countries'] = Data( name='countries', url=world_topo, format={'type': 'topojson', 'feature': 'world-countries'} ) geo_transform = Transform( type='geopath', value="data", projection='winkel3', scale=200, translate=[480, 250] ) geo_from = MarkRef(data='countries', transform=[geo_transform]) enter_props = PropertySet( stroke=ValueRef(value='#000000'), path=ValueRef(field='path') ) update_props = PropertySet(fill=ValueRef(value='steelblue')) mark_props = MarkProperties(enter=enter_props, update=update_props) vis.marks.append( Mark(type='path', from_=geo_from, properties=mark_props) ) vis.to_json('vega.json') #Convenience Method geo_data = [{'name': 'countries', 'url': world_topo, 'feature': 'world-countries'}] vis = Map(geo_data=geo_data, scale=200) vis.to_json('vega.json') #States & Counties geo_data = [{'name': 'counties', 'url': county_topo, 'feature': 'us_counties.geo'}, {'name': 'states', 'url': state_topo, 'feature': 'us_states.geo'} ] vis = Map(geo_data=geo_data, scale=1000, projection='albersUsa') del vis.marks[1].properties.update vis.marks[0].properties.update.fill.value = '#084081' vis.marks[1].properties.enter.stroke.value = '#fff' vis.marks[0].properties.enter.stroke.value = '#7bccc4' vis.to_json('vega.json') #Choropleth import json import pandas as pd #Map the county codes we have in our geometry to those in the #county_data file, which contains additional rows we don't need with open('us_counties.topo.json', 'r') as f: get_id = json.load(f) #A little FIPS code munging new_geoms = [] for geom in get_id['objects']['us_counties.geo']['geometries']: geom['properties']['FIPS'] = 
int(geom['properties']['FIPS']) new_geoms.append(geom) get_id['objects']['us_counties.geo']['geometries'] = new_geoms with open('us_counties.topo.json', 'w') as f: json.dump(get_id, f) #Grab the FIPS codes and load them into a dataframe geometries = get_id['objects']['us_counties.geo']['geometries'] county_codes = [x['properties']['FIPS'] for x in geometries] county_df = pd.DataFrame({'FIPS': county_codes}, dtype=str) county_df = county_df.astype(int) #Read into Dataframe, cast to int for consistency df = pd.read_csv('data/us_county_data.csv', na_values=[' ']) df['FIPS'] = df['FIPS'].astype(int) #Perform an inner join, pad NA's with data from nearest county merged = pd.merge(df, county_df, on='FIPS', how='inner') merged = merged.fillna(method='pad') geo_data = [{'name': 'counties', 'url': county_topo, 'feature': 'us_counties.geo'}] vis = Map(data=merged, geo_data=geo_data, scale=1100, projection='albersUsa', data_bind='Employed_2011', data_key='FIPS', map_key={'counties': 'properties.FIPS'}) vis.marks[0].properties.enter.stroke_opacity = ValueRef(value=0.5) #Change our domain for an even inteager vis.scales['color'].domain = [0, 189000] vis.legend(title='Number Employed 2011') vis.to_json('vega.json') #Lets look at different stats vis.rebind(column='Civilian_labor_force_2011', brew='BuPu') vis.to_json('vega.json') vis.rebind(column='Unemployed_2011', brew='PuBu') vis.to_json('vega.json') vis.rebind(column='Unemployment_rate_2011', brew='YlGnBu') vis.to_json('vega.json') vis.rebind(column='Median_Household_Income_2011', brew='RdPu') vis.to_json('vega.json') #Mapping US State Level Data state_data = pd.read_csv('data/US_Unemployment_Oct2012.csv') geo_data = [{'name': 'states', 'url': state_topo, 'feature': 'us_states.geo'}] vis = Map(data=state_data, geo_data=geo_data, scale=1000, projection='albersUsa', data_bind='Unemployment', data_key='NAME', map_key={'states': 'properties.NAME'}) vis.legend(title='Unemployment (%)') vis.to_json('vega.json') #Iterating State 
Level Data yoy = pd.read_table('data/State_Unemp_YoY.txt', delim_whitespace=True) #Standardize State names to match TopoJSON for keying names = [] for row in yoy.iterrows(): pieces = row[1]['NAME'].split('_') together = ' '.join(pieces) names.append(together.title()) yoy['NAME'] = names geo_data = [{'name': 'states', 'url': state_topo, 'feature': 'us_states.geo'}] vis = Map(data=yoy, geo_data=geo_data, scale=1000, projection='albersUsa', data_bind='AUG_2012', data_key='NAME', map_key={'states': 'properties.NAME'}, brew='YlGnBu') #Custom threshold scale vis.scales[0].type='threshold' vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12] vis.legend(title='Unemployment (%)') vis.to_json('vega.json') #Rebind and set our scale again vis.rebind(column='AUG_2013', brew='YlGnBu') vis.scales[0].type='threshold' vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12] vis.to_json('vega.json') vis.rebind(column='CHANGE', brew='YlGnBu') vis.scales[0].type='threshold' vis.scales[0].domain = [-1.5, -1.3, -1.1, 0, 0.1, 0.3, 0.5, 0.8] vis.legends[0].title = "YoY Change in Unemployment (%)" vis.to_json('vega.json') #Oregon County-level population data or_data = pd.read_table('data/OR_County_Data.txt', delim_whitespace=True) or_data['July_2012_Pop']= or_data['July_2012_Pop'].astype(int) #Standardize keys with open('or_counties.topo.json', 'r') as f: counties = json.load(f) def split_county(name): parts = name.split(' ') parts.pop(-1) return ''.join(parts).upper() #A little FIPS code munging new_geoms = [] for geom in counties['objects']['or_counties.geo']['geometries']: geom['properties']['COUNTY'] = split_county(geom['properties']['COUNTY']) new_geoms.append(geom) counties['objects']['or_counties.geo']['geometries'] = new_geoms with open('or_counties.topo.json', 'w') as f: json.dump(counties, f) geo_data = [{'name': 'states', 'url': state_topo, 'feature': 'us_states.geo'}, {'name': 'or_counties', 'url': or_topo, 'feature': 'or_counties.geo'}] vis = Map(data=or_data, geo_data=geo_data, scale=3700, 
translate=[1480, 830], projection='albersUsa', data_bind='July_2012_Pop', data_key='NAME', map_key={'or_counties': 'properties.COUNTY'}) vis.marks[0].properties.update.fill.value = '#c2c2c2' vis.to_json('vega.json')
mit
F5Networks/f5-common-python
f5/bigip/tm/util/clientssl_ciphers.py
1
1459
# coding=utf-8 # # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """BIG-IP® utility module REST URI ``http://localhost/mgmt/tm/util/clientssl-ciphers`` GUI Path N/A REST Kind ``tm:util:clientssl-ciphers:*`` """ from f5.bigip.mixins import CommandExecutionMixin from f5.bigip.resource import UnnamedResource class Clientssl_Ciphers(UnnamedResource, CommandExecutionMixin): """BIG-IP® utility command .. note:: This is an unnamed resource so it has no ~Partition~Name pattern at the end of its URI. """ def __init__(self, util): super(Clientssl_Ciphers, self).__init__(util) self._meta_data['required_command_parameters'].update(('utilCmdArgs',)) self._meta_data['required_json_kind'] =\ 'tm:util:clientssl-ciphers:runstate' self._meta_data['allowed_commands'].append('run') self._meta_data['minimum_version'] = '12.1.0'
apache-2.0
8u1a/plaso
tests/multi_processing/process_info.py
3
1024
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests the class to get process information."""

import os
import unittest

from plaso.multi_processing import process_info


class ProcessInfoTest(unittest.TestCase):
  """Tests the process information object."""

  def testInitialization(self):
    """Tests the initialization."""
    pid = os.getpid()
    process_information = process_info.ProcessInfo(pid)
    # assertIsNotNone checks identity (is not None) and produces a clearer
    # failure message than assertNotEqual(..., None).
    self.assertIsNotNone(process_information)

  def testProperties(self):
    """Tests the properties."""
    pid = os.getpid()
    process_information = process_info.ProcessInfo(pid)
    # The test's own process is always running while it inspects itself.
    self.assertEqual(
        process_information.status, process_information.STATUS_RUNNING)

  def testGetMemoryInformation(self):
    """Tests the GetMemoryInformation function."""
    pid = os.getpid()
    process_information = process_info.ProcessInfo(pid)
    memory_information = process_information.GetMemoryInformation()
    self.assertIsNotNone(memory_information)


if __name__ == '__main__':
  unittest.main()
apache-2.0
Lamaw/Newzer
lemonde_extractor.py
1
6762
# -*- coding: utf-8 -*-
"""
This page defines the Implementation of an analyzer for "www.lemonde.fr"
"""

from HTMLParser import HTMLParser

from Isite_extractor import ISiteExtractor


class LeMondeExtractor(ISiteExtractor):
    """
    This class implements the Page analyzer interface for "lemonde.fr" website
    """

    def __init__(self):
        self.base_url = "http://www.lemonde.fr"

    def get_news_feed(self):
        """
        Get the news feed with newly published articles from the website home
        base_url

        :return type: str
        :return: the url on the news feed webpage
        """
        # Simple concatenation of two constant strings -- cannot fail, so the
        # previous try/except wrapper was removed.
        return self.base_url + "/actualite-en-continu/"

    def get_article_webpage_list(self, news_feed_webpage):
        """
        Get the article webpage list from the webpage containing all the newly
        added articles.

        :type news_feed_webpage: str
        :param news_feed_webpage: the html page where articles' urls are
        :return type: list()
        :return: the list of urls for each article webpage
        """
        url_list = list()
        # Use HTML parser to extract appropriates urls
        lemonde_parser = LeMondeHTMLParser()
        lemonde_parser.feed(news_feed_webpage)
        partial_url_list = lemonde_parser.links

        # add the base url of the website if not present in the article url
        for url in partial_url_list:
            if 'http' not in url:
                url_list.append(self.base_url + url)
            else:
                url_list.append(url)
        return url_list

    def get_article_text(self, article_webpage):
        """
        Extract the text of the article from the raw webpage

        :type article_webpage: str
        :param article_webpage: The webpage containing the article to extract
        :return type: str
        :return: the text from the article on a web page
        """
        lemonde_parser = LeMondeHTMLParser()
        lemonde_parser.feed(article_webpage)
        return lemonde_parser.article_data

    def get_article_category(self, article_webpage):
        """
        Extract the category of the article from the raw webpage

        :type article_webpage: str
        :param article_webpage: The webpage containing the article to extract
        :return type: str
        :return: the category from the article on a web page
                 (e.g: sport, economy, politics, etc...)
        """
        lemonde_parser = LeMondeHTMLParser()
        lemonde_parser.feed(article_webpage)
        return lemonde_parser.category

    def get_article_author(self, article_webpage):
        """
        Extract the author of the article from the raw webpage

        :type article_webpage: str
        :param article_webpage: The webpage containing the article to extract
        :return type: str
        :return: the author name from the article on a web page
        """
        # TODO: author extraction is not implemented yet; returns None.
        pass


class LeMondeHTMLParser(HTMLParser):
    """
    Class implementating some methods of the HTMLParser python lib, in order
    to acquire specific data for Lemonde website
    """

    def __init__(self):
        HTMLParser.__init__(self)             # Parents constructor
        self.links = list()                   # The list of links from the news feed
        self.article_section = False          # Flag for news feed parsing
        self.article_body = False             # Flag for article text acquisition
        self.suspend_acquisition = False      # Flag to suspend data acquisition in the article body
        self.div_open_in_article_body = 0     # Number of open divs inside the main article div
        self.article_data = ""                # Stores the text from the article
        self.category = ""                    # Stores the category of the article

    def handle_starttag(self, tag, attrs):
        """
        Method that manage tag opening in the HTML source code, to retrieve
        article content
        """
        try:
            if tag == "article":
                # Set flag for news feed parsing to true
                for name, value in attrs:
                    if name == 'class' and 'grid_12 alpha enrichi' in value:
                        self.article_section = True

            elif tag == "a" and self.article_section:
                # Get a link from the news feed, skipping journalist pages
                for name, value in attrs:
                    if name == "href":
                        if value not in self.links and "/journaliste/" not in value:
                            self.links.append(value)

            elif tag == "div" and not self.article_body:
                # Set flag for article body to true
                for name, value in attrs:
                    if name == 'id' and value == 'articleBody':
                        self.article_body = True

            elif tag == 'div' and self.article_body:
                # Track nested divs so we know when the main article div closes
                self.div_open_in_article_body += 1

            elif tag == 'p' and self.article_body:
                # Suspend acquisition for "lire aussi" section
                for name, value in attrs:
                    if name == 'class' and value == 'lire':
                        self.suspend_acquisition = True

            elif tag == 'section' and self.article_body:
                # BUG FIX: was "== True" (a no-op comparison); the flag must
                # actually be set so embedded sections are skipped.
                self.suspend_acquisition = True

            elif tag == 'iframe' and self.article_body:
                # BUG FIX: was "== True" (a no-op comparison), see above.
                self.suspend_acquisition = True

            elif tag == 'body':
                for name, value in attrs:
                    if name == "class":
                        self.category = value
        except Exception:
            # Best effort: ignore malformed tags rather than abort parsing.
            pass

    def handle_endtag(self, tag):
        """
        Method that Manage tag ending, in order to determine when parsing get
        out of appropriate sections
        """
        try:
            if tag == "article":
                self.article_section = False
            elif tag == "div" and self.article_body and self.div_open_in_article_body == 0:
                self.article_body = False
            elif tag == 'div' and self.article_body and self.div_open_in_article_body > 0:
                self.div_open_in_article_body -= 1
            elif tag == 'p' and self.suspend_acquisition:
                # BUG FIX: was "== False" (a no-op comparison) -- once a
                # "lire aussi" paragraph suspended acquisition, it never
                # resumed and the rest of the article was lost.
                self.suspend_acquisition = False
            elif tag == 'section' and self.suspend_acquisition:
                # BUG FIX: was "== False" (a no-op comparison), see above.
                self.suspend_acquisition = False
            elif tag == 'iframe' and self.suspend_acquisition:
                # BUG FIX: was "== False" (a no-op comparison), see above.
                self.suspend_acquisition = False
        except Exception:
            pass

    def handle_data(self, data):
        """
        Store data when in right section of parsing
        """
        if self.article_body:
            if not self.suspend_acquisition:
                self.article_data += data
mit
jalexvig/tensorflow
tensorflow/contrib/data/python/kernel_tests/serialization/unbatch_dataset_serialization_test.py
14
1952
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the UnbatchDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.data.python.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.contrib.data.python.ops import batching from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class UnbatchDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2): components = ( np.arange(tensor_slice_len), np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis], np.array(multiplier) * np.arange(tensor_slice_len)) return dataset_ops.Dataset.from_tensor_slices(components).batch( batch_size).apply(batching.unbatch()) def testCore(self): tensor_slice_len = 8 batch_size = 2 num_outputs = tensor_slice_len self.run_core_tests( lambda: self.build_dataset(15.0, tensor_slice_len, batch_size), lambda: self.build_dataset(20.0, tensor_slice_len, batch_size), num_outputs) if __name__ == "__main__": test.main()
apache-2.0
hhstore/learning-notes
python/src/exercise/py27/03_Network/RPC/XMLRPC/02_rpc_serv_cli/rpc_server.py
2
1201
# -*- coding:utf8 -*- ''' 功能: XML-RPC 服务端实现 依赖: SimpleXMLRPCServer 说明: 1. 命令行,先运行服务器端,再运行客户端. 2. ctrl+c, 退出服务器端服务. ''' __author__ = 'hhstore' from SimpleXMLRPCServer import SimpleXMLRPCServer # 自定义的类,待注册 class StringFunction(object): def __init__(self): import string self.python_string = string def _prinvateFunction(self): return "never get this result on the client." def chop_in_half(self, a_str): return a_str[:len(a_str)/2] def repeat(self, a_str, times): return a_str * times def main(): addr = ("localhost", 5000) # 主机名, 端口 server = SimpleXMLRPCServer(addr) # 创建RPC服务.在指定端口,监听请求. server.register_instance(StringFunction()) # 注册自定义的类 server.register_function(lambda a_str: "_" + a_str, name="_string") # 注册一个lambda函数,并命名为 _string() print "server on..." server.serve_forever() # 启动RPC服务,死循环. if __name__ == '__main__': main()
mit
chenbaihu/grpc
tools/buildgen/mako_renderer.py
12
3800
#!/usr/bin/python2.7 # Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Simple Mako renderer. Just a wrapper around the mako rendering library. 
""" import getopt import imp import os import sys from mako.lookup import TemplateLookup from mako.runtime import Context from mako.template import Template import simplejson import bunch # Imports a plugin def import_plugin(name): _, base_ex = os.path.split(name) base, _ = os.path.splitext(base_ex) with open(name, 'r') as plugin_file: plugin_code = plugin_file.read() plugin_module = imp.new_module(base) exec plugin_code in plugin_module.__dict__ return plugin_module def out(msg): print >> sys.stderr, msg def showhelp(): out('mako-renderer.py [-o out] [-m cache] [-d dict] [-d dict...] template') def main(argv): got_input = False module_directory = None dictionary = {} json_dict = {} got_output = False output_file = sys.stdout plugins = [] try: opts, args = getopt.getopt(argv, 'hm:d:o:p:') except getopt.GetoptError: out('Unknown option') showhelp() sys.exit(2) for opt, arg in opts: if opt == '-h': out('Displaying showhelp') showhelp() sys.exit() elif opt == '-o': if got_output: out('Got more than one output') showhelp() sys.exit(3) got_output = True output_file = open(arg, 'w') elif opt == '-m': if module_directory is not None: out('Got more than one cache directory') showhelp() sys.exit(4) module_directory = arg elif opt == '-d': dict_file = open(arg, 'r') bunch.merge_json(json_dict, simplejson.loads(dict_file.read())) dict_file.close() elif opt == '-p': plugins.append(import_plugin(arg)) for plugin in plugins: plugin.mako_plugin(json_dict) for k, v in json_dict.items(): dictionary[k] = bunch.to_bunch(v) ctx = Context(output_file, **dictionary) for arg in args: got_input = True template = Template(filename=arg, module_directory=module_directory, lookup=TemplateLookup(directories=['.'])) template.render_context(ctx) if not got_input: out('Got nothing to do') showhelp() output_file.close() if __name__ == '__main__': main(sys.argv[1:])
bsd-3-clause
Dandandan/wikiprogramming
jsrepl/extern/python/closured/lib/python2.7/htmllib.py
312
12869
"""HTML 2.0 parser. See the HTML 2.0 specification: http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html """ from warnings import warnpy3k warnpy3k("the htmllib module has been removed in Python 3.0", stacklevel=2) del warnpy3k import sgmllib from formatter import AS_IS __all__ = ["HTMLParser", "HTMLParseError"] class HTMLParseError(sgmllib.SGMLParseError): """Error raised when an HTML document can't be parsed.""" class HTMLParser(sgmllib.SGMLParser): """This is the basic HTML parser class. It supports all entity names required by the XHTML 1.0 Recommendation. It also defines handlers for all HTML 2.0 and many HTML 3.0 and 3.2 elements. """ from htmlentitydefs import entitydefs def __init__(self, formatter, verbose=0): """Creates an instance of the HTMLParser class. The formatter parameter is the formatter instance associated with the parser. """ sgmllib.SGMLParser.__init__(self, verbose) self.formatter = formatter def error(self, message): raise HTMLParseError(message) def reset(self): sgmllib.SGMLParser.reset(self) self.savedata = None self.isindex = 0 self.title = None self.base = None self.anchor = None self.anchorlist = [] self.nofill = 0 self.list_stack = [] # ------ Methods used internally; some may be overridden # --- Formatter interface, taking care of 'savedata' mode; # shouldn't need to be overridden def handle_data(self, data): if self.savedata is not None: self.savedata = self.savedata + data else: if self.nofill: self.formatter.add_literal_data(data) else: self.formatter.add_flowing_data(data) # --- Hooks to save data; shouldn't need to be overridden def save_bgn(self): """Begins saving character data in a buffer instead of sending it to the formatter object. Retrieve the stored data via the save_end() method. Use of the save_bgn() / save_end() pair may not be nested. """ self.savedata = '' def save_end(self): """Ends buffering character data and returns all data saved since the preceding call to the save_bgn() method. 
If the nofill flag is false, whitespace is collapsed to single spaces. A call to this method without a preceding call to the save_bgn() method will raise a TypeError exception. """ data = self.savedata self.savedata = None if not self.nofill: data = ' '.join(data.split()) return data # --- Hooks for anchors; should probably be overridden def anchor_bgn(self, href, name, type): """This method is called at the start of an anchor region. The arguments correspond to the attributes of the <A> tag with the same names. The default implementation maintains a list of hyperlinks (defined by the HREF attribute for <A> tags) within the document. The list of hyperlinks is available as the data attribute anchorlist. """ self.anchor = href if self.anchor: self.anchorlist.append(href) def anchor_end(self): """This method is called at the end of an anchor region. The default implementation adds a textual footnote marker using an index into the list of hyperlinks created by the anchor_bgn()method. """ if self.anchor: self.handle_data("[%d]" % len(self.anchorlist)) self.anchor = None # --- Hook for images; should probably be overridden def handle_image(self, src, alt, *args): """This method is called to handle images. The default implementation simply passes the alt value to the handle_data() method. 
""" self.handle_data(alt) # --------- Top level elememts def start_html(self, attrs): pass def end_html(self): pass def start_head(self, attrs): pass def end_head(self): pass def start_body(self, attrs): pass def end_body(self): pass # ------ Head elements def start_title(self, attrs): self.save_bgn() def end_title(self): self.title = self.save_end() def do_base(self, attrs): for a, v in attrs: if a == 'href': self.base = v def do_isindex(self, attrs): self.isindex = 1 def do_link(self, attrs): pass def do_meta(self, attrs): pass def do_nextid(self, attrs): # Deprecated pass # ------ Body elements # --- Headings def start_h1(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h1', 0, 1, 0)) def end_h1(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h2(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h2', 0, 1, 0)) def end_h2(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h3(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h3', 0, 1, 0)) def end_h3(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h4(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h4', 0, 1, 0)) def end_h4(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h5(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h5', 0, 1, 0)) def end_h5(self): self.formatter.end_paragraph(1) self.formatter.pop_font() def start_h6(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font(('h6', 0, 1, 0)) def end_h6(self): self.formatter.end_paragraph(1) self.formatter.pop_font() # --- Block Structuring Elements def do_p(self, attrs): self.formatter.end_paragraph(1) def start_pre(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1)) self.nofill = self.nofill + 1 def end_pre(self): self.formatter.end_paragraph(1) self.formatter.pop_font() self.nofill 
= max(0, self.nofill - 1) def start_xmp(self, attrs): self.start_pre(attrs) self.setliteral('xmp') # Tell SGML parser def end_xmp(self): self.end_pre() def start_listing(self, attrs): self.start_pre(attrs) self.setliteral('listing') # Tell SGML parser def end_listing(self): self.end_pre() def start_address(self, attrs): self.formatter.end_paragraph(0) self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS)) def end_address(self): self.formatter.end_paragraph(0) self.formatter.pop_font() def start_blockquote(self, attrs): self.formatter.end_paragraph(1) self.formatter.push_margin('blockquote') def end_blockquote(self): self.formatter.end_paragraph(1) self.formatter.pop_margin() # --- List Elements def start_ul(self, attrs): self.formatter.end_paragraph(not self.list_stack) self.formatter.push_margin('ul') self.list_stack.append(['ul', '*', 0]) def end_ul(self): if self.list_stack: del self.list_stack[-1] self.formatter.end_paragraph(not self.list_stack) self.formatter.pop_margin() def do_li(self, attrs): self.formatter.end_paragraph(0) if self.list_stack: [dummy, label, counter] = top = self.list_stack[-1] top[2] = counter = counter+1 else: label, counter = '*', 0 self.formatter.add_label_data(label, counter) def start_ol(self, attrs): self.formatter.end_paragraph(not self.list_stack) self.formatter.push_margin('ol') label = '1.' for a, v in attrs: if a == 'type': if len(v) == 1: v = v + '.' 
label = v self.list_stack.append(['ol', label, 0]) def end_ol(self): if self.list_stack: del self.list_stack[-1] self.formatter.end_paragraph(not self.list_stack) self.formatter.pop_margin() def start_menu(self, attrs): self.start_ul(attrs) def end_menu(self): self.end_ul() def start_dir(self, attrs): self.start_ul(attrs) def end_dir(self): self.end_ul() def start_dl(self, attrs): self.formatter.end_paragraph(1) self.list_stack.append(['dl', '', 0]) def end_dl(self): self.ddpop(1) if self.list_stack: del self.list_stack[-1] def do_dt(self, attrs): self.ddpop() def do_dd(self, attrs): self.ddpop() self.formatter.push_margin('dd') self.list_stack.append(['dd', '', 0]) def ddpop(self, bl=0): self.formatter.end_paragraph(bl) if self.list_stack: if self.list_stack[-1][0] == 'dd': del self.list_stack[-1] self.formatter.pop_margin() # --- Phrase Markup # Idiomatic Elements def start_cite(self, attrs): self.start_i(attrs) def end_cite(self): self.end_i() def start_code(self, attrs): self.start_tt(attrs) def end_code(self): self.end_tt() def start_em(self, attrs): self.start_i(attrs) def end_em(self): self.end_i() def start_kbd(self, attrs): self.start_tt(attrs) def end_kbd(self): self.end_tt() def start_samp(self, attrs): self.start_tt(attrs) def end_samp(self): self.end_tt() def start_strong(self, attrs): self.start_b(attrs) def end_strong(self): self.end_b() def start_var(self, attrs): self.start_i(attrs) def end_var(self): self.end_i() # Typographic Elements def start_i(self, attrs): self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS)) def end_i(self): self.formatter.pop_font() def start_b(self, attrs): self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS)) def end_b(self): self.formatter.pop_font() def start_tt(self, attrs): self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1)) def end_tt(self): self.formatter.pop_font() def start_a(self, attrs): href = '' name = '' type = '' for attrname, value in attrs: value = value.strip() if attrname == 'href': href = value if attrname == 
'name': name = value if attrname == 'type': type = value.lower() self.anchor_bgn(href, name, type) def end_a(self): self.anchor_end() # --- Line Break def do_br(self, attrs): self.formatter.add_line_break() # --- Horizontal Rule def do_hr(self, attrs): self.formatter.add_hor_rule() # --- Image def do_img(self, attrs): align = '' alt = '(image)' ismap = '' src = '' width = 0 height = 0 for attrname, value in attrs: if attrname == 'align': align = value if attrname == 'alt': alt = value if attrname == 'ismap': ismap = value if attrname == 'src': src = value if attrname == 'width': try: width = int(value) except ValueError: pass if attrname == 'height': try: height = int(value) except ValueError: pass self.handle_image(src, alt, ismap, align, width, height) # --- Really Old Unofficial Deprecated Stuff def do_plaintext(self, attrs): self.start_pre(attrs) self.setnomoretags() # Tell SGML parser # --- Unhandled tags def unknown_starttag(self, tag, attrs): pass def unknown_endtag(self, tag): pass def test(args = None): import sys, formatter if not args: args = sys.argv[1:] silent = args and args[0] == '-s' if silent: del args[0] if args: file = args[0] else: file = 'test.html' if file == '-': f = sys.stdin else: try: f = open(file, 'r') except IOError, msg: print file, ":", msg sys.exit(1) data = f.read() if f is not sys.stdin: f.close() if silent: f = formatter.NullFormatter() else: f = formatter.AbstractFormatter(formatter.DumbWriter()) p = HTMLParser(f) p.feed(data) p.close() if __name__ == '__main__': test()
mit
rdoh/pixelated-user-agent
service/test/functional/features/environment.py
5
2804
# # Copyright (c) 2014 ThoughtWorks, Inc. # # Pixelated is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pixelated is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Pixelated. If not, see <http://www.gnu.org/licenses/>. import logging import uuid from crochet import setup, wait_for from leap.common.events.server import ensure_server from twisted.internet import defer from test.support.dispatcher.proxy import Proxy from test.support.integration import AppTestClient from selenium import webdriver from pixelated.resources.features_resource import FeaturesResource from steps.common import * setup() @wait_for(timeout=5.0) def start_app_test_client(client): ensure_server() return client.start_client() def before_all(context): logging.disable('INFO') client = AppTestClient() start_app_test_client(client) client.listenTCP() proxy = Proxy(proxy_port='8889', app_port='4567') FeaturesResource.DISABLED_FEATURES.append('autoRefresh') context.client = client context.call_to_terminate_proxy = proxy.run_on_a_thread() def after_all(context): context.call_to_terminate_proxy() def before_feature(context, feature): # context.browser = webdriver.Firefox() context.browser = webdriver.PhantomJS() context.browser.set_window_size(1280, 1024) context.browser.implicitly_wait(DEFAULT_IMPLICIT_WAIT_TIMEOUT_IN_S) context.browser.set_page_load_timeout(60) # wait for data context.browser.get('http://localhost:8889/') def after_step(context, step): if step.status == 'failed': id = str(uuid.uuid4()) context.browser.save_screenshot('failed 
' + str(step.name) + '_' + id + ".png") save_source(context, 'failed ' + str(step.name) + '_' + id + ".html") def after_feature(context, feature): context.browser.quit() cleanup_all_mails(context) context.last_mail = None @wait_for(timeout=10.0) def cleanup_all_mails(context): @defer.inlineCallbacks def _delete_all_mails(): mails = yield context.client.mail_store.all_mails() for mail in mails: yield context.client.mail_store.delete_mail(mail.ident) return _delete_all_mails() def save_source(context, filename='/tmp/source.html'): with open(filename, 'w') as out: out.write(context.browser.page_source.encode('utf8'))
agpl-3.0
zhangg/trove
trove/tests/unittests/api/test_versions.py
3
8368
# Copyright 2013 OpenStack Foundation # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock from trove.tests.unittests import trove_testtools from trove.versions import BaseVersion from trove.versions import Version from trove.versions import VersionDataView from trove.versions import VERSIONS from trove.versions import VersionsAPI from trove.versions import VersionsController from trove.versions import VersionsDataView BASE_URL = 'http://localhost' id = VERSIONS['1.0']['id'] status = VERSIONS['1.0']['status'] base_url = BASE_URL updated = VERSIONS['1.0']['updated'] class VersionsControllerTest(trove_testtools.TestCase): def setUp(self): super(VersionsControllerTest, self).setUp() self.controller = VersionsController() self.assertIsNotNone(self.controller, "VersionsController instance was None") def test_index_json(self): request = Mock() result = self.controller.index(request) self.assertIsNotNone(result, 'Result was None') result._data = Mock() result._data.data_for_json = \ lambda: {'status': 'CURRENT', 'updated': '2012-08-01T00:00:00Z', 'id': 'v1.0', 'links': [{'href': 'http://localhost/v1.0/', 'rel': 'self'}]} # can be anything but xml json_data = result.data("application/json") self.assertIsNotNone(json_data, 'Result json_data was None') self.assertEqual('v1.0', json_data['id'], 'Version id is incorrect') self.assertEqual('CURRENT', json_data['status'], 'Version status is incorrect') 
self.assertEqual('2012-08-01T00:00:00Z', json_data['updated'], 'Version updated value is incorrect') def test_show_json(self): request = Mock() request.url_version = '1.0' result = self.controller.show(request) self.assertIsNotNone(result, 'Result was None') json_data = result.data("application/json") self.assertIsNotNone(json_data, "JSON data was None") version = json_data.get('version', None) self.assertIsNotNone(version, "Version was None") self.assertEqual('CURRENT', version['status'], "Version status was not 'CURRENT'") self.assertEqual('2012-08-01T00:00:00Z', version['updated'], "Version updated was not '2012-08-01T00:00:00Z'") self.assertEqual('v1.0', version['id'], "Version id was not 'v1.0'") class BaseVersionTestCase(trove_testtools.TestCase): def setUp(self): super(BaseVersionTestCase, self).setUp() self.base_version = BaseVersion(id, status, base_url, updated) self.assertIsNotNone(self.base_version, 'BaseVersion instance was None') def test_data(self): data = self.base_version.data() self.assertIsNotNone(data, 'Base Version data was None') self.assertTrue(type(data) is dict, "Base Version data is not a dict") self.assertEqual('CURRENT', data['status'], "Data status was not 'CURRENT'") self.assertEqual('2012-08-01T00:00:00Z', data['updated'], "Data updated was not '2012-08-01T00:00:00Z'") self.assertEqual('v1.0', data['id'], "Data status was not 'v1.0'") def test_url(self): url = self.base_version.url() self.assertIsNotNone(url, 'Url was None') self.assertEqual('http://localhost/v1.0/', url, "Base Version url is incorrect") class VersionTestCase(trove_testtools.TestCase): def setUp(self): super(VersionTestCase, self).setUp() self.version = Version(id, status, base_url, updated) self.assertIsNotNone(self.version, 'Version instance was None') def test_url_no_trailing_slash(self): url = self.version.url() self.assertIsNotNone(url, 'Version url was None') self.assertEqual(BASE_URL + '/', url, 'Base url value was incorrect') def 
test_url_with_trailing_slash(self): self.version.base_url = 'http://localhost/' url = self.version.url() self.assertEqual(BASE_URL + '/', url, 'Base url value was incorrect') class VersionDataViewTestCase(trove_testtools.TestCase): def setUp(self): super(VersionDataViewTestCase, self).setUp() # get a version object first self.version = Version(id, status, base_url, updated) self.assertIsNotNone(self.version, 'Version instance was None') # then create an instance of VersionDataView self.version_data_view = VersionDataView(self.version) self.assertIsNotNone(self.version_data_view, 'Version Data view instance was None') def test_data_for_json(self): json_data = self.version_data_view.data_for_json() self.assertIsNotNone(json_data, "JSON data was None") self.assertTrue(type(json_data) is dict, "JSON version data is not a dict") self.assertIsNotNone(json_data.get('version'), "Dict json_data has no key 'version'") data = json_data['version'] self.assertIsNotNone(data, "JSON data version was None") self.assertEqual('CURRENT', data['status'], "Data status was not 'CURRENT'") self.assertEqual('2012-08-01T00:00:00Z', data['updated'], "Data updated was not '2012-08-01T00:00:00Z'") self.assertEqual('v1.0', data['id'], "Data status was not 'v1.0'") class VersionsDataViewTestCase(trove_testtools.TestCase): def setUp(self): super(VersionsDataViewTestCase, self).setUp() # get a version object, put it in a list self.versions = [] self.version = Version(id, status, base_url, updated) self.assertIsNotNone(self.version, 'Version instance was None') self.versions.append(self.version) # then create an instance of VersionsDataView self.versions_data_view = VersionsDataView(self.versions) self.assertIsNotNone(self.versions_data_view, 'Versions Data view instance was None') def test_data_for_json(self): json_data = self.versions_data_view.data_for_json() self.assertIsNotNone(json_data, "JSON data was None") self.assertTrue(type(json_data) is dict, "JSON versions data is not a dict") 
self.assertIsNotNone(json_data.get('versions', None), "Dict json_data has no key 'versions'") versions = json_data['versions'] self.assertIsNotNone(versions, "Versions was None") self.assertEqual(1, len(versions), "Versions length != 1") # explode the version object versions_data = [v.data() for v in self.versions] d1 = versions_data.pop() d2 = versions.pop() self.assertEqual(d1['id'], d2['id'], "Version ids are not equal") class VersionAPITestCase(trove_testtools.TestCase): def setUp(self): super(VersionAPITestCase, self).setUp() def test_instance(self): self.versions_api = VersionsAPI() self.assertIsNotNone(self.versions_api, "VersionsAPI instance was None")
apache-2.0
murat1985/bagpipe-bgp
bagpipe/bgp/engine/worker.py
2
5205
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # encoding: utf-8 # Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import traceback from Queue import Queue from threading import Event from bagpipe.bgp.engine import RouteEntry, RouteEvent, \ Subscription, Unsubscription from bagpipe.bgp.common.looking_glass import LookingGlass, LGMap log = logging.getLogger(__name__) class Worker(LookingGlass): """This is the base class for objects that interact with the route table manager to produce and consume events related to BGP routes. These objects will: * use _subscribe(...) and _unsubscribe(...) to subscribe to routing events * will specialize _onEvent(event) to react to received events * use _pushEvent(event) to publish routing events """ stopEvent = object() def __init__(self, bgpManager, workerName): self.bgpManager = bgpManager self._queue = Queue() self._pleaseStop = Event() log.debug("Setting worker name to %s", workerName) self.name = workerName assert(self.name is not None) log.debug("Instantiated %s worker", self.name) def stop(self): """ Stop this worker. Set the _pleaseStop internal event to stop the event processor loop and indicate to the route table manager that this worker is stopped. Then call _stopped() to let a subclass implement any further work. 
""" self._pleaseStop.set() self._queue.put(Worker.stopEvent) self.bgpManager.cleanup(self) self._stopped() def _stopped(self): """ Hook for subclasses to react when Worker is stopped (NoOp in base Worker class) """ def _eventQueueProcessorLoop(self): """ Main loop where the worker consumes events. """ while not self._pleaseStop.isSet(): # log.debug("%s worker waiting on queue",self.name ) event = self._dequeue() if (event == Worker.stopEvent): log.debug("StopEvent, breaking queue processor loop") self._pleaseStop.set() break # log.debug("%s worker calling _onEvent for %s",self.name,event) try: self._onEvent(event) except Exception as e: log.error("Exception raised on subclass._onEvent: %s", e) log.error("%s", traceback.format_exc()) def run(self): self._eventQueueProcessorLoop() def _onEvent(self, event): """ This method is implemented by subclasses to react to routing events. """ log.debug("Worker %s _onEvent: %s", self.name, event) raise NotImplementedError def _dequeue(self): return self._queue.get() def enqueue(self, event): # TODO(tmmorin): replace Queue by a PriorityQueue and use a higher # priority for ReInit event self._queue.put(event) def _subscribe(self, afi, safi, rt=None): subobj = Subscription(afi, safi, rt, self) log.info("Subscribe: %s ", subobj) self.bgpManager.routeEventSubUnsub(subobj) def _unsubscribe(self, afi, safi, rt=None): subobj = Unsubscription(afi, safi, rt, self) log.info("Unsubscribe: %s ", subobj) self.bgpManager.routeEventSubUnsub(subobj) def getWorkerSubscriptions(self): return self.bgpManager.routeTableManager.getWorkerSubscriptions(self) def getWorkerRouteEntries(self): return self.bgpManager.routeTableManager.getWorkerRouteEntries(self) def _pushEvent(self, routeEvent): assert(isinstance(routeEvent, RouteEvent)) log.debug("Pushing route event to BGPManager") if routeEvent.source is None: routeEvent.source = self self.bgpManager._pushEvent(routeEvent) def _newRouteEntry(self, afi, safi, rts, nlri, attributes): return 
RouteEntry(afi, safi, rts, nlri, attributes, self) def __repr__(self): return "Worker %s" % (self.name) # Looking glass ### def getLookingGlassLocalInfo(self, pathPrefix): return { "name": self.name, "internals": { "event queue length": self._queue.qsize(), "subscriptions": [repr(sub) for sub in self.getWorkerSubscriptions()], } } def getLGMap(self): return { "routes": (LGMap.SUBTREE, self.getLGRoutes) } def getLGRoutes(self, pathPrefix): return [route.getLookingGlassInfo(pathPrefix) for route in self.getWorkerRouteEntries()]
apache-2.0
olapaola/olapaola-android-scripting
python/src/Lib/distutils/command/register.py
49
11509
"""distutils.command.register Implements the Distutils 'register' command (register with the repository). """ # created 2002/10/21, Richard Jones __revision__ = "$Id: register.py 67944 2008-12-27 13:28:42Z tarek.ziade $" import os, string, urllib2, getpass, urlparse import StringIO from distutils.core import PyPIRCCommand from distutils.errors import * from distutils import log class register(PyPIRCCommand): description = ("register the distribution with the Python package index") user_options = PyPIRCCommand.user_options + [ ('list-classifiers', None, 'list the valid Trove classifiers'), ] boolean_options = PyPIRCCommand.boolean_options + [ 'verify', 'list-classifiers'] def initialize_options(self): PyPIRCCommand.initialize_options(self) self.list_classifiers = 0 def run(self): self.finalize_options() self._set_config() self.check_metadata() if self.dry_run: self.verify_metadata() elif self.list_classifiers: self.classifiers() else: self.send_metadata() def check_metadata(self): """Ensure that all required elements of meta-data (name, version, URL, (author and author_email) or (maintainer and maintainer_email)) are supplied by the Distribution object; warn if any are missing. """ metadata = self.distribution.metadata missing = [] for attr in ('name', 'version', 'url'): if not (hasattr(metadata, attr) and getattr(metadata, attr)): missing.append(attr) if missing: self.warn("missing required meta-data: " + string.join(missing, ", ")) if metadata.author: if not metadata.author_email: self.warn("missing meta-data: if 'author' supplied, " + "'author_email' must be supplied too") elif metadata.maintainer: if not metadata.maintainer_email: self.warn("missing meta-data: if 'maintainer' supplied, " + "'maintainer_email' must be supplied too") else: self.warn("missing meta-data: either (author and author_email) " + "or (maintainer and maintainer_email) " + "must be supplied") def _set_config(self): ''' Reads the configuration file and set attributes. 
''' config = self._read_pypirc() if config != {}: self.username = config['username'] self.password = config['password'] self.repository = config['repository'] self.realm = config['realm'] self.has_config = True else: if self.repository not in ('pypi', self.DEFAULT_REPOSITORY): raise ValueError('%s not found in .pypirc' % self.repository) if self.repository == 'pypi': self.repository = self.DEFAULT_REPOSITORY self.has_config = False def classifiers(self): ''' Fetch the list of classifiers from the server. ''' response = urllib2.urlopen(self.repository+'?:action=list_classifiers') print response.read() def verify_metadata(self): ''' Send the metadata to the package index server to be checked. ''' # send the info to the server and report the result (code, result) = self.post_to_server(self.build_post_data('verify')) print 'Server response (%s): %s'%(code, result) def send_metadata(self): ''' Send the metadata to the package index server. Well, do the following: 1. figure who the user is, and then 2. send the data as a Basic auth'ed POST. First we try to read the username/password from $HOME/.pypirc, which is a ConfigParser-formatted file with a section [distutils] containing username and password entries (both in clear text). Eg: [distutils] index-servers = pypi [pypi] username: fred password: sekrit Otherwise, to figure who the user is, we offer the user three choices: 1. use existing login, 2. register as a new user, or 3. set the password to a random string and email the user. ''' # see if we can short-cut and get the username/password from the # config if self.has_config: choice = '1' username = self.username password = self.password else: choice = 'x' username = password = '' # get the user's login info choices = '1 2 3 4'.split() while choice not in choices: self.announce('''\ We need to know who you are, so please choose either: 1. use your existing login, 2. register as a new user, 3. have the server generate a new password for you (and email it to you), or 4. 
quit Your selection [default 1]: ''', log.INFO) choice = raw_input() if not choice: choice = '1' elif choice not in choices: print 'Please choose one of the four options!' if choice == '1': # get the username and password while not username: username = raw_input('Username: ') while not password: password = getpass.getpass('Password: ') # set up the authentication auth = urllib2.HTTPPasswordMgr() host = urlparse.urlparse(self.repository)[1] auth.add_password(self.realm, host, username, password) # send the info to the server and report the result code, result = self.post_to_server(self.build_post_data('submit'), auth) self.announce('Server response (%s): %s' % (code, result), log.INFO) # possibly save the login if not self.has_config and code == 200: self.announce(('I can store your PyPI login so future ' 'submissions will be faster.'), log.INFO) self.announce('(the login will be stored in %s)' % \ self._get_rc_file(), log.INFO) choice = 'X' while choice.lower() not in 'yn': choice = raw_input('Save your login (y/N)?') if not choice: choice = 'n' if choice.lower() == 'y': self._store_pypirc(username, password) elif choice == '2': data = {':action': 'user'} data['name'] = data['password'] = data['email'] = '' data['confirm'] = None while not data['name']: data['name'] = raw_input('Username: ') while data['password'] != data['confirm']: while not data['password']: data['password'] = getpass.getpass('Password: ') while not data['confirm']: data['confirm'] = getpass.getpass(' Confirm: ') if data['password'] != data['confirm']: data['password'] = '' data['confirm'] = None print "Password and confirm don't match!" while not data['email']: data['email'] = raw_input(' EMail: ') code, result = self.post_to_server(data) if code != 200: print 'Server response (%s): %s'%(code, result) else: print 'You will receive an email shortly.' print 'Follow the instructions in it to complete registration.' 
elif choice == '3': data = {':action': 'password_reset'} data['email'] = '' while not data['email']: data['email'] = raw_input('Your email address: ') code, result = self.post_to_server(data) print 'Server response (%s): %s'%(code, result) def build_post_data(self, action): # figure the data to send - the metadata plus some additional # information used by the package server meta = self.distribution.metadata data = { ':action': action, 'metadata_version' : '1.0', 'name': meta.get_name(), 'version': meta.get_version(), 'summary': meta.get_description(), 'home_page': meta.get_url(), 'author': meta.get_contact(), 'author_email': meta.get_contact_email(), 'license': meta.get_licence(), 'description': meta.get_long_description(), 'keywords': meta.get_keywords(), 'platform': meta.get_platforms(), 'classifiers': meta.get_classifiers(), 'download_url': meta.get_download_url(), # PEP 314 'provides': meta.get_provides(), 'requires': meta.get_requires(), 'obsoletes': meta.get_obsoletes(), } if data['provides'] or data['requires'] or data['obsoletes']: data['metadata_version'] = '1.1' return data def post_to_server(self, data, auth=None): ''' Post a query to the server, and return a string response. 
''' self.announce('Registering %s to %s' % (data['name'], self.repository), log.INFO) # Build up the MIME payload for the urllib2 POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = '\n--' + boundary end_boundary = sep_boundary + '--' body = StringIO.StringIO() for key, value in data.items(): # handle multiple entries for the same name if type(value) not in (type([]), type( () )): value = [value] for value in value: value = unicode(value).encode("utf-8") body.write(sep_boundary) body.write('\nContent-Disposition: form-data; name="%s"'%key) body.write("\n\n") body.write(value) if value and value[-1] == '\r': body.write('\n') # write an extra newline (lurve Macs) body.write(end_boundary) body.write("\n") body = body.getvalue() # build the Request headers = { 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary, 'Content-length': str(len(body)) } req = urllib2.Request(self.repository, body, headers) # handle HTTP and include the Basic Auth handler opener = urllib2.build_opener( urllib2.HTTPBasicAuthHandler(password_mgr=auth) ) data = '' try: result = opener.open(req) except urllib2.HTTPError, e: if self.show_response: data = e.fp.read() result = e.code, e.msg except urllib2.URLError, e: result = 500, str(e) else: if self.show_response: data = result.read() result = 200, 'OK' if self.show_response: print '-'*75, data, '-'*75 return result
apache-2.0
heartherumble/django-tinymce
testtinymce/settings.py
5
3360
# Django settings for testtinymce project. import os ROOT_PATH = os.path.dirname(os.path.realpath(__file__)) DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@domain.com'), ) MANAGERS = ADMINS DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'. DATABASE_NAME = 'database.sqlite' # Or path to database file if using sqlite3. DATABASE_USER = '' # Not used with sqlite3. DATABASE_PASSWORD = '' # Not used with sqlite3. DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3. DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3. # Local time zone for this installation. Choices can be found here: # http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE # although not all variations may be possible on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes # http://blogs.law.harvard.edu/tech/stories/storyReader$15 LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = "%s/%s/" % (ROOT_PATH, 'media') # URL that handles the media served from MEDIA_ROOT. # Example: "http://media.lawrence.com" MEDIA_URL = '/media/' # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: "http://foo.com/media/", "/media/". ADMIN_MEDIA_PREFIX = '/media/admin/' # Make this unique, and don't share it with anybody. SECRET_KEY = '7j6o^y6#w@#067^-dr)h_)*^^@b&mgmd@1_y309w+)rk!4p^0!' 
# List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', # 'django.template.loaders.eggs.load_template_source', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.middleware.doc.XViewMiddleware', 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware', ) ROOT_URLCONF = 'testtinymce.urls' TEMPLATE_DIRS = ( "%s/%s/" % (ROOT_PATH, 'templates'), ) INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.flatpages', 'tinymce', 'testapp', ) TINYMCE_SPELLCHECKER = True TINYMCE_JS_URL = "%sjs/tiny_mce/tiny_mce_src.js" % MEDIA_URL TINYMCE_COMPRESSOR = False # Check to see if django-filebrowser is installed try: import filebrowser INSTALLED_APPS += ('filebrowser',) TINYMCE_FILEBROWSER = True except ImportError: pass TINYMCE_DEFAULT_CONFIG = { 'theme': "advanced", 'plugins': "spellchecker", 'theme_advanced_buttons3_add': "|,spellchecker", }
mit
spirrello/spirrello-pynet-work
applied_python/lib/python2.7/site-packages/pysnmp/proto/secmod/rfc3414/priv/des.py
4
4328
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
import random
from pysnmp.proto.secmod.rfc3414.priv import base
from pysnmp.proto.secmod.rfc3414.auth import hmacmd5, hmacsha
from pysnmp.proto.secmod.rfc3414 import localkey
from pysnmp.proto import errind, error
from pyasn1.type import univ
from sys import version_info

# PyCrypto is optional; when missing, encrypt/decrypt raise StatusInformation.
try:
    from Crypto.Cipher import DES
except ImportError:
    DES = None

random.seed()


# 8.2.4
class Des(base.AbstractEncryptionService):
    """USM DES-CBC privacy protocol (RFC 3414 section 8)."""
    serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 2, 2)  # usmDESPrivProtocol

    # Class-wide "local integer" half of the CBC salt; incremented per message.
    # Branch keeps compatibility with pre-2.3 Pythons lacking randrange here.
    if version_info < (2, 3):
        _localInt = int(random.random()*0xffffffff)
    else:
        _localInt = random.randrange(0, 0xffffffff)

    def hashPassphrase(self, authProtocol, privKey):
        # Hash the privacy passphrase with the digest matching the
        # configured authentication protocol (MD5 or SHA).
        if authProtocol == hmacmd5.HmacMd5.serviceID:
            return localkey.hashPassphraseMD5(privKey)
        elif authProtocol == hmacsha.HmacSha.serviceID:
            return localkey.hashPassphraseSHA(privKey)
        else:
            raise error.ProtocolError(
                'Unknown auth protocol %s' % (authProtocol,)
            )

    def localizeKey(self, authProtocol, privKey, snmpEngineID):
        # Localize the hashed key to a particular SNMP engine; only the
        # first 32 octets are used (16 for the DES key+preIV pair plus slack).
        if authProtocol == hmacmd5.HmacMd5.serviceID:
            localPrivKey = localkey.localizeKeyMD5(privKey, snmpEngineID)
        elif authProtocol == hmacsha.HmacSha.serviceID:
            localPrivKey = localkey.localizeKeySHA(privKey, snmpEngineID)
        else:
            raise error.ProtocolError(
                'Unknown auth protocol %s' % (authProtocol,)
            )
        return localPrivKey[:32]  # key+IV

    # 8.1.1.1
    def __getEncryptionKey(self, privKey, snmpEngineBoots):
        # Derive (DES key, salt, IV) for an outgoing message. The 8-octet
        # salt is snmpEngineBoots (4 octets) followed by the local integer
        # (4 octets); the IV is salt XOR pre-IV (octets 8..15 of privKey).
        desKey = privKey[:8]
        preIV = privKey[8:16]

        securityEngineBoots = int(snmpEngineBoots)

        salt = [securityEngineBoots>>24&0xff,
                securityEngineBoots>>16&0xff,
                securityEngineBoots>>8&0xff,
                securityEngineBoots&0xff,
                self._localInt>>24&0xff,
                self._localInt>>16&0xff,
                self._localInt>>8&0xff,
                self._localInt&0xff]

        # Advance the local integer (wrap at 2**32-1) so each message gets
        # a distinct salt.
        if self._localInt == 0xffffffff:
            self._localInt = 0
        else:
            self._localInt += 1

        return (desKey.asOctets(),
                univ.OctetString(salt).asOctets(),
                univ.OctetString(map(lambda x, y: x^y, salt,
                                     preIV.asNumbers())).asOctets())

    def __getDecryptionKey(self, privKey, salt):
        # Derive (DES key, IV) for an incoming message from the localized key
        # and the salt carried in the privParameters field.
        return (privKey[:8].asOctets(),
                univ.OctetString(map(lambda x, y: x^y,
                                     salt.asNumbers(),
                                     privKey[8:16].asNumbers())).asOctets())

    # 8.2.4.1
    def encryptData(self, encryptKey, privParameters, dataToEncrypt):
        if DES is None:
            raise error.StatusInformation(
                errorIndication=errind.encryptionError
            )

        snmpEngineBoots, snmpEngineTime, salt = privParameters

        # 8.3.1.1
        desKey, salt, iv = self.__getEncryptionKey(
            encryptKey, snmpEngineBoots
        )

        # 8.3.1.2 -- the salt is returned to the caller as privParameters
        privParameters = univ.OctetString(salt)

        # 8.1.1.2
        desObj = DES.new(desKey, DES.MODE_CBC, iv)
        # Zero-pad up to the DES block size. NOTE(review): when the input
        # length is already a multiple of 8, (8 - len % 8) appends a full
        # extra 8-byte block of zeros -- presumably tolerated by peers since
        # BER decoding ignores the trailing zeros; confirm before changing.
        plaintext = dataToEncrypt + univ.OctetString((0,) * (8 - len(dataToEncrypt) % 8)).asOctets()
        ciphertext = desObj.encrypt(plaintext)

        # 8.3.1.3 & 4
        return univ.OctetString(ciphertext), privParameters

    # 8.2.4.2
    def decryptData(self, decryptKey, privParameters, encryptedData):
        if DES is None:
            raise error.StatusInformation(
                errorIndication=errind.decryptionError
            )
        snmpEngineBoots, snmpEngineTime, salt = privParameters

        # 8.3.2.1 -- the salt must be exactly 8 octets
        if len(salt) != 8:
            raise error.StatusInformation(
                errorIndication=errind.decryptionError
            )

        # 8.3.2.2 noop

        # 8.3.2.3
        desKey, iv = self.__getDecryptionKey(decryptKey, salt)

        # 8.3.2.4 -> 8.1.1.3 -- ciphertext must be whole DES blocks
        if len(encryptedData) % 8 != 0:
            raise error.StatusInformation(
                errorIndication=errind.decryptionError
            )

        desObj = DES.new(desKey, DES.MODE_CBC, iv)

        # 8.3.2.6 -- caller is responsible for stripping the zero padding
        return desObj.decrypt(encryptedData.asOctets())
gpl-3.0
shubhdev/edx-platform
common/test/acceptance/tests/lms/test_lms_problems.py
21
3301
# -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for problems in the LMS

See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.problem import ProblemPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from textwrap import dedent


class ProblemsTest(UniqueCourseTest):
    """
    Base class for tests of problems in the LMS.

    Builds a one-chapter/one-subsection course around the problem returned
    by get_problem(), then registers a test user into it.
    """
    USERNAME = "joe_student"
    EMAIL = "joe@example.com"

    def setUp(self):
        super(ProblemsTest, self).setUp()

        # No xqueue stub response configured by default; subclasses that
        # exercise externally graded problems may set this.
        self.xqueue_grade_response = None

        self.courseware_page = CoursewarePage(self.browser, self.course_id)

        # Install a course with a hierarchy and problems
        course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        problem = self.get_problem()
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(problem)
            )
        ).install()

        # Auto-auth register for the course.
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
                     course_id=self.course_id, staff=False).visit()

    def get_problem(self):
        """ Subclasses should override this to complete the fixture """
        raise NotImplementedError()


class ProblemClarificationTest(ProblemsTest):
    """
    Tests the <clarification> element that can be used in problem XML.
    """

    def get_problem(self):
        """
        Create a problem with a <clarification>
        """
        xml = dedent("""
            <problem markdown="null">
                <text>
                    <p>
                        Given the data in Table 7 <clarification>Table 7: "Example PV
                        Installation Costs", Page 171 of Roberts textbook</clarification>,
                        compute the ROI
                        <clarification>Return on Investment <strong>(per year)</strong></clarification>
                        over 20 years.
                    </p>
                    <numericalresponse answer="6.5">
                        <textline label="Enter the annual ROI" trailing_text="%" />
                    </numericalresponse>
                </text>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'TOOLTIP TEST PROBLEM', data=xml)

    def test_clarification(self):
        """
        Test that we can see the <clarification> tooltips.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, 'TOOLTIP TEST PROBLEM')
        # First clarification: the textbook-reference tooltip.
        problem_page.click_clarification(0)
        self.assertIn('"Example PV Installation Costs"', problem_page.visible_tooltip_text)
        # Second clarification: markup such as <strong> is rendered as text,
        # so the tag name itself must not leak into the tooltip.
        problem_page.click_clarification(1)
        tooltip_text = problem_page.visible_tooltip_text
        self.assertIn('Return on Investment', tooltip_text)
        self.assertIn('per year', tooltip_text)
        self.assertNotIn('strong', tooltip_text)
agpl-3.0
julianwachholz/praw
tests/test_config.py
7
1353
"""Tests for Config class."""

from __future__ import print_function, unicode_literals
import unittest
from praw import Config
from praw.errors import ClientException

try:
    import ConfigParser as configparser
except ImportError:
    import configparser  # NOQA pylint: disable=F0401


class ConfigTest(unittest.TestCase):
    """Exercise Config construction for built-in and local site entries."""

    def test_default_site(self):
        # A bare 'reddit' site picks up the stock defaults: no request
        # logging, a 2-second API delay, and no stored credentials.
        cfg = Config('reddit')
        self.assertEqual(0, cfg.log_requests)
        self.assertEqual(2, cfg.api_request_delay)
        self.assertEqual(None, cfg.user)
        self.assertEqual(None, cfg.pswd)

    def test_default_site__with_overrides(self):
        # Keyword overrides take precedence over the site file's values.
        cfg = Config('reddit', log_requests=15, api_request_delay=21)
        self.assertEqual(15, cfg.log_requests)
        self.assertEqual(21, cfg.api_request_delay)

    def test_default_site__with_username_and_password(self):
        cfg = Config('reddit', user='foo', pswd='bar')
        self.assertEqual('foo', cfg.user)
        self.assertEqual('bar', cfg.pswd)

    def test_invalid_site(self):
        # Unknown site names surface configparser's NoSectionError.
        self.assertRaises(configparser.NoSectionError, Config, 'invalid')

    def test_local_site(self):
        # A local site has no short domain; reading the attribute must raise.
        cfg = Config('local_example')
        self.assertRaises(ClientException, getattr, cfg, 'short_domain')
gpl-3.0
Balannen/LSMASOMM
atom3/Kernel/Layout/RandomLayout.py
1
2169
"""
 RandomLayout.py

 Generates a random layout by moving all the nodes positions randomly
 in a 640x480 pixel box. The connections are then optimized for the
 new layout.

 Guaranteed to hit an aesthetic layout at infinity, not recognize it,
 and keep on going for another infinity :p

 Created Summer 2004, Denis Dube
"""

from random import randint

from Utilities import selectAllVisibleObjects, optimizeLinks
from ModelSpecificCode import isEntityNode


def applyLayout(self):
    """
    Scatter every node of the current model to a random position inside a
    640x480 box, then re-route the connections to fit the new positions.

    'self' is the ATOM3 editor object: it supplies the model root
    (self.ASGroot) and the canvas callback context (self.cb).
    """
    for nodetype in self.ASGroot.nodeTypes:
        for node in self.ASGroot.listNodes[nodetype]:
            if( isEntityNode( node.graphObject_ ) ):
                # Move the nodes around
                currPos = node.graphObject_.getCenterCoord()
                newPos = [ randint(0,640), randint(0,480) ]
                # NOTE(review): the extra False argument presumably suppresses
                # per-move link updates for entity nodes -- confirm against the
                # graphObject_.Move implementation before relying on it.
                node.graphObject_.Move( -currPos[0],-currPos[1], False)  # Go back to the origin
                node.graphObject_.Move( newPos[0], newPos[1], False)     # Move to random location
            else:
                # Move the links around (default Move flags)
                currPos = node.graphObject_.getCenterCoord()
                newPos = [ randint(0,640), randint(0,480) ]
                node.graphObject_.Move( -currPos[0],-currPos[1])  # Go back to the origin
                node.graphObject_.Move( newPos[0], newPos[1])     # Move to random location

    # Re-route all edges for the new node positions.
    selectAllVisibleObjects( self )
    optimizeLinks( self.cb )


"""
# This code fragment can spill all the co-ordinates making up an edge
for nodetype in core.ASGroot.nodeTypes:
  for node in core.ASGroot.listNodes[nodetype]:
    size = node.graphObject_.getSize()
    if( size[0] == 0 ):
      print "Size is 0", node, node.graphObject_.getCenterCoord(), "<--conns"
      node.graphObject_.Move(20,20)
    else:
      if( node.graphObject_.getConnectionCoordinates( "OUT", node.graphObject_) != None ):
        coords = node.graphObject_.getConnectionCoordinates( "OUT", node.graphObject_)[0]
        middlePos = [coords[2],coords[3] ]
        print node,middlePos, "<--getConn"
"""
gpl-3.0
FireWRT/OpenWrt-Firefly-Libraries
staging_dir/host/lib/python2.7/test/test_codecmaps_jp.py
32
1857
#
# test_codecmaps_jp.py
#   Codec mapping tests for Japanese encodings
#

from test import test_support
from test import test_multibytecodec_support
import unittest


class TestCP932Map(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    encoding = 'cp932'
    mapfileurl = 'http://www.pythontest.net/unicode/CP932.TXT'
    # Mappings absent from the reference table.
    supmaps = [
        ('\x80', u'\u0080'),
        ('\xa0', u'\uf8f0'),
        ('\xfd', u'\uf8f1'),
        ('\xfe', u'\uf8f2'),
        ('\xff', u'\uf8f3'),
    ]
    # Single-byte 0xA1-0xDF region maps straight onto code points at a
    # fixed +0xFEC0 offset.
    supmaps.extend((chr(code), unichr(code + 0xfec0))
                   for code in range(0xa1, 0xe0))


class TestEUCJPCOMPATMap(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    encoding = 'euc_jp'
    mapfilename = 'EUC-JP.TXT'
    mapfileurl = 'http://www.pythontest.net/unicode/EUC-JP.TXT'


class TestSJISCOMPATMap(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    encoding = 'shift_jis'
    mapfilename = 'SHIFTJIS.TXT'
    mapfileurl = 'http://www.pythontest.net/unicode/SHIFTJIS.TXT'
    # Round-trip exceptions for the backslash/yen and tilde/overline pairs.
    pass_enctest = [
        ('\x81_', u'\\'),
    ]
    pass_dectest = [
        ('\\', u'\xa5'),
        ('~', u'\u203e'),
        ('\x81_', u'\\'),
    ]


class TestEUCJISX0213Map(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    encoding = 'euc_jisx0213'
    mapfilename = 'EUC-JISX0213.TXT'
    mapfileurl = 'http://www.pythontest.net/unicode/EUC-JISX0213.TXT'


class TestSJISX0213Map(test_multibytecodec_support.TestBase_Mapping, unittest.TestCase):
    encoding = 'shift_jisx0213'
    mapfilename = 'SHIFT_JISX0213.TXT'
    mapfileurl = 'http://www.pythontest.net/unicode/SHIFT_JISX0213.TXT'


def test_main():
    test_support.run_unittest(__name__)


if __name__ == "__main__":
    test_main()
gpl-2.0
thaumos/ansible
lib/ansible/modules/network/fortios/fortios_router_multicast6.py
21
10553
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.

__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: fortios_router_multicast6
short_description: Configure IPv6 multicast in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS by allowing the
      user to set and modify router feature and multicast6 category.
      Examples include all parameters and values need to be adjusted to datasources before usage.
      Tested with FOS v6.0.2
version_added: "2.8"
author:
    - Miguel Angel Munoz (@mamunozgonzalez)
    - Nicolas Thomas (@thomnico)
notes:
    - Requires fortiosapi library developed by Fortinet
    - Run as a local_action in your playbook
requirements:
    - fortiosapi>=0.9.8
options:
    host:
        description:
            - FortiOS or FortiGate ip address.
        required: true
    username:
        description:
            - FortiOS or FortiGate username.
        required: true
    password:
        description:
            - FortiOS or FortiGate password.
        default: ""
    vdom:
        description:
            - Virtual domain, among those defined previously. A vdom is a
              virtual instance of the FortiGate that can be configured and
              used as a different unit.
        default: root
    https:
        description:
            - Indicates if the requests towards FortiGate must use HTTPS
              protocol
        type: bool
        default: true
    router_multicast6:
        description:
            - Configure IPv6 multicast.
        default: null
        suboptions:
            interface:
                description:
                    - Protocol Independent Multicast (PIM) interfaces.
                suboptions:
                    hello-holdtime:
                        description:
                            - Time before old neighbour information expires (1 - 65535 sec, default = 105).
                    hello-interval:
                        description:
                            - Interval between sending PIM hello messages (1 - 65535 sec, default = 30)..
                    name:
                        description:
                            - Interface name. Source system.interface.name.
                        required: true
            multicast-pmtu:
                description:
                    - Enable/disable PMTU for IPv6 multicast.
                choices:
                    - enable
                    - disable
            multicast-routing:
                description:
                    - Enable/disable IPv6 multicast routing.
                choices:
                    - enable
                    - disable
            pim-sm-global:
                description:
                    - PIM sparse-mode global settings.
                suboptions:
                    register-rate-limit:
                        description:
                            - Limit of packets/sec per source registered through this RP (0 means unlimited).
                    rp-address:
                        description:
                            - Statically configured RP addresses.
                        suboptions:
                            id:
                                description:
                                    - ID of the entry.
                                required: true
                            ip6-address:
                                description:
                                    - RP router IPv6 address.
'''

EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
  tasks:
  - name: Configure IPv6 multicast.
    fortios_router_multicast6:
      host:  "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom:  "{{ vdom }}"
      https: "False"
      router_multicast6:
        interface:
         -
            hello-holdtime: "4"
            hello-interval: "5"
            name: "default_name_6 (source system.interface.name)"
        multicast-pmtu: "enable"
        multicast-routing: "enable"
        pim-sm-global:
            register-rate-limit: "10"
            rp-address:
             -
                id:  "12"
                ip6-address: "<your_own_value>"
'''

RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''

from ansible.module_utils.basic import AnsibleModule

# Module-level FortiOSAPI handle, bound in main() once fortiosapi is imported.
fos = None


def login(data):
    # Open the FortiGate session using the module's host/credentials;
    # the https flag toggles TLS on the underlying fortiosapi client.
    host = data['host']
    username = data['username']
    password = data['password']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password)


def filter_router_multicast6_data(json):
    # Keep only the keys the multicast6 endpoint understands, dropping
    # unset (None) options so they are not sent to the device.
    option_list = ['interface', 'multicast-pmtu', 'multicast-routing',
                   'pim-sm-global']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary


def flatten_multilists_attributes(data):
    # Collapse list-valued attributes into space-separated strings.
    # NOTE(review): multilist_attrs is empty here, so this is effectively a
    # no-op for this module; the eval/exec pattern is generated boilerplate
    # shared with sibling fortios modules that do have multilist attributes.
    multilist_attrs = []

    for attr in multilist_attrs:
        try:
            path = "data['" + "']['".join(elem for elem in attr) + "']"
            current_val = eval(path)
            flattened_val = ' '.join(elem for elem in current_val)
            exec(path + '= flattened_val')
        except BaseException:
            pass

    return data


def router_multicast6(data, fos):
    # Push the (flattened, filtered) router.multicast6 configuration to the
    # device in the requested vdom.
    vdom = data['vdom']
    router_multicast6_data = data['router_multicast6']
    flattened_data = flatten_multilists_attributes(router_multicast6_data)
    filtered_data = filter_router_multicast6_data(flattened_data)
    return fos.set('router',
                   'multicast6',
                   data=filtered_data,
                   vdom=vdom)


def fortios_router(data, fos):
    # Log in, apply the configuration, log out.
    # Returns (is_error, has_changed, raw_response).
    # NOTE(review): if data['router_multicast6'] is falsy, 'resp' is never
    # bound and the return line raises NameError -- presumably unreachable
    # because the argspec makes the option the module's whole payload;
    # confirm before relying on it.
    login(data)

    if data['router_multicast6']:
        resp = router_multicast6(data, fos)

    fos.logout()
    return not resp['status'] == "success", resp['status'] == "success", resp


def main():
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "router_multicast6": {
            "required": False, "type": "dict",
            "options": {
                "interface": {"required": False, "type": "list",
                              "options": {
                                  "hello-holdtime": {"required": False, "type": "int"},
                                  "hello-interval": {"required": False, "type": "int"},
                                  "name": {"required": True, "type": "str"}
                              }},
                "multicast-pmtu": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
                "multicast-routing": {"required": False, "type": "str",
                                      "choices": ["enable", "disable"]},
                "pim-sm-global": {"required": False, "type": "dict",
                                  "options": {
                                      "register-rate-limit": {"required": False, "type": "int"},
                                      "rp-address": {"required": False, "type": "list",
                                                     "options": {
                                                         "id": {"required": True, "type": "int"},
                                                         "ip6-address": {"required": False, "type": "str"}
                                                     }}
                                  }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing dependency produces a clean
    # module failure instead of a traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_router(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
gpl-3.0
DazWorrall/ansible-modules-extras
windows/win_unzip.py
98
3637
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# (Ansible loads this .py only for its DOCUMENTATION/EXAMPLES metadata.)

DOCUMENTATION = '''
---
module: win_unzip
version_added: "2.0"
short_description: Unzips compressed files and archives on the Windows node
description:
    - Unzips compressed files and archives. For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) Module is required. This module (in conjunction with PSCX) has the ability to recursively unzip files within the src zip file provided and also functionality for many other compression types. If the destination directory does not exist, it will be created before unzipping the file. Specifying rm parameter will force removal of the src file after extraction.
options:
  src:
    description:
      - File to be unzipped (provide absolute path)
    required: true
  dest:
    description:
      - Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created.
    required: true
  rm:
    description:
      - Remove the zip file, after unzipping
    required: no
    choices:
      - true
      - false
      - yes
      - no
    default: false
  recurse:
    description:
      - Recursively expand zipped files within the src file.
    required: no
    default: false
    choices:
      - true
      - false
      - yes
      - no
  creates:
    description:
      - If this file or directory exists the specified src will not be extracted.
    required: no
    default: null
author: Phil Schwartz
'''

EXAMPLES = '''
# This unzips a library that was downloaded with win_get_url, and removes the file after extraction
$ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all
# Playbook example

# Simple unzip
---
- name: Unzip a bz2 (BZip) file
  win_unzip:
    src: "C:\Users\Phil\Logs.bz2"
    dest: "C:\Users\Phil\OldLogs"
    creates: "C:\Users\Phil\OldLogs"

# This playbook example unzips a .zip file and recursively decompresses the contained .gz files and removes all unneeded compressed files after completion.
---
- name: Unzip ApplicationLogs.zip and decompress all GZipped log files
  hosts: all
  gather_facts: false
  tasks:
    - name: Recursively decompress GZ files in ApplicationLogs.zip
      win_unzip:
        src: C:\Downloads\ApplicationLogs.zip
        dest: C:\Application\Logs
        recurse: yes
        rm: true

# Install PSCX to use for extracting a gz file
  - name: Grab PSCX msi
    win_get_url:
      url: 'http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959'
      dest: 'C:\\pscx.msi'

  - name: Install PSCX
    win_msi:
      path: 'C:\\pscx.msi'

  - name: Unzip gz log
    win_unzip:
      src: "C:\\Logs\\application-error-logs.gz"
      dest: "C:\\ExtractedLogs\\application-error-logs"
'''
gpl-3.0
bbiiggppiigg/PokemonGo-Bot
pokemongo_bot/api_wrapper.py
3
11695
import time
import logging
import random
import base64
import struct
import hashlib
import os
import json

from pgoapi.exceptions import (ServerSideRequestThrottlingException,
    NotLoggedInException, ServerBusyOrOfflineException,
    NoPlayerPositionSetException, EmptySubrequestChainException,
    UnexpectedResponseException)
from pgoapi.pgoapi import PGoApi, PGoApiRequest, RpcApi
from pgoapi.protos.POGOProtos.Networking.Requests.RequestType_pb2 import RequestType
from pgoapi.protos.POGOProtos.Networking.Envelopes.Signature_pb2 import Signature
from pgoapi.utilities import get_time

from pokemongo_bot.datastore import Datastore
from human_behaviour import sleep, gps_noise_rng
from pokemongo_bot.base_dir import _base_dir


class PermaBannedException(Exception):
    """Raised when a response shows the permanent-ban symptom detected in
    ApiRequest.is_response_valid (status_code 3 with an empty GET_INVENTORY)."""


class ApiWrapper(Datastore, PGoApi):
    """PGoApi wrapper that keeps the real position separate from a
    noise-perturbed position and derives a stable per-account device id."""

    # md5 hex digest used as the fake hardware device id; set per account.
    DEVICE_ID = None

    def __init__(self, config=None):
        PGoApi.__init__(self)
        # Set to default, just for CI...
        self.actual_lat, self.actual_lng, self.actual_alt = PGoApi.get_position(self)
        self.teleporting = False
        self.noised_lat, self.noised_lng, self.noised_alt = self.actual_lat, self.actual_lng, self.actual_alt
        self.useVanillaRequest = False
        self.config = config

        if self.config is None or self.config.username is None:
            # No account to key the device id on; use a fixed default.
            ApiWrapper.DEVICE_ID = "3d65919ca1c2fc3a8e2bd7cc3f974c34"
            return

        # The salt is persisted per user so the device id is stable across runs.
        # (The original also had dead "no config" branches after the early
        # return above; they have been removed.)
        did_path = os.path.join(_base_dir, 'data', 'deviceid-%s.txt' % self.config.username)
        if os.path.exists(did_path):
            # Fix: close the salt file instead of leaking the handle.
            with open(did_path, 'r') as salt_file:
                salt = salt_file.read()
        else:
            # First run for this user: create and persist a random salt.
            rand_float = random.SystemRandom().random()
            salt = base64.b64encode(struct.pack('!d', rand_float))
            with open(did_path, "w") as salt_file:
                salt_file.write("{0}".format(salt))

        ApiWrapper.DEVICE_ID = hashlib.md5(self.config.username + salt).hexdigest()

    def create_request(self):
        """Return a new request object; a vanilla PGoApiRequest during login."""
        request_class = PGoApiRequest if self.useVanillaRequest else ApiRequest
        return request_class(
            self,
            self._position_lat,
            self._position_lng,
            self._position_alt
        )

    def login(self, *args):
        # The base-class login flow needs the vanilla create_request().
        self.useVanillaRequest = True
        try:
            return PGoApi.login(self, *args)
        finally:
            # Always restore the wrapped request class, even if login raises.
            self.useVanillaRequest = False

    def set_position(self, lat, lng, alt=None, teleporting=False):
        """Record the real position and forward a (possibly noised) position
        to PGoApi. ``alt=None`` keeps the previously stored altitude."""
        self.actual_lat = lat
        self.actual_lng = lng
        if alt is not None:  # fix: `None != alt` -> identity test
            self.actual_alt = alt
        else:
            alt = self.actual_alt
        self.teleporting = teleporting

        if self.config.replicate_gps_xy_noise:
            lat += gps_noise_rng(self.config.gps_xy_noise_range)
            lng += gps_noise_rng(self.config.gps_xy_noise_range)
        if self.config.replicate_gps_z_noise:
            alt += gps_noise_rng(self.config.gps_z_noise_range)

        self.noised_lat, self.noised_lng, self.noised_alt = lat, lng, alt
        PGoApi.set_position(self, lat, lng, alt)

    def get_position(self):
        """Return the real (un-noised) position tuple."""
        return (self.actual_lat, self.actual_lng, self.actual_alt)


class ApiRequest(PGoApiRequest):
    """PGoApiRequest that throttles, retries, validates responses, and signs
    each request with plausible fake sensor/location/device data."""

    def __init__(self, *args):
        PGoApiRequest.__init__(self, *args)
        self.logger = logging.getLogger(__name__)
        self.request_callers = []
        self.last_api_request_time = None
        self.requests_per_seconds = 2

    def can_call(self):
        """Validate request preconditions; raises rather than returning False."""
        if not self._req_method_list:
            raise EmptySubrequestChainException()

        if (self._position_lat is None
                or self._position_lng is None
                or self._position_alt is None):
            raise NoPlayerPositionSetException()

        if self._auth_provider is None or not self._auth_provider.is_login():
            # Bug fix: this class defines `self.logger`, not `self.log`;
            # the original raised AttributeError here and masked the
            # NotLoggedInException below.
            self.logger.info('Not logged in')
            raise NotLoggedInException()

        return True

    def _call(self):
        # Need to fill in the location_fix
        location_fix = [Signature.LocationFix(
            provider='fused',
            timestamp_snapshot=(get_time(ms=True) - RpcApi.START_TIME) - random.randint(100, 300),
            latitude=self._position_lat,
            longitude=self._position_lng,
            horizontal_accuracy=round(random.uniform(50, 250), 7),
            altitude=self._position_alt,
            vertical_accuracy=random.randint(2, 5),
            provider_status=3,
            location_type=1
        )]

        sensor_info = Signature.SensorInfo(
            timestamp_snapshot=(get_time(ms=True) - RpcApi.START_TIME) - random.randint(200, 400),
            magnetometer_x=random.uniform(-0.139084026217, 0.138112977147),
            magnetometer_y=random.uniform(-0.2, 0.19),
            magnetometer_z=random.uniform(-0.2, 0.4),
            angle_normalized_x=random.uniform(-47.149471283, 61.8397789001),
            angle_normalized_y=random.uniform(-47.149471283, 61.8397789001),
            angle_normalized_z=random.uniform(-47.149471283, 5),
            # NOTE(review): both bounds are identical, so this is a constant;
            # looks unintentional but is kept to preserve behavior.
            accel_raw_x=random.uniform(0.0729667818829, 0.0729667818829),
            accel_raw_y=random.uniform(-2.788630499244109, 3.0586791383810468),
            accel_raw_z=random.uniform(-0.34825887123552773, 0.19347580173737935),
            gyroscope_raw_x=random.uniform(-0.9703824520111084, 0.8556089401245117),
            gyroscope_raw_y=random.uniform(-1.7470258474349976, 1.4218578338623047),
            gyroscope_raw_z=random.uniform(-0.9681901931762695, 0.8396636843681335),
            accel_normalized_x=random.uniform(-0.31110161542892456, 0.1681540310382843),
            accel_normalized_y=random.uniform(-0.6574847102165222, -0.07290205359458923),
            accel_normalized_z=random.uniform(-0.9943905472755432, -0.7463029026985168),
            accelerometer_axes=3
        )
        device_info = Signature.DeviceInfo(
            device_id=ApiWrapper.DEVICE_ID,
            device_brand='Apple',
            device_model='iPhone',
            device_model_boot='iPhone8,2',
            hardware_manufacturer='Apple',
            hardware_model='N66AP',
            firmware_brand='iPhone OS',
            firmware_type='9.3.3'
        )
        activity_status = Signature.ActivityStatus(
            # walking=True,
            # stationary=True,
            # automotive=True,
            # tilting=True
        )

        signature = Signature(
            location_fix=location_fix,
            sensor_info=sensor_info,
            device_info=device_info,
            activity_status=activity_status,
            unknown25=-8537042734809897855
        )
        return PGoApiRequest.call(self, signature)

    def _pop_request_callers(self):
        # Drain the recorded sub-request names, upper-cased so they match the
        # RequestType keys used in the response dict.
        callers, self.request_callers = self.request_callers, []
        return [caller.upper() for caller in callers]

    def is_response_valid(self, result, request_callers):
        """Return True if `result` is a well-formed response containing an
        entry for every requested sub-call. Raises PermaBannedException on
        the known permanent-ban signature."""
        if not result or not isinstance(result, dict):
            return False

        if 'responses' not in result or 'status_code' not in result:
            return False

        if not isinstance(result['responses'], dict):
            return False

        try:
            # Permaban symptom is empty response to GET_INVENTORY and status_code = 3
            if result['status_code'] == 3 and 'GET_INVENTORY' in request_callers \
                    and not result['responses']['GET_INVENTORY']:
                raise PermaBannedException
        except KeyError:
            # Still wrong
            return False

        # The response can still programmatically be valid at this point but
        # still be wrong: check that the server did send what we asked it for.
        for request_caller in request_callers:
            if request_caller not in result['responses']:
                return False

        return True

    def call(self, max_retry=15):
        """Execute the queued sub-requests with throttling and retries.

        Retries on server throttling (up to `max_retry`), on unexpected
        responses (up to 5 before a long back-off), and on invalid responses
        (up to `max_retry`), raising the corresponding pgoapi exception when
        a limit is exceeded.
        """
        request_callers = self._pop_request_callers()
        if not self.can_call():
            return False  # unreachable: can_call() raises on every failure

        request_timestamp = None
        api_req_method_list = self._req_method_list
        result = None
        try_cnt = 0
        throttling_retry = 0
        unexpected_response_retry = 0
        while True:
            request_timestamp = self.throttle_sleep()
            # self._call internally clears this field, so restore it each loop
            self._req_method_list = [req_method for req_method in api_req_method_list]
            should_throttle_retry = False
            should_unexpected_response_retry = False
            try:
                result = self._call()
            except ServerSideRequestThrottlingException:
                should_throttle_retry = True
            except UnexpectedResponseException:
                should_unexpected_response_retry = True

            if should_throttle_retry:
                throttling_retry += 1
                if throttling_retry >= max_retry:
                    raise ServerSideRequestThrottlingException('Server throttled too many times')
                sleep(1)  # huge sleep ?
                continue  # skip response checking

            if should_unexpected_response_retry:
                unexpected_response_retry += 1
                if unexpected_response_retry >= 5:
                    self.logger.warning(
                        'Server is not responding correctly to our requests.  Waiting for 30 seconds to reconnect.')
                    sleep(30)
                else:
                    sleep(2)
                continue

            if not self.is_response_valid(result, request_callers):
                try_cnt += 1
                if try_cnt > 3:
                    self.logger.warning(
                        'Server seems to be busy or offline - try again - {}/{}'.format(try_cnt, max_retry))
                if try_cnt >= max_retry:
                    raise ServerBusyOrOfflineException()
                sleep(1)
            else:
                break

        self.last_api_request_time = request_timestamp
        return result

    def __getattr__(self, func):
        # Record each sub-request name (e.g. "get_inventory") so the eventual
        # response can be validated, then let the base class build the call.
        if func.upper() in RequestType.keys():
            self.request_callers.append(func)
        return PGoApiRequest.__getattr__(self, func)

    def throttle_sleep(self):
        """Sleep just long enough to respect `requests_per_seconds`; returns
        the current time in milliseconds."""
        now_milliseconds = time.time() * 1000
        required_delay_between_requests = 1000 / self.requests_per_seconds

        difference = now_milliseconds - (self.last_api_request_time if self.last_api_request_time else 0)

        if self.last_api_request_time is not None and difference < required_delay_between_requests:
            sleep_time = required_delay_between_requests - difference
            time.sleep(sleep_time / 1000)

        return now_milliseconds
mit
aam-at/tensorflow
tensorflow/python/ops/linalg/inverse_registrations.py
18
9206
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.inverse.

Each function below is registered (via `RegisterInverse`) as the inverse rule
for one `LinearOperator` subclass; `operator.inverse()` dispatches to the rule
registered for the operator's type, falling back to the base-class rule.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_addition
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_block_diag
from tensorflow.python.ops.linalg import linear_operator_block_lower_triangular
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_full_matrix
from tensorflow.python.ops.linalg import linear_operator_householder
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_inversion
from tensorflow.python.ops.linalg import linear_operator_kronecker


# By default, return LinearOperatorInversion which switched the .matmul
# and .solve methods.
@linear_operator_algebra.RegisterInverse(linear_operator.LinearOperator)
def _inverse_linear_operator(linop):
  # Lazy inverse: no factorization happens here; hints are carried over
  # unchanged since inversion preserves all four properties.
  return linear_operator_inversion.LinearOperatorInversion(
      linop,
      is_non_singular=linop.is_non_singular,
      is_self_adjoint=linop.is_self_adjoint,
      is_positive_definite=linop.is_positive_definite,
      is_square=linop.is_square)


@linear_operator_algebra.RegisterInverse(
    linear_operator_inversion.LinearOperatorInversion)
def _inverse_inverse_linear_operator(linop_inversion):
  # (A^-1)^-1 = A: unwrap instead of stacking two inversion wrappers.
  return linop_inversion.operator


@linear_operator_algebra.RegisterInverse(
    linear_operator_diag.LinearOperatorDiag)
def _inverse_diag(diag_operator):
  # The inverse of a diagonal operator has the elementwise-reciprocal diagonal.
  return linear_operator_diag.LinearOperatorDiag(
      1. / diag_operator.diag,
      is_non_singular=diag_operator.is_non_singular,
      is_self_adjoint=diag_operator.is_self_adjoint,
      is_positive_definite=diag_operator.is_positive_definite,
      is_square=True)


@linear_operator_algebra.RegisterInverse(
    linear_operator_identity.LinearOperatorIdentity)
def _inverse_identity(identity_operator):
  # The identity is its own inverse.
  return identity_operator


@linear_operator_algebra.RegisterInverse(
    linear_operator_identity.LinearOperatorScaledIdentity)
def _inverse_scaled_identity(identity_operator):
  # (c * I)^-1 = (1/c) * I; reuse the private row count of the input.
  return linear_operator_identity.LinearOperatorScaledIdentity(
      num_rows=identity_operator._num_rows,  # pylint: disable=protected-access
      multiplier=1. / identity_operator.multiplier,
      is_non_singular=identity_operator.is_non_singular,
      is_self_adjoint=True,
      is_positive_definite=identity_operator.is_positive_definite,
      is_square=True)


@linear_operator_algebra.RegisterInverse(
    linear_operator_block_diag.LinearOperatorBlockDiag)
def _inverse_block_diag(block_diag_operator):
  # We take the inverse of each block on the diagonal.
  return linear_operator_block_diag.LinearOperatorBlockDiag(
      operators=[
          operator.inverse() for operator in block_diag_operator.operators],
      is_non_singular=block_diag_operator.is_non_singular,
      is_self_adjoint=block_diag_operator.is_self_adjoint,
      is_positive_definite=block_diag_operator.is_positive_definite,
      is_square=True)


@linear_operator_algebra.RegisterInverse(
    linear_operator_block_lower_triangular.LinearOperatorBlockLowerTriangular)
def _inverse_block_lower_triangular(block_lower_triangular_operator):
  """Inverse of LinearOperatorBlockLowerTriangular.

  We recursively apply the identity:

  ```none
  |A 0|'  =  | A'     0|
  |B C|      |-C'BA' C'|
  ```

  where `A` is n-by-n, `B` is m-by-n, `C` is m-by-m, and `'` denotes inverse.

  This identity can be verified through multiplication:

  ```none
  |A 0||A'      0|
  |B C||-C'BA' C'|

    =  |AA'      0|
       |BA'-CC'BA' CC'|

    =  |I 0|
       |0 I|
  ```

  Args:
    block_lower_triangular_operator: Instance of
      `LinearOperatorBlockLowerTriangular`.

  Returns:
    block_lower_triangular_operator_inverse: Instance of
      `LinearOperatorBlockLowerTriangular`, the inverse of
      `block_lower_triangular_operator`.
  """
  # Base case: a 1x1 blockwise operator; just invert the single block.
  if len(block_lower_triangular_operator.operators) == 1:
    return (linear_operator_block_lower_triangular.
            LinearOperatorBlockLowerTriangular(
                [[block_lower_triangular_operator.operators[0][0].inverse()]],
                is_non_singular=block_lower_triangular_operator.is_non_singular,
                is_self_adjoint=block_lower_triangular_operator.is_self_adjoint,
                is_positive_definite=(block_lower_triangular_operator.
                                      is_positive_definite),
                is_square=True))

  blockwise_dim = len(block_lower_triangular_operator.operators)

  # Calculate the inverse of the `LinearOperatorBlockLowerTriangular`
  # representing all but the last row of `block_lower_triangular_operator` with
  # a recursive call (the matrix `A'` in the docstring definition).
  upper_left_inverse = (
      linear_operator_block_lower_triangular.LinearOperatorBlockLowerTriangular(
          block_lower_triangular_operator.operators[:-1]).inverse())

  bottom_row = block_lower_triangular_operator.operators[-1]
  bottom_right_inverse = bottom_row[-1].inverse()

  # Find the bottom row of the inverse (equal to `[-C'BA', C']` in the
  # docstring definition, where `C` is the bottom-right operator of
  # `block_lower_triangular_operator` and `B` is the set of operators in the
  # bottom row excluding `C`). To find `-C'BA'`, we first iterate over the
  # column partitions of `A'`.
  inverse_bottom_row = []
  for i in range(blockwise_dim - 1):
    # Find the `i`-th block of `BA'`.
    blocks = []
    for j in range(i, blockwise_dim - 1):
      result = bottom_row[j].matmul(upper_left_inverse.operators[j][i])
      # add_operators only understands a fixed set of operator types; wrap
      # anything else as a dense full-matrix operator first.
      if not any(isinstance(result, op_type)
                 for op_type in linear_operator_addition.SUPPORTED_OPERATORS):
        result = linear_operator_full_matrix.LinearOperatorFullMatrix(
            result.to_dense())
      blocks.append(result)

    summed_blocks = linear_operator_addition.add_operators(blocks)
    assert len(summed_blocks) == 1
    block = summed_blocks[0]

    # Find the `i`-th block of `-C'BA'`.
    block = bottom_right_inverse.matmul(block)
    # Negate by left-multiplying with (-1) * I.
    block = linear_operator_identity.LinearOperatorScaledIdentity(
        num_rows=bottom_right_inverse.domain_dimension_tensor(),
        multiplier=math_ops.cast(-1, dtype=block.dtype)).matmul(block)
    inverse_bottom_row.append(block)

  # `C'` is the last block of the inverted linear operator.
  inverse_bottom_row.append(bottom_right_inverse)

  return (
      linear_operator_block_lower_triangular.LinearOperatorBlockLowerTriangular(
          upper_left_inverse.operators + [inverse_bottom_row],
          is_non_singular=block_lower_triangular_operator.is_non_singular,
          is_self_adjoint=block_lower_triangular_operator.is_self_adjoint,
          is_positive_definite=(block_lower_triangular_operator.
                                is_positive_definite),
          is_square=True))


@linear_operator_algebra.RegisterInverse(
    linear_operator_kronecker.LinearOperatorKronecker)
def _inverse_kronecker(kronecker_operator):
  # Inverse decomposition of a Kronecker product is the Kronecker product
  # of inverse decompositions.
  return linear_operator_kronecker.LinearOperatorKronecker(
      operators=[
          operator.inverse() for operator in kronecker_operator.operators],
      is_non_singular=kronecker_operator.is_non_singular,
      is_self_adjoint=kronecker_operator.is_self_adjoint,
      is_positive_definite=kronecker_operator.is_positive_definite,
      is_square=True)


@linear_operator_algebra.RegisterInverse(
    linear_operator_circulant.LinearOperatorCirculant)
def _inverse_circulant(circulant_operator):
  # Inverting the spectrum is sufficient to get the inverse.
  return linear_operator_circulant.LinearOperatorCirculant(
      spectrum=1. / circulant_operator.spectrum,
      is_non_singular=circulant_operator.is_non_singular,
      is_self_adjoint=circulant_operator.is_self_adjoint,
      is_positive_definite=circulant_operator.is_positive_definite,
      is_square=True,
      input_output_dtype=circulant_operator.dtype)


@linear_operator_algebra.RegisterInverse(
    linear_operator_householder.LinearOperatorHouseholder)
def _inverse_householder(householder_operator):
  # A Householder reflection is involutory (H @ H = I), so it is its own
  # inverse.
  return householder_operator
apache-2.0
HiroIshikawa/21playground
flask-sample/hello/venv/lib/python3.5/site-packages/pkg_resources/__init__.py
211
106670
""" Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import token import symbol import operator import platform import collections import plistlib import email.parser import tempfile import textwrap from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp PY3 = sys.version_info > (3,) PY2 = not PY3 if PY3: from urllib.parse import urlparse, urlunparse if PY2: from urlparse import urlparse, urlunparse if PY3: string_types = str, else: string_types = str, eval('unicode') iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems() # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split # Avoid try/except due to potential problems with delayed import mechanisms. 
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": import importlib.machinery as importlib_machinery else: importlib_machinery = None try: import parser except ImportError: pass try: import pkg_resources._vendor.packaging.version import pkg_resources._vendor.packaging.specifiers packaging = pkg_resources._vendor.packaging except ImportError: # fallback to naturally-installed version; allows system packagers to # omit vendored packages. import packaging.version import packaging.specifiers # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. """ class _SetuptoolsVersionMixin(object): def __hash__(self): return super(_SetuptoolsVersionMixin, self).__hash__() def __lt__(self, other): if isinstance(other, tuple): return tuple(self) < other else: return super(_SetuptoolsVersionMixin, self).__lt__(other) def __le__(self, other): if isinstance(other, tuple): return tuple(self) <= other else: return super(_SetuptoolsVersionMixin, self).__le__(other) def __eq__(self, other): if isinstance(other, tuple): return tuple(self) == other else: return super(_SetuptoolsVersionMixin, self).__eq__(other) def __ge__(self, other): if isinstance(other, tuple): return tuple(self) >= other else: return super(_SetuptoolsVersionMixin, self).__ge__(other) def __gt__(self, other): if isinstance(other, tuple): return tuple(self) > other else: return super(_SetuptoolsVersionMixin, self).__gt__(other) def __ne__(self, other): if isinstance(other, tuple): return tuple(self) != other else: return super(_SetuptoolsVersionMixin, self).__ne__(other) def __getitem__(self, key): return tuple(self)[key] def __iter__(self): component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', }.get def _parse_version_parts(s): for part 
in component_re.split(s): part = replace(part, part) if not part or part == '.': continue if part[:1] in '0123456789': # pad for numeric comparison yield part.zfill(8) else: yield '*'+part # ensure that alpha/beta/candidate are before final yield '*final' def old_parse_version(s): parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): # remove '-' before a prerelease tag if part < '*final': while parts and parts[-1] == '*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == '00000000': parts.pop() parts.append(part) return tuple(parts) # Warn for use of this function warnings.warn( "You have iterated over the result of " "pkg_resources.parse_version. This is a legacy behavior which is " "inconsistent with the new version class introduced in setuptools " "8.0. In most cases, conversion to a tuple is unnecessary. For " "comparison of versions, sort the Version instances directly. If " "you have another use case requiring the tuple, please file a " "bug with the setuptools project describing that need.", RuntimeWarning, stacklevel=1, ) for part in old_parse_version(str(self)): yield part class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): pass class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, packaging.version.LegacyVersion): pass def parse_version(v): try: return SetuptoolsVersion(v) except packaging.version.InvalidVersion: return SetuptoolsLegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_'+v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_'+_state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def 
_sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: # not Mac OS X pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 
'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__+repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. """ _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. 
""" _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ("The '{self.req}' distribution was not found " "and is required by {self.requirers_str}") @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. 
""" _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. 
""" try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), _macosx_arch(machine)) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. """ if provided is None or required is None or provided==required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? 
if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, string_types): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? 
        (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""


class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Interface specification only (see IMetadataProvider); methods omit
    # `self` and have no implementations.

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""


class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # entries: path items in insertion order (may contain duplicates,
        # mirroring sys.path); entry_keys: entry -> list of project keys found
        # there; by_key: project key -> active Distribution; callbacks:
        # subscribers notified when a distribution is added.
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []

        if entries is None:
            entries = sys.path

        for entry in entries:
            self.add_entry(entry)

    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws

        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            # sys.path already has a conflicting version active; rebuild the
            # working set from scratch so the requirement wins.
            return cls._build_from_requirements(__requires__)

        return ws

    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)

        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)

        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)

    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Same caller-namespace-replacement technique as the module-level
        # run_script() helper.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        # `seen` deduplicates project keys so each distribution is yielded
        # at most once even if it is listed under several entries.
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue

            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)

        if entry is None:
            entry = dist.location
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return

        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # notify subscribers registered via subscribe()
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None,
            replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version.  Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        # set up the stack (reversed so pop(0) processes in original order
        # while extensions land behind current items)
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []

        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)

        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)

            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)

            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)

            processed[req] = True

        # return list of distros to activate
        return to_activate

    def find_plugins(self, plugin_env, full_env=None, installer=None,
            fallback=True):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. 
""" needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. 
""" return (self.python is None or dist.py_version is None or dist.py_version==self.python) \ and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match(self, req, working_set, installer=None): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. 
If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ dist = working_set.find(req) if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" 
extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() err = ExtractionError("""Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: %s The Python egg cache directory is currently set to: %s Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. 
""" % (old_exc, cache_path) ) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. `archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name+'-tmp', *names) try: _bypass_ensure_directory(target_path) except: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ("%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." 
% path) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) """ if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. 
This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except KeyError: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! app_data = 'Application Data' app_homes = [ # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME (('WINDIR',), app_data), ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname, subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. 
""" return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_') class MarkerEvaluation(object): values = { 'os_name': lambda: os.name, 'sys_platform': lambda: sys.platform, 'python_full_version': platform.python_version, 'python_version': lambda: platform.python_version()[:3], 'platform_version': platform.version, 'platform_machine': platform.machine, 'python_implementation': platform.python_implementation, } @classmethod def is_invalid_marker(cls, text): """ Validate text as a PEP 426 environment marker; return an exception if invalid or False otherwise. """ try: cls.evaluate_marker(text) except SyntaxError as e: return cls.normalize_exception(e) return False @staticmethod def normalize_exception(exc): """ Given a SyntaxError from a marker evaluation, normalize the error message: - Remove indications of filename and line number. - Replace platform-specific error messages with standard error messages. """ subs = { 'unexpected EOF while parsing': 'invalid syntax', 'parenthesis is never closed': 'invalid syntax', } exc.filename = None exc.lineno = None exc.msg = subs.get(exc.msg, exc.msg) return exc @classmethod def and_test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! 
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        # every operand is interpreted before being combined (no shortcut)
        return functools.reduce(operator.and_, items)

    @classmethod
    def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        return functools.reduce(operator.or_, items)

    @classmethod
    def atom(cls, nodelist):
        # Only a parenthesized sub-expression is allowed as an atom.
        t = nodelist[1][0]
        if t == token.LPAR:
            if nodelist[2][0] == token.RPAR:
                raise SyntaxError("Empty parentheses")
            return cls.interpret(nodelist[2])
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)

    @classmethod
    def comparison(cls, nodelist):
        if len(nodelist) > 4:
            msg = "Chained comparison not allowed in environment markers"
            raise SyntaxError(msg)
        comp = nodelist[2][1]
        cop = comp[1]
        if comp[0] == token.NAME:
            # two-token operators: "not in" / "is not"
            if len(nodelist[2]) == 3:
                if cop == 'not':
                    cop = 'not in'
                else:
                    cop = 'is not'
        try:
            cop = cls.get_op(cop)
        except KeyError:
            msg = repr(cop) + " operator not allowed in environment markers"
            raise SyntaxError(msg)
        return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))

    @classmethod
    def get_op(cls, op):
        # Dispatch table mapping parse-tree symbols and operator strings to
        # their evaluators.
        ops = {
            symbol.test: cls.test,
            symbol.and_test: cls.and_test,
            symbol.atom: cls.atom,
            symbol.comparison: cls.comparison,
            'not in': lambda x, y: x not in y,
            'in': lambda x, y: x in y,
            '==': operator.eq,
            '!=': operator.ne,
            '<': operator.lt,
            '>': operator.gt,
            '<=': operator.le,
            '>=': operator.ge,
        }
        if hasattr(symbol, 'or_test'):
            ops[symbol.or_test] = cls.test
        return ops[op]

    @classmethod
    def evaluate_marker(cls, text, extra=None):
        """
        Evaluate a PEP 426 environment marker on CPython 2.4+.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.

        This implementation uses the 'parser' module, which is not implemented
        on Jython and has been superseded by the 'ast' module in Python 2.6 and
        later.
        """
        return cls.interpret(parser.expr(text).totuple(1)[1])

    @classmethod
    def _markerlib_evaluate(cls, text):
        """
        Evaluate a PEP 426 environment marker using markerlib.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        """
        import _markerlib
        # markerlib implements Metadata 1.2 (PEP 345) environment markers.
        # Translate the variables to Metadata 2.0 (PEP 426).
        env = _markerlib.default_environment()
        for key in env.keys():
            new_key = key.replace('.', '_')
            env[new_key] = env.pop(key)
        try:
            result = _markerlib.interpret(text, env)
        except NameError as e:
            raise SyntaxError(e.args[0])
        return result

    if 'parser' not in globals():
        # Fall back to less-complete _markerlib implementation if 'parser' module
        # is not available.
        evaluate_marker = _markerlib_evaluate

    @classmethod
    def interpret(cls, nodelist):
        # collapse single-child chains in the parse tree
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        try:
            op = cls.get_op(nodelist[0])
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)

    @classmethod
    def evaluate(cls, nodelist):
        # collapse single-child chains in the parse tree
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind == token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind == token.STRING:
            s = nodelist[1]
            if not cls._safe_string(s):
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            # strip the surrounding quote characters
            return s[1:-1]
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)

    @staticmethod
    def _safe_string(cand):
        # Accept only simple single- or double-quoted literals without
        # triple quotes or backslash escapes.
        return (
            cand[:1] in "'\"" and
            not cand.startswith('"""') and
            not cand.startswith("'''") and
            '\\' not in cand
        )

invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker


class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        return io.BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))

    # Metadata is returned as bytes on Python 2, decoded UTF-8 text on
    # Python 3.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name)).decode("utf-8")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # normalize line endings before compiling
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # script only exists inside the zip; seed linecache so tracebacks
            # can still show source lines
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)

    # The _has/_isdir/_listdir/_get primitives must be supplied by a
    # registered provider subclass; the base class cannot answer them.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )

register_loader_type(object, NullProvider)


class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        while path != old:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)


class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()

register_loader_type(type(None), DefaultProvider)

if importlib_machinery is not None:
    register_loader_type(importlib_machinery.SourceFileLoader,
        DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    # Degenerate provider: reports no files, no directories, and empty data.
    # Used as the default metadata provider for Distributions that have none.
    _isdir = _has = lambda self, path: False
    _get = lambda self, path: ''
    _listdir = lambda self, path: []
    module_path = None

    def __init__(self):
        # Deliberately skip NullProvider.__init__, which requires a module.
        pass

# Shared singleton; EmptyProvider has no per-instance state.
empty_provider = EmptyProvider()


class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with ContextualZipFile(path) as zfile:
            items = (
                (
                    name.replace('/', os.sep),
                    zfile.getinfo(name),
                )
                for name in zfile.namelist()
            )
            return dict(items)

    load = build


class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    # Cache entry: the parsed manifest plus the archive mtime it was built from.
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime

        # Rebuild only when the archive is unseen or its mtime changed.
        if path not in self or self[path].mtime != mtime:
            manifest = self.build(path)
            self[path] = self.manifest_mod(manifest, mtime)

        return self[path].manifest


class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __new__(cls, *args, **kwargs):
        """
        Construct a ZipFile or ContextualZipFile as appropriate
        """
        # On Pythons whose ZipFile already supports the context-manager
        # protocol, return a plain ZipFile instead of this wrapper.
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)


class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    eagers = None
    # Class-level manifest cache shared by all ZipProvider instances.
    _zip_manifests = MemoizedZipManifests()

    def __init__(self, module):
        EggProvider.__init__(self, module)
        # Archive path plus trailing separator; used to strip the archive
        # prefix off full filesystem paths in _zipinfo_name().
        self.zip_pre = self.loader.archive+os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with
the zipimport directory cache for our target archive if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. # pseudo-fs path fspath = self.zip_pre+zip_path if fspath.startswith(self.egg_root+os.sep): return fspath[len(self.egg_root)+1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) 
utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. return real_path # Windows, del old file and retry elif os.name=='nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size!=size or stat.st_mtime!=timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return 
self._zipinfo_name(self._fn(self.module_path, resource_name))

register_loader_type(zipimport.zipimporter, ZipProvider)


class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        # Only the single PKG-INFO file is ever reported as present.
        return name=='PKG-INFO'

    def get_metadata(self, name):
        if name=='PKG-INFO':
            # 'rU' preserves universal-newline reads on Python 2.
            with open(self.path,'rU') as f:
                metadata = f.read()
            return metadata
        raise KeyError("No metadata except PKG-INFO is available")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))


class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        self.module_path = path
        self.egg_info = egg_info


class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""

        # Mirror ZipProvider's setup, but derive everything from the
        # zipimporter instead of an imported module object.
        self.zip_pre = importer.archive+os.sep
        self.loader = importer
        if importer.prefix:
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()

# Registry mapping importer types to distribution-finder callables;
# _declare_state records it for save/restore of module globals.
_declare_state('dict', _distribution_finders = {})

def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder

def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # Dispatch to the finder registered for this path item's importer type.
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)

def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    # Recurse into eggs stored inside this archive ("basket" layout).
    for subitem in metadata.resource_listdir('/'):
        if subitem.endswith('.egg'):
            subpath = os.path.join(path_item, subitem)
            for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist

register_finder(zipimport.zipimporter, find_eggs_in_zip)

def find_nothing(importer, path_item, only=False):
    # Fallback finder for importer types with no distribution support.
    return ()

register_finder(object, find_nothing)

def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if
os.path.isdir(fullpath): # egg-info directory, allow getting metadata metadata = PathMetadata(path_item, fullpath) else: metadata = FileMetadata(fullpath) yield Distribution.from_location( path_item, entry, metadata, precedence=DEVELOP_DIST ) elif not only and lower.endswith('.egg'): dists = find_distributions(os.path.join(path_item, entry)) for dist in dists: yield dist elif not only and lower.endswith('.egg-link'): with open(os.path.join(path_item, entry)) as entry_file: entry_lines = entry_file.readlines() for line in entry_lines: if not line.strip(): continue path = os.path.join(path_item, line.rstrip()) dists = find_distributions(path) for item in dists: yield item break register_finder(pkgutil.ImpImporter, find_on_path) if importlib_machinery is not None: register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. 
""" _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module,'__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) for path_item in path: if path_item not in module.__path__: module.__path__.append(path_item) return subpath def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' 
in packageName:
            # Ensure the parent namespace package is declared first, then
            # import it so its __path__ is available below.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)

        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])

        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)

    finally:
        _imp.release_lock()

def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    # Serialized with the import lock; recurses into child namespaces.
    _imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()

def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""

    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item)==normalized:
            break
    else:
        # Only return the path if it's not already there
        return subpath

register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

if importlib_machinery is not None:
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)


def null_ns_handler(importer, path_item, packageName, module):
    # Default handler: importer type contributes no namespace subpaths.
    return None

register_namespace_handler(object, null_ns_handler)


def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    return os.path.normcase(os.path.realpath(filename))

def _normalize_cached(filename, _cache={}):
    # Memoized normalize_path; the mutable default is an intentional
    # module-lifetime cache.
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return
result def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s # whitespace and comment LINE_END = re.compile(r"\s*(#.*)?$").match # line continuation CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # Distribution or extra DISTRO = re.compile(r"\s*((\w|[-.])+)").match # ver. info VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match # comma between items COMMA = re.compile(r"\s*,").match OBRACKET = re.compile(r"\s*\[").match CBRACKET = re.compile(r"\s*\]").match MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. 
Call .resolve and " ".require separately.", DeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name]=ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point 
groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urlparse(location) if parsed[-1].startswith('md5='): return urlunparse(parsed[:-1] + ('',)) return location class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None,**kw): project_name, version, py_version, platform = [None]*4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: # .dist-info gets much metadata differently match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name','ver','pyver','plat' ) cls = _distributionImpl[ext.lower()] return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw ) @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return 
self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError: for line in self._get_metadata(self.PKG_INFO): if line.lower().startswith('version:'): self._version = safe_version(line.split(':',1)[1].strip()) return self._version else: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self) @property def _dep_map(self): try: return self.__dep_map except AttributeError: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn reqs=[] elif not evaluate_marker(marker): reqs=[] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self, path=None): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, 
self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group,{}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) def insert_on(self, path, loc = None): """Insert self.location in path before its nearest parent directory""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath= [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: break elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, 
give it precedence over its directory if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p+1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self,**kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class DistInfoDistribution(Distribution): """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = 
email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _preparse_requirement(self, requires_dist): """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') Split environment marker, add == prefix to version specifiers as necessary, and remove parenthesis. """ parts = requires_dist.split(';', 1) + [''] distvers = parts[0].strip() mark = parts[1].strip() distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) distvers = distvers.replace('(', '').replace(')', '') return (distvers, mark) def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from _markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = next(parse_requirements(distvers)) parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': Distribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args,**kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) class RequirementParseError(ValueError): def __str__(self): return ' '.join(self.args) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` 
must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
        # Scan a comma-separated list of ITEM matches starting at position p,
        # stopping at TERMINATOR; returns the (possibly advanced) line, the
        # new position, and the captured items.

        items = []

        while not TERMINATOR(line, p):
            if CONTINUE(line, p):
                # Backslash continuation: pull in the next physical line.
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    msg = "\\ must not appear on the last nonblank line"
                    raise RequirementParseError(msg)

            match = ITEM(line, p)
            if not match:
                msg = "Expected " + item_name + " in"
                raise RequirementParseError(msg, line, "at", line[p:])

            items.append(match.group(*groups))
            p = match.end()

            match = COMMA(line, p)
            if match:
                # skip the comma
                p = match.end()
            elif not TERMINATOR(line, p):
                msg = "Expected ',' or end-of-list in"
                raise RequirementParseError(msg, line, "at", line[p:])

        match = TERMINATOR(line, p)
        # skip the terminator, if any
        if match:
            p = match.end()
        return line, p, items

    for line in lines:
        match = DISTRO(line)
        if not match:
            raise RequirementParseError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []

        # Optional "[extra1, extra2]" list after the project name.
        match = OBRACKET(line, p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )

        # Version specifiers run to end-of-line (comments allowed).
        line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
            "version spec")
        specs = [(op, val) for op, val in specs]
        yield Requirement(project_name, specs, extras)


class Requirement:
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Join (op, version) pairs back into a PEP 440 specifier set,
        # e.g. [('>=', '1.0'), ('<', '2')] -> ">=1.0,<2".
        self.specifier = packaging.specifiers.SpecifierSet(
            ",".join(["".join([x, y]) for x, y in specs])
        )
        self.specs = specs
        self.extras = tuple(map(safe_extra, extras))
        # Precompute the comparison key and hash; Requirements are immutable.
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
        )
        self.__hash = hash(self.hashCmp)

    def __str__(self):
        extras = ','.join(self.extras)
        if extras:
            extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): reqs = list(parse_requirements(s)) if reqs: if len(reqs) == 1: return reqs[0] raise ValueError("Expected only one requirement", s) raise ValueError("No requirements found", s) def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls, type): class cls(cls, object): pass return cls.__mro__[1:] return cls.__mro__ def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, 0o755) def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. 
If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args,**kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args,**kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager for name in dir(manager): if not name.startswith('_'): g[name] = getattr(manager, name) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. Invocation by other packages is unsupported and done at their own risk. 
""" working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path, and ensure that # all distributions added to the working set in the future (e.g. by # calling ``require()``) will get activated as well. add_activation_listener(lambda dist: dist.activate()) working_set.entries=[] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals())
mit
Ell/goonauth
profiles/migrations/0012_auto__add_oauthapplication.py
1
12056
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'OAuthApplication' db.create_table(u'profiles_oauthapplication', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth2_provider.Application'])), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'profiles', ['OAuthApplication']) def backwards(self, orm): # Deleting model 'OAuthApplication' db.delete_table(u'profiles_oauthapplication') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': 
('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'oauth2_provider.application': { 'Meta': {'object_name': 'Application'}, 'authorization_grant_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'client_id': ('django.db.models.fields.CharField', [], {'default': "u'k=YN_eI9uae.;!dF2yJQau=ItrEFxgedxm8Py5-2'", 'unique': 'True', 'max_length': '100'}), 'client_secret': ('django.db.models.fields.CharField', [], {'default': 
"u'CLLYJQfFmPtCTaIlwbPpi4I!@AiwpphwdW6lxQ:;qeb.o!3HfvPjMGm@pG:hP;aahfZI:lK;IgmJ;WeqZv!oCQ9paPT0e9V83=Us5T-oW4YFtbjNBDU=_BL5UIf0MWu9'", 'max_length': '255', 'blank': 'True'}), 'client_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'redirect_uris': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'profiles.battlefieldfourprofile': { 'Meta': {'object_name': 'BattlefieldFourProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}) }, u'profiles.blizzardprofile': { 'Meta': {'object_name': 'BlizzardProfile'}, 'email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'realid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}) }, u'profiles.eveonlineprofile': { 'Meta': {'object_name': 'EveOnlineProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}) }, u'profiles.leagueoflegendsprofile': { 'Meta': {'object_name': 'LeagueOfLegendsProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}) }, u'profiles.minecraftprofile': { 'Meta': {'object_name': 'MinecraftProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': 
'50', 'null': 'True', 'blank': 'True'}) }, u'profiles.nintendoprofile': { 'Meta': {'object_name': 'NintendoProfile'}, 'friendcode': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'profiles.oauthapplication': { 'Meta': {'object_name': 'OAuthApplication'}, 'client': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oauth2_provider.Application']"}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'profiles.playstationnetworkprofile': { 'Meta': {'object_name': 'PlaystationNetworkProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}) }, u'profiles.somethingawfulprofile': { 'Meta': {'object_name': 'SomethingAwfulProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'postcount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'regdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'userid': ('django.db.models.fields.TextField', [], {}), 'username': ('django.db.models.fields.TextField', [], {}) }, u'profiles.steamprofile': { 'Meta': {'object_name': 'SteamProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'userid': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}) }, u'profiles.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'active': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'bf4': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.BattlefieldFourProfile']", 'null': 'True', 'blank': 'True'}), 'blizzard': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.BlizzardProfile']", 'null': 'True', 'blank': 'True'}), 'eveonline': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.EveOnlineProfile']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'leagueoflegends': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.LeagueOfLegendsProfile']", 'null': 'True', 'blank': 'True'}), 'minecraft': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.MinecraftProfile']", 'null': 'True', 'blank': 'True'}), 'nintendo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.NintendoProfile']", 'null': 'True', 'blank': 'True'}), 'psn': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.PlaystationNetworkProfile']", 'null': 'True', 'blank': 'True'}), 'somethingawful': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['profiles.SomethingAwfulProfile']", 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'steam': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.SteamProfile']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}), 'verification_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'worldoftanks': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.WorldOfTanksProfile']", 'null': 'True', 'blank': 'True'}), 'xbl': ('django.db.models.fields.related.ForeignKey', [], {'to': 
u"orm['profiles.XboxLiveProfile']", 'null': 'True', 'blank': 'True'}) }, u'profiles.worldoftanksprofile': { 'Meta': {'object_name': 'WorldOfTanksProfile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}) }, u'profiles.xboxliveprofile': { 'Meta': {'object_name': 'XboxLiveProfile'}, 'gamertag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) } } complete_apps = ['profiles']
mit
BorgERP/borg-erp-6of3
verticals/garage61/acy_mrp_operator/mrp_operator.py
1
10435
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2011 Acysos S.L. (http://acysos.com) All Rights Reserved. # Ignacio Ibeas <ignacio@acysos.com> # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import datetime from osv import osv, fields from tools.translate import _ import netsvc import time import tools class mrp_operator_registry(osv.osv): _description = 'MRP Operator Registry' _name = 'mrp.operator.registry' _columns = { 'name': fields.char('Reference', size=64, required=True, states={'draft':[('readonly',False)]}, readonly=True), 'date': fields.date('Date', required=True, select=True, states={'draft':[('readonly',False)]}, readonly=True), 'operator_id': fields.many2one('hr.employee', 'Operator', required=True, states={'draft':[('readonly',False)]}, readonly=True), 'workcenter_lines': fields.one2many('mrp.workcenter.registry', 'operator_registry_id', 'MRP Workcenter Registry', states={'draft':[('readonly',False)]}, readonly=True), 'state': fields.selection([('draft','Draft'),('confirmed','Confirmed'),('cancel','Cancelled')],'State', readonly=True), } _defaults = { 'name':lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'operator_registry'), 'date': 
lambda *a: time.strftime('%Y-%m-%d'), 'state': lambda *a: 'draft', } def action_confirm(self, cr, uid, ids, context=None): registry = self.browse(cr,uid,ids,context)[0] for workcenter_line in registry.workcenter_lines: if workcenter_line.production_id.id: sql = "SELECT MAX(sequence) FROM mrp_production_workcenter_line WHERE production_id = %s" % (workcenter_line.production_id.id) cr.execute(sql) sequence = cr.fetchone()[0] prod_obj = self.pool.get('mrp.production') stock_obj = self.pool.get('stock.move') if workcenter_line.production_id.state in ['draft','picking_except','cancel','done']: raise osv.except_osv(_('Error'), _("Can't make production if the Manufacturing order is %s") % (workcenter_line.production_id.state)) if workcenter_line.product_id: if not workcenter_line.workcenter_line_id: raise osv.except_osv(_('Error'), _("Can't produce a product without Workcenter %s") % (workcenter_line.product_id.name)) if workcenter_line.workcenter_line_id: if not workcenter_line.product_id: raise osv.except_osv(_('Error'), _("Can't use a workcenter without product %s") % (workcenter_line.workcenter_line_id.name)) prod_obj.action_in_production(cr,uid,workcenter_line.production_id.id) if sequence == workcenter_line.workcenter_line_id.sequence: if workcenter_line.go_product_qty > 0: prod_obj.action_produce(cr, uid,workcenter_line.production_id.id,workcenter_line.go_product_qty,'consume_produce',context) for workcenter_line2 in registry.workcenter_lines: if workcenter_line.production_id.id == workcenter_line2.production_id.id: if workcenter_line2.workcenter_line_id.sequence <= workcenter_line.workcenter_line_id.sequence: if workcenter_line.de_product_qty > 0: #mrp_routing_ids = self.pool.get('mrp.routing.workcenter').search(cr,uid,[('routing_id','=',workcenter_line2.production_id.routing_id.id)], order='sequence', context=context) #for mrp_routing_id in mrp_routing_ids: #product_line_id = self.pool.get('mrp.production.product.line').search(cr, uid, 
[('production_id','=',workcenter_line2.production_id.id),('consumed_on','=',mrp_routing_id)], context=context) #print product_line_id #if len(product_line_id) == 1: #break mrp_routing_id = self.pool.get('mrp.routing.workcenter').search(cr,uid,[('routing_id','=',workcenter_line2.production_id.routing_id.id),('workcenter_id','=',workcenter_line2.workcenter_id.id)], context=context) product_line_id = self.pool.get('mrp.production.product.line').search(cr, uid, [('production_id','=',workcenter_line2.production_id.id),('consumed_on','=',mrp_routing_id[0])], context=context) if len(product_line_id) > 0: product_line = self.pool.get('mrp.production.product.line').browse(cr, uid, product_line_id, context)[0] move_name = 'PROD:'+workcenter_line2.production_id.name stock_move_id = stock_obj.search(cr,uid,[('product_id','=',product_line.product_id.id),('state','=','assigned'),('name','=',move_name)],context=context) bom_id = self.pool.get('mrp.bom').search(cr, uid, [('bom_id','=',workcenter_line2.production_id.bom_id.id),('product_id','=',product_line.product_id.id),('consumed_on','=',mrp_routing_id[0])], context=context) bom = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context)[0] defective_qty = bom.product_qty*bom.product_efficiency*workcenter_line.de_product_qty context = {'operator_registry':1,'location_src':workcenter_line2.production_id.location_src_id.id} stock_obj.action_scrap(cr, uid,stock_move_id,defective_qty,4,context) self.write(cr, uid, ids, {'state': 'confirmed'}) return True def action_cancel(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state': 'cancel'}) return True def action_cancel_draft(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state': 'draft'}) return True mrp_operator_registry() class mrp_production_workcenter_line(osv.osv): _inherit = 'mrp.production.workcenter.line' def _number_get(self,cr,uid,ids,name,arg,context={}): res={} for line in self.browse(cr,uid,ids,context): res[line.id] = line.production_id.name 
+'-'+ str(line.sequence) return res _columns = { 'number': fields.function(_number_get, method=True, store=True, type='char', size=64, string='Number', readonly=True), } _rec_name = "number" mrp_production_workcenter_line() class mrp_workcenter_registry_key(osv.osv): _name = 'mrp.workcenter.registry.key' _description = 'MRP Workcenter Registry Key' _columns = { 'name': fields.char('Name', required=True, size=46, translate=True), } mrp_workcenter_registry_key() class mrp_workcenter_registry(osv.osv): _description = 'MRP Workcenter Registry' _name = 'mrp.workcenter.registry' _columns = { 'key': fields.many2one('mrp.workcenter.registry.key','Key'), 'workcenter_line_id': fields.many2one('mrp.production.workcenter.line', 'Workcenter'), 'product_id': fields.many2one('product.product', 'Product'), 'name': fields.char('Operation Code', size=64, required=True), 'workcenter_id': fields.many2one('mrp.workcenter', 'Resource'), 'de_product_qty': fields.float('Defective Product Qty'), 'go_product_qty': fields.float('Good Product Qty'), 'date_start': fields.date('Date start'), 'time_start': fields.time('Time start'), 'date_stop': fields.date('Date stop'), 'time_stop': fields.time('Time stop'), 'note': fields.text('Notes'), 'operator_registry_id': fields.many2one('mrp.operator.registry', 'Operator registry', ondelete='cascade'), 'production_id': fields.many2one('mrp.production', 'Manufacturing Order', ondelete='set null'), 'operator_id': fields.related('operator_registry_id', 'operator_id', type='many2one', relation='hr.employee', string='Operator'), } _defaults = { 'name':'/', 'date_start': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), 'date_stop': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), } def workcenter_line_change(self, cr, uid, ids,workcenter_line_id,context={}): if (workcenter_line_id): workcenter_line = self.pool.get('mrp.production.workcenter.line').browse(cr, uid, [workcenter_line_id], context)[0] return {'value': {'workcenter_line_id': 
workcenter_line.id,'product_id':workcenter_line.production_id.product_id.id,'name':workcenter_line.name,'workcenter_id':workcenter_line.workcenter_id.id,'production_id':workcenter_line.production_id.id}} mrp_workcenter_registry() class mrp_production(osv.osv): _inherit = 'mrp.production' _columns = { 'operator_ids': fields.one2many('mrp.workcenter.registry', 'production_id', 'Operator Registry'), } mrp_production() class mrp_routing_workcenter(osv.osv): _inherit = 'mrp.routing.workcenter' _sql_constraints = [ ('sequence_routing_uniq', 'unique (sequence,routing_id)', 'The sequence must be unique per routing !') ] mrp_routing_workcenter()
agpl-3.0
dstrockis/outlook-autocategories
lib/flask/testsuite/testing.py
561
7411
# -*- coding: utf-8 -*- """ flask.testsuite.testing ~~~~~~~~~~~~~~~~~~~~~~~ Test client and more. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import flask import unittest from flask.testsuite import FlaskTestCase from flask._compat import text_type class TestToolsTestCase(FlaskTestCase): def test_environ_defaults_from_config(self): app = flask.Flask(__name__) app.testing = True app.config['SERVER_NAME'] = 'example.com:1234' app.config['APPLICATION_ROOT'] = '/foo' @app.route('/') def index(): return flask.request.url ctx = app.test_request_context() self.assert_equal(ctx.request.url, 'http://example.com:1234/foo/') with app.test_client() as c: rv = c.get('/') self.assert_equal(rv.data, b'http://example.com:1234/foo/') def test_environ_defaults(self): app = flask.Flask(__name__) app.testing = True @app.route('/') def index(): return flask.request.url ctx = app.test_request_context() self.assert_equal(ctx.request.url, 'http://localhost/') with app.test_client() as c: rv = c.get('/') self.assert_equal(rv.data, b'http://localhost/') def test_redirect_keep_session(self): app = flask.Flask(__name__) app.secret_key = 'testing' @app.route('/', methods=['GET', 'POST']) def index(): if flask.request.method == 'POST': return flask.redirect('/getsession') flask.session['data'] = 'foo' return 'index' @app.route('/getsession') def get_session(): return flask.session.get('data', '<missing>') with app.test_client() as c: rv = c.get('/getsession') assert rv.data == b'<missing>' rv = c.get('/') assert rv.data == b'index' assert flask.session.get('data') == 'foo' rv = c.post('/', data={}, follow_redirects=True) assert rv.data == b'foo' # This support requires a new Werkzeug version if not hasattr(c, 'redirect_client'): assert flask.session.get('data') == 'foo' rv = c.get('/getsession') assert rv.data == b'foo' def test_session_transactions(self): app = flask.Flask(__name__) app.testing = True app.secret_key = 'testing' @app.route('/') def 
index(): return text_type(flask.session['foo']) with app.test_client() as c: with c.session_transaction() as sess: self.assert_equal(len(sess), 0) sess['foo'] = [42] self.assert_equal(len(sess), 1) rv = c.get('/') self.assert_equal(rv.data, b'[42]') with c.session_transaction() as sess: self.assert_equal(len(sess), 1) self.assert_equal(sess['foo'], [42]) def test_session_transactions_no_null_sessions(self): app = flask.Flask(__name__) app.testing = True with app.test_client() as c: try: with c.session_transaction() as sess: pass except RuntimeError as e: self.assert_in('Session backend did not open a session', str(e)) else: self.fail('Expected runtime error') def test_session_transactions_keep_context(self): app = flask.Flask(__name__) app.testing = True app.secret_key = 'testing' with app.test_client() as c: rv = c.get('/') req = flask.request._get_current_object() self.assert_true(req is not None) with c.session_transaction(): self.assert_true(req is flask.request._get_current_object()) def test_session_transaction_needs_cookies(self): app = flask.Flask(__name__) app.testing = True c = app.test_client(use_cookies=False) try: with c.session_transaction() as s: pass except RuntimeError as e: self.assert_in('cookies', str(e)) else: self.fail('Expected runtime error') def test_test_client_context_binding(self): app = flask.Flask(__name__) @app.route('/') def index(): flask.g.value = 42 return 'Hello World!' 
@app.route('/other') def other(): 1 // 0 with app.test_client() as c: resp = c.get('/') self.assert_equal(flask.g.value, 42) self.assert_equal(resp.data, b'Hello World!') self.assert_equal(resp.status_code, 200) resp = c.get('/other') self.assert_false(hasattr(flask.g, 'value')) self.assert_in(b'Internal Server Error', resp.data) self.assert_equal(resp.status_code, 500) flask.g.value = 23 try: flask.g.value except (AttributeError, RuntimeError): pass else: raise AssertionError('some kind of exception expected') def test_reuse_client(self): app = flask.Flask(__name__) c = app.test_client() with c: self.assert_equal(c.get('/').status_code, 404) with c: self.assert_equal(c.get('/').status_code, 404) def test_test_client_calls_teardown_handlers(self): app = flask.Flask(__name__) called = [] @app.teardown_request def remember(error): called.append(error) with app.test_client() as c: self.assert_equal(called, []) c.get('/') self.assert_equal(called, []) self.assert_equal(called, [None]) del called[:] with app.test_client() as c: self.assert_equal(called, []) c.get('/') self.assert_equal(called, []) c.get('/') self.assert_equal(called, [None]) self.assert_equal(called, [None, None]) class SubdomainTestCase(FlaskTestCase): def setUp(self): self.app = flask.Flask(__name__) self.app.config['SERVER_NAME'] = 'example.com' self.client = self.app.test_client() self._ctx = self.app.test_request_context() self._ctx.push() def tearDown(self): if self._ctx is not None: self._ctx.pop() def test_subdomain(self): @self.app.route('/', subdomain='<company_id>') def view(company_id): return company_id url = flask.url_for('view', company_id='xxx') response = self.client.get(url) self.assert_equal(200, response.status_code) self.assert_equal(b'xxx', response.data) def test_nosubdomain(self): @self.app.route('/<company_id>') def view(company_id): return company_id url = flask.url_for('view', company_id='xxx') response = self.client.get(url) self.assert_equal(200, response.status_code) 
self.assert_equal(b'xxx', response.data) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(TestToolsTestCase)) suite.addTest(unittest.makeSuite(SubdomainTestCase)) return suite
apache-2.0
dokterbob/satchmo
docs/conf.py
5
6694
# -*- coding: utf-8 -*- # # Satchmo Project documentation build configuration file, created by # sphinx-quickstart on Tue Jul 8 21:21:45 2008. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # Add satchmo apps. sys.path.append(os.path.abspath('../satchmo/apps')) # Setup the 'simple' store. sys.path.append(os.path.abspath('../satchmo/projects')) os.environ['DJANGO_SETTINGS_MODULE'] = 'simple.settings' # For Sphinx to properly work with Satchmo, you need to make one small path to Sphinx: # Patch here - http://gist.github.com/345738 # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.txt' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Satchmo' copyright = u'2010, Chris Moffitt' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.9.2' # The full version, including alpha/beta/rc tags. 
release = '0.9.2-development' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_option = None # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'SatchmoProjectdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). 
#latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Satchmo.tex', u'Satchmo Documentation', u'Chris Moffitt', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_use_modindex = False
bsd-3-clause
tealover/nova
nova/virt/ironic/driver.py
9
48340
# coding=utf-8 # # Copyright 2014 Red Hat, Inc. # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A driver wrapping the Ironic API, such that Nova may provision bare metal resources. """ import base64 import gzip import logging as py_logging import shutil import tempfile import time from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import importutils import six from nova.api.metadata import base as instance_metadata from nova.compute import arch from nova.compute import hv_type from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.i18n import _LE from nova.i18n import _LI from nova.i18n import _LW from nova import objects from nova.openstack.common import loopingcall from nova.virt import configdrive from nova.virt import driver as virt_driver from nova.virt import firewall from nova.virt import hardware from nova.virt.ironic import client_wrapper from nova.virt.ironic import ironic_states from nova.virt.ironic import patcher ironic = None LOG = logging.getLogger(__name__) opts = [ cfg.IntOpt('api_version', default=1, help='Version of Ironic API service endpoint.'), cfg.StrOpt('api_endpoint', help='URL for 
Ironic API endpoint.'), cfg.StrOpt('admin_username', help='Ironic keystone admin name'), cfg.StrOpt('admin_password', secret=True, help='Ironic keystone admin password.'), cfg.StrOpt('admin_auth_token', secret=True, deprecated_for_removal=True, help='Ironic keystone auth token.' 'DEPRECATED: use admin_username, admin_password, and ' 'admin_tenant_name instead'), cfg.StrOpt('admin_url', help='Keystone public API endpoint.'), cfg.StrOpt('client_log_level', deprecated_for_removal=True, help='Log level override for ironicclient. Set this in ' 'order to override the global "default_log_levels", ' '"verbose", and "debug" settings. ' 'DEPRECATED: use standard logging configuration.'), cfg.StrOpt('admin_tenant_name', help='Ironic keystone tenant name.'), cfg.IntOpt('api_max_retries', default=60, help='How many retries when a request does conflict.'), cfg.IntOpt('api_retry_interval', default=2, help='How often to retry in seconds when a request ' 'does conflict'), ] ironic_group = cfg.OptGroup(name='ironic', title='Ironic Options') CONF = cfg.CONF CONF.register_group(ironic_group) CONF.register_opts(opts, ironic_group) _POWER_STATE_MAP = { ironic_states.POWER_ON: power_state.RUNNING, ironic_states.NOSTATE: power_state.NOSTATE, ironic_states.POWER_OFF: power_state.SHUTDOWN, } def map_power_state(state): try: return _POWER_STATE_MAP[state] except KeyError: LOG.warning(_LW("Power state %s not found."), state) return power_state.NOSTATE def _validate_instance_and_node(ironicclient, instance): """Get the node associated with the instance. Check with the Ironic service that this instance is associated with a node, and return the node. 
""" try: return ironicclient.call("node.get_by_instance_uuid", instance.uuid) except ironic.exc.NotFound: raise exception.InstanceNotFound(instance_id=instance.uuid) def _get_nodes_supported_instances(cpu_arch=None): """Return supported instances for a node.""" if not cpu_arch: return [] return [(cpu_arch, hv_type.BAREMETAL, vm_mode.HVM)] def _log_ironic_polling(what, node, instance): power_state = (None if node.power_state is None else '"%s"' % node.power_state) tgt_power_state = (None if node.target_power_state is None else '"%s"' % node.target_power_state) prov_state = (None if node.provision_state is None else '"%s"' % node.provision_state) tgt_prov_state = (None if node.target_provision_state is None else '"%s"' % node.target_provision_state) LOG.debug('Still waiting for ironic node %(node)s to %(what)s: ' 'power_state=%(power_state)s, ' 'target_power_state=%(tgt_power_state)s, ' 'provision_state=%(prov_state)s, ' 'target_provision_state=%(tgt_prov_state)s', dict(what=what, node=node.uuid, power_state=power_state, tgt_power_state=tgt_power_state, prov_state=prov_state, tgt_prov_state=tgt_prov_state), instance=instance) class IronicDriver(virt_driver.ComputeDriver): """Hypervisor driver for Ironic - bare metal provisioning.""" capabilities = {"has_imagecache": False, "supports_recreate": False, "supports_migrate_to_same_host": False} def __init__(self, virtapi, read_only=False): super(IronicDriver, self).__init__(virtapi) global ironic if ironic is None: ironic = importutils.import_module('ironicclient') # NOTE(deva): work around a lack of symbols in the current version. 
if not hasattr(ironic, 'exc'): ironic.exc = importutils.import_module('ironicclient.exc') if not hasattr(ironic, 'client'): ironic.client = importutils.import_module( 'ironicclient.client') self.firewall_driver = firewall.load_driver( default='nova.virt.firewall.NoopFirewallDriver') self.node_cache = {} self.node_cache_time = 0 ironicclient_log_level = CONF.ironic.client_log_level if ironicclient_log_level: level = py_logging.getLevelName(ironicclient_log_level) logger = py_logging.getLogger('ironicclient') logger.setLevel(level) self.ironicclient = client_wrapper.IronicClientWrapper() def _node_resources_unavailable(self, node_obj): """Determine whether the node's resources are in an acceptable state. Determines whether the node's resources should be presented to Nova for use based on the current power, provision and maintenance state. This is called after _node_resources_used, so any node that is not used and not in AVAILABLE should be considered in a 'bad' state, and unavailable for scheduling. Returns True if unacceptable. """ bad_power_states = [ ironic_states.ERROR, ironic_states.NOSTATE] # keep NOSTATE around for compatibility good_provision_states = [ ironic_states.AVAILABLE, ironic_states.NOSTATE] return (node_obj.maintenance or node_obj.power_state in bad_power_states or node_obj.provision_state not in good_provision_states) def _node_resources_used(self, node_obj): """Determine whether the node's resources are currently used. Determines whether the node's resources should be considered used or not. A node is used when it is either in the process of putting a new instance on the node, has an instance on the node, or is in the process of cleaning up from a deleted instance. Returns True if used. If we report resources as consumed for a node that does not have an instance on it, the resource tracker will notice there's no instances consuming resources and try to correct us. So only nodes with an instance attached should report as consumed here. 
""" return node_obj.instance_uuid is not None def _parse_node_properties(self, node): """Helper method to parse the node's properties.""" properties = {} for prop in ('cpus', 'memory_mb', 'local_gb'): try: properties[prop] = int(node.properties.get(prop, 0)) except (TypeError, ValueError): LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". ' 'It should be an integer.'), {'uuid': node.uuid, 'prop': prop}) properties[prop] = 0 raw_cpu_arch = node.properties.get('cpu_arch', None) try: cpu_arch = arch.canonicalize(raw_cpu_arch) except exception.InvalidArchitectureName: cpu_arch = None if not cpu_arch: LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid) properties['cpu_arch'] = cpu_arch properties['raw_cpu_arch'] = raw_cpu_arch properties['capabilities'] = node.properties.get('capabilities') return properties def _node_resource(self, node): """Helper method to create resource dict from node stats.""" properties = self._parse_node_properties(node) vcpus = properties['cpus'] memory_mb = properties['memory_mb'] local_gb = properties['local_gb'] raw_cpu_arch = properties['raw_cpu_arch'] cpu_arch = properties['cpu_arch'] nodes_extra_specs = {} # NOTE(deva): In Havana and Icehouse, the flavor was required to link # to an arch-specific deploy kernel and ramdisk pair, and so the flavor # also had to have extra_specs['cpu_arch'], which was matched against # the ironic node.properties['cpu_arch']. # With Juno, the deploy image(s) may be referenced directly by the # node.driver_info, and a flavor no longer needs to contain any of # these three extra specs, though the cpu_arch may still be used # in a heterogeneous environment, if so desired. # NOTE(dprince): we use the raw cpu_arch here because extra_specs # filters aren't canonicalized nodes_extra_specs['cpu_arch'] = raw_cpu_arch # NOTE(gilliard): To assist with more precise scheduling, if the # node.properties contains a key 'capabilities', we expect the value # to be of the form "k1:v1,k2:v2,etc.." 
which we add directly as # key/value pairs into the node_extra_specs to be used by the # ComputeCapabilitiesFilter capabilities = properties['capabilities'] if capabilities: for capability in str(capabilities).split(','): parts = capability.split(':') if len(parts) == 2 and parts[0] and parts[1]: nodes_extra_specs[parts[0]] = parts[1] else: LOG.warning(_LW("Ignoring malformed capability '%s'. " "Format should be 'key:val'."), capability) vcpus_used = 0 memory_mb_used = 0 local_gb_used = 0 if self._node_resources_used(node): # Node is in the process of deploying, is deployed, or is in # the process of cleaning up from a deploy. Report all of its # resources as in use. vcpus_used = vcpus memory_mb_used = memory_mb local_gb_used = local_gb elif self._node_resources_unavailable(node): # The node's current state is such that it should not present any # of its resources to Nova vcpus = 0 memory_mb = 0 local_gb = 0 dic = { 'hypervisor_hostname': str(node.uuid), 'hypervisor_type': self._get_hypervisor_type(), 'hypervisor_version': self._get_hypervisor_version(), # The Ironic driver manages multiple hosts, so there are # likely many different CPU models in use. 
As such it is # impossible to provide any meaningful info on the CPU # model of the "host" 'cpu_info': None, 'vcpus': vcpus, 'vcpus_used': vcpus_used, 'local_gb': local_gb, 'local_gb_used': local_gb_used, 'disk_available_least': local_gb - local_gb_used, 'memory_mb': memory_mb, 'memory_mb_used': memory_mb_used, 'supported_instances': jsonutils.dumps( _get_nodes_supported_instances(cpu_arch)), 'stats': jsonutils.dumps(nodes_extra_specs), } return dic def _start_firewall(self, instance, network_info): self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) self.firewall_driver.apply_instance_filter(instance, network_info) def _stop_firewall(self, instance, network_info): self.firewall_driver.unfilter_instance(instance, network_info) def _add_driver_fields(self, node, instance, image_meta, flavor, preserve_ephemeral=None): patch = patcher.create(node).get_deploy_patch(instance, image_meta, flavor, preserve_ephemeral) # Associate the node with an instance patch.append({'path': '/instance_uuid', 'op': 'add', 'value': instance.uuid}) try: self.ironicclient.call('node.update', node.uuid, patch) except ironic.exc.BadRequest: msg = (_("Failed to add deploy parameters on node %(node)s " "when provisioning the instance %(instance)s") % {'node': node.uuid, 'instance': instance.uuid}) LOG.error(msg) raise exception.InstanceDeployFailure(msg) def _cleanup_deploy(self, context, node, instance, network_info, flavor=None): if flavor is None: flavor = instance.flavor patch = patcher.create(node).get_cleanup_patch(instance, network_info, flavor) # Unassociate the node patch.append({'op': 'remove', 'path': '/instance_uuid'}) try: self.ironicclient.call('node.update', node.uuid, patch) except ironic.exc.BadRequest: LOG.error(_LE("Failed to clean up the parameters on node %(node)s " "when unprovisioning the instance %(instance)s"), {'node': node.uuid, 'instance': instance.uuid}) reason = (_("Fail to clean up 
node %s parameters") % node.uuid) raise exception.InstanceTerminationFailure(reason=reason) self._unplug_vifs(node, instance, network_info) self._stop_firewall(instance, network_info) def _wait_for_active(self, ironicclient, instance): """Wait for the node to be marked as ACTIVE in Ironic.""" instance.refresh() if (instance.task_state == task_states.DELETING or instance.vm_state in (vm_states.ERROR, vm_states.DELETED)): raise exception.InstanceDeployFailure( _("Instance %s provisioning was aborted") % instance.uuid) node = _validate_instance_and_node(ironicclient, instance) if node.provision_state == ironic_states.ACTIVE: # job is done LOG.debug("Ironic node %(node)s is now ACTIVE", dict(node=node.uuid), instance=instance) raise loopingcall.LoopingCallDone() if node.target_provision_state in (ironic_states.DELETED, ironic_states.AVAILABLE): # ironic is trying to delete it now raise exception.InstanceNotFound(instance_id=instance.uuid) if node.provision_state in (ironic_states.NOSTATE, ironic_states.AVAILABLE): # ironic already deleted it raise exception.InstanceNotFound(instance_id=instance.uuid) if node.provision_state == ironic_states.DEPLOYFAIL: # ironic failed to deploy msg = (_("Failed to provision instance %(inst)s: %(reason)s") % {'inst': instance.uuid, 'reason': node.last_error}) raise exception.InstanceDeployFailure(msg) _log_ironic_polling('become ACTIVE', node, instance) def _wait_for_power_state(self, ironicclient, instance, message): """Wait for the node to complete a power state change.""" node = _validate_instance_and_node(ironicclient, instance) if node.target_power_state == ironic_states.NOSTATE: raise loopingcall.LoopingCallDone() _log_ironic_polling(message, node, instance) def init_host(self, host): """Initialize anything that is necessary for the driver to function. :param host: the hostname of the compute host. 
""" return def _get_hypervisor_type(self): """Get hypervisor type.""" return 'ironic' def _get_hypervisor_version(self): """Returns the version of the Ironic API service endpoint.""" return CONF.ironic.api_version def instance_exists(self, instance): """Checks the existence of an instance. Checks the existence of an instance. This is an override of the base method for efficiency. :param instance: The instance object. :returns: True if the instance exists. False if not. """ try: _validate_instance_and_node(self.ironicclient, instance) return True except exception.InstanceNotFound: return False def list_instances(self): """Return the names of all the instances provisioned. :returns: a list of instance names. """ # NOTE(lucasagomes): limit == 0 is an indicator to continue # pagination until there're no more values to be returned. node_list = self.ironicclient.call("node.list", associated=True, limit=0) context = nova_context.get_admin_context() return [objects.Instance.get_by_uuid(context, i.instance_uuid).name for i in node_list] def list_instance_uuids(self): """Return the UUIDs of all the instances provisioned. :returns: a list of instance UUIDs. """ # NOTE(lucasagomes): limit == 0 is an indicator to continue # pagination until there're no more values to be returned. node_list = self.ironicclient.call("node.list", associated=True, limit=0) return list(n.instance_uuid for n in node_list) def node_is_available(self, nodename): """Confirms a Nova hypervisor node exists in the Ironic inventory. :param nodename: The UUID of the node. :returns: True if the node exists, False if not. """ # NOTE(comstud): We can cheat and use caching here. This method # just needs to return True for nodes that exist. It doesn't # matter if the data is stale. Sure, it's possible that removing # node from Ironic will cause this method to return True until # the next call to 'get_available_nodes', but there shouldn't # be much harm. There's already somewhat of a race. 
if not self.node_cache: # Empty cache, try to populate it. self._refresh_cache() if nodename in self.node_cache: return True # NOTE(comstud): Fallback and check Ironic. This case should be # rare. try: self.ironicclient.call("node.get", nodename) return True except ironic.exc.NotFound: return False def _refresh_cache(self): # NOTE(lucasagomes): limit == 0 is an indicator to continue # pagination until there're no more values to be returned. node_list = self.ironicclient.call('node.list', detail=True, limit=0) node_cache = {} for node in node_list: node_cache[node.uuid] = node self.node_cache = node_cache self.node_cache_time = time.time() def get_available_nodes(self, refresh=False): """Returns the UUIDs of all nodes in the Ironic inventory. :param refresh: Boolean value; If True run update first. Ignored by this driver. :returns: a list of UUIDs """ # NOTE(jroll) we refresh the cache every time this is called # because it needs to happen in the resource tracker # periodic task. This task doesn't pass refresh=True, # unfortunately. self._refresh_cache() node_uuids = list(self.node_cache.keys()) LOG.debug("Returning %(num_nodes)s available node(s)", dict(num_nodes=len(node_uuids))) return node_uuids def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :param nodename: the UUID of the node. :returns: a dictionary describing resources. """ # NOTE(comstud): We can cheat and use caching here. This method is # only called from a periodic task and right after the above # get_available_nodes() call is called. if not self.node_cache: # Well, it's also called from init_host(), so if we have empty # cache, let's try to populate it. 
self._refresh_cache() cache_age = time.time() - self.node_cache_time if nodename in self.node_cache: LOG.debug("Using cache for node %(node)s, age: %(age)s", {'node': nodename, 'age': cache_age}) node = self.node_cache[nodename] else: LOG.debug("Node %(node)s not found in cache, age: %(age)s", {'node': nodename, 'age': cache_age}) node = self.ironicclient.call("node.get", nodename) return self._node_resource(node) def get_info(self, instance): """Get the current state and resource usage for this instance. If the instance is not found this method returns (a dictionary with) NOSTATE and all resources == 0. :param instance: the instance object. :returns: a InstanceInfo object """ try: node = _validate_instance_and_node(self.ironicclient, instance) except exception.InstanceNotFound: return hardware.InstanceInfo( state=map_power_state(ironic_states.NOSTATE)) properties = self._parse_node_properties(node) memory_kib = properties['memory_mb'] * 1024 if memory_kib == 0: LOG.warning(_LW("Warning, memory usage is 0 for " "%(instance)s on baremetal node %(node)s."), {'instance': instance.uuid, 'node': instance.node}) num_cpu = properties['cpus'] if num_cpu == 0: LOG.warning(_LW("Warning, number of cpus is 0 for " "%(instance)s on baremetal node %(node)s."), {'instance': instance.uuid, 'node': instance.node}) return hardware.InstanceInfo(state=map_power_state(node.power_state), max_mem_kb=memory_kib, mem_kb=memory_kib, num_cpu=num_cpu) def deallocate_networks_on_reschedule(self, instance): """Does the driver want networks deallocated on reschedule? :param instance: the instance object. :returns: Boolean value. If True deallocate networks on reschedule. """ return True def macs_for_instance(self, instance): """List the MAC addresses of an instance. List of MAC addresses for the node which this instance is associated with. :param instance: the instance object. :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])). 
None means 'no constraints', a set means 'these and only these MAC addresses'. """ try: node = self.ironicclient.call("node.get", instance.node) except ironic.exc.NotFound: return None ports = self.ironicclient.call("node.list_ports", node.uuid) return set([p.address for p in ports]) def _generate_configdrive(self, instance, node, network_info, extra_md=None, files=None): """Generate a config drive. :param instance: The instance object. :param node: The node object. :param network_info: Instance network information. :param extra_md: Optional, extra metadata to be added to the configdrive. :param files: Optional, a list of paths to files to be added to the configdrive. """ if not extra_md: extra_md = {} i_meta = instance_metadata.InstanceMetadata(instance, content=files, extra_md=extra_md, network_info=network_info) with tempfile.NamedTemporaryFile() as uncompressed: try: with configdrive.ConfigDriveBuilder(instance_md=i_meta) as cdb: cdb.make_drive(uncompressed.name) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Creating config drive failed with " "error: %s"), e, instance=instance) with tempfile.NamedTemporaryFile() as compressed: # compress config drive with gzip.GzipFile(fileobj=compressed, mode='wb') as gzipped: uncompressed.seek(0) shutil.copyfileobj(uncompressed, gzipped) # base64 encode config drive compressed.seek(0) return base64.b64encode(compressed.read()) def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): """Deploy an instance. :param context: The security context. :param instance: The instance object. :param image_meta: Image dict returned by nova.image.glance that defines the image from which to boot this instance. :param injected_files: User files to inject into instance. :param admin_password: Administrator password to set in instance. :param network_info: Instance network information. 
:param block_device_info: Instance block device information. Ignored by this driver. """ # The compute manager is meant to know the node uuid, so missing uuid # is a significant issue. It may mean we've been passed the wrong data. node_uuid = instance.get('node') if not node_uuid: raise ironic.exc.BadRequest( _("Ironic node uuid not supplied to " "driver for instance %s.") % instance.uuid) node = self.ironicclient.call("node.get", node_uuid) flavor = instance.flavor self._add_driver_fields(node, instance, image_meta, flavor) # NOTE(Shrews): The default ephemeral device needs to be set for # services (like cloud-init) that depend on it being returned by the # metadata server. Addresses bug https://launchpad.net/bugs/1324286. if flavor.ephemeral_gb: instance.default_ephemeral_device = '/dev/sda1' instance.save() # validate we are ready to do the deploy validate_chk = self.ironicclient.call("node.validate", node_uuid) if not validate_chk.deploy or not validate_chk.power: # something is wrong. undo what we have done self._cleanup_deploy(context, node, instance, network_info, flavor=flavor) raise exception.ValidationError(_( "Ironic node: %(id)s failed to validate." 
" (deploy: %(deploy)s, power: %(power)s)") % {'id': node.uuid, 'deploy': validate_chk.deploy, 'power': validate_chk.power}) # prepare for the deploy try: self._plug_vifs(node, instance, network_info) self._start_firewall(instance, network_info) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error preparing deploy for instance " "%(instance)s on baremetal node %(node)s."), {'instance': instance.uuid, 'node': node_uuid}) self._cleanup_deploy(context, node, instance, network_info, flavor=flavor) # Config drive configdrive_value = None if configdrive.required_by(instance): extra_md = {} if admin_password: extra_md['admin_pass'] = admin_password configdrive_value = self._generate_configdrive( instance, node, network_info, extra_md=extra_md, files=injected_files) LOG.info(_LI("Config drive for instance %(instance)s on " "baremetal node %(node)s created."), {'instance': instance['uuid'], 'node': node_uuid}) # trigger the node deploy try: self.ironicclient.call("node.set_provision_state", node_uuid, ironic_states.ACTIVE, configdrive=configdrive_value) except Exception as e: with excutils.save_and_reraise_exception(): msg = (_LE("Failed to request Ironic to provision instance " "%(inst)s: %(reason)s"), {'inst': instance.uuid, 'reason': six.text_type(e)}) LOG.error(msg) self._cleanup_deploy(context, node, instance, network_info, flavor=flavor) timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active, self.ironicclient, instance) try: timer.start(interval=CONF.ironic.api_retry_interval).wait() except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error deploying instance %(instance)s on " "baremetal node %(node)s."), {'instance': instance.uuid, 'node': node_uuid}) self.destroy(context, instance, network_info) def _unprovision(self, ironicclient, instance, node): """This method is called from destroy() to unprovision already provisioned node after required checks. 
""" try: ironicclient.call("node.set_provision_state", node.uuid, "deleted") except Exception as e: # if the node is already in a deprovisioned state, continue # This should be fixed in Ironic. # TODO(deva): This exception should be added to # python-ironicclient and matched directly, # rather than via __name__. if getattr(e, '__name__', None) != 'InstanceDeployFailure': raise # using a dict because this is modified in the local method data = {'tries': 0} def _wait_for_provision_state(): node = _validate_instance_and_node(ironicclient, instance) if node.provision_state in (ironic_states.NOSTATE, ironic_states.CLEANING, ironic_states.CLEANFAIL, ironic_states.AVAILABLE): # From a user standpoint, the node is unprovisioned. If a node # gets into CLEANFAIL state, it must be fixed in Ironic, but we # can consider the instance unprovisioned. LOG.debug("Ironic node %(node)s is in state %(state)s, " "instance is now unprovisioned.", dict(node=node.uuid, state=node.provision_state), instance=instance) raise loopingcall.LoopingCallDone() if data['tries'] >= CONF.ironic.api_max_retries: msg = (_("Error destroying the instance on node %(node)s. " "Provision state still '%(state)s'.") % {'state': node.provision_state, 'node': node.uuid}) LOG.error(msg) raise exception.NovaException(msg) else: data['tries'] += 1 _log_ironic_polling('unprovision', node, instance) # wait for the state transition to finish timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state) timer.start(interval=CONF.ironic.api_retry_interval).wait() def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None): """Destroy the specified instance, if it can be found. :param context: The security context. :param instance: The instance object. :param network_info: Instance network information. :param block_device_info: Instance block device information. Ignored by this driver. :param destroy_disks: Indicates if disks should be destroyed. 
Ignored by this driver. :param migrate_data: implementation specific params. Ignored by this driver. """ try: node = _validate_instance_and_node(self.ironicclient, instance) except exception.InstanceNotFound: LOG.warning(_LW("Destroy called on non-existing instance %s."), instance.uuid) # NOTE(deva): if nova.compute.ComputeManager._delete_instance() # is called on a non-existing instance, the only way # to delete it is to return from this method # without raising any exceptions. return if node.provision_state in (ironic_states.ACTIVE, ironic_states.DEPLOYFAIL, ironic_states.ERROR, ironic_states.DEPLOYWAIT): self._unprovision(self.ironicclient, instance, node) self._cleanup_deploy(context, node, instance, network_info) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): """Reboot the specified instance. NOTE: Ironic does not support soft-off, so this method always performs a hard-reboot. NOTE: Unlike the libvirt driver, this method does not delete and recreate the instance; it preserves local state. :param context: The security context. :param instance: The instance object. :param network_info: Instance network information. Ignored by this driver. :param reboot_type: Either a HARD or SOFT reboot. Ignored by this driver. :param block_device_info: Info pertaining to attached volumes. Ignored by this driver. :param bad_volumes_callback: Function to handle any bad volumes encountered. Ignored by this driver. """ node = _validate_instance_and_node(self.ironicclient, instance) self.ironicclient.call("node.set_power_state", node.uuid, 'reboot') timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_power_state, self.ironicclient, instance, 'reboot') timer.start(interval=CONF.ironic.api_retry_interval).wait() def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance. NOTE: Ironic does not support soft-off, so this method ignores timeout and retry_interval parameters. 
NOTE: Unlike the libvirt driver, this method does not delete and recreate the instance; it preserves local state. :param instance: The instance object. :param timeout: time to wait for node to shutdown. Ignored by this driver. :param retry_interval: How often to signal node while waiting for it to shutdown. Ignored by this driver. """ node = _validate_instance_and_node(self.ironicclient, instance) self.ironicclient.call("node.set_power_state", node.uuid, 'off') timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_power_state, self.ironicclient, instance, 'power off') timer.start(interval=CONF.ironic.api_retry_interval).wait() def power_on(self, context, instance, network_info, block_device_info=None): """Power on the specified instance. NOTE: Unlike the libvirt driver, this method does not delete and recreate the instance; it preserves local state. :param context: The security context. :param instance: The instance object. :param network_info: Instance network information. Ignored by this driver. :param block_device_info: Instance block device information. Ignored by this driver. """ node = _validate_instance_and_node(self.ironicclient, instance) self.ironicclient.call("node.set_power_state", node.uuid, 'on') timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_power_state, self.ironicclient, instance, 'power on') timer.start(interval=CONF.ironic.api_retry_interval).wait() def refresh_security_group_rules(self, security_group_id): """Refresh security group rules from data store. Invoked when security group rules are updated. :param security_group_id: The security group id. """ self.firewall_driver.refresh_security_group_rules(security_group_id) def refresh_security_group_members(self, security_group_id): """Refresh security group members from data store. Invoked when instances are added/removed to a security group. :param security_group_id: The security group id. 
""" self.firewall_driver.refresh_security_group_members(security_group_id) def refresh_provider_fw_rules(self): """Triggers a firewall update based on database changes.""" self.firewall_driver.refresh_provider_fw_rules() def refresh_instance_security_rules(self, instance): """Refresh security group rules from data store. Gets called when an instance gets added to or removed from the security group the instance is a member of or if the group gains or loses a rule. :param instance: The instance object. """ self.firewall_driver.refresh_instance_security_rules(instance) def ensure_filtering_rules_for_instance(self, instance, network_info): """Set up filtering rules. :param instance: The instance object. :param network_info: Instance network information. """ self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) def unfilter_instance(self, instance, network_info): """Stop filtering instance. :param instance: The instance object. :param network_info: Instance network information. """ self.firewall_driver.unfilter_instance(instance, network_info) def _plug_vifs(self, node, instance, network_info): # NOTE(PhilDay): Accessing network_info will block if the thread # it wraps hasn't finished, so do this ahead of time so that we # don't block while holding the logging lock. 
network_info_str = str(network_info) LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s", {'uuid': instance.uuid, 'network_info': network_info_str}) # start by ensuring the ports are clear self._unplug_vifs(node, instance, network_info) ports = self.ironicclient.call("node.list_ports", node.uuid) if len(network_info) > len(ports): raise exception.VirtualInterfacePlugException(_( "Ironic node: %(id)s virtual to physical interface count" " missmatch" " (Vif count: %(vif_count)d, Pif count: %(pif_count)d)") % {'id': node.uuid, 'vif_count': len(network_info), 'pif_count': len(ports)}) if len(network_info) > 0: # not needed if no vif are defined for vif, pif in zip(network_info, ports): # attach what neutron needs directly to the port port_id = six.text_type(vif['id']) patch = [{'op': 'add', 'path': '/extra/vif_port_id', 'value': port_id}] self.ironicclient.call("port.update", pif.uuid, patch) def _unplug_vifs(self, node, instance, network_info): # NOTE(PhilDay): Accessing network_info will block if the thread # it wraps hasn't finished, so do this ahead of time so that we # don't block while holding the logging lock. network_info_str = str(network_info) LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s", {'uuid': instance.uuid, 'network_info': network_info_str}) if network_info and len(network_info) > 0: ports = self.ironicclient.call("node.list_ports", node.uuid, detail=True) # not needed if no vif are defined for vif, pif in zip(network_info, ports): if 'vif_port_id' in pif.extra: # we can not attach a dict directly patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}] try: self.ironicclient.call("port.update", pif.uuid, patch) except ironic.exc.BadRequest: pass def plug_vifs(self, instance, network_info): """Plug VIFs into networks. :param instance: The instance object. :param network_info: Instance network information. 
""" node = self.ironicclient.call("node.get", instance.node) self._plug_vifs(node, instance, network_info) def unplug_vifs(self, instance, network_info): """Unplug VIFs from networks. :param instance: The instance object. :param network_info: Instance network information. """ node = self.ironicclient.call("node.get", instance.node) self._unplug_vifs(node, instance, network_info) def rebuild(self, context, instance, image_meta, injected_files, admin_password, bdms, detach_block_devices, attach_block_devices, network_info=None, recreate=False, block_device_info=None, preserve_ephemeral=False): """Rebuild/redeploy an instance. This version of rebuild() allows for supporting the option to preserve the ephemeral partition. We cannot call spawn() from here because it will attempt to set the instance_uuid value again, which is not allowed by the Ironic API. It also requires the instance to not have an 'active' provision state, but we cannot safely change that. Given that, we implement only the portions of spawn() we need within rebuild(). :param context: The security context. :param instance: The instance object. :param image_meta: Image object returned by nova.image.glance that defines the image from which to boot this instance. Ignored by this driver. :param injected_files: User files to inject into instance. Ignored by this driver. :param admin_password: Administrator password to set in instance. Ignored by this driver. :param bdms: block-device-mappings to use for rebuild. Ignored by this driver. :param detach_block_devices: function to detach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. Ignored by this driver. :param attach_block_devices: function to attach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. Ignored by this driver. :param network_info: Instance network information. Ignored by this driver. 
:param recreate: Boolean value; if True the instance is recreated on a new hypervisor - all the cleanup of old state is skipped. Ignored by this driver. :param block_device_info: Instance block device information. Ignored by this driver. :param preserve_ephemeral: Boolean value; if True the ephemeral must be preserved on rebuild. """ instance.task_state = task_states.REBUILD_SPAWNING instance.save(expected_task_state=[task_states.REBUILDING]) node_uuid = instance.node node = self.ironicclient.call("node.get", node_uuid) self._add_driver_fields(node, instance, image_meta, instance.flavor, preserve_ephemeral) # Trigger the node rebuild/redeploy. try: self.ironicclient.call("node.set_provision_state", node_uuid, ironic_states.REBUILD) except (exception.NovaException, # Retry failed ironic.exc.InternalServerError, # Validations ironic.exc.BadRequest) as e: # Maintenance msg = (_("Failed to request Ironic to rebuild instance " "%(inst)s: %(reason)s") % {'inst': instance.uuid, 'reason': six.text_type(e)}) raise exception.InstanceDeployFailure(msg) # Although the target provision state is REBUILD, it will actually go # to ACTIVE once the redeploy is finished. timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active, self.ironicclient, instance) timer.start(interval=CONF.ironic.api_retry_interval).wait()
apache-2.0
arborh/tensorflow
tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py
9
4604
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor.from_sparse."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest


@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
  # NOTE(review): the class name says "ToSparse" but every test here
  # exercises RaggedTensor.from_sparse -- presumably copied from the
  # to_sparse test file; confirm before renaming.

  def testDocStringExample(self):
    # 4x3 sparse input where row 0 has 3 values, row 1 has 1, row 2 is
    # empty, and row 3 has 1 -> expect a matching ragged layout.
    st = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
        values=[1, 2, 3, 4, 5],
        dense_shape=[4, 3])
    rt = RaggedTensor.from_sparse(st)

    self.assertAllEqual(rt, [[1, 2, 3], [4], [], [5]])

  def testEmpty(self):
    # A sparse tensor with zero stored values converts to all-empty rows
    # (one per row of the dense shape).
    st = sparse_tensor.SparseTensor(
        indices=array_ops.zeros([0, 2], dtype=dtypes.int64),
        values=[],
        dense_shape=[4, 3])
    rt = RaggedTensor.from_sparse(st)

    self.assertAllEqual(rt, [[], [], [], []])

  def testBadSparseTensorRank(self):
    # from_sparse statically requires a rank-2 SparseTensor.
    # Rank 1: too low.
    st1 = sparse_tensor.SparseTensor(indices=[[0]], values=[0], dense_shape=[3])
    self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
                            RaggedTensor.from_sparse, st1)

    # Rank 3: too high.
    st2 = sparse_tensor.SparseTensor(
        indices=[[0, 0, 0]], values=[0], dense_shape=[3, 3, 3])
    self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
                            RaggedTensor.from_sparse, st2)

    if not context.executing_eagerly():
      # Fully-unknown rank (placeholders only exist in graph mode) must
      # also be rejected at graph-construction time.
      st3 = sparse_tensor.SparseTensor(
          indices=array_ops.placeholder(dtypes.int64),
          values=[0],
          dense_shape=array_ops.placeholder(dtypes.int64))
      self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
                              RaggedTensor.from_sparse, st3)

  def testGoodPartialSparseTensorRank(self):
    if not context.executing_eagerly():
      # Partially-known shapes: one of indices/dense_shape is a
      # placeholder while the other pins the rank to 2.
      st1 = sparse_tensor.SparseTensor(
          indices=[[0, 0]],
          values=[0],
          dense_shape=array_ops.placeholder(dtypes.int64))
      st2 = sparse_tensor.SparseTensor(
          indices=array_ops.placeholder(dtypes.int64),
          values=[0],
          dense_shape=[4, 3])

      # Shouldn't throw ValueError
      RaggedTensor.from_sparse(st1)
      RaggedTensor.from_sparse(st2)

  def testNonRaggedSparseTensor(self):
    # "index_suffix" means the value of the innermost dimension of the index
    # (i.e., indices[i][-1]).
    # See comments in _assert_sparse_indices_are_ragged_right() for more
    # details/background.
    # These inputs are only detectable as invalid at runtime, hence the
    # self.evaluate() around each conversion.

    # index_suffix of first index is not zero.
    st1 = sparse_tensor.SparseTensor(
        indices=[[0, 1], [0, 2], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3])
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r'.*SparseTensor is not right-ragged'):
      self.evaluate(RaggedTensor.from_sparse(st1))

    # index_suffix of an index that starts a new row is not zero.
    st2 = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [2, 1]], values=[1, 2, 3], dense_shape=[3, 3])
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r'.*SparseTensor is not right-ragged'):
      self.evaluate(RaggedTensor.from_sparse(st2))

    # index_suffix of an index that continues a row skips a cell.
    st3 = sparse_tensor.SparseTensor(
        indices=[[0, 1], [0, 1], [0, 3]], values=[1, 2, 3], dense_shape=[3, 3])
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r'.*SparseTensor is not right-ragged'):
      self.evaluate(RaggedTensor.from_sparse(st3))


if __name__ == '__main__':
  googletest.main()
apache-2.0
wxiang7/airflow
airflow/operators/mssql_operator.py
5
1087
import logging

from airflow.hooks import MsSqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class MsSqlOperator(BaseOperator):
    """
    Executes sql code in a specific Microsoft SQL database

    :param mssql_conn_id: reference to a specific mssql database
    :type mssql_conn_id: string
    :param sql: the sql code to be executed
    :type sql: string or string pointing to a template file.
        File must have a '.sql' extensions.
    :param parameters: parameters to bind into the SQL statement; passed
        straight through to :meth:`MsSqlHook.run`
    :type parameters: mapping or iterable
    """

    # 'sql' is rendered by the templating engine; '.sql' files are read
    # and rendered as templates as well.
    template_fields = ('sql',)
    template_ext = ('.sql',)
    ui_color = '#ededed'

    @apply_defaults
    def __init__(
            self, sql, mssql_conn_id='mssql_default', parameters=None,
            *args, **kwargs):
        super(MsSqlOperator, self).__init__(*args, **kwargs)
        self.mssql_conn_id = mssql_conn_id
        self.sql = sql
        self.parameters = parameters

    def execute(self, context):
        """Run the (already template-rendered) SQL against the target DB."""
        # Lazy %s formatting: the message is only built if INFO is enabled,
        # and logging performs the str() conversion itself.
        logging.info('Executing: %s', self.sql)
        hook = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
        hook.run(self.sql, parameters=self.parameters)
apache-2.0
moto-timo/ironpython3
Src/StdLib/Lib/encodings/cp273.py
212
14132
""" Python Character Mapping Codec cp273 generated from 'python-mappings/CP273.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp273', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL (NUL) '\x01' # 0x01 -> START OF HEADING (SOH) '\x02' # 0x02 -> START OF TEXT (STX) '\x03' # 0x03 -> END OF TEXT (ETX) '\x9c' # 0x04 -> STRING TERMINATOR (ST) '\t' # 0x05 -> CHARACTER TABULATION (HT) '\x86' # 0x06 -> START OF SELECTED AREA (SSA) '\x7f' # 0x07 -> DELETE (DEL) '\x97' # 0x08 -> END OF GUARDED AREA (EPA) '\x8d' # 0x09 -> REVERSE LINE FEED (RI) '\x8e' # 0x0A -> SINGLE-SHIFT TWO (SS2) '\x0b' # 0x0B -> LINE TABULATION (VT) '\x0c' # 0x0C -> FORM FEED (FF) '\r' # 0x0D -> CARRIAGE RETURN (CR) '\x0e' # 0x0E -> SHIFT OUT (SO) '\x0f' # 0x0F -> SHIFT IN (SI) '\x10' # 0x10 -> DATALINK ESCAPE (DLE) '\x11' # 0x11 -> DEVICE CONTROL ONE (DC1) '\x12' # 0x12 -> DEVICE CONTROL TWO (DC2) '\x13' # 0x13 -> DEVICE CONTROL THREE (DC3) '\x9d' # 0x14 -> OPERATING SYSTEM COMMAND (OSC) '\x85' # 0x15 -> NEXT LINE (NEL) '\x08' # 0x16 -> BACKSPACE (BS) '\x87' # 0x17 -> END OF SELECTED AREA (ESA) '\x18' 
# 0x18 -> CANCEL (CAN) '\x19' # 0x19 -> END OF MEDIUM (EM) '\x92' # 0x1A -> PRIVATE USE TWO (PU2) '\x8f' # 0x1B -> SINGLE-SHIFT THREE (SS3) '\x1c' # 0x1C -> FILE SEPARATOR (IS4) '\x1d' # 0x1D -> GROUP SEPARATOR (IS3) '\x1e' # 0x1E -> RECORD SEPARATOR (IS2) '\x1f' # 0x1F -> UNIT SEPARATOR (IS1) '\x80' # 0x20 -> PADDING CHARACTER (PAD) '\x81' # 0x21 -> HIGH OCTET PRESET (HOP) '\x82' # 0x22 -> BREAK PERMITTED HERE (BPH) '\x83' # 0x23 -> NO BREAK HERE (NBH) '\x84' # 0x24 -> INDEX (IND) '\n' # 0x25 -> LINE FEED (LF) '\x17' # 0x26 -> END OF TRANSMISSION BLOCK (ETB) '\x1b' # 0x27 -> ESCAPE (ESC) '\x88' # 0x28 -> CHARACTER TABULATION SET (HTS) '\x89' # 0x29 -> CHARACTER TABULATION WITH JUSTIFICATION (HTJ) '\x8a' # 0x2A -> LINE TABULATION SET (VTS) '\x8b' # 0x2B -> PARTIAL LINE FORWARD (PLD) '\x8c' # 0x2C -> PARTIAL LINE BACKWARD (PLU) '\x05' # 0x2D -> ENQUIRY (ENQ) '\x06' # 0x2E -> ACKNOWLEDGE (ACK) '\x07' # 0x2F -> BELL (BEL) '\x90' # 0x30 -> DEVICE CONTROL STRING (DCS) '\x91' # 0x31 -> PRIVATE USE ONE (PU1) '\x16' # 0x32 -> SYNCHRONOUS IDLE (SYN) '\x93' # 0x33 -> SET TRANSMIT STATE (STS) '\x94' # 0x34 -> CANCEL CHARACTER (CCH) '\x95' # 0x35 -> MESSAGE WAITING (MW) '\x96' # 0x36 -> START OF GUARDED AREA (SPA) '\x04' # 0x37 -> END OF TRANSMISSION (EOT) '\x98' # 0x38 -> START OF STRING (SOS) '\x99' # 0x39 -> SINGLE GRAPHIC CHARACTER INTRODUCER (SGCI) '\x9a' # 0x3A -> SINGLE CHARACTER INTRODUCER (SCI) '\x9b' # 0x3B -> CONTROL SEQUENCE INTRODUCER (CSI) '\x14' # 0x3C -> DEVICE CONTROL FOUR (DC4) '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE (NAK) '\x9e' # 0x3E -> PRIVACY MESSAGE (PM) '\x1a' # 0x3F -> SUBSTITUTE (SUB) ' ' # 0x40 -> SPACE '\xa0' # 0x41 -> NO-BREAK SPACE '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '{' # 0x43 -> LEFT CURLY BRACKET '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe7' # 0x48 -> LATIN SMALL 
LETTER C WITH CEDILLA '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE '\xc4' # 0x4A -> LATIN CAPITAL LETTER A WITH DIAERESIS '.' # 0x4B -> FULL STOP '<' # 0x4C -> LESS-THAN SIGN '(' # 0x4D -> LEFT PARENTHESIS '+' # 0x4E -> PLUS SIGN '!' # 0x4F -> EXCLAMATION MARK '&' # 0x50 -> AMPERSAND '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE '~' # 0x59 -> TILDE '\xdc' # 0x5A -> LATIN CAPITAL LETTER U WITH DIAERESIS '$' # 0x5B -> DOLLAR SIGN '*' # 0x5C -> ASTERISK ')' # 0x5D -> RIGHT PARENTHESIS ';' # 0x5E -> SEMICOLON '^' # 0x5F -> CIRCUMFLEX ACCENT '-' # 0x60 -> HYPHEN-MINUS '/' # 0x61 -> SOLIDUS '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '[' # 0x63 -> LEFT SQUARE BRACKET '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE '\xf6' # 0x6A -> LATIN SMALL LETTER O WITH DIAERESIS ',' # 0x6B -> COMMA '%' # 0x6C -> PERCENT SIGN '_' # 0x6D -> LOW LINE '>' # 0x6E -> GREATER-THAN SIGN '?' 
# 0x6F -> QUESTION MARK '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE '`' # 0x79 -> GRAVE ACCENT ':' # 0x7A -> COLON '#' # 0x7B -> NUMBER SIGN '\xa7' # 0x7C -> SECTION SIGN "'" # 0x7D -> APOSTROPHE '=' # 0x7E -> EQUALS SIGN '"' # 0x7F -> QUOTATION MARK '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE 'a' # 0x81 -> LATIN SMALL LETTER A 'b' # 0x82 -> LATIN SMALL LETTER B 'c' # 0x83 -> LATIN SMALL LETTER C 'd' # 0x84 -> LATIN SMALL LETTER D 'e' # 0x85 -> LATIN SMALL LETTER E 'f' # 0x86 -> LATIN SMALL LETTER F 'g' # 0x87 -> LATIN SMALL LETTER G 'h' # 0x88 -> LATIN SMALL LETTER H 'i' # 0x89 -> LATIN SMALL LETTER I '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (Icelandic) '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (Icelandic) '\xb1' # 0x8F -> PLUS-MINUS SIGN '\xb0' # 0x90 -> DEGREE SIGN 'j' # 0x91 -> LATIN SMALL LETTER J 'k' # 0x92 -> LATIN SMALL LETTER K 'l' # 0x93 -> LATIN SMALL LETTER L 'm' # 0x94 -> LATIN SMALL LETTER M 'n' # 0x95 -> LATIN SMALL LETTER N 'o' # 0x96 -> LATIN SMALL LETTER O 'p' # 0x97 -> LATIN SMALL LETTER P 'q' # 0x98 -> LATIN SMALL LETTER Q 'r' # 0x99 -> LATIN SMALL LETTER R '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR '\xe6' # 0x9C -> LATIN SMALL LETTER AE '\xb8' # 0x9D -> CEDILLA '\xc6' # 0x9E -> LATIN CAPITAL LETTER AE '\xa4' # 0x9F -> CURRENCY SIGN '\xb5' # 0xA0 -> MICRO SIGN '\xdf' # 0xA1 -> LATIN SMALL LETTER SHARP S 
(German) 's' # 0xA2 -> LATIN SMALL LETTER S 't' # 0xA3 -> LATIN SMALL LETTER T 'u' # 0xA4 -> LATIN SMALL LETTER U 'v' # 0xA5 -> LATIN SMALL LETTER V 'w' # 0xA6 -> LATIN SMALL LETTER W 'x' # 0xA7 -> LATIN SMALL LETTER X 'y' # 0xA8 -> LATIN SMALL LETTER Y 'z' # 0xA9 -> LATIN SMALL LETTER Z '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK '\xbf' # 0xAB -> INVERTED QUESTION MARK '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (Icelandic) '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (Icelandic) '\xae' # 0xAF -> REGISTERED SIGN '\xa2' # 0xB0 -> CENT SIGN '\xa3' # 0xB1 -> POUND SIGN '\xa5' # 0xB2 -> YEN SIGN '\xb7' # 0xB3 -> MIDDLE DOT '\xa9' # 0xB4 -> COPYRIGHT SIGN '@' # 0xB5 -> COMMERCIAL AT '\xb6' # 0xB6 -> PILCROW SIGN '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS '\xac' # 0xBA -> NOT SIGN '|' # 0xBB -> VERTICAL LINE '\u203e' # 0xBC -> OVERLINE '\xa8' # 0xBD -> DIAERESIS '\xb4' # 0xBE -> ACUTE ACCENT '\xd7' # 0xBF -> MULTIPLICATION SIGN '\xe4' # 0xC0 -> LATIN SMALL LETTER A WITH DIAERESIS 'A' # 0xC1 -> LATIN CAPITAL LETTER A 'B' # 0xC2 -> LATIN CAPITAL LETTER B 'C' # 0xC3 -> LATIN CAPITAL LETTER C 'D' # 0xC4 -> LATIN CAPITAL LETTER D 'E' # 0xC5 -> LATIN CAPITAL LETTER E 'F' # 0xC6 -> LATIN CAPITAL LETTER F 'G' # 0xC7 -> LATIN CAPITAL LETTER G 'H' # 0xC8 -> LATIN CAPITAL LETTER H 'I' # 0xC9 -> LATIN CAPITAL LETTER I '\xad' # 0xCA -> SOFT HYPHEN '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xa6' # 0xCC -> BROKEN BAR '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE '\xfc' # 0xD0 -> LATIN SMALL LETTER U WITH DIAERESIS 'J' # 0xD1 -> LATIN CAPITAL LETTER J 'K' # 0xD2 -> LATIN CAPITAL LETTER K 'L' # 0xD3 -> LATIN CAPITAL LETTER L 'M' # 0xD4 -> LATIN CAPITAL LETTER M 'N' # 0xD5 -> LATIN CAPITAL LETTER N 'O' # 0xD6 -> LATIN CAPITAL LETTER O 'P' 
# 0xD7 -> LATIN CAPITAL LETTER P 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q 'R' # 0xD9 -> LATIN CAPITAL LETTER R '\xb9' # 0xDA -> SUPERSCRIPT ONE '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '}' # 0xDC -> RIGHT CURLY BRACKET '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS '\xd6' # 0xE0 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xf7' # 0xE1 -> DIVISION SIGN 'S' # 0xE2 -> LATIN CAPITAL LETTER S 'T' # 0xE3 -> LATIN CAPITAL LETTER T 'U' # 0xE4 -> LATIN CAPITAL LETTER U 'V' # 0xE5 -> LATIN CAPITAL LETTER V 'W' # 0xE6 -> LATIN CAPITAL LETTER W 'X' # 0xE7 -> LATIN CAPITAL LETTER X 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z '\xb2' # 0xEA -> SUPERSCRIPT TWO '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\\' # 0xEC -> REVERSE SOLIDUS '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE '0' # 0xF0 -> DIGIT ZERO '1' # 0xF1 -> DIGIT ONE '2' # 0xF2 -> DIGIT TWO '3' # 0xF3 -> DIGIT THREE '4' # 0xF4 -> DIGIT FOUR '5' # 0xF5 -> DIGIT FIVE '6' # 0xF6 -> DIGIT SIX '7' # 0xF7 -> DIGIT SEVEN '8' # 0xF8 -> DIGIT EIGHT '9' # 0xF9 -> DIGIT NINE '\xb3' # 0xFA -> SUPERSCRIPT THREE '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX ']' # 0xFC -> RIGHT SQUARE BRACKET '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE '\x9f' # 0xFF -> APPLICATION PROGRAM COMMAND (APC) ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
apache-2.0
Samuel789/MediPi
MedManagementWeb/env/lib/python3.5/site-packages/setuptools/command/build_clib.py
314
4484
import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group


class build_clib(orig.build_clib):
    """
    Override the default build_clib behaviour to do the following:

    1. Implement a rudimentary timestamp-based dependency system
       so 'compile()' doesn't run every time.
    2. Add more keys to the 'build_info' dictionary:
        * obj_deps - specify dependencies for each object compiled.
                     this should be a dictionary mapping a key
                     with the source filename to a list of
                     dependencies. Use an empty string for global
                     dependencies.
        * cflags   - specify a list of additional flags to pass to
                     the compiler.
    """

    def build_libraries(self, libraries):
        # 'libraries' is the list of (name, build_info) pairs from the
        # distribution's 'libraries' option.
        for (lib_name, build_info) in libraries:
            sources = build_info.get('sources')
            # 'sources' is mandatory and must be a list/tuple of filenames.
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'sources' must be present and must be "
                    "a list of source filenames" % lib_name)
            sources = list(sources)

            log.info("building '%s' library", lib_name)

            # Make sure everything is the correct type.
            # obj_deps should be a dictionary of keys as sources
            # and a list/tuple of files that are its dependencies.
            obj_deps = build_info.get('obj_deps', dict())
            if not isinstance(obj_deps, dict):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)
            # One dependency list per source, in source order (parallel to
            # the 'expected_objects' list computed below).
            dependencies = []

            # Get the global dependencies that are specified by the '' key.
            # These will go into every source's dependency list.
            global_deps = obj_deps.get('', list())
            if not isinstance(global_deps, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)

            # Build the list to be used by newer_pairwise_group
            # each source will be auto-added to its dependencies.
            for source in sources:
                src_deps = [source]
                src_deps.extend(global_deps)
                extra_deps = obj_deps.get(source, list())
                if not isinstance(extra_deps, (list, tuple)):
                    raise DistutilsSetupError(
                        "in 'libraries' option (library '%s'), "
                        "'obj_deps' must be a dictionary of "
                        "type 'source: list'" % lib_name)
                src_deps.extend(extra_deps)
                dependencies.append(src_deps)

            # Object files the compiler would produce for these sources;
            # compared against the dependency timestamps below.
            expected_objects = self.compiler.object_filenames(
                    sources,
                    output_dir=self.build_temp
                    )

            # newer_pairwise_group returns ([], []) when no object is out
            # of date with respect to its dependency group -- in that case
            # skip compilation entirely (the timestamp-based caching this
            # subclass exists for).
            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
                # First, compile the source code to object files in the library
                # directory.  (This should probably change to putting object
                # files in a temporary build directory.)
                macros = build_info.get('macros')
                include_dirs = build_info.get('include_dirs')
                cflags = build_info.get('cflags')
                objects = self.compiler.compile(
                        sources,
                        output_dir=self.build_temp,
                        macros=macros,
                        include_dirs=include_dirs,
                        extra_postargs=cflags,
                        debug=self.debug
                        )

                # Now "link" the object files together into a static library.
                # (On Unix at least, this isn't really linking -- it just
                # builds an archive.  Whatever.)
                self.compiler.create_static_lib(
                        expected_objects,
                        lib_name,
                        output_dir=self.build_clib,
                        debug=self.debug
                        )
apache-2.0
buntyke/Flask
microblog/flask/lib/python2.7/site-packages/pip/_vendor/distlib/version.py
426
23711
# -*- coding: utf-8 -*- # # Copyright (C) 2012-2014 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """ Implementation of a flexible versioning scheme providing support for PEP-386, distribute-compatible and semantic versioning. """ import logging import re from .compat import string_types __all__ = ['NormalizedVersion', 'NormalizedMatcher', 'LegacyVersion', 'LegacyMatcher', 'SemanticVersion', 'SemanticMatcher', 'UnsupportedVersionError', 'get_scheme'] logger = logging.getLogger(__name__) class UnsupportedVersionError(ValueError): """This is an unsupported version.""" pass class Version(object): def __init__(self, s): self._string = s = s.strip() self._parts = parts = self.parse(s) assert isinstance(parts, tuple) assert len(parts) > 0 def parse(self, s): raise NotImplementedError('please implement in a subclass') def _check_compatible(self, other): if type(self) != type(other): raise TypeError('cannot compare %r and %r' % (self, other)) def __eq__(self, other): self._check_compatible(other) return self._parts == other._parts def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): self._check_compatible(other) return self._parts < other._parts def __gt__(self, other): return not (self.__lt__(other) or self.__eq__(other)) def __le__(self, other): return self.__lt__(other) or self.__eq__(other) def __ge__(self, other): return self.__gt__(other) or self.__eq__(other) # See http://docs.python.org/reference/datamodel#object.__hash__ def __hash__(self): return hash(self._parts) def __repr__(self): return "%s('%s')" % (self.__class__.__name__, self._string) def __str__(self): return self._string @property def is_prerelease(self): raise NotImplementedError('Please implement in subclasses.') class Matcher(object): version_class = None dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?") comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$') num_re = re.compile(r'^\d+(\.\d+)*$') # value is either a callable or the 
name of a method _operators = { '<': lambda v, c, p: v < c, '>': lambda v, c, p: v > c, '<=': lambda v, c, p: v == c or v < c, '>=': lambda v, c, p: v == c or v > c, '==': lambda v, c, p: v == c, '===': lambda v, c, p: v == c, # by default, compatible => >=. '~=': lambda v, c, p: v == c or v > c, '!=': lambda v, c, p: v != c, } def __init__(self, s): if self.version_class is None: raise ValueError('Please specify a version class') self._string = s = s.strip() m = self.dist_re.match(s) if not m: raise ValueError('Not valid: %r' % s) groups = m.groups('') self.name = groups[0].strip() self.key = self.name.lower() # for case-insensitive comparisons clist = [] if groups[2]: constraints = [c.strip() for c in groups[2].split(',')] for c in constraints: m = self.comp_re.match(c) if not m: raise ValueError('Invalid %r in %r' % (c, s)) groups = m.groups() op = groups[0] or '~=' s = groups[1] if s.endswith('.*'): if op not in ('==', '!='): raise ValueError('\'.*\' not allowed for ' '%r constraints' % op) # Could be a partial version (e.g. for '2.*') which # won't parse as a version, so keep it as a string vn, prefix = s[:-2], True if not self.num_re.match(vn): # Just to check that vn is a valid version self.version_class(vn) else: # Should parse as a version, so we can create an # instance for the comparison vn, prefix = self.version_class(s), False clist.append((op, vn, prefix)) self._parts = tuple(clist) def match(self, version): """ Check if the provided version matches the constraints. :param version: The version to match against this instance. :type version: Strring or :class:`Version` instance. 
""" if isinstance(version, string_types): version = self.version_class(version) for operator, constraint, prefix in self._parts: f = self._operators.get(operator) if isinstance(f, string_types): f = getattr(self, f) if not f: msg = ('%r not implemented ' 'for %s' % (operator, self.__class__.__name__)) raise NotImplementedError(msg) if not f(version, constraint, prefix): return False return True @property def exact_version(self): result = None if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): result = self._parts[0][1] return result def _check_compatible(self, other): if type(self) != type(other) or self.name != other.name: raise TypeError('cannot compare %s and %s' % (self, other)) def __eq__(self, other): self._check_compatible(other) return self.key == other.key and self._parts == other._parts def __ne__(self, other): return not self.__eq__(other) # See http://docs.python.org/reference/datamodel#object.__hash__ def __hash__(self): return hash(self.key) + hash(self._parts) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._string) def __str__(self): return self._string PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' r'(\.(post)(\d+))?(\.(dev)(\d+))?' 
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') def _pep_440_key(s): s = s.strip() m = PEP440_VERSION_RE.match(s) if not m: raise UnsupportedVersionError('Not a valid version: %s' % s) groups = m.groups() nums = tuple(int(v) for v in groups[1].split('.')) while len(nums) > 1 and nums[-1] == 0: nums = nums[:-1] if not groups[0]: epoch = 0 else: epoch = int(groups[0]) pre = groups[4:6] post = groups[7:9] dev = groups[10:12] local = groups[13] if pre == (None, None): pre = () else: pre = pre[0], int(pre[1]) if post == (None, None): post = () else: post = post[0], int(post[1]) if dev == (None, None): dev = () else: dev = dev[0], int(dev[1]) if local is None: local = () else: parts = [] for part in local.split('.'): # to ensure that numeric compares as > lexicographic, avoid # comparing them directly, but encode a tuple which ensures # correct sorting if part.isdigit(): part = (1, int(part)) else: part = (0, part) parts.append(part) local = tuple(parts) if not pre: # either before pre-release, or final release and after if not post and dev: # before pre-release pre = ('a', -1) # to sort before a0 else: pre = ('z',) # to sort after all pre-releases # now look at the state of post and dev. if not post: post = ('_',) # sort before 'a' if not dev: dev = ('final',) #print('%s -> %s' % (s, m.groups())) return epoch, nums, pre, post, dev, local _normalized_key = _pep_440_key class NormalizedVersion(Version): """A rational version. Good: 1.2 # equivalent to "1.2.0" 1.2.0 1.2a1 1.2.3a2 1.2.3b1 1.2.3c1 1.2.3.4 TODO: fill this out Bad: 1 # mininum two numbers 1.2a # release level must have a release serial 1.2.3b """ def parse(self, s): result = _normalized_key(s) # _normalized_key loses trailing zeroes in the release # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 # However, PEP 440 prefix matching needs it: for example, # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). 
m = PEP440_VERSION_RE.match(s) # must succeed groups = m.groups() self._release_clause = tuple(int(v) for v in groups[1].split('.')) return result PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) @property def is_prerelease(self): return any(t[0] in self.PREREL_TAGS for t in self._parts if t) def _match_prefix(x, y): x = str(x) y = str(y) if x == y: return True if not x.startswith(y): return False n = len(y) return x[n] == '.' class NormalizedMatcher(Matcher): version_class = NormalizedVersion # value is either a callable or the name of a method _operators = { '~=': '_match_compatible', '<': '_match_lt', '>': '_match_gt', '<=': '_match_le', '>=': '_match_ge', '==': '_match_eq', '===': '_match_arbitrary', '!=': '_match_ne', } def _adjust_local(self, version, constraint, prefix): if prefix: strip_local = '+' not in constraint and version._parts[-1] else: # both constraint and version are # NormalizedVersion instances. # If constraint does not have a local component, # ensure the version doesn't, either. 
strip_local = not constraint._parts[-1] and version._parts[-1] if strip_local: s = version._string.split('+', 1)[0] version = self.version_class(s) return version, constraint def _match_lt(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version >= constraint: return False release_clause = constraint._release_clause pfx = '.'.join([str(i) for i in release_clause]) return not _match_prefix(version, pfx) def _match_gt(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version <= constraint: return False release_clause = constraint._release_clause pfx = '.'.join([str(i) for i in release_clause]) return not _match_prefix(version, pfx) def _match_le(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) return version <= constraint def _match_ge(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) return version >= constraint def _match_eq(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if not prefix: result = (version == constraint) else: result = _match_prefix(version, constraint) return result def _match_arbitrary(self, version, constraint, prefix): return str(version) == str(constraint) def _match_ne(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if not prefix: result = (version != constraint) else: result = not _match_prefix(version, constraint) return result def _match_compatible(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version == constraint: return True if version < constraint: return False # if not prefix: # return True release_clause = constraint._release_clause if len(release_clause) > 1: release_clause = release_clause[:-1] pfx = '.'.join([str(i) for i in 
release_clause]) return _match_prefix(version, pfx) _REPLACEMENTS = ( (re.compile('[.+-]$'), ''), # remove trailing puncts (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start (re.compile('^[.-]'), ''), # remove leading puncts (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) (re.compile('[.]{2,}'), '.'), # multiple runs of '.' (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha (re.compile(r'\b(pre-alpha|prealpha)\b'), 'pre.alpha'), # standardise (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses ) _SUFFIX_REPLACEMENTS = ( (re.compile('^[:~._+-]+'), ''), # remove leading puncts (re.compile('[,*")([\]]'), ''), # remove unwanted chars (re.compile('[~:+_ -]'), '.'), # replace illegal chars (re.compile('[.]{2,}'), '.'), # multiple runs of '.' (re.compile(r'\.$'), ''), # trailing '.' ) _NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') def _suggest_semantic_version(s): """ Try to suggest a semantic form for a version for which _suggest_normalized_version couldn't come up with anything. """ result = s.strip().lower() for pat, repl in _REPLACEMENTS: result = pat.sub(repl, result) if not result: result = '0.0.0' # Now look for numeric prefix, and separate it out from # the rest. #import pdb; pdb.set_trace() m = _NUMERIC_PREFIX.match(result) if not m: prefix = '0.0.0' suffix = result else: prefix = m.groups()[0].split('.') prefix = [int(i) for i in prefix] while len(prefix) < 3: prefix.append(0) if len(prefix) == 3: suffix = result[m.end():] else: suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] prefix = prefix[:3] prefix = '.'.join([str(i) for i in prefix]) suffix = suffix.strip() if suffix: #import pdb; pdb.set_trace() # massage the suffix. 
for pat, repl in _SUFFIX_REPLACEMENTS: suffix = pat.sub(repl, suffix) if not suffix: result = prefix else: sep = '-' if 'dev' in suffix else '+' result = prefix + sep + suffix if not is_semver(result): result = None return result def _suggest_normalized_version(s): """Suggest a normalized version close to the given version string. If you have a version string that isn't rational (i.e. NormalizedVersion doesn't like it) then you might be able to get an equivalent (or close) rational version from this function. This does a number of simple normalizations to the given string, based on observation of versions currently in use on PyPI. Given a dump of those version during PyCon 2009, 4287 of them: - 2312 (53.93%) match NormalizedVersion without change with the automatic suggestion - 3474 (81.04%) match when using this suggestion method @param s {str} An irrational version string. @returns A rational version string, or None, if couldn't determine one. """ try: _normalized_key(s) return s # already rational except UnsupportedVersionError: pass rs = s.lower() # part of this could use maketrans for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), ('beta', 'b'), ('rc', 'c'), ('-final', ''), ('-pre', 'c'), ('-release', ''), ('.release', ''), ('-stable', ''), ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), ('final', '')): rs = rs.replace(orig, repl) # if something ends with dev or pre, we add a 0 rs = re.sub(r"pre$", r"pre0", rs) rs = re.sub(r"dev$", r"dev0", rs) # if we have something like "b-2" or "a.2" at the end of the # version, that is pobably beta, alpha, etc # let's remove the dash or dot rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) # 1.0-dev-r371 -> 1.0.dev371 # 0.1-dev-r79 -> 0.1.dev79 rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) # Clean: v0.3, v1.0 if rs.startswith('v'): rs = rs[1:] # Clean leading '0's on numbers. 
#TODO: unintended side-effect on, e.g., "2003.05.09" # PyPI stats: 77 (~2%) better rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers # zero. # PyPI stats: 245 (7.56%) better rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) # the 'dev-rNNN' tag is a dev tag rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) # clean the - when used as a pre delimiter rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) # a terminal "dev" or "devel" can be changed into ".dev0" rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) # a terminal "dev" can be changed into ".dev0" rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) # a terminal "final" or "stable" can be removed rs = re.sub(r"(final|stable)$", "", rs) # The 'r' and the '-' tags are post release tags # 0.4a1.r10 -> 0.4a1.post10 # 0.9.33-17222 -> 0.9.33.post17222 # 0.9.33-r17222 -> 0.9.33.post17222 rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) # Clean 'r' instead of 'dev' usage: # 0.9.33+r17222 -> 0.9.33.dev17222 # 1.0dev123 -> 1.0.dev123 # 1.0.git123 -> 1.0.dev123 # 1.0.bzr123 -> 1.0.dev123 # 0.1a0dev.123 -> 0.1a0.dev123 # PyPI stats: ~150 (~4%) better rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: # 0.2.pre1 -> 0.2c1 # 0.2-c1 -> 0.2c1 # 1.0preview123 -> 1.0c123 # PyPI stats: ~21 (0.62%) better rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) # Tcl/Tk uses "px" for their post release markers rs = re.sub(r"p(\d+)$", r".post\1", rs) try: _normalized_key(rs) except UnsupportedVersionError: rs = None return rs # # Legacy version processing (distribute-compatible) # _VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) _VERSION_REPLACE = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', '': None, '.': None, } def _legacy_key(s): def get_parts(s): result = [] for p in _VERSION_PART.split(s.lower()): p = _VERSION_REPLACE.get(p, p) if p: if '0' <= p[:1] <= '9': p = p.zfill(8) 
else: p = '*' + p result.append(p) result.append('*final') return result result = [] for p in get_parts(s): if p.startswith('*'): if p < '*final': while result and result[-1] == '*final-': result.pop() while result and result[-1] == '00000000': result.pop() result.append(p) return tuple(result) class LegacyVersion(Version): def parse(self, s): return _legacy_key(s) @property def is_prerelease(self): result = False for x in self._parts: if (isinstance(x, string_types) and x.startswith('*') and x < '*final'): result = True break return result class LegacyMatcher(Matcher): version_class = LegacyVersion _operators = dict(Matcher._operators) _operators['~='] = '_match_compatible' numeric_re = re.compile('^(\d+(\.\d+)*)') def _match_compatible(self, version, constraint, prefix): if version < constraint: return False m = self.numeric_re.match(str(constraint)) if not m: logger.warning('Cannot compute compatible match for version %s ' ' and constraint %s', version, constraint) return True s = m.groups()[0] if '.' in s: s = s.rsplit('.', 1)[0] return _match_prefix(version, s) # # Semantic versioning # _SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) def is_semver(s): return _SEMVER_RE.match(s) def _semantic_key(s): def make_tuple(s, absent): if s is None: result = (absent,) else: parts = s[1:].split('.') # We can't compare ints and strings on Python 3, so fudge it # by zero-filling numeric values so simulate a numeric comparison result = tuple([p.zfill(8) if p.isdigit() else p for p in parts]) return result m = is_semver(s) if not m: raise UnsupportedVersionError(s) groups = m.groups() major, minor, patch = [int(i) for i in groups[:3]] # choose the '|' and '*' so that versions sort correctly pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*') return (major, minor, patch), pre, build class SemanticVersion(Version): def parse(self, s): return _semantic_key(s) @property def is_prerelease(self): return self._parts[1][0] != '|' class SemanticMatcher(Matcher): version_class = SemanticVersion class VersionScheme(object): def __init__(self, key, matcher, suggester=None): self.key = key self.matcher = matcher self.suggester = suggester def is_valid_version(self, s): try: self.matcher.version_class(s) result = True except UnsupportedVersionError: result = False return result def is_valid_matcher(self, s): try: self.matcher(s) result = True except UnsupportedVersionError: result = False return result def is_valid_constraint_list(self, s): """ Used for processing some metadata fields """ return self.is_valid_matcher('dummy_name (%s)' % s) def suggest(self, s): if self.suggester is None: result = None else: result = self.suggester(s) return result _SCHEMES = { 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, _suggest_normalized_version), 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s), 'semantic': VersionScheme(_semantic_key, SemanticMatcher, _suggest_semantic_version), } _SCHEMES['default'] = _SCHEMES['normalized'] def get_scheme(name): if name not in _SCHEMES: raise ValueError('unknown scheme name: %r' % name) return 
_SCHEMES[name]
mit
merlinmarek/ICTScan
Python/perspective_transformation/perspective_transformation.py
1
2001
#!/usr/bin/env python import sys import cv2 import numpy as np point_count = 0; y = []; x = []; def run_prespective_transform(): global src src_quad = np.array([(x[0], y[0]), (x[1], y[1]), (x[2], y[2]), (x[3], y[3])], np.float32); dst_quad = np.array([(0.0, 0.0), (1032.0, 0.0), (1032.0, 581.0), (0.0, 581.0)], np.float32); transf_matr = cv2.getPerspectiveTransform(src_quad, dst_quad); # src, dst, transf_img = cv2.warpPerspective(src, transf_matr, (1032, 581)); print transf_matr cv2.imwrite('pers_t.jpg', transf_img); cv2.namedWindow("Transformiert", cv2.WINDOW_AUTOSIZE); cv2.imshow("Transformiert", transf_img); grau = cv2.cvtColor(transf_img, cv2.COLOR_BGR2GRAY); cannyImg = cv2.Canny(grau, 50, 150, 3); cv2.namedWindow("Canny", cv2.WINDOW_AUTOSIZE); cv2.imshow("Canny", cannyImg); pass #par1 = 0 -> Mouse move #par1 = 1 -> Mouse down #par1 = 4 -> Mouse up #par 2 = x-coord #par3 = y-coord #par4 = ? #par5 = userdata def callback_onMouse(par1, par2, par3, par4, par5): global point_count; global src; if par1 == 1: point_count = point_count + 1; print("Point{2}: X:{0}; Y:{1}".format(par2, par3,point_count)); x.append(par2); y.append(par3); if point_count == 4: #cv2.line(src, (x[0], y[0]), (x[1], y[1]), (0, 0, 255), 1); #cv2.line(src, (x[1], y[1]), (x[2], y[2]), (0, 0, 255), 1); #cv2.line(src, (x[2], y[2]), (x[3], y[3]), (0, 0, 255), 1); #cv2.line(src, (x[3], y[3]), (x[0], y[0]), (0, 0, 255), 1); run_prespective_transform() cv2.imshow("Quelle", src); pass pass pass help_message = "USAGE: perspective_transform.py [<image>]\nSelect 4 Points in following order:\nupper-left, upper-right, bottom-right, bottom-left\nClose with 'Esc'\n" try: fn = sys.argv[1] except: print help_message exit() src = cv2.imread(fn, True); cv2.namedWindow("Quelle", cv2.WINDOW_NORMAL); cv2.imshow("Quelle", src); cv2.setMouseCallback("Quelle", callback_onMouse, "Hello World!"); c = 0; while c != 1048603: c = cv2.waitKey(0) print(c); pass
bsd-2-clause
scode/pants
src/python/pants/backend/codegen/tasks/antlr_gen.py
3
5069
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import logging import os import re from pants.backend.codegen.targets.java_antlr_library import JavaAntlrLibrary from pants.backend.codegen.tasks.simple_codegen_task import SimpleCodegenTask from pants.backend.jvm.targets.jar_dependency import JarDependency from pants.backend.jvm.targets.java_library import JavaLibrary from pants.backend.jvm.tasks.nailgun_task import NailgunTask from pants.base.exceptions import TaskError logger = logging.getLogger(__name__) def antlr4_jar(name): return JarDependency(org='org.antlr', name=name, rev='4.1') _DEFAULT_ANTLR_DEPS = { 'antlr3': ('//:antlr-3.4', [JarDependency(org='org.antlr', name='antlr', rev='3.4')]), 'antlr4': ('//:antlr-4', [antlr4_jar(name='antlr4'), antlr4_jar(name='antlr4-runtime')]) } class AntlrGen(SimpleCodegenTask, NailgunTask): class AmbiguousPackageError(TaskError): """Raised when a java package cannot be unambiguously determined for a JavaAntlrLibrary.""" class AntlrIsolatedCodegenStrategy(SimpleCodegenTask.IsolatedCodegenStrategy): def find_sources(self, target): sources = super(AntlrGen.AntlrIsolatedCodegenStrategy, self).find_sources(target) return [source for source in sources if source.endswith('.java')] @classmethod def register_options(cls, register): super(AntlrGen, cls).register_options(register) for key, (classpath_spec, classpath) in _DEFAULT_ANTLR_DEPS.items(): cls.register_jvm_tool(register, key, classpath=classpath, classpath_spec=classpath_spec) def is_gentarget(self, target): return isinstance(target, JavaAntlrLibrary) @property def synthetic_target_type(self): return JavaLibrary @classmethod def supported_strategy_types(cls): return [cls.AntlrIsolatedCodegenStrategy] def execute_codegen(self, targets): for target in 
targets: args = ['-o', self.codegen_workdir(target)] compiler = target.compiler if compiler == 'antlr3': if target.package is not None: logger.warn("The 'package' attribute is not supported for antlr3 and will be ignored.") java_main = 'org.antlr.Tool' elif compiler == 'antlr4': args.append('-visitor') # Generate Parse Tree Visitor As Well # Note that this assumes that there is no package set in the antlr file itself, # which is considered an ANTLR best practice. args.append('-package') if target.package is None: args.append(self._get_sources_package(target)) else: args.append(target.package) java_main = 'org.antlr.v4.Tool' antlr_classpath = self.tool_classpath(compiler) sources = self._calculate_sources([target]) args.extend(sources) result = self.runjava(classpath=antlr_classpath, main=java_main, args=args, workunit_name='antlr') if result != 0: raise TaskError('java {} ... exited non-zero ({})'.format(java_main, result)) if compiler == 'antlr3': for source in list(self.codegen_strategy.find_sources(target)): self._scrub_generated_timestamp(source) def synthetic_target_extra_dependencies(self, target): # Fetch the right java dependency from the target's compiler option compiler_classpath_spec = self.get_options()[target.compiler] return self.resolve_deps([compiler_classpath_spec]) # This checks to make sure that all of the sources have an identical package source structure, and # if they do, uses that as the package. If they are different, then the user will need to set the # package as it cannot be correctly inferred. def _get_sources_package(self, target): parents = set([os.path.dirname(source) for source in target.sources_relative_to_source_root()]) if len(parents) != 1: raise self.AmbiguousPackageError('Antlr sources in multiple directories, cannot infer ' 'package. 
Please set package member in antlr target.') return parents.pop().replace('/', '.') def _calculate_sources(self, targets): sources = set() def collect_sources(target): if self.is_gentarget(target): sources.update(target.sources_relative_to_buildroot()) for target in targets: target.walk(collect_sources) return sources _COMMENT_WITH_TIMESTAMP_RE = re.compile('^//.*\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d') def _scrub_generated_timestamp(self, source): # Removes the first line of comment if it contains a timestamp. with open(source) as f: lines = f.readlines() if len(lines) < 1: return with open(source, 'w') as f: if not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0]): f.write(lines[0]) for line in lines[1:]: f.write(line)
apache-2.0
t-abe/chainer
cupy/testing/array.py
15
1595
import numpy.testing import cupy # NumPy-like assertion functions that accept both NumPy and CuPy arrays def assert_allclose(actual, desired, rtol=1e-7, atol=0, err_msg='', verbose=True): numpy.testing.assert_allclose( cupy.asnumpy(actual), cupy.asnumpy(desired), rtol=rtol, atol=atol, err_msg=err_msg, verbose=verbose) def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): numpy.testing.assert_array_almost_equal( cupy.asnumpy(x), cupy.asnumpy(y), decimal=decimal, err_msg=err_msg, verbose=verbose) def assert_arrays_almost_equal_nulp(x, y, nulp=1): numpy.testing.assert_arrays_almost_equal_nulp( cupy.asnumpy(x), cupy.asnumpy(y), nulp=nulp) def assert_array_max_ulp(a, b, maxulp=1, dtype=None): numpy.testing.assert_array_max_ulp( cupy.asnumpy(a), cupy.asnumpy(b), maxulp=maxulp, dtype=dtype) def assert_array_equal(x, y, err_msg='', verbose=True): numpy.testing.assert_array_equal( cupy.asnumpy(x), cupy.asnumpy(y), err_msg=err_msg, verbose=verbose) def assert_array_list_equal(xlist, ylist, err_msg='', verbose=True): if len(xlist) != len(ylist): raise AssertionError('List size is different') for x, y in zip(xlist, ylist): numpy.testing.assert_array_equal( cupy.asnumpy(x), cupy.asnumpy(y), err_msg=err_msg, verbose=verbose) def assert_array_less(x, y, err_msg='', verbose=True): numpy.testing.assert_array_less( cupy.asnumpy(x), cupy.asnumpy(y), err_msg=err_msg, verbose=verbose)
mit
ticosax/django
django/db/backends/dummy/base.py
429
2570
""" Dummy database backend for Django. Django uses this if the database ENGINE setting is empty (None or empty string). Each of these API functions, except connection.close(), raises ImproperlyConfigured. """ from django.core.exceptions import ImproperlyConfigured from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.base.client import BaseDatabaseClient from django.db.backends.base.creation import BaseDatabaseCreation from django.db.backends.base.introspection import BaseDatabaseIntrospection from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.base.validation import BaseDatabaseValidation from django.db.backends.dummy.features import DummyDatabaseFeatures def complain(*args, **kwargs): raise ImproperlyConfigured("settings.DATABASES is improperly configured. " "Please supply the ENGINE value. Check " "settings documentation for more details.") def ignore(*args, **kwargs): pass class DatabaseError(Exception): pass class IntegrityError(DatabaseError): pass class DatabaseOperations(BaseDatabaseOperations): quote_name = complain class DatabaseClient(BaseDatabaseClient): runshell = complain class DatabaseCreation(BaseDatabaseCreation): create_test_db = ignore destroy_test_db = ignore class DatabaseIntrospection(BaseDatabaseIntrospection): get_table_list = complain get_table_description = complain get_relations = complain get_indexes = complain get_key_columns = complain class DatabaseWrapper(BaseDatabaseWrapper): operators = {} # Override the base class implementations with null # implementations. Anything that tries to actually # do something raises complain; anything that tries # to rollback or undo something raises ignore. 
_cursor = complain ensure_connection = complain _commit = complain _rollback = ignore _close = ignore _savepoint = ignore _savepoint_commit = complain _savepoint_rollback = ignore _set_autocommit = complain def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.features = DummyDatabaseFeatures(self) self.ops = DatabaseOperations(self) self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = BaseDatabaseValidation(self) def is_usable(self): return True
bsd-3-clause
ff94315/hiwifi-openwrt-HC5661-HC5761
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/io.py
191
3624
"""The io module provides the Python interfaces to stream handling. The builtin open function is defined in this module. At the top of the I/O hierarchy is the abstract base class IOBase. It defines the basic interface to a stream. Note, however, that there is no separation between reading and writing to streams; implementations are allowed to throw an IOError if they do not support a given operation. Extending IOBase is RawIOBase which deals simply with the reading and writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide an interface to OS files. BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer streams that are readable, writable, and both respectively. BufferedRandom provides a buffered interface to random access streams. BytesIO is a simple stream of in-memory bytes. Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO is a in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. data: DEFAULT_BUFFER_SIZE An int containing the default buffer size used by the module's buffered I/O classes. open() uses the file's blksize (as obtained by os.stat) if possible. """ # New I/O library conforming to PEP 3116. 
# XXX edge cases when switching between reading/writing # XXX need to support 1 meaning line-buffered # XXX whenever an argument is None, use the default value # XXX read/write ops should check readable/writable # XXX buffered readinto should work with arbitrary buffer objects # XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG # XXX check writable, readable and seekable in appropriate places __author__ = ("Guido van Rossum <guido@python.org>, " "Mike Verdone <mike.verdone@gmail.com>, " "Mark Russell <mark.russell@zen.co.uk>, " "Antoine Pitrou <solipsis@pitrou.net>, " "Amaury Forgeot d'Arc <amauryfa@gmail.com>, " "Benjamin Peterson <benjamin@python.org>") __all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO", "BytesIO", "StringIO", "BufferedIOBase", "BufferedReader", "BufferedWriter", "BufferedRWPair", "BufferedRandom", "TextIOBase", "TextIOWrapper", "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"] import _io import abc from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, open, FileIO, BytesIO, StringIO, BufferedReader, BufferedWriter, BufferedRWPair, BufferedRandom, IncrementalNewlineDecoder, TextIOWrapper) OpenWrapper = _io.open # for compatibility with _pyio # for seek() SEEK_SET = 0 SEEK_CUR = 1 SEEK_END = 2 # Declaring ABCs in C is tricky so we do it here. # Method descriptions and default implementations are inherited from the C # version however. class IOBase(_io._IOBase): __metaclass__ = abc.ABCMeta class RawIOBase(_io._RawIOBase, IOBase): pass class BufferedIOBase(_io._BufferedIOBase, IOBase): pass class TextIOBase(_io._TextIOBase, IOBase): pass RawIOBase.register(FileIO) for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, BufferedRWPair): BufferedIOBase.register(klass) for klass in (StringIO, TextIOWrapper): TextIOBase.register(klass) del klass
gpl-2.0
maciejkula/scipy
scipy/sparse/linalg/isolve/minres.py
128
9207
from __future__ import division, print_function, absolute_import

from numpy import sqrt, inner, finfo, zeros
from numpy.linalg import norm

from .utils import make_system
from .iterative import set_docstring

__all__ = ['minres']


header = \
"""Use MINimum RESidual iteration to solve Ax=b

MINRES minimizes norm(A*x - b) for a real symmetric matrix A.  Unlike
the Conjugate Gradient method, A can be indefinite or singular.

If shift != 0 then the method solves (A - shift*I)x = b
"""

Ainfo = "The real symmetric N-by-N matrix of the linear system"

footer = \
"""
Notes
-----
THIS FUNCTION IS EXPERIMENTAL AND SUBJECT TO CHANGE!

References
----------
Solution of sparse indefinite systems of linear equations,
    C. C. Paige and M. A. Saunders (1975),
    SIAM J. Numer. Anal. 12(4), pp. 617-629.
    http://www.stanford.edu/group/SOL/software/minres.html

This file is a translation of the following MATLAB implementation:
    http://www.stanford.edu/group/SOL/software/minres/matlab/

"""


@set_docstring(header, Ainfo, footer)
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None,
           M=None, callback=None, show=False, check=False):
    # Normalize A/M into operator objects and b/x0 into arrays of a common
    # dtype; postprocess maps the raw result back to the caller's type.
    A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)

    matvec = A.matvec
    psolve = M.matvec

    first = 'Enter minres.   '
    last = 'Exit  minres.   '

    n = A.shape[0]

    if maxiter is None:
        maxiter = 5 * n

    # Termination messages, printed as msg[istop+1] (istop ranges -1..9).
    # NOTE(review): the code below sets istop = 4 when Acond >= 0.1/eps,
    # which prints the "eigenvector" entry rather than the "acond" one --
    # verify this off-by-one against the upstream MATLAB translation.
    msg = [' beta2 = 0.  If M = I, b and x are eigenvectors    ',   # -1
            ' beta1 = 0.  The exact solution is  x = 0          ',   # 0
            ' A solution to Ax = b was found, given rtol        ',   # 1
            ' A least-squares solution was found, given rtol    ',   # 2
            ' Reasonable accuracy achieved, given eps           ',   # 3
            ' x has converged to an eigenvector                 ',   # 4
            ' acond has exceeded 0.1/eps                        ',   # 5
            ' The iteration limit was reached                   ',   # 6
            ' A  does not define a symmetric matrix             ',   # 7
            ' M  does not define a symmetric matrix             ',   # 8
            ' M  does not define a pos-def preconditioner       ']   # 9

    if show:
        print(first + 'Solution of symmetric Ax = b')
        print(first + 'n      =  %3g     shift  =  %23.14e' % (n,shift))
        print(first + 'itnlim =  %3g     rtol   =  %11.2e' % (maxiter,tol))
        print()

    istop = 0
    itn = 0
    Anorm = 0
    Acond = 0
    rnorm = 0
    ynorm = 0

    xtype = x.dtype

    eps = finfo(xtype).eps

    # NOTE(review): the initial guess x0 (folded into x by make_system) is
    # discarded here -- iteration always starts from the zero vector.
    x = zeros(n, dtype=xtype)

    # Set up y and v for the first Lanczos vector v1.
    # y  =  beta1 P' v1,  where  P = C**(-1).
    # v is really P' v1.

    y = b   # immediately overwritten by psolve(b) below; kept as upstream
    r1 = b

    y = psolve(b)

    # beta1 = b' M^{-1} b; must be >= 0 for a positive-definite M.
    beta1 = inner(b,y)

    if beta1 < 0:
        raise ValueError('indefinite preconditioner')
    elif beta1 == 0:
        # b = 0 (in the M inner product) => x = 0 is the exact solution.
        return (postprocess(x), 0)

    beta1 = sqrt(beta1)

    if check:
        # are these too strict?

        # see if A is symmetric: compare w'w with y'(A A y); for symmetric A
        # these agree up to roundoff epsa.
        w = matvec(y)
        r2 = matvec(w)
        s = inner(w,w)
        t = inner(y,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric matrix')

        # see if M is symmetric (same trick with psolve)
        r2 = psolve(y)
        s = inner(y,y)
        t = inner(r1,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric preconditioner')

    # Initialize other quantities
    oldb = 0
    beta = beta1
    dbar = 0
    epsln = 0
    qrnorm = beta1
    phibar = beta1
    rhs1 = beta1
    rhs2 = 0
    tnorm2 = 0
    ynorm2 = 0
    cs = -1
    sn = 0
    w = zeros(n, dtype=xtype)
    w2 = zeros(n, dtype=xtype)
    r2 = r1

    if show:
        print()
        print()
        print('   Itn     x(1)     Compatible    LS       norm(A)  cond(A) gbar/|A|')

    # Main Lanczos / QR-update loop (translation of the SOL MATLAB code).
    while itn < maxiter:
        itn += 1

        s = 1.0/beta
        v = s*y

        y = matvec(v)
        y = y - shift * v

        if itn >= 2:
            y = y - (beta/oldb)*r1

        alfa = inner(v,y)
        y = y - (alfa/beta)*r2
        r1 = r2
        r2 = y
        y = psolve(r2)
        oldb = beta
        beta = inner(r2,y)
        if beta < 0:
            # beta = r2' M^{-1} r2 < 0 should be impossible for symmetric A
            raise ValueError('non-symmetric matrix')
        beta = sqrt(beta)
        tnorm2 += alfa**2 + oldb**2 + beta**2

        if itn == 1:
            if beta/beta1 <= 10*eps:
                istop = -1  # Terminate later
            # tnorm2 = alfa**2 ??
            gmax = abs(alfa)
            gmin = gmax

        # Apply previous rotation Qk-1 to get
        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].

        oldeps = epsln
        delta = cs * dbar + sn * alfa   # delta1 = 0         deltak
        gbar = sn * dbar - cs * alfa    # gbar 1 = alfa1     gbar k
        epsln = sn * beta               # epsln2 = 0         epslnk+1
        dbar = - cs * beta              # dbar 2 = beta2     dbar k+1
        root = norm([gbar, dbar])
        Arnorm = phibar * root

        # Compute the next plane rotation Qk

        gamma = norm([gbar, beta])      # gammak
        gamma = max(gamma, eps)         # guard against division by zero
        cs = gbar / gamma               # ck
        sn = beta / gamma               # sk
        phi = cs * phibar               # phik
        phibar = sn * phibar            # phibark+1

        # Update  x.

        denom = 1.0/gamma
        w1 = w2
        w2 = w
        w = (v - oldeps*w1 - delta*w2) * denom
        x = x + phi*w

        # Go round again.

        gmax = max(gmax, gamma)
        gmin = min(gmin, gamma)
        z = rhs1 / gamma
        ynorm2 = z**2 + ynorm2
        rhs1 = rhs2 - delta*z
        rhs2 = - epsln*z

        # Estimate various norms and test for convergence.

        Anorm = sqrt(tnorm2)
        ynorm = sqrt(ynorm2)
        epsa = Anorm * eps
        epsx = Anorm * ynorm * eps
        epsr = Anorm * ynorm * tol
        diag = gbar

        if diag == 0:
            diag = epsa

        qrnorm = phibar
        rnorm = qrnorm
        test1 = rnorm / (Anorm*ynorm)   # ||r||  / (||A|| ||x||)
        test2 = root / Anorm            # ||Ar|| / (||A|| ||r||)

        # Estimate  cond(A).
        # In this version we look at the diagonals of  R  in the
        # factorization of the lower Hessenberg matrix,  Q * H = R,
        # where H is the tridiagonal matrix from Lanczos with one
        # extra row, beta(k+1) e_k^T.

        Acond = gmax/gmin

        # See if any of the stopping criteria are satisfied.
        # In rare cases, istop is already -1 from above (Abar = const*I).

        if istop == 0:
            t1 = 1 + test1      # These tests work if tol < eps
            t2 = 1 + test2
            if t2 <= 1:
                istop = 2
            if t1 <= 1:
                istop = 1

            if itn >= maxiter:
                istop = 6
            if Acond >= 0.1/eps:
                istop = 4
            if epsx >= beta:
                istop = 3
            # if rnorm <= epsx   : istop = 2
            # if rnorm <= epsr   : istop = 1
            if test2 <= tol:
                istop = 2
            if test1 <= tol:
                istop = 1

        # See if it is time to print something.

        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= maxiter-10:
            prnt = True
        if itn % 10 == 0:
            prnt = True
        if qrnorm <= 10*epsx:
            prnt = True
        if qrnorm <= 10*epsr:
            prnt = True
        if Acond <= 1e-2/eps:
            prnt = True
        if istop != 0:
            prnt = True

        if show and prnt:
            str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
            str2 = ' %10.3e' % (test2,)
            str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)

            print(str1 + str2 + str3)

            if itn % 10 == 0:
                print()

        if callback is not None:
            callback(x)

        if istop != 0:
            break  # TODO check this

    if show:
        print()
        print(last + ' istop   =  %3g               itn   =%5g' % (istop,itn))
        print(last + ' Anorm   =  %12.4e      Acond =  %12.4e' % (Anorm,Acond))
        print(last + ' rnorm   =  %12.4e      ynorm =  %12.4e' % (rnorm,ynorm))
        print(last + ' Arnorm  =  %12.4e' % (Arnorm,))
        print(last + msg[istop+1])

    # info > 0 signals "iteration limit reached"; 0 means converged.
    if istop == 6:
        info = maxiter
    else:
        info = 0

    return (postprocess(x),info)


if __name__ == '__main__':
    # Ad-hoc smoke test: diagonal system with a diagonal preconditioner.
    from scipy import ones, arange
    from scipy.linalg import norm
    from scipy.sparse import spdiags

    n = 10

    residuals = []

    def cb(x):
        residuals.append(norm(b - A*x))

    # A = poisson((10,),format='csr')
    A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    A.psolve = M.matvec
    b = 0*ones(A.shape[0])
    x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
    # x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
bsd-3-clause
Jonnymcc/ansible
lib/ansible/module_utils/shell.py
7
6101
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
import re
import socket

from ansible.module_utils.basic import get_exception

# py2 vs py3; replace with six via ansiballz
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

# paramiko is optional; HAS_PARAMIKO lets callers degrade gracefully.
try:
    import paramiko
    from paramiko.ssh_exception import AuthenticationException
    HAS_PARAMIKO = True
except ImportError:
    HAS_PARAMIKO = False

# NOTE(review): duplicate import -- get_exception is already imported above.
from ansible.module_utils.basic import get_exception

# Escape sequence emitted by some network CLIs; stripped from all output.
ANSI_RE = re.compile(r'(\x1b\[\?1h\x1b=)')

# Default patterns that recognize a device CLI prompt at end of output.
CLI_PROMPTS_RE = [
    re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>|#|%](?:\s*)$'),
    re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*\(.+\)#(?:\s*)$')
]

# Default patterns that recognize a device-side error in the output.
CLI_ERRORS_RE = [
    re.compile(r"% ?Error"),
    re.compile(r"^% \w+", re.M),
    re.compile(r"% ?Bad secret"),
    re.compile(r"invalid input", re.I),
    re.compile(r"(?:incomplete|ambiguous) command", re.I),
    re.compile(r"connection timed out", re.I),
    re.compile(r"[^\r\n]+ not found", re.I),
    re.compile(r"'[^']' +returned error code: ?\d+"),
    re.compile(r"syntax error"),
    re.compile(r"unknown command")
]


def to_list(val):
    """Coerce val to a list: sequences are copied, scalars wrapped, None -> []."""
    if isinstance(val, (list, tuple)):
        return list(val)
    elif val is not None:
        return [val]
    else:
        return list()


class ShellError(Exception):
    """Error raised for shell/CLI failures; carries the offending command."""

    def __init__(self, msg, command=None):
        super(ShellError, self).__init__(msg)
        self.message = msg
        self.command = command


class Command(object):
    """A CLI command plus optional interactive prompt/response pair."""

    def __init__(self, command, prompt=None, response=None):
        self.command = command
        self.prompt = prompt
        self.response = response

    def __str__(self):
        return self.command


class Shell(object):
    """Interactive SSH shell to a network device, built on paramiko."""

    def __init__(self, prompts_re=None, errors_re=None, kickstart=True):
        self.ssh = None
        self.shell = None

        self.kickstart = kickstart
        self._matched_prompt = None

        self.prompts = prompts_re or CLI_PROMPTS_RE
        self.errors = errors_re or CLI_ERRORS_RE

    def open(self, host, port=22, username=None, password=None,
             timeout=10, key_filename=None, pkey=None, look_for_keys=None,
             allow_agent=False):
        """Connect to host and start an interactive shell channel.

        Raises ShellError on DNS resolution or authentication failure.
        """
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # unless explicitly set, disable look for keys if a password is
        # present. this changes the default search order paramiko implements
        if not look_for_keys:
            look_for_keys = password is None

        try:
            self.ssh.connect(host, port=port, username=username, password=password,
                             timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
                             key_filename=key_filename, allow_agent=allow_agent)

            self.shell = self.ssh.invoke_shell()
            self.shell.settimeout(timeout)
        except socket.gaierror:
            raise ShellError("unable to resolve host name")
        except AuthenticationException:
            raise ShellError('Unable to authenticate to remote device')

        if self.kickstart:
            # send a newline to coax the device into printing its prompt
            self.shell.sendall("\n")

        self.receive()

    def strip(self, data):
        """Remove known ANSI escape sequences from data."""
        return ANSI_RE.sub('', data)

    def receive(self, cmd=None):
        """Read from the channel until a prompt is seen; return cleaned output."""
        recv = StringIO()

        while True:
            data = self.shell.recv(200)

            recv.write(data)
            # Rewind to inspect only the trailing window for a prompt.
            # NOTE(review): assumes at least 200 bytes are buffered; a first
            # read shorter than 200 bytes would seek to a negative offset.
            recv.seek(recv.tell() - 200)

            window = self.strip(recv.read())

            if isinstance(cmd, Command):
                # answer any interactive prompt (e.g. confirmation question)
                self.handle_input(window, prompt=cmd.prompt,
                                  response=cmd.response)

            try:
                if self.read(window):
                    resp = self.strip(recv.getvalue())
                    return self.sanitize(cmd, resp)
            except ShellError:
                # attach the command to the error before propagating
                exc = get_exception()
                exc.command = cmd
                raise

    def send(self, commands):
        """Send one or more commands; return the list of their responses.

        Raises ShellError on timeout or socket failure.
        """
        responses = list()
        try:
            for command in to_list(commands):
                cmd = '%s\r' % str(command)
                self.shell.sendall(cmd)
                responses.append(self.receive(command))
        except socket.timeout:
            raise ShellError("timeout trying to send command", cmd)
        except socket.error:
            exc = get_exception()
            raise ShellError("problem sending command to host: %s" % exc.message)
        return responses

    def close(self):
        """Close the shell channel (the SSH connection itself is untouched)."""
        self.shell.close()

    def handle_input(self, resp, prompt, response):
        """If resp matches an expected interactive prompt, send its answer."""
        if not prompt or not response:
            return

        prompt = to_list(prompt)
        response = to_list(response)

        for pr, ans in zip(prompt, response):
            match = pr.search(resp)
            if match:
                cmd = '%s\r' % ans
                self.shell.sendall(cmd)

    def sanitize(self, cmd, resp):
        """Strip the echoed command and prompt lines from the response."""
        cleaned = []
        for line in resp.splitlines():
            if line.startswith(str(cmd)) or self.read(line):
                continue
            cleaned.append(line)
        return "\n".join(cleaned)

    def read(self, response):
        """Return True when response ends at a prompt; raise on CLI errors."""
        for regex in self.errors:
            if regex.search(response):
                raise ShellError('matched error in response: %s' % response)

        for regex in self.prompts:
            match = regex.search(response)
            if match:
                self._matched_prompt = match.group()
                return True
gpl-3.0
ayan-usgs/PubsWarehouse_UI
server/pubs_ui/pubswh/tests/test_utils.py
4
27699
""" Tests for pubswh blueprint's utility functions """ import unittest from unittest.mock import MagicMock, patch import arrow import requests as r import requests_mock from .test_data import ( crossref_200_ok, crossref_200_not_ok, crossref_200_ok_2_date_parts, crossref_200_ok_1_date_part, crossref_200_ok_message_empty, unpaywall_200_ok, landing_present, \ null_landing) from ..utils import manipulate_doi_information, generate_sb_data, update_geographic_extents, create_store_info, \ get_altmetric_badge_img_links, SearchPublications, get_crossref_data, check_public_access, \ get_published_online_date, get_unpaywall_data, has_oa_link from ... import app unittest.TestCase.maxDiff = None class ManipulateDoiInformationTestCase(unittest.TestCase): """ Tests for create_display_links """ # pylint: disable=C0103,R0201,C0301 def test_will_doi_link_be_generated_from_doi(self): """given a DOI, will an index link be generated?""" simple_pubsdata = { 'publicationSubtype': { 'text': 'Journal Article' }, 'doi': '10.65165468/asdflasdfnlasdkf', 'links': [] } expected_pubsdata = { 'publicationSubtype': { 'text': 'Journal Article' }, 'doi': '10.65165468/asdflasdfnlasdkf', 'links': [ { "rank": None, "text": "Publisher Index Page (via DOI)", "type": { "id": 15, "text": "Index Page" }, "url": "https://doi.org/10.65165468/asdflasdfnlasdkf" } ] } assert manipulate_doi_information(simple_pubsdata) == expected_pubsdata def test_will_doi_link_and_chorus_be_generated_from_doi(self): """given a DOI, will an index link be generated? """ chorus_pubsdata = { 'publicationSubtype': { 'text': 'Journal Article' }, "chorus": { "auditedOn": "7/27/2015", "authors": "Boano F., Harvey J. W., Marion A., Packman A. 
I., Revelli R., Ridolfi L., Worman A.", "journalName": "Reviews of Geophysics", "publicationDate": "10/20/2014", "publisher": "Wiley-Blackwell", "url": "http://dx.doi.org/10.1002/2012rg000417", "publiclyAccessibleDate": "10/20/2014" }, "doi": "10.1002/2012RG000417", 'links': [] } expected_chorus_pubsdata = { 'chorus': { 'auditedOn': '7/27/2015', 'authors': 'Boano F., Harvey J. W., Marion A., Packman A. I., Revelli R., Ridolfi L., Worman A.', 'journalName': 'Reviews of Geophysics', 'publicationDate': '10/20/2014', 'publiclyAccessibleDate': '10/20/2014', 'publisher': 'Wiley-Blackwell', 'url': 'http://dx.doi.org/10.1002/2012rg000417'}, 'doi': '10.1002/2012RG000417', 'links': [{ 'linkHelpText': 'Publicly accessible after 10/20/2014 (public access data via <a href="http://www.chorusaccess.org" title="link to Chorus.org homepage">CHORUS</a>)', 'rank': None, 'text': 'Publisher Index Page (via DOI)', 'type': {'id': 15, 'text': 'Index Page'}, 'url': 'https://doi.org/10.1002/2012RG000417' }], 'publicationSubtype': {'text': 'Journal Article'} } assert manipulate_doi_information(chorus_pubsdata) == expected_chorus_pubsdata def test_will_missing_link_list_be_generated(self): """given a DOI, will an index link be generated even if the links list doesn't exist?""" simple_pubsdata = { 'publicationSubtype': { 'text': 'Journal Article' }, 'doi': '10.65165468/asdflasdfnlasdkf' } expected_pubsdata = { 'publicationSubtype': { 'text': 'Journal Article' }, 'doi': '10.65165468/asdflasdfnlasdkf', 'links': [ { "rank": None, "text": "Publisher Index Page (via DOI)", "type": { "id": 15, "text": "Index Page" }, "url": "https://doi.org/10.65165468/asdflasdfnlasdkf" } ] } assert manipulate_doi_information(simple_pubsdata) == expected_pubsdata def test_will_an_existing_in_the_link_list_be_maintained(self): """given a DOI and a pre-populated links list, will the original link be maintained in the list""" simple_pubsdata = { 'publicationSubtype': { 'text': 'Journal Article' }, 'doi': 
'10.65165468/asdflasdfnlasdkf', 'links': [{ "id": 294043, "type": { "id": 24, "text": "Thumbnail" }, "url": "http://pubs.er.usgs.gov/thumbnails/outside_thumb.jpg" }] } expected_pubsdata = { 'publicationSubtype': { 'text': 'Journal Article' }, 'doi': '10.65165468/asdflasdfnlasdkf', 'links': [ { "id": 294043, "type": { "id": 24, "text": "Thumbnail" }, "url": "http://pubs.er.usgs.gov/thumbnails/outside_thumb.jpg" }, { "rank": None, "text": "Publisher Index Page (via DOI)", "type": { "id": 15, "text": "Index Page" }, "url": "https://doi.org/10.65165468/asdflasdfnlasdkf" } ] } assert manipulate_doi_information(simple_pubsdata) == expected_pubsdata class GenerateScienceBaseData(unittest.TestCase): """ Tests for generate_sb_data """ # pylint: disable=C0103,R0201,C0301 replace_pubs_with_pubs_test = False supersedes_url = "https://pubs.er.usgs.gov/service/citation/json/extras?" json_ld_id_base_url = "https://pubs.er.usgs.gov" def test_will_a_basic_sb_record_be_generated_from_a_basic_pubs_record(self): """given a basic pubs record, will a decent sciencebase record be generated?""" simple_pubsdata = { "indexId": "sir20165122", "id": 70176077, "lastModifiedDate": "2016-09-23T15:22:41", "title": "Environmental conditions in the Namskaket Marsh Area, Orleans, Massachusetts", "docAbstract": "There is fog and rain and tides and sometomes sun and the tide keeps rising", "publicationType": { "id": 18, "text": "Report" }, "usgsCitation": "A carefully formatted citation with lots of extraneous em and en dashes", "scienceBaseUri": "567922a9e4b0da412f4fb509", 'links': [], 'interactions': [] } expected_sbdata = { "title": "Environmental conditions in the Namskaket Marsh Area, Orleans, Massachusetts", "id": "567922a9e4b0da412f4fb509", "identifiers": [{ "type": "local-index", "scheme": "unknown", "key": "sir20165122" }, { "type": "local-pk", "scheme": "unknown", "key": 70176077 }], "body": "There is fog and rain and tides and sometomes sun and the tide keeps rising", "citation": "A 
carefully formatted citation with lots of extraneous em and en dashes", "contacts": [], "dates": [], "tags": [], "browseCategories": [ "Publication" ], "browseTypes": [ "Citation" ], 'webLinks': [{ "type": "webLink", "uri": "http://pubs.er.usgs.gov/publication/sir20165122", "rel": "related", "title": "Publications Warehouse Index Page", "hidden": False }], 'facets': [{ 'citationType': 'Report', 'className': 'gov.sciencebase.catalog.item.facet.CitationFacet', 'conference': None, 'edition': None, 'journal': None, 'language': None, 'note': '', 'parts': [], 'tableOfContents': None }], "parentId": app.config['SCIENCEBASE_PARENT_UUID'] } self.assertEqual( generate_sb_data(simple_pubsdata, self.replace_pubs_with_pubs_test, self.supersedes_url, self.json_ld_id_base_url), expected_sbdata ) class CreateStoreInfoTestCase(unittest.TestCase): # pylint: disable=C0103,R0201,C0301 def setUp(self): self.resp_with_store = r.Response() self.resp_with_store = MagicMock(status_code=200) self.resp_with_store.json = MagicMock(return_value={'indexId': 'abc091', 'stores': [{'publicationId': 7850, 'store': 'https://fake.store.gov', 'available': True, 'price': 18}]}) self.resp_pub_not_avail = r.Response() self.resp_pub_not_avail = MagicMock(status_code=200) self.resp_pub_not_avail.json = MagicMock(return_value={'indexId': 'efg845', 'stores': [{'publicationId': 6980, 'store': 'https://fake.store.gov', 'available': False, 'price': 17}]}) self.resp_without_store = r.Response() self.resp_without_store = MagicMock(status_code=200) self.resp_without_store.json = MagicMock(return_value={'indexId': 'xyz735', 'stores': []}) self.resp_no_store = r.Response() self.resp_no_store = MagicMock(status_code=200) self.resp_no_store.json = MagicMock(return_value={'indexId': 'mno426'}) self.bad_resp = r.Response() self.bad_resp = MagicMock(status_code=404) def test_store_data_is_created_if_present(self): result = create_store_info(self.resp_with_store) expected = {'offers': {'@context': {'schema': 
'http://schema.org/'}, '@type': 'schema:ScholarlyArticle', 'schema:offers': {'schema:seller': {'schema:name': 'USGS Store', '@type': 'schema:Organization', 'schema:url': 'http://store.usgs.gov'}, 'schema:url': 'https://fake.store.gov', 'schema:price': 18, 'schema:availability': 'schema:InStock', 'schema:priceCurrency': 'USD', '@type': 'schema:Offer'}}, 'context_item': 'abc091'} self.assertEqual(result, expected) def test_store_data_is_listed_as_out_of_stock(self): result = create_store_info(self.resp_pub_not_avail) expected = {'offers': {'@context': {'schema': 'http://schema.org/'}, '@type': 'schema:ScholarlyArticle', 'schema:offers': {'schema:seller': {'schema:name': 'USGS Store', '@type': 'schema:Organization', 'schema:url': 'http://store.usgs.gov'}, 'schema:url': 'https://fake.store.gov', 'schema:price': 17, 'schema:availability': 'schema:OutOfStock', 'schema:priceCurrency': 'USD', '@type': 'schema:Offer'}}, 'context_item': 'efg845'} self.assertEqual(result, expected) def test_store_data_is_created_if_not_present(self): result = create_store_info(self.resp_without_store) expected = {'context_item': 'xyz735', 'offers': None} self.assertEqual(result, expected) def test_store_data_is_created_if_no_store(self): result = create_store_info(self.resp_no_store) expected = {'context_item': 'mno426', 'offers': None} self.assertEqual(result, expected) def test_store_data_with_bad_response(self): result = create_store_info(self.bad_resp) expected = {'context_item': None, 'offers': None} self.assertEqual(result, expected) class GetAltmetricBadgeImgLinksTestCase(unittest.TestCase): # pylint: disable=R0902,C0103 def setUp(self): self.fake_doi = '00.00001/bc.1729' self.fake_bad_doi = '00.00001/bc.1729ABC' self.fake_endpoint = 'https://fake.api.altmetric.com/v1/' self.fake_url = '{0}doi/{1}'.format(self.fake_endpoint, self.fake_doi) self.fake_404_url = '{0}doi/{1}'.format(self.fake_endpoint, self.fake_bad_doi) self.fake_altmetric_key = 
'IfWeCanHitTheBullsEyeTheRestOfTheDominoesWillFallLikeAHouseOfCards.Checkmate!' self.verify_cert = False self.data_200 = {'images': {'small': 'small_url', 'medium': 'medium_url', 'large': 'large_url'}, 'details_url': 'https://some_url.fake'} @requests_mock.Mocker() def test_get_badge_images_from_indexed_doi(self, m): m.get(self.fake_url, status_code=200, json=self.data_200) result = get_altmetric_badge_img_links(self.fake_doi, self.fake_endpoint, self.fake_altmetric_key, self.verify_cert) expected = (self.data_200['images'], self.data_200['details_url']) self.assertTupleEqual(result, expected) @requests_mock.Mocker() def test_get_badge_images_from_unindexed_doi(self, m): m.get(self.fake_404_url, status_code=404) result = get_altmetric_badge_img_links(self.fake_bad_doi, self.fake_endpoint, self.fake_altmetric_key, self.verify_cert) expected = (None, None) self.assertTupleEqual(result, expected) class GetCrossrefDataTestCase(unittest.TestCase): # pylint: disable=R0902,C0103 def setUp(self): self.fake_doi = '00.00001/bc.1729' self.fake_doi_unregistered = '00.00001/bc.1729ABC' self.fake_endpoint = 'https://fake.api.crossref.org' self.fake_broken_endpoint = 'https://fake.api.croossref.org' self.fake_url = '{0}/works/{1}?mailto=pubs_tech_group%40usgs.gov'.format(self.fake_endpoint, self.fake_doi) self.fake_url_404 = '{0}/works/{1}?mailto=pubs_tech_group%40usgs.gov'.format(self.fake_endpoint, self.fake_doi_unregistered) self.fake_url_broken = '{0}/works/{1}?mailto=pubs_tech_group%40usgs.gov'.format(self.fake_broken_endpoint, self.fake_doi) self.verify_cert = False self.data_200 = crossref_200_ok @requests_mock.Mocker() def test_get_data_from_indexed_doi(self, m): m.get(self.fake_url, status_code=200, json=self.data_200) result = get_crossref_data(self.fake_doi, endpoint=self.fake_endpoint, verify=self.verify_cert) expected = self.data_200 self.assertEqual(result, expected) @requests_mock.Mocker() def test_connection_error(self, m): m.get(self.fake_url_broken, 
exc=r.exceptions.ConnectionError) result = get_crossref_data(doi=self.fake_doi, endpoint=self.fake_broken_endpoint, verify=self.verify_cert) expected = None self.assertEqual(result, expected) @requests_mock.Mocker() def test_get_data_from_unindexed_doi(self, m): m.get(self.fake_url_404, status_code=404) result = get_crossref_data(doi=self.fake_doi_unregistered, endpoint=self.fake_endpoint, verify=self.verify_cert) expected = None self.assertEqual(result, expected) def test_doi_is_None(self): result = get_crossref_data(None, endpoint=self.fake_endpoint, verify=self.verify_cert) expected = None self.assertEqual(result, expected) class GetUnpaywallDataTestCase(unittest.TestCase): # pylint: disable=R0902,C0103 def setUp(self): self.fake_doi = '1289018729847' self.fake_endpoint = 'https://fake.api.unpaywall.org/v2/' self.fake_broken_endpoint = 'https://fake.api.unpaywall.org/v2/1289018729847?email=pubs_tech_group@usgs.gov' self.fake_url = '{0}{1}?email=pubs_tech_group@usgs.gov'.format(self.fake_endpoint, self.fake_doi) self.data_200 = unpaywall_200_ok self.landing_present = landing_present self.null_landing = null_landing @requests_mock.Mocker() def test_get_data_from_indexed_doi(self, m): m.get(self.fake_url, status_code=200, json=self.data_200) result = get_unpaywall_data(self.fake_doi, endpoint=self.fake_endpoint) expected = self.data_200 self.assertEqual(result, expected) @requests_mock.Mocker() def test_connection_error(self, m): m.get(self.fake_broken_endpoint, status_code=404) result = get_unpaywall_data(self.fake_doi, endpoint=self.fake_endpoint) expected = None self.assertEqual(result, expected) @requests_mock.Mocker() def test_get_data_from_unindexed_doi(self, m): m.get(self.fake_broken_endpoint, status_code=404) result = get_unpaywall_data(doi=self.fake_doi, endpoint=self.fake_endpoint) expected = None self.assertEqual(result, expected) @requests_mock.Mocker() def test_landing_url_present(self, m): 
m.get('https://api.unpaywall.org/v2/1289018729847?email=pubs_tech_group%40usgs.gov', status_code=200, json=self.landing_present) pubdata = has_oa_link(self.landing_present) self.assertTrue('openAccessLink' in pubdata.keys()) @requests_mock.Mocker() def test_null_landing(self, m): m.get('https://api.unpaywall.org/v2/1289018729847?email=pubs_tech_group%40usgs.gov', status_code=200, json=self.null_landing) result = has_oa_link(self.null_landing) self.assertFalse('openAccessLink' in result.keys()) def test_doi_is_None(self): result = get_unpaywall_data(None, endpoint=self.fake_endpoint) expected = None self.assertEqual(result, expected) class CheckPublicAccessTestCase(unittest.TestCase): # pylint: disable=C0103 def setUp(self): self.current_date = arrow.get('2017-11-01') self.pubdata_future_disp_pub_date = {'displayToPublicDate': '2016-11-25T00:00:00'} self.pubdata_past_disp_pub_date = {'displayToPublicDate': '2016-10-25T00:00:00'} self.pubdata_past_disp_pub_date_before_oct_1_2016 = {'displayToPublicDate': '2016-09-01T00:00:00'} self.future_online_date = arrow.get('2016-12-01') self.past_online_date_after_oct_1_2016 = arrow.get('2016-10-15') self.past_online_date_before_oct_1_2016 = arrow.get('2016-09-01') def test_online_date_less_than_one_year_ago(self): result = check_public_access(pubdata=self.pubdata_future_disp_pub_date, online_date_arrow=self.future_online_date, current_date_time=self.current_date) expected = False self.assertEqual(result, expected) def test_online_date_more_than_one_year_ago_and_after_oct_1_2016(self): result = check_public_access(pubdata=self.pubdata_past_disp_pub_date, online_date_arrow=self.past_online_date_after_oct_1_2016, current_date_time=self.current_date) expected = True self.assertEqual(result, expected) def test_online_date_more_than_one_year_ago_and_before_oct_1_2016(self): result = check_public_access(pubdata=self.pubdata_past_disp_pub_date_before_oct_1_2016, online_date_arrow=self.past_online_date_before_oct_1_2016, 
current_date_time=self.current_date) expected = False self.assertEqual(result, expected) def test_disp_pub_date_less_than_one_year_ago(self): result = check_public_access(pubdata=self.pubdata_future_disp_pub_date, online_date_arrow=None, current_date_time=self.current_date) expected = False self.assertEqual(result, expected) def test_disp_pub_date_more_than_one_year_ago_and_after_oct_1_2016(self): result = check_public_access(pubdata=self.pubdata_past_disp_pub_date, online_date_arrow=None, current_date_time=self.current_date) expected = True self.assertEqual(result, expected) def test_disp_pub_date_more_than_one_year_ago_and_before_oct_1_2016(self): result = check_public_access(pubdata=self.pubdata_past_disp_pub_date_before_oct_1_2016, online_date_arrow=None, current_date_time=self.current_date) expected = False self.assertEqual(result, expected) class GetPublishedOnlineDateTestCase(unittest.TestCase): def setUp(self): self.good_crossref_data = crossref_200_ok self.not_good_crossref_data = crossref_200_not_ok self.good_crossref_2_parts = crossref_200_ok_2_date_parts self.good_crossref_1_part = crossref_200_ok_1_date_part self.ok_no_published_online = crossref_200_ok_message_empty def test_not_ok_data(self): result = get_published_online_date(self.not_good_crossref_data) expected = None self.assertEqual(result, expected) def test_ok_data_3_parts(self): result = get_published_online_date(self.good_crossref_data) expected = arrow.get(2016, 12, 9) self.assertEqual(result, expected) def test_ok_data_2_parts(self): result = get_published_online_date(self.good_crossref_2_parts) expected = arrow.get(2016, 12, 1) self.assertEqual(result, expected) def test_ok_data_1_part(self): result = get_published_online_date(self.good_crossref_1_part) expected = None self.assertEqual(result, expected) def test_ok_data_no_online_date(self): result = get_published_online_date(self.ok_no_published_online) expected = None self.assertEqual(result, expected) def test_crossref_is_none(self): 
result = get_published_online_date(None) expected = None self.assertEqual(result, expected) class UpdateGeographicExtentsTestCase(unittest.TestCase): # pylint: disable=C0103 def setUp(self): self.record = {'indexId': '1234', 'title': 'Title 1'} def test_record_with_no_geographic_extents(self): update_geographic_extents(self.record) self.assertEqual({'indexId': '1234', 'title': 'Title 1'}, self.record) def test_record_with_empty_geographic_extents(self): self.record['geographicExtents'] = '' update_geographic_extents(self.record) self.assertFalse('geographicExtentns' in self.record) def test_record_with_geographic_extents_with_invalid_json(self): self.record['geographicExtents'] = 'asdfasdfasdf' update_geographic_extents(self.record) self.assertFalse('geographicExtents' in self.record) def test_record_with_geographic_extents_with_single_feature(self): self.record['geographicExtents'] = '{"type" : "Feature", "geometry": {"type": "Polygon", ' \ + '"coordinates": [[ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]]}}' update_geographic_extents(self.record) self.assertTrue('geographicExtents' in self.record) extents = self.record.get('geographicExtents') self.assertEqual(extents.get('type'), 'FeatureCollection') self.assertEqual(extents.get('properties'), {'title': 'Title 1'}) features = extents.get('features', []) self.assertEqual(len(features), 1) self.assertEqual(features[0].get('geometry').get('type'), 'Polygon') self.assertEqual(features[0].get('properties').get('title'), 'Title 1') self.assertEqual(features[0].get('properties').get('id'), '1234') def test_record_with_geographic_extents_with_feature_collection(self): self.record['geographicExtents'] = '{"type": "FeatureCollection", "features": [' \ + '{"type": "Feature",' \ +' "geometry": {"type": "Polygon", ' \ + '"coordinates": [[ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]]}}]}' update_geographic_extents(self.record) self.assertTrue('geographicExtents' in 
self.record) extents = self.record.get('geographicExtents') self.assertEqual(extents.get('type'), 'FeatureCollection') self.assertEqual(extents.get('properties'), {'title': 'Title 1'}) features = extents.get('features', []) self.assertEqual(len(features), 1) self.assertEqual(features[0].get('geometry').get('type'), 'Polygon') self.assertEqual(features[0].get('properties').get('title'), 'Title 1') self.assertEqual(features[0].get('properties').get('id'), '1234') class SearchPublicationsGetPubsSearchResultsTestCase(unittest.TestCase): # pylint: disable=C0103 @requests_mock.Mocker() def test_bad_status_response(self, m): search_publications = SearchPublications('https://fake.com/search') m.get('https://fake.com/search', text="Server Error", status_code=500) result, status = search_publications.get_pubs_search_results() self.assertIsNone(result) self.assertEqual(status, 500) @requests_mock.Mocker() def test_good_status_with_valid_json(self, m): search_publications = SearchPublications('https://fake.com/search') m.get('https://fake.com/search', json={"a": 1, "b": 2}) result, status = search_publications.get_pubs_search_results() self.assertEqual(result, {"a": 1, "b": 2}) self.assertEqual(status, 200) @requests_mock.Mocker() def test_good_status_with_invalid_json(self, m): search_publications = SearchPublications('https://fake.com/search') m.get('https://fake.com/search', text="Hello") result, status = search_publications.get_pubs_search_results() self.assertIsNone(result) self.assertEqual(status, 200) @patch('requests.get') def test_request_without_params(self, mock_get): search_publications = SearchPublications('https://fake.com/search') search_publications.get_pubs_search_results() self.assertIsNone(mock_get.call_args[1]['params']) @patch('requests.get') def test_request_with_params(self, mock_get): search_publications = SearchPublications('https://fake.com/search') search_publications.get_pubs_search_results({'param1': 'V1', 'param2': 'V2'}) 
self.assertEqual(mock_get.call_args[1]['params'], {'param1': 'V1', 'param2': 'V2'})
unlicense
EBI-Metagenomics/emgapi
emgcli/routers.py
1
1165
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2017 EMBL - European Bioinformatics Institute # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from rest_framework import routers class ApiBrowserView(routers.APIRootView): """MGnify API provides programmatic access to the data for cross-database complex queries. For more details review the documentation.""" # noqa pass class DefaultRouter(routers.DefaultRouter): """ Custom default router extends the rest_framework DefaultRouter and adds in a default API root view """ APIRootView = ApiBrowserView def extend(self, router): self.registry.extend(router.registry)
apache-2.0
KnCMiner/bitcoin
qa/rpc-tests/getblocktemplate_proposals.py
145
6328
#!/usr/bin/env python2 # Copyright (c) 2014 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from binascii import a2b_hex, b2a_hex from hashlib import sha256 from struct import pack def check_array_result(object_array, to_match, expected): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. """ num_matched = 0 for item in object_array: all_match = True for key,value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue for key,value in expected.items(): if item[key] != value: raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value))) num_matched = num_matched+1 if num_matched == 0: raise AssertionError("No objects matched %s"%(str(to_match))) def b2x(b): return b2a_hex(b).decode('ascii') # NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0') def encodeUNum(n): s = bytearray(b'\1') while n > 127: s[0] += 1 s.append(n % 256) n //= 256 s.append(n) return bytes(s) def varlenEncode(n): if n < 0xfd: return pack('<B', n) if n <= 0xffff: return b'\xfd' + pack('<H', n) if n <= 0xffffffff: return b'\xfe' + pack('<L', n) return b'\xff' + pack('<Q', n) def dblsha(b): return sha256(sha256(b).digest()).digest() def genmrklroot(leaflist): cur = leaflist while len(cur) > 1: n = [] if len(cur) & 1: cur.append(cur[-1]) for i in range(0, len(cur), 2): n.append(dblsha(cur[i] + cur[i+1])) cur = n return cur[0] def template_to_bytes(tmpl, txlist): blkver = pack('<L', tmpl['version']) mrklroot = genmrklroot(list(dblsha(a) for a in txlist)) timestamp = pack('<L', tmpl['curtime']) nonce = b'\0\0\0\0' blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + 
nonce blk += varlenEncode(len(txlist)) for tx in txlist: blk += tx return blk def template_to_hex(tmpl, txlist): return b2x(template_to_bytes(tmpl, txlist)) def assert_template(node, tmpl, txlist, expect): rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'}) if rsp != expect: raise AssertionError('unexpected: %s' % (rsp,)) class GetBlockTemplateProposalTest(BitcoinTestFramework): ''' Test block proposals with getblocktemplate. ''' def run_test(self): node = self.nodes[0] node.generate(1) # Mine a block to leave initial block download tmpl = node.getblocktemplate() if 'coinbasetxn' not in tmpl: rawcoinbase = encodeUNum(tmpl['height']) rawcoinbase += b'\x01-' hexcoinbase = b2x(rawcoinbase) hexoutval = b2x(pack('<Q', tmpl['coinbasevalue'])) tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'} txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions'])) # Test 0: Capability advertised assert('proposal' in tmpl['capabilities']) # NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase) ## Test 1: Bad height in coinbase #txlist[0][4+1+36+1+1] += 1 #assert_template(node, tmpl, txlist, 'FIXME') #txlist[0][4+1+36+1+1] -= 1 # Test 2: Bad input hash for gen tx txlist[0][4+1] += 1 assert_template(node, tmpl, txlist, 'bad-cb-missing') txlist[0][4+1] -= 1 # Test 3: Truncated final tx lastbyte = txlist[-1].pop() try: assert_template(node, tmpl, txlist, 'n/a') except JSONRPCException: pass # Expected txlist[-1].append(lastbyte) # Test 4: Add an invalid tx to the end (duplicate of gen tx) txlist.append(txlist[0]) assert_template(node, tmpl, txlist, 'bad-txns-duplicate') txlist.pop() # Test 5: Add an invalid tx to the end (non-duplicate) txlist.append(bytearray(txlist[0])) txlist[-1][4+1] = b'\xff' 
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent') txlist.pop() # Test 6: Future tx lock time txlist[0][-4:] = b'\xff\xff\xff\xff' assert_template(node, tmpl, txlist, 'bad-txns-nonfinal') txlist[0][-4:] = b'\0\0\0\0' # Test 7: Bad tx count txlist.append(b'') try: assert_template(node, tmpl, txlist, 'n/a') except JSONRPCException: pass # Expected txlist.pop() # Test 8: Bad bits realbits = tmpl['bits'] tmpl['bits'] = '1c0000ff' # impossible in the real world assert_template(node, tmpl, txlist, 'bad-diffbits') tmpl['bits'] = realbits # Test 9: Bad merkle root rawtmpl = template_to_bytes(tmpl, txlist) rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100 rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'}) if rsp != 'bad-txnmrklroot': raise AssertionError('unexpected: %s' % (rsp,)) # Test 10: Bad timestamps realtime = tmpl['curtime'] tmpl['curtime'] = 0x7fffffff assert_template(node, tmpl, txlist, 'time-too-new') tmpl['curtime'] = 0 assert_template(node, tmpl, txlist, 'time-too-old') tmpl['curtime'] = realtime # Test 11: Valid block assert_template(node, tmpl, txlist, None) # Test 12: Orphan block tmpl['previousblockhash'] = 'ff00' * 16 assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk') if __name__ == '__main__': GetBlockTemplateProposalTest().main()
mit
rzr/synapse
synapse/handlers/__init__.py
1
2767
# -*- coding: utf-8 -*- # Copyright 2014, 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from synapse.appservice.scheduler import AppServiceScheduler from synapse.appservice.api import ApplicationServiceApi from .register import RegistrationHandler from .room import ( RoomCreationHandler, RoomMemberHandler, RoomListHandler ) from .message import MessageHandler from .events import EventStreamHandler, EventHandler from .federation import FederationHandler from .profile import ProfileHandler from .presence import PresenceHandler from .directory import DirectoryHandler from .typing import TypingNotificationHandler from .admin import AdminHandler from .appservice import ApplicationServicesHandler from .sync import SyncHandler from .auth import AuthHandler from .identity import IdentityHandler from .receipts import ReceiptsHandler class Handlers(object): """ A collection of all the event handlers. There's no need to lazily create these; we'll just make them all eagerly at construction time. 
""" def __init__(self, hs): self.registration_handler = RegistrationHandler(hs) self.message_handler = MessageHandler(hs) self.room_creation_handler = RoomCreationHandler(hs) self.room_member_handler = RoomMemberHandler(hs) self.event_stream_handler = EventStreamHandler(hs) self.event_handler = EventHandler(hs) self.federation_handler = FederationHandler(hs) self.profile_handler = ProfileHandler(hs) self.presence_handler = PresenceHandler(hs) self.room_list_handler = RoomListHandler(hs) self.directory_handler = DirectoryHandler(hs) self.typing_notification_handler = TypingNotificationHandler(hs) self.admin_handler = AdminHandler(hs) self.receipts_handler = ReceiptsHandler(hs) asapi = ApplicationServiceApi(hs) self.appservice_handler = ApplicationServicesHandler( hs, asapi, AppServiceScheduler( clock=hs.get_clock(), store=hs.get_datastore(), as_api=asapi ) ) self.sync_handler = SyncHandler(hs) self.auth_handler = AuthHandler(hs) self.identity_handler = IdentityHandler(hs)
apache-2.0
carlye566/IoT-POX
pox/lib/recoco/consumer.py
46
2959
# Copyright 2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Stuff for implementing simple producer/consumer work queues with recoco. """ from pox.core import core from pox.lib.recoco import Task from collections import deque log = core.getLogger() class BaseConsumer (Task): """ A basic consumer for overriding. add_work() adds work (whatever that is) _do_work is given work and should do something with it _on_exception is called if _do_work() raises an exception """ def __init__ (self, batch_size = 1, priority = 1, start = True): """ batch_size is the maximum number of work items per scheduling priority is the Task priority """ self.queue = deque() # work items self.running = True # Set to false to stop self.log = log super(BaseConsumer,self).__init__() self.priority = priority self.batch_size = batch_size if start: self.start() def add_work (self, work): """ Add a work item """ self.queue.appendleft(work) # Since we have work, make sure we're scheduled core.scheduler.schedule(self) def _on_exception (self, exception, work): """ Override to handle cases where a work item causes an exception work is the problematic work item return True to keep going """ self.log.error("While executing %s...", work) self.log.exception(exception) return True def _do_work (self, work): """ Do work. Override me. 
""" self.log.error("Don't know how to do work for %s!", work) def run (self): while core.running and self.running: for _ in xrange(min(self.batch_size, len(self.queue))): work = self.queue.pop() try: self._do_work(work) except Exception as e: if self._on_exception(e, work) is not True: self.log.debug("Quitting") self.running = False break if len(self.queue) == 0: yield False # Don't reschedule else: yield 0 # Reschedule ASAP (sleep 0) class FlexConsumer (BaseConsumer): """ A consumer where work items are callables and their parameters """ def add_work (__self, __callable, *__args, **__kw): """ Add a work item A work item is a callable with associated args/kwargs. """ super(FlexConsumer, __self).add_work(__callable, __args, __kw) def _do_work (self, work): f, args, kw = work f(*args, **kw)
apache-2.0
smesdaghi/geonode
geonode/upload/utils.py
31
2890
######################################################################### # # Copyright (C) 2012 OpenPlans # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### import logging import os from django.conf import settings from geoserver.catalog import FailedRequestError from geonode.geoserver.helpers import ogc_server_settings, gs_catalog logger = logging.getLogger(__name__) def create_geoserver_db_featurestore(store_type=None, store_name=None): cat = gs_catalog dsname = ogc_server_settings.DATASTORE # get or create datastore try: if store_type == 'geogig' and ogc_server_settings.GEOGIG_ENABLED: if store_name is not None: ds = cat.get_store(store_name) else: ds = cat.get_store(settings.GEOGIG_DATASTORE_NAME) elif dsname: ds = cat.get_store(dsname) else: return None except FailedRequestError: if store_type == 'geogig': if store_name is None and hasattr( settings, 'GEOGIG_DATASTORE_NAME'): store_name = settings.GEOGIG_DATASTORE_NAME logger.info( 'Creating target datastore %s' % settings.GEOGIG_DATASTORE_NAME) ds = cat.create_datastore(store_name) ds.type = "GeoGig" ds.connection_parameters.update( geogig_repository=os.path.join( ogc_server_settings.GEOGIG_DATASTORE_DIR, store_name), branch="master", create="true") cat.save(ds) ds = cat.get_store(store_name) else: logging.info( 'Creating target datastore %s' % dsname) ds = 
cat.create_datastore(dsname) db = ogc_server_settings.datastore_db ds.connection_parameters.update( host=db['HOST'], port=db['PORT'] or '5432', database=db['NAME'], user=db['USER'], passwd=db['PASSWORD'], dbtype='postgis') cat.save(ds) ds = cat.get_store(dsname) assert ds.enabled return ds
gpl-3.0
ramondelafuente/ansible-modules-extras
system/sefcontext.py
42
8030
#!/usr/bin/python # (c) 2016, Dag Wieers <dag@wieers.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: sefcontext short_description: Manages SELinux file context mapping definitions description: - Manages SELinux file context mapping definitions - Similar to the C(semanage fcontext) command version_added: "2.2" options: target: description: - Target path (expression). required: true default: null aliases: ['path'] ftype: description: - File type. required: false default: a setype: description: - SELinux type for the specified target. required: true default: null seuser: description: - SELinux user for the specified target. required: false default: null selevel: description: - SELinux range for the specified target. required: false default: null aliases: ['serange'] state: description: - Desired boolean value. required: false default: present choices: [ 'present', 'absent' ] reload: description: - Reload SELinux policy after commit. required: false default: yes notes: - The changes are persistent across reboots requirements: [ 'libselinux-python', 'policycoreutils-python' ] author: Dag Wieers ''' EXAMPLES = ''' # Allow apache to modify files in /srv/git_repos - sefcontext: target='/srv/git_repos(/.*)?' 
setype=httpd_git_rw_content_t state=present ''' RETURN = ''' # Default return values ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception try: import selinux HAVE_SELINUX=True except ImportError: HAVE_SELINUX=False try: import seobject HAVE_SEOBJECT=True except ImportError: HAVE_SEOBJECT=False ### Make backward compatible option_to_file_type_str = { 'a': 'all files', 'b': 'block device', 'c': 'character device', 'd': 'directory', 'f': 'regular file', 'l': 'symbolic link', 's': 'socket file', 'p': 'named pipe', } def semanage_fcontext_exists(sefcontext, target, ftype): ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' record = (target, ftype) records = sefcontext.get_all() try: return records[record] except KeyError: return None def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''): ''' Add or modify SELinux file context mapping definition to the policy. 
''' changed = False prepared_diff = '' try: sefcontext = seobject.fcontextRecords(sestore) sefcontext.set_reload(do_reload) exists = semanage_fcontext_exists(sefcontext, target, ftype) if exists: # Modify existing entry orig_seuser, orig_serole, orig_setype, orig_serange = exists if seuser is None: seuser = orig_seuser if serange is None: serange = orig_serange if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: if not module.check_mode: sefcontext.modify(target, setype, ftype, serange, seuser) changed = True if module._diff: prepared_diff += '# Change to semanage file context mappings\n' prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) else: # Add missing entry if seuser is None: seuser = 'system_u' if serange is None: serange = 's0' if not module.check_mode: sefcontext.add(target, setype, ftype, serange, seuser) changed = True if module._diff: prepared_diff += '# Addition to semanage file context mappings\n' prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) except Exception: e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) if module._diff and prepared_diff: result['diff'] = dict(prepared=prepared_diff) module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''): ''' Delete SELinux file context mapping definition from the policy. 
''' changed = False prepared_diff = '' try: sefcontext = seobject.fcontextRecords(sestore) sefcontext.set_reload(do_reload) exists = semanage_fcontext_exists(sefcontext, target, ftype) if exists: # Remove existing entry orig_seuser, orig_serole, orig_setype, orig_serange = exists if not module.check_mode: sefcontext.delete(target, ftype) changed = True if module._diff: prepared_diff += '# Deletion to semanage file context mappings\n' prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) except Exception: e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) if module._diff and prepared_diff: result['diff'] = dict(prepared=prepared_diff) module.exit_json(changed=changed, **result) def main(): module = AnsibleModule( argument_spec = dict( target = dict(required=True, aliases=['path']), ftype = dict(required=False, choices=option_to_file_type_str.keys(), default='a'), setype = dict(required=True), seuser = dict(required=False, default=None), selevel = dict(required=False, default=None, aliases=['serange']), state = dict(required=False, choices=['present', 'absent'], default='present'), reload = dict(required=False, type='bool', default='yes'), ), supports_check_mode = True, ) if not HAVE_SELINUX: module.fail_json(msg="This module requires libselinux-python") if not HAVE_SEOBJECT: module.fail_json(msg="This module requires policycoreutils-python") if not selinux.is_selinux_enabled(): module.fail_json(msg="SELinux is disabled on this host.") target = module.params['target'] ftype = module.params['ftype'] setype = module.params['setype'] seuser = module.params['seuser'] serange = module.params['selevel'] state = module.params['state'] do_reload = module.params['reload'] result = dict(target=target, ftype=ftype, setype=setype, state=state) # Convert file types to (internally used) strings ftype = option_to_file_type_str[ftype] if state == 'present': semanage_fcontext_modify(module, result, 
target, ftype, setype, do_reload, serange, seuser) elif state == 'absent': semanage_fcontext_delete(module, result, target, ftype, do_reload) else: module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) if __name__ == '__main__': main()
gpl-3.0
YangSongzhou/django
django/contrib/gis/geos/libgeos.py
345
6218
""" This module houses the ctypes initialization procedures, as well as the notice and error handler function callbacks (get called when an error occurs in GEOS). This module also houses GEOS Pointer utilities, including get_pointer_arr(), and GEOM_PTR. """ import logging import os import re from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p from ctypes.util import find_library from django.contrib.gis.geos.error import GEOSException from django.core.exceptions import ImproperlyConfigured from django.utils.functional import SimpleLazyObject logger = logging.getLogger('django.contrib.gis') def load_geos(): # Custom library path set? try: from django.conf import settings lib_path = settings.GEOS_LIBRARY_PATH except (AttributeError, EnvironmentError, ImportError, ImproperlyConfigured): lib_path = None # Setting the appropriate names for the GEOS-C library. if lib_path: lib_names = None elif os.name == 'nt': # Windows NT libraries lib_names = ['geos_c', 'libgeos_c-1'] elif os.name == 'posix': # *NIX libraries lib_names = ['geos_c', 'GEOS'] else: raise ImportError('Unsupported OS "%s"' % os.name) # Using the ctypes `find_library` utility to find the path to the GEOS # shared library. This is better than manually specifying each library name # and extension (e.g., libgeos_c.[so|so.1|dylib].). if lib_names: for lib_name in lib_names: lib_path = find_library(lib_name) if lib_path is not None: break # No GEOS library could be found. if lib_path is None: raise ImportError( 'Could not find the GEOS library (tried "%s"). ' 'Try setting GEOS_LIBRARY_PATH in your settings.' % '", "'.join(lib_names) ) # Getting the GEOS C library. The C interface (CDLL) is used for # both *NIX and Windows. # See the GEOS C API source code for more details on the library function calls: # http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html _lgeos = CDLL(lib_path) # Here we set up the prototypes for the initGEOS_r and finishGEOS_r # routines. 
These functions aren't actually called until they are # attached to a GEOS context handle -- this actually occurs in # geos/prototypes/threadsafe.py. _lgeos.initGEOS_r.restype = CONTEXT_PTR _lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR] return _lgeos # The notice and error handler C function callback definitions. # Supposed to mimic the GEOS message handler (C below): # typedef void (*GEOSMessageHandler)(const char *fmt, ...); NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p) def notice_h(fmt, lst): fmt, lst = fmt.decode(), lst.decode() try: warn_msg = fmt % lst except TypeError: warn_msg = fmt logger.warning('GEOS_NOTICE: %s\n' % warn_msg) notice_h = NOTICEFUNC(notice_h) ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p) def error_h(fmt, lst): fmt, lst = fmt.decode(), lst.decode() try: err_msg = fmt % lst except TypeError: err_msg = fmt logger.error('GEOS_ERROR: %s\n' % err_msg) error_h = ERRORFUNC(error_h) # #### GEOS Geometry C data structures, and utility functions. #### # Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR class GEOSGeom_t(Structure): pass class GEOSPrepGeom_t(Structure): pass class GEOSCoordSeq_t(Structure): pass class GEOSContextHandle_t(Structure): pass # Pointers to opaque GEOS geometry structures. GEOM_PTR = POINTER(GEOSGeom_t) PREPGEOM_PTR = POINTER(GEOSPrepGeom_t) CS_PTR = POINTER(GEOSCoordSeq_t) CONTEXT_PTR = POINTER(GEOSContextHandle_t) # Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection # GEOS routines def get_pointer_arr(n): "Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer." GeomArr = GEOM_PTR * n return GeomArr() lgeos = SimpleLazyObject(load_geos) class GEOSFuncFactory(object): """ Lazy loading of GEOS functions. 
""" argtypes = None restype = None errcheck = None def __init__(self, func_name, *args, **kwargs): self.func_name = func_name self.restype = kwargs.pop('restype', self.restype) self.errcheck = kwargs.pop('errcheck', self.errcheck) self.argtypes = kwargs.pop('argtypes', self.argtypes) self.args = args self.kwargs = kwargs self.func = None def __call__(self, *args, **kwargs): if self.func is None: self.func = self.get_func(*self.args, **self.kwargs) return self.func(*args, **kwargs) def get_func(self, *args, **kwargs): from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc func = GEOSFunc(self.func_name) func.argtypes = self.argtypes or [] func.restype = self.restype if self.errcheck: func.errcheck = self.errcheck return func # Returns the string version of the GEOS library. Have to set the restype # explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms. geos_version = GEOSFuncFactory('GEOSversion', restype=c_char_p) # Regular expression should be able to parse version strings such as # '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0' version_regex = re.compile( r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))' r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$' ) def geos_version_info(): """ Returns a dictionary containing the various version metadata parsed from the GEOS version string, including the version number, whether the version is a release candidate (and what number release candidate), and the C API version. """ ver = geos_version().decode() m = version_regex.match(ver) if not m: raise GEOSException('Could not parse version info string "%s"' % ver) return {key: m.group(key) for key in ( 'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
bsd-3-clause
oktools/coypu
quokka/modules/comments/views.py
10
1313
#!/usr/bin/env python # -*- coding: utf-8 -*- from flask import request from flask.views import MethodView from flask.ext.mongoengine.wtf import model_form from flask.ext.security import current_user from quokka.core.templates import render_template from .models import Comment class CommentView(MethodView): form = model_form( Comment, only=['author_name', 'author_email', 'body'] ) def render_context(self, path, form): comments = Comment.objects(path=path, published=True) return render_template('content/comments.html', comments=comments, form=form, path=path) def get(self, path): return self.render_context(path, form=self.form()) def post(self, path): form = self.form(request.form) if form.validate(): comment = Comment(path=path) form.populate_obj(comment) if current_user.is_authenticated(): comment.published = True comment.author_name = current_user.name comment.author_email = current_user.email comment.save() return self.render_context(path, form=self.form()) return self.render_context(path, form=form)
mit
profglavcho/mt6577-kernel-3.10.65
tools/perf/scripts/python/sched-migration.py
11215
11670
#!/usr/bin/python # # Cpu task migration overview toy # # Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com> # # perf script event handlers have been generated by perf script -g python # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. import os import sys from collections import defaultdict from UserList import UserList sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from SchedGui import * threads = { 0 : "idle"} def thread_name(pid): return "%s:%d" % (threads[pid], pid) class RunqueueEventUnknown: @staticmethod def color(): return None def __repr__(self): return "unknown" class RunqueueEventSleep: @staticmethod def color(): return (0, 0, 0xff) def __init__(self, sleeper): self.sleeper = sleeper def __repr__(self): return "%s gone to sleep" % thread_name(self.sleeper) class RunqueueEventWakeup: @staticmethod def color(): return (0xff, 0xff, 0) def __init__(self, wakee): self.wakee = wakee def __repr__(self): return "%s woke up" % thread_name(self.wakee) class RunqueueEventFork: @staticmethod def color(): return (0, 0xff, 0) def __init__(self, child): self.child = child def __repr__(self): return "new forked task %s" % thread_name(self.child) class RunqueueMigrateIn: @staticmethod def color(): return (0, 0xf0, 0xff) def __init__(self, new): self.new = new def __repr__(self): return "task migrated in %s" % thread_name(self.new) class RunqueueMigrateOut: @staticmethod def color(): return (0xff, 0, 0xff) def __init__(self, old): self.old = old def __repr__(self): return "task migrated out %s" % thread_name(self.old) class RunqueueSnapshot: def __init__(self, tasks = [0], event = RunqueueEventUnknown()): self.tasks = tuple(tasks) self.event = event def sched_switch(self, prev, prev_state, next): 
event = RunqueueEventUnknown() if taskState(prev_state) == "R" and next in self.tasks \ and prev in self.tasks: return self if taskState(prev_state) != "R": event = RunqueueEventSleep(prev) next_tasks = list(self.tasks[:]) if prev in self.tasks: if taskState(prev_state) != "R": next_tasks.remove(prev) elif taskState(prev_state) == "R": next_tasks.append(prev) if next not in next_tasks: next_tasks.append(next) return RunqueueSnapshot(next_tasks, event) def migrate_out(self, old): if old not in self.tasks: return self next_tasks = [task for task in self.tasks if task != old] return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old)) def __migrate_in(self, new, event): if new in self.tasks: self.event = event return self next_tasks = self.tasks[:] + tuple([new]) return RunqueueSnapshot(next_tasks, event) def migrate_in(self, new): return self.__migrate_in(new, RunqueueMigrateIn(new)) def wake_up(self, new): return self.__migrate_in(new, RunqueueEventWakeup(new)) def wake_up_new(self, new): return self.__migrate_in(new, RunqueueEventFork(new)) def load(self): """ Provide the number of tasks on the runqueue. 
Don't count idle""" return len(self.tasks) - 1 def __repr__(self): ret = self.tasks.__repr__() ret += self.origin_tostring() return ret class TimeSlice: def __init__(self, start, prev): self.start = start self.prev = prev self.end = start # cpus that triggered the event self.event_cpus = [] if prev is not None: self.total_load = prev.total_load self.rqs = prev.rqs.copy() else: self.rqs = defaultdict(RunqueueSnapshot) self.total_load = 0 def __update_total_load(self, old_rq, new_rq): diff = new_rq.load() - old_rq.load() self.total_load += diff def sched_switch(self, ts_list, prev, prev_state, next, cpu): old_rq = self.prev.rqs[cpu] new_rq = old_rq.sched_switch(prev, prev_state, next) if old_rq is new_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def migrate(self, ts_list, new, old_cpu, new_cpu): if old_cpu == new_cpu: return old_rq = self.prev.rqs[old_cpu] out_rq = old_rq.migrate_out(new) self.rqs[old_cpu] = out_rq self.__update_total_load(old_rq, out_rq) new_rq = self.prev.rqs[new_cpu] in_rq = new_rq.migrate_in(new) self.rqs[new_cpu] = in_rq self.__update_total_load(new_rq, in_rq) ts_list.append(self) if old_rq is not out_rq: self.event_cpus.append(old_cpu) self.event_cpus.append(new_cpu) def wake_up(self, ts_list, pid, cpu, fork): old_rq = self.prev.rqs[cpu] if fork: new_rq = old_rq.wake_up_new(pid) else: new_rq = old_rq.wake_up(pid) if new_rq is old_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def next(self, t): self.end = t return TimeSlice(t, self) class TimeSliceList(UserList): def __init__(self, arg = []): self.data = arg def get_time_slice(self, ts): if len(self.data) == 0: slice = TimeSlice(ts, TimeSlice(-1, None)) else: slice = self.data[-1].next(ts) return slice def find_time_slice(self, ts): start = 0 end = len(self.data) found = -1 searching = True while searching: if start == end or start == end - 1: 
searching = False i = (end + start) / 2 if self.data[i].start <= ts and self.data[i].end >= ts: found = i end = i continue if self.data[i].end < ts: start = i elif self.data[i].start > ts: end = i return found def set_root_win(self, win): self.root_win = win def mouse_down(self, cpu, t): idx = self.find_time_slice(t) if idx == -1: return ts = self[idx] rq = ts.rqs[cpu] raw = "CPU: %d\n" % cpu raw += "Last event : %s\n" % rq.event.__repr__() raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000) raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6)) raw += "Load = %d\n" % rq.load() for t in rq.tasks: raw += "%s \n" % thread_name(t) self.root_win.update_summary(raw) def update_rectangle_cpu(self, slice, cpu): rq = slice.rqs[cpu] if slice.total_load != 0: load_rate = rq.load() / float(slice.total_load) else: load_rate = 0 red_power = int(0xff - (0xff * load_rate)) color = (0xff, red_power, red_power) top_color = None if cpu in slice.event_cpus: top_color = rq.event.color() self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end) def fill_zone(self, start, end): i = self.find_time_slice(start) if i == -1: return for i in xrange(i, len(self.data)): timeslice = self.data[i] if timeslice.start > end: return for cpu in timeslice.rqs: self.update_rectangle_cpu(timeslice, cpu) def interval(self): if len(self.data) == 0: return (0, 0) return (self.data[0].start, self.data[-1].end) def nr_rectangles(self): last_ts = self.data[-1] max_cpu = 0 for cpu in last_ts.rqs: if cpu > max_cpu: max_cpu = cpu return max_cpu class SchedEventProxy: def __init__(self): self.current_tsk = defaultdict(lambda : -1) self.timeslices = TimeSliceList() def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): """ Ensure the task we sched out this cpu is really the one we logged. 
Otherwise we may have missed traces """ on_cpu_task = self.current_tsk[headers.cpu] if on_cpu_task != -1 and on_cpu_task != prev_pid: print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) threads[prev_pid] = prev_comm threads[next_pid] = next_comm self.current_tsk[headers.cpu] = next_pid ts = self.timeslices.get_time_slice(headers.ts()) ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu) def migrate(self, headers, pid, prio, orig_cpu, dest_cpu): ts = self.timeslices.get_time_slice(headers.ts()) ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu) def wake_up(self, headers, comm, pid, success, target_cpu, fork): if success == 0: return ts = self.timeslices.get_time_slice(headers.ts()) ts.wake_up(self.timeslices, pid, target_cpu, fork) def trace_begin(): global parser parser = SchedEventProxy() def trace_end(): app = wx.App(False) timeslices = parser.timeslices frame = RootFrame(timeslices, "Migration") app.MainLoop() def sched__sched_stat_runtime(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, runtime, vruntime): pass def sched__sched_stat_iowait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, delay): pass def sched__sched_stat_sleep(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, delay): pass def sched__sched_stat_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, delay): pass def sched__sched_process_fork(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, parent_comm, parent_pid, child_comm, child_pid): pass def sched__sched_process_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_process_exit(event_name, context, common_cpu, 
common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_process_free(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_migrate_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio, orig_cpu, dest_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.migrate(headers, pid, prio, orig_cpu, dest_cpu) def sched__sched_switch(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio) def sched__sched_wakeup_new(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.wake_up(headers, comm, pid, success, target_cpu, 1) def sched__sched_wakeup(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.wake_up(headers, comm, pid, success, target_cpu, 0) def sched__sched_wait_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_kthread_stop_ret(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, ret): pass def sched__sched_kthread_stop(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid): pass def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm): pass
gpl-2.0
CapOM/ChromiumGStreamerBackend
third_party/cython/src/Cython/Compiler/ParseTreeTransforms.py
86
115877
import cython cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object, Options=object, UtilNodes=object, LetNode=object, LetRefNode=object, TreeFragment=object, EncodedString=object, error=object, warning=object, copy=object) import PyrexTypes import Naming import ExprNodes import Nodes import Options import Builtin from Cython.Compiler.Visitor import VisitorTransform, TreeVisitor from Cython.Compiler.Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform from Cython.Compiler.UtilNodes import LetNode, LetRefNode, ResultRefNode from Cython.Compiler.TreeFragment import TreeFragment from Cython.Compiler.StringEncoding import EncodedString from Cython.Compiler.Errors import error, warning, CompileError, InternalError from Cython.Compiler.Code import UtilityCode import copy class NameNodeCollector(TreeVisitor): """Collect all NameNodes of a (sub-)tree in the ``name_nodes`` attribute. """ def __init__(self): super(NameNodeCollector, self).__init__() self.name_nodes = [] def visit_NameNode(self, node): self.name_nodes.append(node) def visit_Node(self, node): self._visitchildren(node, None) class SkipDeclarations(object): """ Variable and function declarations can often have a deep tree structure, and yet most transformations don't need to descend to this depth. Declaration nodes are removed after AnalyseDeclarationsTransform, so there is no need to use this for transformations after that point. """ def visit_CTypeDefNode(self, node): return node def visit_CVarDefNode(self, node): return node def visit_CDeclaratorNode(self, node): return node def visit_CBaseTypeNode(self, node): return node def visit_CEnumDefNode(self, node): return node def visit_CStructOrUnionDefNode(self, node): return node class NormalizeTree(CythonTransform): """ This transform fixes up a few things after parsing in order to make the parse tree more suitable for transforms. 
a) After parsing, blocks with only one statement will be represented by that statement, not by a StatListNode. When doing transforms this is annoying and inconsistent, as one cannot in general remove a statement in a consistent way and so on. This transform wraps any single statements in a StatListNode containing a single statement. b) The PassStatNode is a noop and serves no purpose beyond plugging such one-statement blocks; i.e., once parsed a ` "pass" can just as well be represented using an empty StatListNode. This means less special cases to worry about in subsequent transforms (one always checks to see if a StatListNode has no children to see if the block is empty). """ def __init__(self, context): super(NormalizeTree, self).__init__(context) self.is_in_statlist = False self.is_in_expr = False def visit_ExprNode(self, node): stacktmp = self.is_in_expr self.is_in_expr = True self.visitchildren(node) self.is_in_expr = stacktmp return node def visit_StatNode(self, node, is_listcontainer=False): stacktmp = self.is_in_statlist self.is_in_statlist = is_listcontainer self.visitchildren(node) self.is_in_statlist = stacktmp if not self.is_in_statlist and not self.is_in_expr: return Nodes.StatListNode(pos=node.pos, stats=[node]) else: return node def visit_StatListNode(self, node): self.is_in_statlist = True self.visitchildren(node) self.is_in_statlist = False return node def visit_ParallelAssignmentNode(self, node): return self.visit_StatNode(node, True) def visit_CEnumDefNode(self, node): return self.visit_StatNode(node, True) def visit_CStructOrUnionDefNode(self, node): return self.visit_StatNode(node, True) def visit_PassStatNode(self, node): """Eliminate PassStatNode""" if not self.is_in_statlist: return Nodes.StatListNode(pos=node.pos, stats=[]) else: return [] def visit_ExprStatNode(self, node): """Eliminate useless string literals""" if node.expr.is_string_literal: return self.visit_PassStatNode(node) else: return self.visit_StatNode(node) def 
visit_CDeclaratorNode(self, node): return node class PostParseError(CompileError): pass # error strings checked by unit tests, so define them ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions' ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)' ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared' class PostParse(ScopeTrackingTransform): """ Basic interpretation of the parse tree, as well as validity checking that can be done on a very basic level on the parse tree (while still not being a problem with the basic syntax, as such). Specifically: - Default values to cdef assignments are turned into single assignments following the declaration (everywhere but in class bodies, where they raise a compile error) - Interpret some node structures into Python runtime values. Some nodes take compile-time arguments (currently: TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}), which should be interpreted. This happens in a general way and other steps should be taken to ensure validity. Type arguments cannot be interpreted in this way. - For __cythonbufferdefaults__ the arguments are checked for validity. TemplatedTypeNode has its directives interpreted: Any first positional argument goes into the "dtype" attribute, any "ndim" keyword argument goes into the "ndim" attribute and so on. Also it is checked that the directive combination is valid. - __cythonbufferdefaults__ attributes are parsed and put into the type information. Note: Currently Parsing.py does a lot of interpretation and reorganization that can be refactored into this transform if a more pure Abstract Syntax Tree is wanted. 
""" def __init__(self, context): super(PostParse, self).__init__(context) self.specialattribute_handlers = { '__cythonbufferdefaults__' : self.handle_bufferdefaults } def visit_ModuleNode(self, node): self.lambda_counter = 1 self.genexpr_counter = 1 return super(PostParse, self).visit_ModuleNode(node) def visit_LambdaNode(self, node): # unpack a lambda expression into the corresponding DefNode lambda_id = self.lambda_counter self.lambda_counter += 1 node.lambda_name = EncodedString(u'lambda%d' % lambda_id) collector = YieldNodeCollector() collector.visitchildren(node.result_expr) if collector.yields or isinstance(node.result_expr, ExprNodes.YieldExprNode): body = Nodes.ExprStatNode( node.result_expr.pos, expr=node.result_expr) else: body = Nodes.ReturnStatNode( node.result_expr.pos, value=node.result_expr) node.def_node = Nodes.DefNode( node.pos, name=node.name, lambda_name=node.lambda_name, args=node.args, star_arg=node.star_arg, starstar_arg=node.starstar_arg, body=body, doc=None) self.visitchildren(node) return node def visit_GeneratorExpressionNode(self, node): # unpack a generator expression into the corresponding DefNode genexpr_id = self.genexpr_counter self.genexpr_counter += 1 node.genexpr_name = EncodedString(u'genexpr%d' % genexpr_id) node.def_node = Nodes.DefNode(node.pos, name=node.name, doc=None, args=[], star_arg=None, starstar_arg=None, body=node.loop) self.visitchildren(node) return node # cdef variables def handle_bufferdefaults(self, decl): if not isinstance(decl.default, ExprNodes.DictNode): raise PostParseError(decl.pos, ERR_BUF_DEFAULTS) self.scope_node.buffer_defaults_node = decl.default self.scope_node.buffer_defaults_pos = decl.pos def visit_CVarDefNode(self, node): # This assumes only plain names and pointers are assignable on # declaration. Also, it makes use of the fact that a cdef decl # must appear before the first use, so we don't have to deal with # "i = 3; cdef int i = i" and can simply move the nodes around. 
try: self.visitchildren(node) stats = [node] newdecls = [] for decl in node.declarators: declbase = decl while isinstance(declbase, Nodes.CPtrDeclaratorNode): declbase = declbase.base if isinstance(declbase, Nodes.CNameDeclaratorNode): if declbase.default is not None: if self.scope_type in ('cclass', 'pyclass', 'struct'): if isinstance(self.scope_node, Nodes.CClassDefNode): handler = self.specialattribute_handlers.get(decl.name) if handler: if decl is not declbase: raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE) handler(decl) continue # Remove declaration raise PostParseError(decl.pos, ERR_CDEF_INCLASS) first_assignment = self.scope_type != 'module' stats.append(Nodes.SingleAssignmentNode(node.pos, lhs=ExprNodes.NameNode(node.pos, name=declbase.name), rhs=declbase.default, first=first_assignment)) declbase.default = None newdecls.append(decl) node.declarators = newdecls return stats except PostParseError, e: # An error in a cdef clause is ok, simply remove the declaration # and try to move on to report more errors self.context.nonfatal_error(e) return None # Split parallel assignments (a,b = b,a) into separate partial # assignments that are executed rhs-first using temps. This # restructuring must be applied before type analysis so that known # types on rhs and lhs can be matched directly. It is required in # the case that the types cannot be coerced to a Python type in # order to assign from a tuple. def visit_SingleAssignmentNode(self, node): self.visitchildren(node) return self._visit_assignment_node(node, [node.lhs, node.rhs]) def visit_CascadedAssignmentNode(self, node): self.visitchildren(node) return self._visit_assignment_node(node, node.lhs_list + [node.rhs]) def _visit_assignment_node(self, node, expr_list): """Flatten parallel assignments into separate single assignments or cascaded assignments. 
""" if sum([ 1 for expr in expr_list if expr.is_sequence_constructor or expr.is_string_literal ]) < 2: # no parallel assignments => nothing to do return node expr_list_list = [] flatten_parallel_assignments(expr_list, expr_list_list) temp_refs = [] eliminate_rhs_duplicates(expr_list_list, temp_refs) nodes = [] for expr_list in expr_list_list: lhs_list = expr_list[:-1] rhs = expr_list[-1] if len(lhs_list) == 1: node = Nodes.SingleAssignmentNode(rhs.pos, lhs = lhs_list[0], rhs = rhs) else: node = Nodes.CascadedAssignmentNode(rhs.pos, lhs_list = lhs_list, rhs = rhs) nodes.append(node) if len(nodes) == 1: assign_node = nodes[0] else: assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes) if temp_refs: duplicates_and_temps = [ (temp.expression, temp) for temp in temp_refs ] sort_common_subsequences(duplicates_and_temps) for _, temp_ref in duplicates_and_temps[::-1]: assign_node = LetNode(temp_ref, assign_node) return assign_node def _flatten_sequence(self, seq, result): for arg in seq.args: if arg.is_sequence_constructor: self._flatten_sequence(arg, result) else: result.append(arg) return result def visit_DelStatNode(self, node): self.visitchildren(node) node.args = self._flatten_sequence(node, []) return node def visit_ExceptClauseNode(self, node): if node.is_except_as: # except-as must delete NameNode target at the end del_target = Nodes.DelStatNode( node.pos, args=[ExprNodes.NameNode( node.target.pos, name=node.target.name)], ignore_nonexisting=True) node.body = Nodes.StatListNode( node.pos, stats=[Nodes.TryFinallyStatNode( node.pos, body=node.body, finally_clause=Nodes.StatListNode( node.pos, stats=[del_target]))]) self.visitchildren(node) return node def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence): """Replace rhs items by LetRefNodes if they appear more than once. Creates a sequence of LetRefNodes that set up the required temps and appends them to ref_node_sequence. The input list is modified in-place. 
""" seen_nodes = set() ref_nodes = {} def find_duplicates(node): if node.is_literal or node.is_name: # no need to replace those; can't include attributes here # as their access is not necessarily side-effect free return if node in seen_nodes: if node not in ref_nodes: ref_node = LetRefNode(node) ref_nodes[node] = ref_node ref_node_sequence.append(ref_node) else: seen_nodes.add(node) if node.is_sequence_constructor: for item in node.args: find_duplicates(item) for expr_list in expr_list_list: rhs = expr_list[-1] find_duplicates(rhs) if not ref_nodes: return def substitute_nodes(node): if node in ref_nodes: return ref_nodes[node] elif node.is_sequence_constructor: node.args = list(map(substitute_nodes, node.args)) return node # replace nodes inside of the common subexpressions for node in ref_nodes: if node.is_sequence_constructor: node.args = list(map(substitute_nodes, node.args)) # replace common subexpressions on all rhs items for expr_list in expr_list_list: expr_list[-1] = substitute_nodes(expr_list[-1]) def sort_common_subsequences(items): """Sort items/subsequences so that all items and subsequences that an item contains appear before the item itself. This is needed because each rhs item must only be evaluated once, so its value must be evaluated first and then reused when packing sequences that contain it. This implies a partial order, and the sort must be stable to preserve the original order as much as possible, so we use a simple insertion sort (which is very fast for short sequences, the normal case in practice). 
""" def contains(seq, x): for item in seq: if item is x: return True elif item.is_sequence_constructor and contains(item.args, x): return True return False def lower_than(a,b): return b.is_sequence_constructor and contains(b.args, a) for pos, item in enumerate(items): key = item[1] # the ResultRefNode which has already been injected into the sequences new_pos = pos for i in xrange(pos-1, -1, -1): if lower_than(key, items[i][0]): new_pos = i if new_pos != pos: for i in xrange(pos, new_pos, -1): items[i] = items[i-1] items[new_pos] = item def unpack_string_to_character_literals(literal): chars = [] pos = literal.pos stype = literal.__class__ sval = literal.value sval_type = sval.__class__ for char in sval: cval = sval_type(char) chars.append(stype(pos, value=cval, constant_result=cval)) return chars def flatten_parallel_assignments(input, output): # The input is a list of expression nodes, representing the LHSs # and RHS of one (possibly cascaded) assignment statement. For # sequence constructors, rearranges the matching parts of both # sides into a list of equivalent assignments between the # individual elements. This transformation is applied # recursively, so that nested structures get matched as well. 
rhs = input[-1] if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode)) or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])): output.append(input) return complete_assignments = [] if rhs.is_sequence_constructor: rhs_args = rhs.args elif rhs.is_string_literal: rhs_args = unpack_string_to_character_literals(rhs) rhs_size = len(rhs_args) lhs_targets = [ [] for _ in xrange(rhs_size) ] starred_assignments = [] for lhs in input[:-1]: if not lhs.is_sequence_constructor: if lhs.is_starred: error(lhs.pos, "starred assignment target must be in a list or tuple") complete_assignments.append(lhs) continue lhs_size = len(lhs.args) starred_targets = sum([1 for expr in lhs.args if expr.is_starred]) if starred_targets > 1: error(lhs.pos, "more than 1 starred expression in assignment") output.append([lhs,rhs]) continue elif lhs_size - starred_targets > rhs_size: error(lhs.pos, "need more than %d value%s to unpack" % (rhs_size, (rhs_size != 1) and 's' or '')) output.append([lhs,rhs]) continue elif starred_targets: map_starred_assignment(lhs_targets, starred_assignments, lhs.args, rhs_args) elif lhs_size < rhs_size: error(lhs.pos, "too many values to unpack (expected %d, got %d)" % (lhs_size, rhs_size)) output.append([lhs,rhs]) continue else: for targets, expr in zip(lhs_targets, lhs.args): targets.append(expr) if complete_assignments: complete_assignments.append(rhs) output.append(complete_assignments) # recursively flatten partial assignments for cascade, rhs in zip(lhs_targets, rhs_args): if cascade: cascade.append(rhs) flatten_parallel_assignments(cascade, output) # recursively flatten starred assignments for cascade in starred_assignments: if cascade[0].is_sequence_constructor: flatten_parallel_assignments(cascade, output) else: output.append(cascade) def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args): # Appends the fixed-position LHS targets to the target list that # appear left and right of the starred argument. 
# # The starred_assignments list receives a new tuple # (lhs_target, rhs_values_list) that maps the remaining arguments # (those that match the starred target) to a list. # left side of the starred target for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)): if expr.is_starred: starred = i lhs_remaining = len(lhs_args) - i - 1 break targets.append(expr) else: raise InternalError("no starred arg found when splitting starred assignment") # right side of the starred target for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:], lhs_args[starred + 1:])): targets.append(expr) # the starred target itself, must be assigned a (potentially empty) list target = lhs_args[starred].target # unpack starred node starred_rhs = rhs_args[starred:] if lhs_remaining: starred_rhs = starred_rhs[:-lhs_remaining] if starred_rhs: pos = starred_rhs[0].pos else: pos = target.pos starred_assignments.append([ target, ExprNodes.ListNode(pos=pos, args=starred_rhs)]) class PxdPostParse(CythonTransform, SkipDeclarations): """ Basic interpretation/validity checking that should only be done on pxd trees. A lot of this checking currently happens in the parser; but what is listed below happens here. - "def" functions are let through only if they fill the getbuffer/releasebuffer slots - cdef functions are let through only if they are on the top level and are declared "inline" """ ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'" ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'" def __call__(self, node): self.scope_type = 'pxd' return super(PxdPostParse, self).__call__(node) def visit_CClassDefNode(self, node): old = self.scope_type self.scope_type = 'cclass' self.visitchildren(node) self.scope_type = old return node def visit_FuncDefNode(self, node): # FuncDefNode always come with an implementation (without # an imp they are CVarDefNodes..) 
err = self.ERR_INLINE_ONLY if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass' and node.name in ('__getbuffer__', '__releasebuffer__')): err = None # allow these slots if isinstance(node, Nodes.CFuncDefNode): if (u'inline' in node.modifiers and self.scope_type in ('pxd', 'cclass')): node.inline_in_pxd = True if node.visibility != 'private': err = self.ERR_NOGO_WITH_INLINE % node.visibility elif node.api: err = self.ERR_NOGO_WITH_INLINE % 'api' else: err = None # allow inline function else: err = self.ERR_INLINE_ONLY if err: self.context.nonfatal_error(PostParseError(node.pos, err)) return None else: return node class InterpretCompilerDirectives(CythonTransform, SkipDeclarations): """ After parsing, directives can be stored in a number of places: - #cython-comments at the top of the file (stored in ModuleNode) - Command-line arguments overriding these - @cython.directivename decorators - with cython.directivename: statements This transform is responsible for interpreting these various sources and store the directive in two ways: - Set the directives attribute of the ModuleNode for global directives. - Use a CompilerDirectivesNode to override directives for a subtree. (The first one is primarily to not have to modify with the tree structure, so that ModuleNode stay on top.) The directives are stored in dictionaries from name to value in effect. Each such dictionary is always filled in for all possible directives, using default values where no value is given by the user. The available directives are controlled in Options.py. Note that we have to run this prior to analysis, and so some minor duplication of functionality has to occur: We manually track cimports and which names the "cython" module may have been imported to. 
""" unop_method_nodes = { 'typeof': ExprNodes.TypeofNode, 'operator.address': ExprNodes.AmpersandNode, 'operator.dereference': ExprNodes.DereferenceNode, 'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'), 'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'), 'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'), 'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'), # For backwards compatability. 'address': ExprNodes.AmpersandNode, } binop_method_nodes = { 'operator.comma' : ExprNodes.c_binop_constructor(','), } special_methods = set(['declare', 'union', 'struct', 'typedef', 'sizeof', 'cast', 'pointer', 'compiled', 'NULL', 'fused_type', 'parallel']) special_methods.update(unop_method_nodes.keys()) valid_parallel_directives = set([ "parallel", "prange", "threadid", # "threadsavailable", ]) def __init__(self, context, compilation_directive_defaults): super(InterpretCompilerDirectives, self).__init__(context) self.compilation_directive_defaults = {} for key, value in compilation_directive_defaults.items(): self.compilation_directive_defaults[unicode(key)] = copy.deepcopy(value) self.cython_module_names = set() self.directive_names = {} self.parallel_directives = {} def check_directive_scope(self, pos, directive, scope): legal_scopes = Options.directive_scopes.get(directive, None) if legal_scopes and scope not in legal_scopes: self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive ' 'is not allowed in %s scope' % (directive, scope))) return False else: if (directive not in Options.directive_defaults and directive not in Options.directive_types): error(pos, "Invalid directive: '%s'." % (directive,)) return True # Set up processing and handle the cython: comments. 
    def visit_ModuleNode(self, node):
        # Validate the '# cython:' header comments against module scope,
        # then merge the directive sources in increasing precedence:
        # compile-time defaults < compilation-wide defaults < per-module
        # header comments.  The result is stored both on the transform
        # and on the ModuleNode itself.
        for key, value in node.directive_comments.items():
            if not self.check_directive_scope(node.pos, key, 'module'):
                # NOTE(review): wrong_scope_error is not defined anywhere
                # in the visible scope (this class or the bases shown) --
                # confirm it exists, otherwise this path raises
                # AttributeError.  check_directive_scope has already
                # reported a nonfatal error at this point.
                self.wrong_scope_error(node.pos, key, 'module')
                del node.directive_comments[key]

        self.module_scope = node.scope

        directives = copy.deepcopy(Options.directive_defaults)
        directives.update(copy.deepcopy(self.compilation_directive_defaults))
        directives.update(node.directive_comments)
        self.directives = directives
        node.directives = directives
        node.parallel_directives = self.parallel_directives

        self.visitchildren(node)
        node.cython_module_names = self.cython_module_names
        return node

    # The following four functions track imports and cimports that
    # begin with "cython"
    def is_cython_directive(self, name):
        # True if 'name' names a known directive, one of the special
        # cython methods (declare, sizeof, cast, ...), or parses as a
        # basic C type.
        return (name in Options.directive_types or
                name in self.special_methods or
                PyrexTypes.parse_basic_type(name))

    def is_parallel_directive(self, full_name, pos):
        """
        Checks to see if fullname (e.g. cython.parallel.prange) is a valid
        parallel directive. If it is a star import it also updates the
        parallel_directives.
""" result = (full_name + ".").startswith("cython.parallel.") if result: directive = full_name.split('.') if full_name == u"cython.parallel": self.parallel_directives[u"parallel"] = u"cython.parallel" elif full_name == u"cython.parallel.*": for name in self.valid_parallel_directives: self.parallel_directives[name] = u"cython.parallel.%s" % name elif (len(directive) != 3 or directive[-1] not in self.valid_parallel_directives): error(pos, "No such directive: %s" % full_name) self.module_scope.use_utility_code( UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c")) return result def visit_CImportStatNode(self, node): if node.module_name == u"cython": self.cython_module_names.add(node.as_name or u"cython") elif node.module_name.startswith(u"cython."): if node.module_name.startswith(u"cython.parallel."): error(node.pos, node.module_name + " is not a module") if node.module_name == u"cython.parallel": if node.as_name and node.as_name != u"cython": self.parallel_directives[node.as_name] = node.module_name else: self.cython_module_names.add(u"cython") self.parallel_directives[ u"cython.parallel"] = node.module_name self.module_scope.use_utility_code( UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c")) elif node.as_name: self.directive_names[node.as_name] = node.module_name[7:] else: self.cython_module_names.add(u"cython") # if this cimport was a compiler directive, we don't # want to leave the cimport node sitting in the tree return None return node def visit_FromCImportStatNode(self, node): if (node.module_name == u"cython") or \ node.module_name.startswith(u"cython."): submodule = (node.module_name + u".")[7:] newimp = [] for pos, name, as_name, kind in node.imported_names: full_name = submodule + name qualified_name = u"cython." + full_name if self.is_parallel_directive(qualified_name, node.pos): # from cython cimport parallel, or # from cython.parallel cimport parallel, prange, ... 
self.parallel_directives[as_name or name] = qualified_name elif self.is_cython_directive(full_name): if as_name is None: as_name = full_name self.directive_names[as_name] = full_name if kind is not None: self.context.nonfatal_error(PostParseError(pos, "Compiler directive imports must be plain imports")) else: newimp.append((pos, name, as_name, kind)) if not newimp: return None node.imported_names = newimp return node def visit_FromImportStatNode(self, node): if (node.module.module_name.value == u"cython") or \ node.module.module_name.value.startswith(u"cython."): submodule = (node.module.module_name.value + u".")[7:] newimp = [] for name, name_node in node.items: full_name = submodule + name qualified_name = u"cython." + full_name if self.is_parallel_directive(qualified_name, node.pos): self.parallel_directives[name_node.name] = qualified_name elif self.is_cython_directive(full_name): self.directive_names[name_node.name] = full_name else: newimp.append((name, name_node)) if not newimp: return None node.items = newimp return node def visit_SingleAssignmentNode(self, node): if isinstance(node.rhs, ExprNodes.ImportNode): module_name = node.rhs.module_name.value is_parallel = (module_name + u".").startswith(u"cython.parallel.") if module_name != u"cython" and not is_parallel: return node module_name = node.rhs.module_name.value as_name = node.lhs.name node = Nodes.CImportStatNode(node.pos, module_name = module_name, as_name = as_name) node = self.visit_CImportStatNode(node) else: self.visitchildren(node) return node def visit_NameNode(self, node): if node.name in self.cython_module_names: node.is_cython_module = True else: node.cython_attribute = self.directive_names.get(node.name) return node def try_to_parse_directives(self, node): # If node is the contents of an directive (in a with statement or # decorator), returns a list of (directivename, value) pairs. 
# Otherwise, returns None if isinstance(node, ExprNodes.CallNode): self.visit(node.function) optname = node.function.as_cython_attribute() if optname: directivetype = Options.directive_types.get(optname) if directivetype: args, kwds = node.explicit_args_kwds() directives = [] key_value_pairs = [] if kwds is not None and directivetype is not dict: for keyvalue in kwds.key_value_pairs: key, value = keyvalue sub_optname = "%s.%s" % (optname, key.value) if Options.directive_types.get(sub_optname): directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos)) else: key_value_pairs.append(keyvalue) if not key_value_pairs: kwds = None else: kwds.key_value_pairs = key_value_pairs if directives and not kwds and not args: return directives directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos)) return directives elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)): self.visit(node) optname = node.as_cython_attribute() if optname: directivetype = Options.directive_types.get(optname) if directivetype is bool: return [(optname, True)] elif directivetype is None: return [(optname, None)] else: raise PostParseError( node.pos, "The '%s' directive should be used as a function call." 
% optname) return None def try_to_parse_directive(self, optname, args, kwds, pos): directivetype = Options.directive_types.get(optname) if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode): return optname, Options.directive_defaults[optname] elif directivetype is bool: if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode): raise PostParseError(pos, 'The %s directive takes one compile-time boolean argument' % optname) return (optname, args[0].value) elif directivetype is int: if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode): raise PostParseError(pos, 'The %s directive takes one compile-time integer argument' % optname) return (optname, int(args[0].value)) elif directivetype is str: if kwds is not None or len(args) != 1 or not isinstance( args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)): raise PostParseError(pos, 'The %s directive takes one compile-time string argument' % optname) return (optname, str(args[0].value)) elif directivetype is type: if kwds is not None or len(args) != 1: raise PostParseError(pos, 'The %s directive takes one type argument' % optname) return (optname, args[0]) elif directivetype is dict: if len(args) != 0: raise PostParseError(pos, 'The %s directive takes no prepositional arguments' % optname) return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs]) elif directivetype is list: if kwds and len(kwds) != 0: raise PostParseError(pos, 'The %s directive takes no keyword arguments' % optname) return optname, [ str(arg.value) for arg in args ] elif callable(directivetype): if kwds is not None or len(args) != 1 or not isinstance( args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)): raise PostParseError(pos, 'The %s directive takes one compile-time string argument' % optname) return (optname, directivetype(optname, str(args[0].value))) else: assert False def visit_with_directives(self, body, directives): olddirectives = self.directives 
newdirectives = copy.copy(olddirectives) newdirectives.update(directives) self.directives = newdirectives assert isinstance(body, Nodes.StatListNode), body retbody = self.visit_Node(body) directive = Nodes.CompilerDirectivesNode(pos=retbody.pos, body=retbody, directives=newdirectives) self.directives = olddirectives return directive # Handle decorators def visit_FuncDefNode(self, node): directives = self._extract_directives(node, 'function') if not directives: return self.visit_Node(node) body = Nodes.StatListNode(node.pos, stats=[node]) return self.visit_with_directives(body, directives) def visit_CVarDefNode(self, node): directives = self._extract_directives(node, 'function') if not directives: return node for name, value in directives.iteritems(): if name == 'locals': node.directive_locals = value elif name != 'final': self.context.nonfatal_error(PostParseError( node.pos, "Cdef functions can only take cython.locals() " "or final decorators, got %s." % name)) body = Nodes.StatListNode(node.pos, stats=[node]) return self.visit_with_directives(body, directives) def visit_CClassDefNode(self, node): directives = self._extract_directives(node, 'cclass') if not directives: return self.visit_Node(node) body = Nodes.StatListNode(node.pos, stats=[node]) return self.visit_with_directives(body, directives) def visit_PyClassDefNode(self, node): directives = self._extract_directives(node, 'class') if not directives: return self.visit_Node(node) body = Nodes.StatListNode(node.pos, stats=[node]) return self.visit_with_directives(body, directives) def _extract_directives(self, node, scope_name): if not node.decorators: return {} # Split the decorators into two lists -- real decorators and directives directives = [] realdecs = [] for dec in node.decorators: new_directives = self.try_to_parse_directives(dec.decorator) if new_directives is not None: for directive in new_directives: if self.check_directive_scope(node.pos, directive[0], scope_name): directives.append(directive) else: 
realdecs.append(dec) if realdecs and isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)): raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.") else: node.decorators = realdecs # merge or override repeated directives optdict = {} directives.reverse() # Decorators coming first take precedence for directive in directives: name, value = directive if name in optdict: old_value = optdict[name] # keywords and arg lists can be merged, everything # else overrides completely if isinstance(old_value, dict): old_value.update(value) elif isinstance(old_value, list): old_value.extend(value) else: optdict[name] = value else: optdict[name] = value return optdict # Handle with statements def visit_WithStatNode(self, node): directive_dict = {} for directive in self.try_to_parse_directives(node.manager) or []: if directive is not None: if node.target is not None: self.context.nonfatal_error( PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'")) else: name, value = directive if name in ('nogil', 'gil'): # special case: in pure mode, "with nogil" spells "with cython.nogil" node = Nodes.GILStatNode(node.pos, state = name, body = node.body) return self.visit_Node(node) if self.check_directive_scope(node.pos, name, 'with statement'): directive_dict[name] = value if directive_dict: return self.visit_with_directives(node.body, directive_dict) return self.visit_Node(node) class ParallelRangeTransform(CythonTransform, SkipDeclarations): """ Transform cython.parallel stuff. The parallel_directives come from the module node, set there by InterpretCompilerDirectives. x = cython.parallel.threadavailable() -> ParallelThreadAvailableNode with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode print cython.parallel.threadid() -> ParallelThreadIdNode for i in cython.parallel.prange(...): -> ParallelRangeNode ... 
""" # a list of names, maps 'cython.parallel.prange' in the code to # ['cython', 'parallel', 'prange'] parallel_directive = None # Indicates whether a namenode in an expression is the cython module namenode_is_cython_module = False # Keep track of whether we are the context manager of a 'with' statement in_context_manager_section = False # One of 'prange' or 'with parallel'. This is used to disallow closely # nested 'with parallel:' blocks state = None directive_to_node = { u"cython.parallel.parallel": Nodes.ParallelWithBlockNode, # u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode, u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode, u"cython.parallel.prange": Nodes.ParallelRangeNode, } def node_is_parallel_directive(self, node): return node.name in self.parallel_directives or node.is_cython_module def get_directive_class_node(self, node): """ Figure out which parallel directive was used and return the associated Node class. E.g. for a cython.parallel.prange() call we return ParallelRangeNode """ if self.namenode_is_cython_module: directive = '.'.join(self.parallel_directive) else: directive = self.parallel_directives[self.parallel_directive[0]] directive = '%s.%s' % (directive, '.'.join(self.parallel_directive[1:])) directive = directive.rstrip('.') cls = self.directive_to_node.get(directive) if cls is None and not (self.namenode_is_cython_module and self.parallel_directive[0] != 'parallel'): error(node.pos, "Invalid directive: %s" % directive) self.namenode_is_cython_module = False self.parallel_directive = None return cls def visit_ModuleNode(self, node): """ If any parallel directives were imported, copy them over and visit the AST """ if node.parallel_directives: self.parallel_directives = node.parallel_directives return self.visit_Node(node) # No parallel directives were imported, so they can't be used :) return node def visit_NameNode(self, node): if self.node_is_parallel_directive(node): self.parallel_directive = 
[node.name] self.namenode_is_cython_module = node.is_cython_module return node def visit_AttributeNode(self, node): self.visitchildren(node) if self.parallel_directive: self.parallel_directive.append(node.attribute) return node def visit_CallNode(self, node): self.visit(node.function) if not self.parallel_directive: return node # We are a parallel directive, replace this node with the # corresponding ParallelSomethingSomething node if isinstance(node, ExprNodes.GeneralCallNode): args = node.positional_args.args kwargs = node.keyword_args else: args = node.args kwargs = {} parallel_directive_class = self.get_directive_class_node(node) if parallel_directive_class: # Note: in case of a parallel() the body is set by # visit_WithStatNode node = parallel_directive_class(node.pos, args=args, kwargs=kwargs) return node def visit_WithStatNode(self, node): "Rewrite with cython.parallel.parallel() blocks" newnode = self.visit(node.manager) if isinstance(newnode, Nodes.ParallelWithBlockNode): if self.state == 'parallel with': error(node.manager.pos, "Nested parallel with blocks are disallowed") self.state = 'parallel with' body = self.visit(node.body) self.state = None newnode.body = body return newnode elif self.parallel_directive: parallel_directive_class = self.get_directive_class_node(node) if not parallel_directive_class: # There was an error, stop here and now return None if parallel_directive_class is Nodes.ParallelWithBlockNode: error(node.pos, "The parallel directive must be called") return None node.body = self.visit(node.body) return node def visit_ForInStatNode(self, node): "Rewrite 'for i in cython.parallel.prange(...):'" self.visit(node.iterator) self.visit(node.target) in_prange = isinstance(node.iterator.sequence, Nodes.ParallelRangeNode) previous_state = self.state if in_prange: # This will replace the entire ForInStatNode, so copy the # attributes parallel_range_node = node.iterator.sequence parallel_range_node.target = node.target parallel_range_node.body = 
node.body parallel_range_node.else_clause = node.else_clause node = parallel_range_node if not isinstance(node.target, ExprNodes.NameNode): error(node.target.pos, "Can only iterate over an iteration variable") self.state = 'prange' self.visit(node.body) self.state = previous_state self.visit(node.else_clause) return node def visit(self, node): "Visit a node that may be None" if node is not None: return super(ParallelRangeTransform, self).visit(node) class WithTransform(CythonTransform, SkipDeclarations): def visit_WithStatNode(self, node): self.visitchildren(node, 'body') pos = node.pos body, target, manager = node.body, node.target, node.manager node.enter_call = ExprNodes.SimpleCallNode( pos, function=ExprNodes.AttributeNode( pos, obj=ExprNodes.CloneNode(manager), attribute=EncodedString('__enter__'), is_special_lookup=True), args=[], is_temp=True) if target is not None: body = Nodes.StatListNode( pos, stats = [ Nodes.WithTargetAssignmentStatNode( pos, lhs = target, rhs = ResultRefNode(node.enter_call), orig_rhs = node.enter_call), body]) excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[ ExprNodes.ExcValueNode(pos) for _ in range(3)]) except_clause = Nodes.ExceptClauseNode( pos, body=Nodes.IfStatNode( pos, if_clauses=[ Nodes.IfClauseNode( pos, condition=ExprNodes.NotNode( pos, operand=ExprNodes.WithExitCallNode( pos, with_stat=node, test_if_run=False, args=excinfo_target)), body=Nodes.ReraiseStatNode(pos), ), ], else_clause=None), pattern=None, target=None, excinfo_target=excinfo_target, ) node.body = Nodes.TryFinallyStatNode( pos, body=Nodes.TryExceptStatNode( pos, body=body, except_clauses=[except_clause], else_clause=None, ), finally_clause=Nodes.ExprStatNode( pos, expr=ExprNodes.WithExitCallNode( pos, with_stat=node, test_if_run=True, args=ExprNodes.TupleNode( pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)] ))), handle_error_case=False, ) return node def visit_ExprNode(self, node): # With statements are never inside expressions. 
        return node


class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
    """Originally, this was the only place where decorators were
    transformed into the corresponding calling code.  Now, this is
    done directly in DefNode and PyClassDefNode to avoid reassignments
    to the function/class name - except for cdef class methods.  For
    those, the reassignment is required as methods are originally
    defined in the PyMethodDef struct.

    The IndirectionNode allows DefNode to override the decorator
    """

    def visit_DefNode(self, func_node):
        # Only cdef-class methods still need decorator expansion here;
        # every other scope is handled directly by DefNode itself.
        scope_type = self.scope_type
        func_node = self.visit_FuncDefNode(func_node)
        if scope_type != 'cclass' or not func_node.decorators:
            return func_node
        return self.handle_decorators(func_node, func_node.decorators,
                                      func_node.name)

    def handle_decorators(self, node, decorators, name):
        # Synthesize 'name = deco1(deco2(...(name)))', applying the
        # decorators bottom-up as Python does.
        decorator_result = ExprNodes.NameNode(node.pos, name = name)
        for decorator in decorators[::-1]:
            decorator_result = ExprNodes.SimpleCallNode(
                decorator.pos,
                function = decorator.decorator,
                args = [decorator_result])

        name_node = ExprNodes.NameNode(node.pos, name = name)
        reassignment = Nodes.SingleAssignmentNode(
            node.pos,
            lhs = name_node,
            rhs = decorator_result)
        # Wrap the reassignment so DefNode can later override/remove it
        # (see the class docstring).
        reassignment = Nodes.IndirectionNode([reassignment])
        node.decorator_indirection = reassignment
        return [node, reassignment]


class CnameDirectivesTransform(CythonTransform, SkipDeclarations):
    """
    Only part of the CythonUtilityCode pipeline. Must be run before
    DecoratorTransform in case this is a decorator for a cdef class.
    It filters out @cname('my_cname') decorators and rewrites them to
    CnameDecoratorNodes.
""" def handle_function(self, node): if not getattr(node, 'decorators', None): return self.visit_Node(node) for i, decorator in enumerate(node.decorators): decorator = decorator.decorator if (isinstance(decorator, ExprNodes.CallNode) and decorator.function.is_name and decorator.function.name == 'cname'): args, kwargs = decorator.explicit_args_kwds() if kwargs: raise AssertionError( "cname decorator does not take keyword arguments") if len(args) != 1: raise AssertionError( "cname decorator takes exactly one argument") if not (args[0].is_literal and args[0].type == Builtin.str_type): raise AssertionError( "argument to cname decorator must be a string literal") cname = args[0].compile_time_value(None).decode('UTF-8') del node.decorators[i] node = Nodes.CnameDecoratorNode(pos=node.pos, node=node, cname=cname) break return self.visit_Node(node) visit_FuncDefNode = handle_function visit_CClassDefNode = handle_function visit_CEnumDefNode = handle_function visit_CStructOrUnionDefNode = handle_function class ForwardDeclareTypes(CythonTransform): def visit_CompilerDirectivesNode(self, node): env = self.module_scope old = env.directives env.directives = node.directives self.visitchildren(node) env.directives = old return node def visit_ModuleNode(self, node): self.module_scope = node.scope self.module_scope.directives = node.directives self.visitchildren(node) return node def visit_CDefExternNode(self, node): old_cinclude_flag = self.module_scope.in_cinclude self.module_scope.in_cinclude = 1 self.visitchildren(node) self.module_scope.in_cinclude = old_cinclude_flag return node def visit_CEnumDefNode(self, node): node.declare(self.module_scope) return node def visit_CStructOrUnionDefNode(self, node): if node.name not in self.module_scope.entries: node.declare(self.module_scope) return node def visit_CClassDefNode(self, node): if node.class_name not in self.module_scope.entries: node.declare(self.module_scope) return node class AnalyseDeclarationsTransform(EnvTransform): 
basic_property = TreeFragment(u""" property NAME: def __get__(self): return ATTR def __set__(self, value): ATTR = value """, level='c_class', pipeline=[NormalizeTree(None)]) basic_pyobject_property = TreeFragment(u""" property NAME: def __get__(self): return ATTR def __set__(self, value): ATTR = value def __del__(self): ATTR = None """, level='c_class', pipeline=[NormalizeTree(None)]) basic_property_ro = TreeFragment(u""" property NAME: def __get__(self): return ATTR """, level='c_class', pipeline=[NormalizeTree(None)]) struct_or_union_wrapper = TreeFragment(u""" cdef class NAME: cdef TYPE value def __init__(self, MEMBER=None): cdef int count count = 0 INIT_ASSIGNMENTS if IS_UNION and count > 1: raise ValueError, "At most one union member should be specified." def __str__(self): return STR_FORMAT % MEMBER_TUPLE def __repr__(self): return REPR_FORMAT % MEMBER_TUPLE """, pipeline=[NormalizeTree(None)]) init_assignment = TreeFragment(u""" if VALUE is not None: ATTR = VALUE count += 1 """, pipeline=[NormalizeTree(None)]) fused_function = None in_lambda = 0 def __call__(self, root): # needed to determine if a cdef var is declared after it's used. 
self.seen_vars_stack = [] self.fused_error_funcs = set() super_class = super(AnalyseDeclarationsTransform, self) self._super_visit_FuncDefNode = super_class.visit_FuncDefNode return super_class.__call__(root) def visit_NameNode(self, node): self.seen_vars_stack[-1].add(node.name) return node def visit_ModuleNode(self, node): self.seen_vars_stack.append(set()) node.analyse_declarations(self.current_env()) self.visitchildren(node) self.seen_vars_stack.pop() return node def visit_LambdaNode(self, node): self.in_lambda += 1 node.analyse_declarations(self.current_env()) self.visitchildren(node) self.in_lambda -= 1 return node def visit_CClassDefNode(self, node): node = self.visit_ClassDefNode(node) if node.scope and node.scope.implemented: stats = [] for entry in node.scope.var_entries: if entry.needs_property: property = self.create_Property(entry) property.analyse_declarations(node.scope) self.visit(property) stats.append(property) if stats: node.body.stats += stats return node def _handle_fused_def_decorators(self, old_decorators, env, node): """ Create function calls to the decorators and reassignments to the function. """ # Delete staticmethod and classmethod decorators, this is # handled directly by the fused function object. 
decorators = [] for decorator in old_decorators: func = decorator.decorator if (not func.is_name or func.name not in ('staticmethod', 'classmethod') or env.lookup_here(func.name)): # not a static or classmethod decorators.append(decorator) if decorators: transform = DecoratorTransform(self.context) def_node = node.node _, reassignments = transform.handle_decorators( def_node, decorators, def_node.name) reassignments.analyse_declarations(env) node = [node, reassignments] return node def _handle_def(self, decorators, env, node): "Handle def or cpdef fused functions" # Create PyCFunction nodes for each specialization node.stats.insert(0, node.py_func) node.py_func = self.visit(node.py_func) node.update_fused_defnode_entry(env) pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, True) pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env)) node.resulting_fused_function = pycfunc # Create assignment node for our def function node.fused_func_assignment = self._create_assignment( node.py_func, ExprNodes.CloneNode(pycfunc), env) if decorators: node = self._handle_fused_def_decorators(decorators, env, node) return node def _create_fused_function(self, env, node): "Create a fused function for a DefNode with fused arguments" from Cython.Compiler import FusedNode if self.fused_function or self.in_lambda: if self.fused_function not in self.fused_error_funcs: if self.in_lambda: error(node.pos, "Fused lambdas not allowed") else: error(node.pos, "Cannot nest fused functions") self.fused_error_funcs.add(self.fused_function) node.body = Nodes.PassStatNode(node.pos) for arg in node.args: if arg.type.is_fused: arg.type = arg.type.get_fused_types()[0] return node decorators = getattr(node, 'decorators', None) node = FusedNode.FusedCFuncDefNode(node, env) self.fused_function = node self.visitchildren(node) self.fused_function = None if node.py_func: node = self._handle_def(decorators, env, node) return node def _handle_nogil_cleanup(self, lenv, node): "Handle cleanup for 
'with gil' blocks in nogil functions." if lenv.nogil and lenv.has_with_gil_block: # Acquire the GIL for cleanup in 'nogil' functions, by wrapping # the entire function body in try/finally. # The corresponding release will be taken care of by # Nodes.FuncDefNode.generate_function_definitions() node.body = Nodes.NogilTryFinallyStatNode( node.body.pos, body=node.body, finally_clause=Nodes.EnsureGILNode(node.body.pos)) def _handle_fused(self, node): if node.is_generator and node.has_fused_arguments: node.has_fused_arguments = False error(node.pos, "Fused generators not supported") node.gbody = Nodes.StatListNode(node.pos, stats=[], body=Nodes.PassStatNode(node.pos)) return node.has_fused_arguments def visit_FuncDefNode(self, node): """ Analyse a function and its body, as that hasn't happend yet. Also analyse the directive_locals set by @cython.locals(). Then, if we are a function with fused arguments, replace the function (after it has declared itself in the symbol table!) with a FusedCFuncDefNode, and analyse its children (which are in turn normal functions). If we're a normal function, just analyse the body of the function. 
""" env = self.current_env() self.seen_vars_stack.append(set()) lenv = node.local_scope node.declare_arguments(lenv) for var, type_node in node.directive_locals.items(): if not lenv.lookup_here(var): # don't redeclare args type = type_node.analyse_as_type(lenv) if type: lenv.declare_var(var, type, type_node.pos) else: error(type_node.pos, "Not a type") if self._handle_fused(node): node = self._create_fused_function(env, node) else: node.body.analyse_declarations(lenv) self._handle_nogil_cleanup(lenv, node) self._super_visit_FuncDefNode(node) self.seen_vars_stack.pop() return node def visit_DefNode(self, node): node = self.visit_FuncDefNode(node) env = self.current_env() if (not isinstance(node, Nodes.DefNode) or node.fused_py_func or node.is_generator_body or not node.needs_assignment_synthesis(env)): return node return [node, self._synthesize_assignment(node, env)] def visit_GeneratorBodyDefNode(self, node): return self.visit_FuncDefNode(node) def _synthesize_assignment(self, node, env): # Synthesize assignment node and put it right after defnode genv = env while genv.is_py_class_scope or genv.is_c_class_scope: genv = genv.outer_scope if genv.is_closure_scope: rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode( node.pos, def_node=node, pymethdef_cname=node.entry.pymethdef_cname, code_object=ExprNodes.CodeObjectNode(node)) else: binding = self.current_directives.get('binding') rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding) if env.is_py_class_scope: rhs.binding = True node.is_cyfunction = rhs.binding return self._create_assignment(node, rhs, env) def _create_assignment(self, def_node, rhs, env): if def_node.decorators: for decorator in def_node.decorators[::-1]: rhs = ExprNodes.SimpleCallNode( decorator.pos, function = decorator.decorator, args = [rhs]) def_node.decorators = None assmt = Nodes.SingleAssignmentNode( def_node.pos, lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name), rhs=rhs) assmt.analyse_declarations(env) return assmt def 
visit_ScopedExprNode(self, node): env = self.current_env() node.analyse_declarations(env) # the node may or may not have a local scope if node.has_local_scope: self.seen_vars_stack.append(set(self.seen_vars_stack[-1])) self.enter_scope(node, node.expr_scope) node.analyse_scoped_declarations(node.expr_scope) self.visitchildren(node) self.exit_scope() self.seen_vars_stack.pop() else: node.analyse_scoped_declarations(env) self.visitchildren(node) return node def visit_TempResultFromStatNode(self, node): self.visitchildren(node) node.analyse_declarations(self.current_env()) return node def visit_CppClassNode(self, node): if node.visibility == 'extern': return None else: return self.visit_ClassDefNode(node) def visit_CStructOrUnionDefNode(self, node): # Create a wrapper node if needed. # We want to use the struct type information (so it can't happen # before this phase) but also create new objects to be declared # (so it can't happen later). # Note that we don't return the original node, as it is # never used after this phase. 
        # NOTE(review): wrapper generation is disabled here — private is the
        # default visibility, so we always return early.  All code below this
        # 'return' is currently unreachable; it is kept for when Python
        # wrapper classes for public structs/unions get (re-)enabled.
        if True: # private (default)
            return None

        # Build a reference to 'self.value', the wrapped C struct/union.
        self_value = ExprNodes.AttributeNode(
            pos = node.pos,
            obj = ExprNodes.NameNode(pos=node.pos, name=u"self"),
            attribute = EncodedString(u"value"))
        var_entries = node.entry.type.scope.var_entries
        attributes = []
        for entry in var_entries:
            attributes.append(ExprNodes.AttributeNode(pos = entry.pos,
                                                      obj = self_value,
                                                      attribute = entry.name))
        # __init__ assignments
        init_assignments = []
        for entry, attr in zip(var_entries, attributes):
            # TODO: branch on visibility
            init_assignments.append(self.init_assignment.substitute({
                    u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name),
                    u"ATTR": attr,
                }, pos = entry.pos))

        # create the class
        str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2])
        wrapper_class = self.struct_or_union_wrapper.substitute({
            u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments),
            u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct),
            u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes),
            u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)),
            u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))),
        }, pos = node.pos).stats[0]
        wrapper_class.class_name = node.name
        wrapper_class.shadow = True
        class_body = wrapper_class.body.stats

        # fix value type
        assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode)
        class_body[0].base_type.name = node.name

        # fix __init__ arguments: clone the template argument once per member
        init_method = class_body[1]
        assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__'
        arg_template = init_method.args[1]
        if not node.entry.type.is_struct:
            # union: member args become keyword-only — presumably because only
            # one union member can be set at a time (TODO confirm)
            arg_template.kw_only = True
        del init_method.args[1]
        for entry, attr in zip(var_entries, attributes):
            arg = copy.deepcopy(arg_template)
            arg.declarator.name = entry.name
            init_method.args.append(arg)

        # setters/getters
        for entry, attr in zip(var_entries, attributes):
            # TODO: branch on visibility
            if entry.type.is_pyobject:
                template = self.basic_pyobject_property
            else:
                template = self.basic_property
            property = template.substitute({
                    u"ATTR": attr,
                }, pos = entry.pos).stats[0]
            property.name = entry.name
            wrapper_class.body.stats.append(property)

        wrapper_class.analyse_declarations(self.current_env())
        return self.visit_CClassDefNode(wrapper_class)

    # Some nodes are no longer needed after declaration
    # analysis and can be dropped. The analysis was performed
    # on these nodes in a separate recursive process from the
    # enclosing function or module, so we can simply drop them.
    def visit_CDeclaratorNode(self, node):
        # necessary to ensure that all CNameDeclaratorNodes are visited.
        self.visitchildren(node)
        return node

    def visit_CTypeDefNode(self, node):
        return node

    def visit_CBaseTypeNode(self, node):
        return None

    def visit_CEnumDefNode(self, node):
        # keep public enum definitions, drop the rest after analysis
        if node.visibility == 'public':
            return node
        else:
            return None

    def visit_CNameDeclaratorNode(self, node):
        # warn when a name is cdef-declared after it has already been used
        # in this scope (tracked via seen_vars_stack)
        if node.name in self.seen_vars_stack[-1]:
            entry = self.current_env().lookup(node.name)
            if (entry is None or entry.visibility != 'extern'
                    and not entry.scope.is_c_class_scope):
                warning(node.pos, "cdef variable '%s' declared after it is used" % node.name, 2)
        self.visitchildren(node)
        return node

    def visit_CVarDefNode(self, node):
        # to ensure all CNameDeclaratorNodes are visited.
        self.visitchildren(node)
        return None

    def visit_CnameDecoratorNode(self, node):
        # Visit the wrapped node; if it was dropped, drop the decorator too.
        child_node = self.visit(node.node)
        if not child_node:
            return None
        if type(child_node) is list: # Assignment synthesized
            # keep the decorator around the first node, append the rest
            node.child_node = child_node[0]
            return [node] + child_node[1:]
        node.node = child_node
        return node

    def create_Property(self, entry):
        """Build a property node (from a template) wrapping the given
        attribute entry.  NOTE(review): assumes entry.visibility is either
        'public' or 'readonly'; any other value leaves 'template' unbound."""
        if entry.visibility == 'public':
            if entry.type.is_pyobject:
                template = self.basic_pyobject_property
            else:
                template = self.basic_property
        elif entry.visibility == 'readonly':
            template = self.basic_property_ro
        property = template.substitute({
                u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
                                                 obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
                                                 attribute=entry.name),
            }, pos=entry.pos).stats[0]
        property.name = entry.name
        property.doc = entry.doc
        return property


class CalculateQualifiedNamesTransform(EnvTransform):
    """
    Calculate and store the '__qualname__' and the global
    module name on some nodes.
    """
    def visit_ModuleNode(self, node):
        self.module_name = self.global_scope().qualified_name
        self.qualified_name = []
        # cache bound super() methods once to avoid repeated lookups below
        _super = super(CalculateQualifiedNamesTransform, self)
        self._super_visit_FuncDefNode = _super.visit_FuncDefNode
        self._super_visit_ClassDefNode = _super.visit_ClassDefNode
        self.visitchildren(node)
        return node

    def _set_qualname(self, node, name=None):
        # Store the dotted qualified name (optionally extended by 'name')
        # and the module name on the node, then recurse.
        if name:
            qualname = self.qualified_name[:]
            qualname.append(name)
        else:
            qualname = self.qualified_name
        node.qualname = EncodedString('.'.join(qualname))
        node.module_name = self.module_name
        self.visitchildren(node)
        return node

    def _append_entry(self, entry):
        # module-level names restart the qualified name path
        if entry.is_pyglobal and not entry.is_pyclass_attr:
            self.qualified_name = [entry.name]
        else:
            self.qualified_name.append(entry.name)

    def visit_ClassNode(self, node):
        return self._set_qualname(node, node.name)

    def visit_PyClassNamespaceNode(self, node):
        # class name was already added by parent node
        return self._set_qualname(node)

    def visit_PyCFunctionNode(self, node):
        return self._set_qualname(node, node.def_node.name)

    def visit_FuncDefNode(self, node):
        # push "<name>.<locals>" while visiting the body, restore afterwards
        orig_qualified_name = self.qualified_name[:]
        if getattr(node, 'name', None) == '<lambda>':
            self.qualified_name.append('<lambda>')
        else:
            self._append_entry(node.entry)
        self.qualified_name.append('<locals>')
        self._super_visit_FuncDefNode(node)
        self.qualified_name = orig_qualified_name
        return node

    def visit_ClassDefNode(self, node):
        orig_qualified_name = self.qualified_name[:]
        entry = (getattr(node, 'entry', None) or     # PyClass
                 self.current_env().lookup_here(node.name))  # CClass
        self._append_entry(entry)
        self._super_visit_ClassDefNode(node)
        self.qualified_name = orig_qualified_name
        return node


class AnalyseExpressionsTransform(CythonTransform):
    # Runs type inference and expression analysis on each scope.

    def visit_ModuleNode(self, node):
        node.scope.infer_types()
        node.body = node.body.analyse_expressions(node.scope)
        self.visitchildren(node)
        return node

    def visit_FuncDefNode(self, node):
        node.local_scope.infer_types()
        node.body = node.body.analyse_expressions(node.local_scope)
        self.visitchildren(node)
        return node

    def visit_ScopedExprNode(self, node):
        if node.has_local_scope:
            node.expr_scope.infer_types()
            node = node.analyse_scoped_expressions(node.expr_scope)
        self.visitchildren(node)
        return node

    def visit_IndexNode(self, node):
        """
        Replace index nodes used to specialize cdef functions with fused
        argument types with the Attribute- or NameNode referring to the
        function. We then need to copy over the specialization properties to
        the attribute or name node.

        Because the indexing might be a Python indexing operation on a fused
        function, or (usually) a Cython indexing operation, we need to
        re-analyse the types.
        """
        self.visit_Node(node)
        if node.is_fused_index and not node.type.is_error:
            node = node.base
        elif node.memslice_ellipsis_noop:
            # memoryviewslice[...] expression, drop the IndexNode
            node = node.base
        return node


class FindInvalidUseOfFusedTypes(CythonTransform):
    # Reports any remaining (unspecialized) fused type uses.

    def visit_FuncDefNode(self, node):
        # Errors related to use in functions with fused args will already
        # have been detected
        if not node.has_fused_arguments:
            if not node.is_generator_body and node.return_type.is_fused:
                error(node.pos, "Return type is not specified as argument type")
            else:
                self.visitchildren(node)
        return node

    def visit_ExprNode(self, node):
        if node.type and node.type.is_fused:
            error(node.pos, "Invalid use of fused types, type cannot be specialized")
        else:
            self.visitchildren(node)
        return node


class ExpandInplaceOperators(EnvTransform):
    # Rewrites 'lhs op= rhs' into a plain assignment 'lhs = lhs op rhs',
    # using LetRefNode temps so side effects in the lhs run only once.

    def visit_InPlaceAssignmentNode(self, node):
        lhs = node.lhs
        rhs = node.rhs
        if lhs.type.is_cpp_class:
            # No getting around this exact operator here.
            return node
        if isinstance(lhs, ExprNodes.IndexNode) and lhs.is_buffer_access:
            # There is code to handle this case.
            return node

        env = self.current_env()
        def side_effect_free_reference(node, setting=False):
            # Returns a (node, temps) pair where 'node' re-evaluates without
            # repeating side effects; 'temps' are the LetRefNodes introduced.
            if isinstance(node, ExprNodes.NameNode):
                return node, []
            elif node.type.is_pyobject and not setting:
                node = LetRefNode(node)
                return node, [node]
            elif isinstance(node, ExprNodes.IndexNode):
                if node.is_buffer_access:
                    raise ValueError("Buffer access")
                base, temps = side_effect_free_reference(node.base)
                index = LetRefNode(node.index)
                return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
            elif isinstance(node, ExprNodes.AttributeNode):
                obj, temps = side_effect_free_reference(node.obj)
                return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps
            else:
                node = LetRefNode(node)
                return node, [node]
        try:
            lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True)
        except ValueError:
            # buffer access — leave the in-place node untouched
            return node
        dup = lhs.__class__(**lhs.__dict__)
        binop = ExprNodes.binop_node(node.pos,
                                     operator = node.operator,
                                     operand1 = dup,
                                     operand2 = rhs,
                                     inplace=True)
        # Manually analyse types for new node.
        lhs.analyse_target_types(env)
        dup.analyse_types(env)
        binop.analyse_operation(env)
        node = Nodes.SingleAssignmentNode(
            node.pos,
            lhs = lhs,
            rhs=binop.coerce_to(lhs.type, env))
        # Use LetRefNode to avoid side effects.
        let_ref_nodes.reverse()
        for t in let_ref_nodes:
            node = LetNode(t, node)
        return node

    def visit_ExprNode(self, node):
        # In-place assignments can't happen within an expression.
        return node


class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
    """
    Adjust function and class definitions by the decorator directives:

    @cython.cfunc
    @cython.cclass
    @cython.ccall
    """

    def visit_ModuleNode(self, node):
        self.directives = node.directives
        self.in_py_class = False
        self.visitchildren(node)
        return node

    def visit_CompilerDirectivesNode(self, node):
        # scope the directive set to this subtree, then restore
        old_directives = self.directives
        self.directives = node.directives
        self.visitchildren(node)
        self.directives = old_directives
        return node

    def visit_DefNode(self, node):
        # ccall -> overridable cpdef-like function; cfunc -> plain cdef
        if 'ccall' in self.directives:
            node = node.as_cfunction(overridable=True, returns=self.directives.get('returns'))
            return self.visit(node)
        if 'cfunc' in self.directives:
            if self.in_py_class:
                error(node.pos, "cfunc directive is not allowed here")
            else:
                node = node.as_cfunction(overridable=False, returns=self.directives.get('returns'))
                return self.visit(node)
        self.visitchildren(node)
        return node

    def visit_PyClassDefNode(self, node):
        if 'cclass' in self.directives:
            node = node.as_cclass()
            return self.visit(node)
        else:
            old_in_pyclass = self.in_py_class
            self.in_py_class = True
            self.visitchildren(node)
            self.in_py_class = old_in_pyclass
            return node

    def visit_CClassDefNode(self, node):
        old_in_pyclass = self.in_py_class
        self.in_py_class = False
        self.visitchildren(node)
        self.in_py_class = old_in_pyclass
        return node


class AlignFunctionDefinitions(CythonTransform):
    """
    This class takes the signatures from a .pxd file and applies them to
    the def methods in a .py file.
    """

    def visit_ModuleNode(self, node):
        self.scope = node.scope
        self.directives = node.directives
        self.imported_names = set() # hack, see visit_FromImportStatNode()
        self.visitchildren(node)
        return node

    def visit_PyClassDefNode(self, node):
        pxd_def = self.scope.lookup(node.name)
        if pxd_def:
            if pxd_def.is_cclass:
                # declared as a cdef class in the .pxd — convert and recurse
                return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
            elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
                error(node.pos, "'%s' redeclared" % node.name)
                if pxd_def.pos:
                    error(pxd_def.pos, "previous declaration here")
                return None
        return node

    def visit_CClassDefNode(self, node, pxd_def=None):
        if pxd_def is None:
            pxd_def = self.scope.lookup(node.class_name)
        if pxd_def:
            # visit the class body in the scope declared by the .pxd
            outer_scope = self.scope
            self.scope = pxd_def.type.scope
        self.visitchildren(node)
        if pxd_def:
            self.scope = outer_scope
        return node

    def visit_DefNode(self, node):
        pxd_def = self.scope.lookup(node.name)
        if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
            if not pxd_def.is_cfunction:
                error(node.pos, "'%s' redeclared" % node.name)
                if pxd_def.pos:
                    error(pxd_def.pos, "previous declaration here")
                return None
            node = node.as_cfunction(pxd_def)
        elif (self.scope.is_module_scope and self.directives['auto_cpdef']
                and not node.name in self.imported_names
                and node.is_cdef_func_compatible()):
            # FIXME: cpdef-ing should be done in analyse_declarations()
            node = node.as_cfunction(scope=self.scope)
        # Enable this when nested cdef functions are allowed.
        # self.visitchildren(node)
        return node

    def visit_FromImportStatNode(self, node):
        # hack to prevent conditional import fallback functions from
        # being cpdef-ed (global Python variables currently conflict
        # with imports)
        if self.scope.is_module_scope:
            for name, _ in node.items:
                self.imported_names.add(name)
        return node

    def visit_ExprNode(self, node):
        # ignore lambdas and everything else that appears in expressions
        return node


class RemoveUnreachableCode(CythonTransform):
    # Truncates statement lists after a terminating statement and
    # propagates the is_terminator flag up through control structures.

    def visit_StatListNode(self, node):
        if not self.current_directives['remove_unreachable']:
            return node
        self.visitchildren(node)
        for idx, stat in enumerate(node.stats):
            idx += 1
            if stat.is_terminator:
                if idx < len(node.stats):
                    if self.current_directives['warn.unreachable']:
                        warning(node.stats[idx].pos, "Unreachable code", 2)
                    node.stats = node.stats[:idx]
                node.is_terminator = True
                break
        return node

    def visit_IfClauseNode(self, node):
        self.visitchildren(node)
        if node.body.is_terminator:
            node.is_terminator = True
        return node

    def visit_IfStatNode(self, node):
        self.visitchildren(node)
        # terminator only if the else clause and *all* if clauses terminate
        if node.else_clause and node.else_clause.is_terminator:
            for clause in node.if_clauses:
                if not clause.is_terminator:
                    break
            else:
                node.is_terminator = True
        return node

    def visit_TryExceptStatNode(self, node):
        self.visitchildren(node)
        # if the try body always terminates, the else clause can never run
        if node.body.is_terminator and node.else_clause:
            if self.current_directives['warn.unreachable']:
                warning(node.else_clause.pos, "Unreachable code", 2)
            node.else_clause = None
        return node


class YieldNodeCollector(TreeVisitor):
    # Collects yield and return nodes within one function body, without
    # descending into nested functions/classes/lambdas/genexps.

    def __init__(self):
        super(YieldNodeCollector, self).__init__()
        self.yields = []
        self.returns = []
        self.has_return_value = False

    def visit_Node(self, node):
        self.visitchildren(node)

    def visit_YieldExprNode(self, node):
        self.yields.append(node)
        self.visitchildren(node)

    def visit_ReturnStatNode(self, node):
        self.visitchildren(node)
        if node.value:
            self.has_return_value = True
        self.returns.append(node)

    def visit_ClassDefNode(self, node):
        pass

    def visit_FuncDefNode(self, node):
        pass

    def visit_LambdaNode(self, node):
        pass

    def visit_GeneratorExpressionNode(self, node):
        pass


class MarkClosureVisitor(CythonTransform):
    # Marks functions that need a closure and converts functions that
    # contain 'yield' into generator definition nodes.

    def visit_ModuleNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        return node

    def visit_FuncDefNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        # any enclosing function of a closure itself needs a closure
        self.needs_closure = True

        collector = YieldNodeCollector()
        collector.visitchildren(node)

        if collector.yields:
            if isinstance(node, Nodes.CFuncDefNode):
                # Will report error later
                return node
            for i, yield_expr in enumerate(collector.yields):
                yield_expr.label_num = i + 1 # no enumerate start arg in Py2.4
            for retnode in collector.returns:
                retnode.in_generator = True

            # split into a generator wrapper plus a body node
            gbody = Nodes.GeneratorBodyDefNode(
                pos=node.pos, name=node.name, body=node.body)
            generator = Nodes.GeneratorDefNode(
                pos=node.pos, name=node.name, args=node.args,
                star_arg=node.star_arg, starstar_arg=node.starstar_arg,
                doc=node.doc, decorators=node.decorators,
                gbody=gbody, lambda_name=node.lambda_name)
            return generator
        return node

    def visit_CFuncDefNode(self, node):
        self.visit_FuncDefNode(node)
        if node.needs_closure:
            error(node.pos, "closures inside cdef functions not yet supported")
        return node

    def visit_LambdaNode(self, node):
        self.needs_closure = False
        self.visitchildren(node)
        node.needs_closure = self.needs_closure
        self.needs_closure = True
        return node

    def visit_ClassDefNode(self, node):
        self.visitchildren(node)
        self.needs_closure = True
        return node


class CreateClosureClasses(CythonTransform):
    # Output closure classes in module scope for all functions
    # that really need it.
def __init__(self, context): super(CreateClosureClasses, self).__init__(context) self.path = [] self.in_lambda = False def visit_ModuleNode(self, node): self.module_scope = node.scope self.visitchildren(node) return node def find_entries_used_in_closures(self, node): from_closure = [] in_closure = [] for name, entry in node.local_scope.entries.items(): if entry.from_closure: from_closure.append((name, entry)) elif entry.in_closure: in_closure.append((name, entry)) return from_closure, in_closure def create_class_from_scope(self, node, target_module_scope, inner_node=None): # move local variables into closure if node.is_generator: for entry in node.local_scope.entries.values(): if not entry.from_closure: entry.in_closure = True from_closure, in_closure = self.find_entries_used_in_closures(node) in_closure.sort() # Now from the begining node.needs_closure = False node.needs_outer_scope = False func_scope = node.local_scope cscope = node.entry.scope while cscope.is_py_class_scope or cscope.is_c_class_scope: cscope = cscope.outer_scope if not from_closure and (self.path or inner_node): if not inner_node: if not node.py_cfunc_node: raise InternalError("DefNode does not have assignment node") inner_node = node.py_cfunc_node inner_node.needs_self_code = False node.needs_outer_scope = False if node.is_generator: pass elif not in_closure and not from_closure: return elif not in_closure: func_scope.is_passthrough = True func_scope.scope_class = cscope.scope_class node.needs_outer_scope = True return as_name = '%s_%s' % ( target_module_scope.next_id(Naming.closure_class_prefix), node.entry.cname) entry = target_module_scope.declare_c_class( name=as_name, pos=node.pos, defining=True, implementing=True) entry.type.is_final_type = True func_scope.scope_class = entry class_scope = entry.type.scope class_scope.is_internal = True if Options.closure_freelist_size: class_scope.directives['freelist'] = Options.closure_freelist_size if from_closure: assert cscope.is_closure_scope 
class_scope.declare_var(pos=node.pos, name=Naming.outer_scope_cname, cname=Naming.outer_scope_cname, type=cscope.scope_class.type, is_cdef=True) node.needs_outer_scope = True for name, entry in in_closure: closure_entry = class_scope.declare_var(pos=entry.pos, name=entry.name, cname=entry.cname, type=entry.type, is_cdef=True) if entry.is_declared_generic: closure_entry.is_declared_generic = 1 node.needs_closure = True # Do it here because other classes are already checked target_module_scope.check_c_class(func_scope.scope_class) def visit_LambdaNode(self, node): if not isinstance(node.def_node, Nodes.DefNode): # fused function, an error has been previously issued return node was_in_lambda = self.in_lambda self.in_lambda = True self.create_class_from_scope(node.def_node, self.module_scope, node) self.visitchildren(node) self.in_lambda = was_in_lambda return node def visit_FuncDefNode(self, node): if self.in_lambda: self.visitchildren(node) return node if node.needs_closure or self.path: self.create_class_from_scope(node, self.module_scope) self.path.append(node) self.visitchildren(node) self.path.pop() return node def visit_GeneratorBodyDefNode(self, node): self.visitchildren(node) return node def visit_CFuncDefNode(self, node): self.visitchildren(node) return node class GilCheck(VisitorTransform): """ Call `node.gil_check(env)` on each node to make sure we hold the GIL when we need it. Raise an error when on Python operations inside a `nogil` environment. Additionally, raise exceptions for closely nested with gil or with nogil statements. The latter would abort Python. """ def __call__(self, root): self.env_stack = [root.scope] self.nogil = False # True for 'cdef func() nogil:' functions, as the GIL may be held while # calling this function (thus contained 'nogil' blocks may be valid). 
self.nogil_declarator_only = False return super(GilCheck, self).__call__(root) def visit_FuncDefNode(self, node): self.env_stack.append(node.local_scope) was_nogil = self.nogil self.nogil = node.local_scope.nogil if self.nogil: self.nogil_declarator_only = True if self.nogil and node.nogil_check: node.nogil_check(node.local_scope) self.visitchildren(node) # This cannot be nested, so it doesn't need backup/restore self.nogil_declarator_only = False self.env_stack.pop() self.nogil = was_nogil return node def visit_GILStatNode(self, node): if self.nogil and node.nogil_check: node.nogil_check() was_nogil = self.nogil self.nogil = (node.state == 'nogil') if was_nogil == self.nogil and not self.nogil_declarator_only: if not was_nogil: error(node.pos, "Trying to acquire the GIL while it is " "already held.") else: error(node.pos, "Trying to release the GIL while it was " "previously released.") if isinstance(node.finally_clause, Nodes.StatListNode): # The finally clause of the GILStatNode is a GILExitNode, # which is wrapped in a StatListNode. Just unpack that. 
node.finally_clause, = node.finally_clause.stats self.visitchildren(node) self.nogil = was_nogil return node def visit_ParallelRangeNode(self, node): if node.nogil: node.nogil = False node = Nodes.GILStatNode(node.pos, state='nogil', body=node) return self.visit_GILStatNode(node) if not self.nogil: error(node.pos, "prange() can only be used without the GIL") # Forget about any GIL-related errors that may occur in the body return None node.nogil_check(self.env_stack[-1]) self.visitchildren(node) return node def visit_ParallelWithBlockNode(self, node): if not self.nogil: error(node.pos, "The parallel section may only be used without " "the GIL") return None if node.nogil_check: # It does not currently implement this, but test for it anyway to # avoid potential future surprises node.nogil_check(self.env_stack[-1]) self.visitchildren(node) return node def visit_TryFinallyStatNode(self, node): """ Take care of try/finally statements in nogil code sections. """ if not self.nogil or isinstance(node, Nodes.GILStatNode): return self.visit_Node(node) node.nogil_check = None node.is_try_finally_in_nogil = True self.visitchildren(node) return node def visit_Node(self, node): if self.env_stack and self.nogil and node.nogil_check: node.nogil_check(self.env_stack[-1]) self.visitchildren(node) node.in_nogil_context = self.nogil return node class TransformBuiltinMethods(EnvTransform): def visit_SingleAssignmentNode(self, node): if node.declaration_only: return None else: self.visitchildren(node) return node def visit_AttributeNode(self, node): self.visitchildren(node) return self.visit_cython_attribute(node) def visit_NameNode(self, node): return self.visit_cython_attribute(node) def visit_cython_attribute(self, node): attribute = node.as_cython_attribute() if attribute: if attribute == u'compiled': node = ExprNodes.BoolNode(node.pos, value=True) elif attribute == u'__version__': import Cython node = ExprNodes.StringNode(node.pos, value=EncodedString(Cython.__version__)) elif 
attribute == u'NULL': node = ExprNodes.NullNode(node.pos) elif attribute in (u'set', u'frozenset'): node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute), entry=self.current_env().builtin_scope().lookup_here(attribute)) elif PyrexTypes.parse_basic_type(attribute): pass elif self.context.cython_scope.lookup_qualified_name(attribute): pass else: error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute) return node def visit_ExecStatNode(self, node): lenv = self.current_env() self.visitchildren(node) if len(node.args) == 1: node.args.append(ExprNodes.GlobalsExprNode(node.pos)) if not lenv.is_module_scope: node.args.append( ExprNodes.LocalsExprNode( node.pos, self.current_scope_node(), lenv)) return node def _inject_locals(self, node, func_name): # locals()/dir()/vars() builtins lenv = self.current_env() entry = lenv.lookup_here(func_name) if entry: # not the builtin return node pos = node.pos if func_name in ('locals', 'vars'): if func_name == 'locals' and len(node.args) > 0: error(self.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d" % len(node.args)) return node elif func_name == 'vars': if len(node.args) > 1: error(self.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d" % len(node.args)) if len(node.args) > 0: return node # nothing to do return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv) else: # dir() if len(node.args) > 1: error(self.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d" % len(node.args)) if len(node.args) > 0: # optimised in Builtin.py return node if lenv.is_py_class_scope or lenv.is_module_scope: if lenv.is_py_class_scope: pyclass = self.current_scope_node() locals_dict = ExprNodes.CloneNode(pyclass.dict) else: locals_dict = ExprNodes.GlobalsExprNode(pos) return ExprNodes.SortedDictKeysNode(locals_dict) local_names = [ var.name for var in lenv.entries.values() if var.name ] items = [ 
ExprNodes.IdentifierStringNode(pos, value=var) for var in local_names ] return ExprNodes.ListNode(pos, args=items) def visit_PrimaryCmpNode(self, node): # special case: for in/not-in test, we do not need to sort locals() self.visitchildren(node) if node.operator in 'not_in': # in/not_in if isinstance(node.operand2, ExprNodes.SortedDictKeysNode): arg = node.operand2.arg if isinstance(arg, ExprNodes.NoneCheckNode): arg = arg.arg node.operand2 = arg return node def visit_CascadedCmpNode(self, node): return self.visit_PrimaryCmpNode(node) def _inject_eval(self, node, func_name): lenv = self.current_env() entry = lenv.lookup_here(func_name) if entry or len(node.args) != 1: return node # Inject globals and locals node.args.append(ExprNodes.GlobalsExprNode(node.pos)) if not lenv.is_module_scope: node.args.append( ExprNodes.LocalsExprNode( node.pos, self.current_scope_node(), lenv)) return node def _inject_super(self, node, func_name): lenv = self.current_env() entry = lenv.lookup_here(func_name) if entry or node.args: return node # Inject no-args super def_node = self.current_scope_node() if (not isinstance(def_node, Nodes.DefNode) or not def_node.args or len(self.env_stack) < 2): return node class_node, class_scope = self.env_stack[-2] if class_scope.is_py_class_scope: def_node.requires_classobj = True class_node.class_cell.is_active = True node.args = [ ExprNodes.ClassCellNode( node.pos, is_generator=def_node.is_generator), ExprNodes.NameNode(node.pos, name=def_node.args[0].name) ] elif class_scope.is_c_class_scope: node.args = [ ExprNodes.NameNode( node.pos, name=class_node.scope.name, entry=class_node.entry), ExprNodes.NameNode(node.pos, name=def_node.args[0].name) ] return node def visit_SimpleCallNode(self, node): # cython.foo function = node.function.as_cython_attribute() if function: if function in InterpretCompilerDirectives.unop_method_nodes: if len(node.args) != 1: error(node.function.pos, u"%s() takes exactly one argument" % function) else: node = 
InterpretCompilerDirectives.unop_method_nodes[function](node.function.pos, operand=node.args[0]) elif function in InterpretCompilerDirectives.binop_method_nodes: if len(node.args) != 2: error(node.function.pos, u"%s() takes exactly two arguments" % function) else: node = InterpretCompilerDirectives.binop_method_nodes[function](node.function.pos, operand1=node.args[0], operand2=node.args[1]) elif function == u'cast': if len(node.args) != 2: error(node.function.pos, u"cast() takes exactly two arguments") else: type = node.args[0].analyse_as_type(self.current_env()) if type: node = ExprNodes.TypecastNode(node.function.pos, type=type, operand=node.args[1]) else: error(node.args[0].pos, "Not a type") elif function == u'sizeof': if len(node.args) != 1: error(node.function.pos, u"sizeof() takes exactly one argument") else: type = node.args[0].analyse_as_type(self.current_env()) if type: node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type) else: node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0]) elif function == 'cmod': if len(node.args) != 2: error(node.function.pos, u"cmod() takes exactly two arguments") else: node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1]) node.cdivision = True elif function == 'cdiv': if len(node.args) != 2: error(node.function.pos, u"cdiv() takes exactly two arguments") else: node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1]) node.cdivision = True elif function == u'set': node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set')) elif self.context.cython_scope.lookup_qualified_name(function): pass else: error(node.function.pos, u"'%s' not a valid cython language construct" % function) self.visitchildren(node) if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name: func_name = node.function.name if func_name in ('dir', 'locals', 'vars'): return self._inject_locals(node, func_name) if func_name == 'eval': return 
self._inject_eval(node, func_name) if func_name == 'super': return self._inject_super(node, func_name) return node class ReplaceFusedTypeChecks(VisitorTransform): """ This is not a transform in the pipeline. It is invoked on the specific versions of a cdef function with fused argument types. It filters out any type branches that don't match. e.g. if fused_t is mytype: ... elif fused_t in other_fused_type: ... """ def __init__(self, local_scope): super(ReplaceFusedTypeChecks, self).__init__() self.local_scope = local_scope # defer the import until now to avoid circular import time dependencies from Cython.Compiler import Optimize self.transform = Optimize.ConstantFolding(reevaluate=True) def visit_IfStatNode(self, node): """ Filters out any if clauses with false compile time type check expression. """ self.visitchildren(node) return self.transform(node) def visit_PrimaryCmpNode(self, node): type1 = node.operand1.analyse_as_type(self.local_scope) type2 = node.operand2.analyse_as_type(self.local_scope) if type1 and type2: false_node = ExprNodes.BoolNode(node.pos, value=False) true_node = ExprNodes.BoolNode(node.pos, value=True) type1 = self.specialize_type(type1, node.operand1.pos) op = node.operator if op in ('is', 'is_not', '==', '!='): type2 = self.specialize_type(type2, node.operand2.pos) is_same = type1.same_as(type2) eq = op in ('is', '==') if (is_same and eq) or (not is_same and not eq): return true_node elif op in ('in', 'not_in'): # We have to do an instance check directly, as operand2 # needs to be a fused type and not a type with a subtype # that is fused. 
First unpack the typedef if isinstance(type2, PyrexTypes.CTypedefType): type2 = type2.typedef_base_type if type1.is_fused: error(node.operand1.pos, "Type is fused") elif not type2.is_fused: error(node.operand2.pos, "Can only use 'in' or 'not in' on a fused type") else: types = PyrexTypes.get_specialized_types(type2) for specialized_type in types: if type1.same_as(specialized_type): if op == 'in': return true_node else: return false_node if op == 'not_in': return true_node return false_node return node def specialize_type(self, type, pos): try: return type.specialize(self.local_scope.fused_to_specific) except KeyError: error(pos, "Type is not specific") return type def visit_Node(self, node): self.visitchildren(node) return node class DebugTransform(CythonTransform): """ Write debug information for this Cython module. """ def __init__(self, context, options, result): super(DebugTransform, self).__init__(context) self.visited = set() # our treebuilder and debug output writer # (see Cython.Debugger.debug_output.CythonDebugWriter) self.tb = self.context.gdb_debug_outputwriter #self.c_output_file = options.output_file self.c_output_file = result.c_file # Closure support, basically treat nested functions as if the AST were # never nested self.nested_funcdefs = [] # tells visit_NameNode whether it should register step-into functions self.register_stepinto = False def visit_ModuleNode(self, node): self.tb.module_name = node.full_module_name attrs = dict( module_name=node.full_module_name, filename=node.pos[0].filename, c_filename=self.c_output_file) self.tb.start('Module', attrs) # serialize functions self.tb.start('Functions') # First, serialize functions normally... self.visitchildren(node) # ... then, serialize nested functions for nested_funcdef in self.nested_funcdefs: self.visit_FuncDefNode(nested_funcdef) self.register_stepinto = True self.serialize_modulenode_as_function(node) self.register_stepinto = False self.tb.end('Functions') # 2.3 compatibility. 
Serialize global variables self.tb.start('Globals') entries = {} for k, v in node.scope.entries.iteritems(): if (v.qualified_name not in self.visited and not v.name.startswith('__pyx_') and not v.type.is_cfunction and not v.type.is_extension_type): entries[k]= v self.serialize_local_variables(entries) self.tb.end('Globals') # self.tb.end('Module') # end Module after the line number mapping in # Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map return node def visit_FuncDefNode(self, node): self.visited.add(node.local_scope.qualified_name) if getattr(node, 'is_wrapper', False): return node if self.register_stepinto: self.nested_funcdefs.append(node) return node # node.entry.visibility = 'extern' if node.py_func is None: pf_cname = '' else: pf_cname = node.py_func.entry.func_cname attrs = dict( name=node.entry.name or getattr(node, 'name', '<unknown>'), cname=node.entry.func_cname, pf_cname=pf_cname, qualified_name=node.local_scope.qualified_name, lineno=str(node.pos[1])) self.tb.start('Function', attrs=attrs) self.tb.start('Locals') self.serialize_local_variables(node.local_scope.entries) self.tb.end('Locals') self.tb.start('Arguments') for arg in node.local_scope.arg_entries: self.tb.start(arg.name) self.tb.end(arg.name) self.tb.end('Arguments') self.tb.start('StepIntoFunctions') self.register_stepinto = True self.visitchildren(node) self.register_stepinto = False self.tb.end('StepIntoFunctions') self.tb.end('Function') return node def visit_NameNode(self, node): if (self.register_stepinto and node.type.is_cfunction and getattr(node, 'is_called', False) and node.entry.func_cname is not None): # don't check node.entry.in_cinclude, as 'cdef extern: ...' # declared functions are not 'in_cinclude'. # This means we will list called 'cdef' functions as # "step into functions", but this is not an issue as they will be # recognized as Cython functions anyway. 
attrs = dict(name=node.entry.func_cname) self.tb.start('StepIntoFunction', attrs=attrs) self.tb.end('StepIntoFunction') self.visitchildren(node) return node def serialize_modulenode_as_function(self, node): """ Serialize the module-level code as a function so the debugger will know it's a "relevant frame" and it will know where to set the breakpoint for 'break modulename'. """ name = node.full_module_name.rpartition('.')[-1] cname_py2 = 'init' + name cname_py3 = 'PyInit_' + name py2_attrs = dict( name=name, cname=cname_py2, pf_cname='', # Ignore the qualified_name, breakpoints should be set using # `cy break modulename:lineno` for module-level breakpoints. qualified_name='', lineno='1', is_initmodule_function="True", ) py3_attrs = dict(py2_attrs, cname=cname_py3) self._serialize_modulenode_as_function(node, py2_attrs) self._serialize_modulenode_as_function(node, py3_attrs) def _serialize_modulenode_as_function(self, node, attrs): self.tb.start('Function', attrs=attrs) self.tb.start('Locals') self.serialize_local_variables(node.scope.entries) self.tb.end('Locals') self.tb.start('Arguments') self.tb.end('Arguments') self.tb.start('StepIntoFunctions') self.register_stepinto = True self.visitchildren(node) self.register_stepinto = False self.tb.end('StepIntoFunctions') self.tb.end('Function') def serialize_local_variables(self, entries): for entry in entries.values(): if not entry.cname: # not a local variable continue if entry.type.is_pyobject: vartype = 'PythonObject' else: vartype = 'CObject' if entry.from_closure: # We're dealing with a closure where a variable from an outer # scope is accessed, get it from the scope object. 
cname = '%s->%s' % (Naming.cur_scope_cname, entry.outer_entry.cname) qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name, entry.scope.name, entry.name) elif entry.in_closure: cname = '%s->%s' % (Naming.cur_scope_cname, entry.cname) qname = entry.qualified_name else: cname = entry.cname qname = entry.qualified_name if not entry.pos: # this happens for variables that are not in the user's code, # e.g. for the global __builtins__, __doc__, etc. We can just # set the lineno to 0 for those. lineno = '0' else: lineno = str(entry.pos[1]) attrs = dict( name=entry.name, cname=cname, qualified_name=qname, type=vartype, lineno=lineno) self.tb.start('LocalVar', attrs) self.tb.end('LocalVar')
bsd-3-clause
Senseg/Py4A
python-modules/twisted/twisted/protocols/sip.py
59
41755
# -*- test-case-name: twisted.test.test_sip -*- # Copyright (c) 2001-2009 Twisted Matrix Laboratories. # See LICENSE for details. """Session Initialization Protocol. Documented in RFC 2543. [Superceded by 3261] This module contains a deprecated implementation of HTTP Digest authentication. See L{twisted.cred.credentials} and L{twisted.cred._digest} for its new home. """ # system imports import socket, time, sys, random, warnings from zope.interface import implements, Interface # twisted imports from twisted.python import log, util from twisted.python.deprecate import deprecated from twisted.python.versions import Version from twisted.python.hashlib import md5 from twisted.internet import protocol, defer, reactor from twisted import cred import twisted.cred.error from twisted.cred.credentials import UsernameHashedPassword, UsernamePassword # sibling imports from twisted.protocols import basic PORT = 5060 # SIP headers have short forms shortHeaders = {"call-id": "i", "contact": "m", "content-encoding": "e", "content-length": "l", "content-type": "c", "from": "f", "subject": "s", "to": "t", "via": "v", } longHeaders = {} for k, v in shortHeaders.items(): longHeaders[v] = k del k, v statusCodes = { 100: "Trying", 180: "Ringing", 181: "Call Is Being Forwarded", 182: "Queued", 183: "Session Progress", 200: "OK", 300: "Multiple Choices", 301: "Moved Permanently", 302: "Moved Temporarily", 303: "See Other", 305: "Use Proxy", 380: "Alternative Service", 400: "Bad Request", 401: "Unauthorized", 402: "Payment Required", 403: "Forbidden", 404: "Not Found", 405: "Method Not Allowed", 406: "Not Acceptable", 407: "Proxy Authentication Required", 408: "Request Timeout", 409: "Conflict", # Not in RFC3261 410: "Gone", 411: "Length Required", # Not in RFC3261 413: "Request Entity Too Large", 414: "Request-URI Too Large", 415: "Unsupported Media Type", 416: "Unsupported URI Scheme", 420: "Bad Extension", 421: "Extension Required", 423: "Interval Too Brief", 480: "Temporarily 
Unavailable", 481: "Call/Transaction Does Not Exist", 482: "Loop Detected", 483: "Too Many Hops", 484: "Address Incomplete", 485: "Ambiguous", 486: "Busy Here", 487: "Request Terminated", 488: "Not Acceptable Here", 491: "Request Pending", 493: "Undecipherable", 500: "Internal Server Error", 501: "Not Implemented", 502: "Bad Gateway", # no donut 503: "Service Unavailable", 504: "Server Time-out", 505: "SIP Version not supported", 513: "Message Too Large", 600: "Busy Everywhere", 603: "Decline", 604: "Does not exist anywhere", 606: "Not Acceptable", } specialCases = { 'cseq': 'CSeq', 'call-id': 'Call-ID', 'www-authenticate': 'WWW-Authenticate', } def dashCapitalize(s): ''' Capitalize a string, making sure to treat - as a word seperator ''' return '-'.join([ x.capitalize() for x in s.split('-')]) def unq(s): if s[0] == s[-1] == '"': return s[1:-1] return s def DigestCalcHA1( pszAlg, pszUserName, pszRealm, pszPassword, pszNonce, pszCNonce, ): m = md5() m.update(pszUserName) m.update(":") m.update(pszRealm) m.update(":") m.update(pszPassword) HA1 = m.digest() if pszAlg == "md5-sess": m = md5() m.update(HA1) m.update(":") m.update(pszNonce) m.update(":") m.update(pszCNonce) HA1 = m.digest() return HA1.encode('hex') DigestCalcHA1 = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcHA1) def DigestCalcResponse( HA1, pszNonce, pszNonceCount, pszCNonce, pszQop, pszMethod, pszDigestUri, pszHEntity, ): m = md5() m.update(pszMethod) m.update(":") m.update(pszDigestUri) if pszQop == "auth-int": m.update(":") m.update(pszHEntity) HA2 = m.digest().encode('hex') m = md5() m.update(HA1) m.update(":") m.update(pszNonce) m.update(":") if pszNonceCount and pszCNonce: # pszQop: m.update(pszNonceCount) m.update(":") m.update(pszCNonce) m.update(":") m.update(pszQop) m.update(":") m.update(HA2) hash = m.digest().encode('hex') return hash DigestCalcResponse = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcResponse) _absent = object() class Via(object): """ A L{Via} is a SIP Via header, 
representing a segment of the path taken by the request. See RFC 3261, sections 8.1.1.7, 18.2.2, and 20.42. @ivar transport: Network protocol used for this leg. (Probably either "TCP" or "UDP".) @type transport: C{str} @ivar branch: Unique identifier for this request. @type branch: C{str} @ivar host: Hostname or IP for this leg. @type host: C{str} @ivar port: Port used for this leg. @type port C{int}, or None. @ivar rportRequested: Whether to request RFC 3581 client processing or not. @type rportRequested: C{bool} @ivar rportValue: Servers wishing to honor requests for RFC 3581 processing should set this parameter to the source port the request was received from. @type rportValue: C{int}, or None. @ivar ttl: Time-to-live for requests on multicast paths. @type ttl: C{int}, or None. @ivar maddr: The destination multicast address, if any. @type maddr: C{str}, or None. @ivar hidden: Obsolete in SIP 2.0. @type hidden: C{bool} @ivar otherParams: Any other parameters in the header. @type otherParams: C{dict} """ def __init__(self, host, port=PORT, transport="UDP", ttl=None, hidden=False, received=None, rport=_absent, branch=None, maddr=None, **kw): """ Set parameters of this Via header. All arguments correspond to attributes of the same name. To maintain compatibility with old SIP code, the 'rport' argument is used to determine the values of C{rportRequested} and C{rportValue}. If None, C{rportRequested} is set to True. (The deprecated method for doing this is to pass True.) If an integer, C{rportValue} is set to the given value. Any arguments not explicitly named here are collected into the C{otherParams} dict. 
""" self.transport = transport self.host = host self.port = port self.ttl = ttl self.hidden = hidden self.received = received if rport is True: warnings.warn( "rport=True is deprecated since Twisted 9.0.", DeprecationWarning, stacklevel=2) self.rportValue = None self.rportRequested = True elif rport is None: self.rportValue = None self.rportRequested = True elif rport is _absent: self.rportValue = None self.rportRequested = False else: self.rportValue = rport self.rportRequested = False self.branch = branch self.maddr = maddr self.otherParams = kw def _getrport(self): """ Returns the rport value expected by the old SIP code. """ if self.rportRequested == True: return True elif self.rportValue is not None: return self.rportValue else: return None def _setrport(self, newRPort): """ L{Base._fixupNAT} sets C{rport} directly, so this method sets C{rportValue} based on that. @param newRPort: The new rport value. @type newRPort: C{int} """ self.rportValue = newRPort self.rportRequested = False rport = property(_getrport, _setrport) def toString(self): """ Serialize this header for use in a request or response. """ s = "SIP/2.0/%s %s:%s" % (self.transport, self.host, self.port) if self.hidden: s += ";hidden" for n in "ttl", "branch", "maddr", "received": value = getattr(self, n) if value is not None: s += ";%s=%s" % (n, value) if self.rportRequested: s += ";rport" elif self.rportValue is not None: s += ";rport=%s" % (self.rport,) etc = self.otherParams.items() etc.sort() for k, v in etc: if v is None: s += ";" + k else: s += ";%s=%s" % (k, v) return s def parseViaHeader(value): """ Parse a Via header. @return: The parsed version of this header. 
@rtype: L{Via} """ parts = value.split(";") sent, params = parts[0], parts[1:] protocolinfo, by = sent.split(" ", 1) by = by.strip() result = {} pname, pversion, transport = protocolinfo.split("/") if pname != "SIP" or pversion != "2.0": raise ValueError, "wrong protocol or version: %r" % value result["transport"] = transport if ":" in by: host, port = by.split(":") result["port"] = int(port) result["host"] = host else: result["host"] = by for p in params: # it's the comment-striping dance! p = p.strip().split(" ", 1) if len(p) == 1: p, comment = p[0], "" else: p, comment = p if p == "hidden": result["hidden"] = True continue parts = p.split("=", 1) if len(parts) == 1: name, value = parts[0], None else: name, value = parts if name in ("rport", "ttl"): value = int(value) result[name] = value return Via(**result) class URL: """A SIP URL.""" def __init__(self, host, username=None, password=None, port=None, transport=None, usertype=None, method=None, ttl=None, maddr=None, tag=None, other=None, headers=None): self.username = username self.host = host self.password = password self.port = port self.transport = transport self.usertype = usertype self.method = method self.tag = tag self.ttl = ttl self.maddr = maddr if other == None: self.other = [] else: self.other = other if headers == None: self.headers = {} else: self.headers = headers def toString(self): l = []; w = l.append w("sip:") if self.username != None: w(self.username) if self.password != None: w(":%s" % self.password) w("@") w(self.host) if self.port != None: w(":%d" % self.port) if self.usertype != None: w(";user=%s" % self.usertype) for n in ("transport", "ttl", "maddr", "method", "tag"): v = getattr(self, n) if v != None: w(";%s=%s" % (n, v)) for v in self.other: w(";%s" % v) if self.headers: w("?") w("&".join([("%s=%s" % (specialCases.get(h) or dashCapitalize(h), v)) for (h, v) in self.headers.items()])) return "".join(l) def __str__(self): return self.toString() def __repr__(self): return '<URL 
%s:%s@%s:%r/%s>' % (self.username, self.password, self.host, self.port, self.transport) def parseURL(url, host=None, port=None): """Return string into URL object. URIs are of of form 'sip:user@example.com'. """ d = {} if not url.startswith("sip:"): raise ValueError("unsupported scheme: " + url[:4]) parts = url[4:].split(";") userdomain, params = parts[0], parts[1:] udparts = userdomain.split("@", 1) if len(udparts) == 2: userpass, hostport = udparts upparts = userpass.split(":", 1) if len(upparts) == 1: d["username"] = upparts[0] else: d["username"] = upparts[0] d["password"] = upparts[1] else: hostport = udparts[0] hpparts = hostport.split(":", 1) if len(hpparts) == 1: d["host"] = hpparts[0] else: d["host"] = hpparts[0] d["port"] = int(hpparts[1]) if host != None: d["host"] = host if port != None: d["port"] = port for p in params: if p == params[-1] and "?" in p: d["headers"] = h = {} p, headers = p.split("?", 1) for header in headers.split("&"): k, v = header.split("=") h[k] = v nv = p.split("=", 1) if len(nv) == 1: d.setdefault("other", []).append(p) continue name, value = nv if name == "user": d["usertype"] = value elif name in ("transport", "ttl", "maddr", "method", "tag"): if name == "ttl": value = int(value) d[name] = value else: d.setdefault("other", []).append(p) return URL(**d) def cleanRequestURL(url): """Clean a URL from a Request line.""" url.transport = None url.maddr = None url.ttl = None url.headers = {} def parseAddress(address, host=None, port=None, clean=0): """Return (name, uri, params) for From/To/Contact header. @param clean: remove unnecessary info, usually for From and To headers. 
""" address = address.strip() # simple 'sip:foo' case if address.startswith("sip:"): return "", parseURL(address, host=host, port=port), {} params = {} name, url = address.split("<", 1) name = name.strip() if name.startswith('"'): name = name[1:] if name.endswith('"'): name = name[:-1] url, paramstring = url.split(">", 1) url = parseURL(url, host=host, port=port) paramstring = paramstring.strip() if paramstring: for l in paramstring.split(";"): if not l: continue k, v = l.split("=") params[k] = v if clean: # rfc 2543 6.21 url.ttl = None url.headers = {} url.transport = None url.maddr = None return name, url, params class SIPError(Exception): def __init__(self, code, phrase=None): if phrase is None: phrase = statusCodes[code] Exception.__init__(self, "SIP error (%d): %s" % (code, phrase)) self.code = code self.phrase = phrase class RegistrationError(SIPError): """Registration was not possible.""" class Message: """A SIP message.""" length = None def __init__(self): self.headers = util.OrderedDict() # map name to list of values self.body = "" self.finished = 0 def addHeader(self, name, value): name = name.lower() name = longHeaders.get(name, name) if name == "content-length": self.length = int(value) self.headers.setdefault(name,[]).append(value) def bodyDataReceived(self, data): self.body += data def creationFinished(self): if (self.length != None) and (self.length != len(self.body)): raise ValueError, "wrong body length" self.finished = 1 def toString(self): s = "%s\r\n" % self._getHeaderLine() for n, vs in self.headers.items(): for v in vs: s += "%s: %s\r\n" % (specialCases.get(n) or dashCapitalize(n), v) s += "\r\n" s += self.body return s def _getHeaderLine(self): raise NotImplementedError class Request(Message): """A Request for a URI""" def __init__(self, method, uri, version="SIP/2.0"): Message.__init__(self) self.method = method if isinstance(uri, URL): self.uri = uri else: self.uri = parseURL(uri) cleanRequestURL(self.uri) def __repr__(self): return "<SIP 
Request %d:%s %s>" % (id(self), self.method, self.uri.toString()) def _getHeaderLine(self): return "%s %s SIP/2.0" % (self.method, self.uri.toString()) class Response(Message): """A Response to a URI Request""" def __init__(self, code, phrase=None, version="SIP/2.0"): Message.__init__(self) self.code = code if phrase == None: phrase = statusCodes[code] self.phrase = phrase def __repr__(self): return "<SIP Response %d:%s>" % (id(self), self.code) def _getHeaderLine(self): return "SIP/2.0 %s %s" % (self.code, self.phrase) class MessagesParser(basic.LineReceiver): """A SIP messages parser. Expects dataReceived, dataDone repeatedly, in that order. Shouldn't be connected to actual transport. """ version = "SIP/2.0" acceptResponses = 1 acceptRequests = 1 state = "firstline" # or "headers", "body" or "invalid" debug = 0 def __init__(self, messageReceivedCallback): self.messageReceived = messageReceivedCallback self.reset() def reset(self, remainingData=""): self.state = "firstline" self.length = None # body length self.bodyReceived = 0 # how much of the body we received self.message = None self.setLineMode(remainingData) def invalidMessage(self): self.state = "invalid" self.setRawMode() def dataDone(self): # clear out any buffered data that may be hanging around self.clearLineBuffer() if self.state == "firstline": return if self.state != "body": self.reset() return if self.length == None: # no content-length header, so end of data signals message done self.messageDone() elif self.length < self.bodyReceived: # aborted in the middle self.reset() else: # we have enough data and message wasn't finished? 
something is wrong raise RuntimeError, "this should never happen" def dataReceived(self, data): try: basic.LineReceiver.dataReceived(self, data) except: log.err() self.invalidMessage() def handleFirstLine(self, line): """Expected to create self.message.""" raise NotImplementedError def lineLengthExceeded(self, line): self.invalidMessage() def lineReceived(self, line): if self.state == "firstline": while line.startswith("\n") or line.startswith("\r"): line = line[1:] if not line: return try: a, b, c = line.split(" ", 2) except ValueError: self.invalidMessage() return if a == "SIP/2.0" and self.acceptResponses: # response try: code = int(b) except ValueError: self.invalidMessage() return self.message = Response(code, c) elif c == "SIP/2.0" and self.acceptRequests: self.message = Request(a, b) else: self.invalidMessage() return self.state = "headers" return else: assert self.state == "headers" if line: # XXX support multi-line headers try: name, value = line.split(":", 1) except ValueError: self.invalidMessage() return self.message.addHeader(name, value.lstrip()) if name.lower() == "content-length": try: self.length = int(value.lstrip()) except ValueError: self.invalidMessage() return else: # CRLF, we now have message body until self.length bytes, # or if no length was given, until there is no more data # from the connection sending us data. 
self.state = "body" if self.length == 0: self.messageDone() return self.setRawMode() def messageDone(self, remainingData=""): assert self.state == "body" self.message.creationFinished() self.messageReceived(self.message) self.reset(remainingData) def rawDataReceived(self, data): assert self.state in ("body", "invalid") if self.state == "invalid": return if self.length == None: self.message.bodyDataReceived(data) else: dataLen = len(data) expectedLen = self.length - self.bodyReceived if dataLen > expectedLen: self.message.bodyDataReceived(data[:expectedLen]) self.messageDone(data[expectedLen:]) return else: self.bodyReceived += dataLen self.message.bodyDataReceived(data) if self.bodyReceived == self.length: self.messageDone() class Base(protocol.DatagramProtocol): """Base class for SIP clients and servers.""" PORT = PORT debug = False def __init__(self): self.messages = [] self.parser = MessagesParser(self.addMessage) def addMessage(self, msg): self.messages.append(msg) def datagramReceived(self, data, addr): self.parser.dataReceived(data) self.parser.dataDone() for m in self.messages: self._fixupNAT(m, addr) if self.debug: log.msg("Received %r from %r" % (m.toString(), addr)) if isinstance(m, Request): self.handle_request(m, addr) else: self.handle_response(m, addr) self.messages[:] = [] def _fixupNAT(self, message, (srcHost, srcPort)): # RFC 2543 6.40.2, senderVia = parseViaHeader(message.headers["via"][0]) if senderVia.host != srcHost: senderVia.received = srcHost if senderVia.port != srcPort: senderVia.rport = srcPort message.headers["via"][0] = senderVia.toString() elif senderVia.rport == True: senderVia.received = srcHost senderVia.rport = srcPort message.headers["via"][0] = senderVia.toString() def deliverResponse(self, responseMessage): """Deliver response. 
Destination is based on topmost Via header.""" destVia = parseViaHeader(responseMessage.headers["via"][0]) # XXX we don't do multicast yet host = destVia.received or destVia.host port = destVia.rport or destVia.port or self.PORT destAddr = URL(host=host, port=port) self.sendMessage(destAddr, responseMessage) def responseFromRequest(self, code, request): """Create a response to a request message.""" response = Response(code) for name in ("via", "to", "from", "call-id", "cseq"): response.headers[name] = request.headers.get(name, [])[:] return response def sendMessage(self, destURL, message): """Send a message. @param destURL: C{URL}. This should be a *physical* URL, not a logical one. @param message: The message to send. """ if destURL.transport not in ("udp", None): raise RuntimeError, "only UDP currently supported" if self.debug: log.msg("Sending %r to %r" % (message.toString(), destURL)) self.transport.write(message.toString(), (destURL.host, destURL.port or self.PORT)) def handle_request(self, message, addr): """Override to define behavior for requests received @type message: C{Message} @type addr: C{tuple} """ raise NotImplementedError def handle_response(self, message, addr): """Override to define behavior for responses received. @type message: C{Message} @type addr: C{tuple} """ raise NotImplementedError class IContact(Interface): """A user of a registrar or proxy""" class Registration: def __init__(self, secondsToExpiry, contactURL): self.secondsToExpiry = secondsToExpiry self.contactURL = contactURL class IRegistry(Interface): """Allows registration of logical->physical URL mapping.""" def registerAddress(domainURL, logicalURL, physicalURL): """Register the physical address of a logical URL. @return: Deferred of C{Registration} or failure with RegistrationError. """ def unregisterAddress(domainURL, logicalURL, physicalURL): """Unregister the physical address of a logical URL. @return: Deferred of C{Registration} or failure with RegistrationError. 
""" def getRegistrationInfo(logicalURL): """Get registration info for logical URL. @return: Deferred of C{Registration} object or failure of LookupError. """ class ILocator(Interface): """Allow looking up physical address for logical URL.""" def getAddress(logicalURL): """Return physical URL of server for logical URL of user. @param logicalURL: a logical C{URL}. @return: Deferred which becomes URL or fails with LookupError. """ class Proxy(Base): """SIP proxy.""" PORT = PORT locator = None # object implementing ILocator def __init__(self, host=None, port=PORT): """Create new instance. @param host: our hostname/IP as set in Via headers. @param port: our port as set in Via headers. """ self.host = host or socket.getfqdn() self.port = port Base.__init__(self) def getVia(self): """Return value of Via header for this proxy.""" return Via(host=self.host, port=self.port) def handle_request(self, message, addr): # send immediate 100/trying message before processing #self.deliverResponse(self.responseFromRequest(100, message)) f = getattr(self, "handle_%s_request" % message.method, None) if f is None: f = self.handle_request_default try: d = f(message, addr) except SIPError, e: self.deliverResponse(self.responseFromRequest(e.code, message)) except: log.err() self.deliverResponse(self.responseFromRequest(500, message)) else: if d is not None: d.addErrback(lambda e: self.deliverResponse(self.responseFromRequest(e.code, message)) ) def handle_request_default(self, message, (srcHost, srcPort)): """Default request handler. Default behaviour for OPTIONS and unknown methods for proxies is to forward message on to the client. Since at the moment we are stateless proxy, thats basically everything. 
""" def _mungContactHeader(uri, message): message.headers['contact'][0] = uri.toString() return self.sendMessage(uri, message) viaHeader = self.getVia() if viaHeader.toString() in message.headers["via"]: # must be a loop, so drop message log.msg("Dropping looped message.") return message.headers["via"].insert(0, viaHeader.toString()) name, uri, tags = parseAddress(message.headers["to"][0], clean=1) # this is broken and needs refactoring to use cred d = self.locator.getAddress(uri) d.addCallback(self.sendMessage, message) d.addErrback(self._cantForwardRequest, message) def _cantForwardRequest(self, error, message): error.trap(LookupError) del message.headers["via"][0] # this'll be us self.deliverResponse(self.responseFromRequest(404, message)) def deliverResponse(self, responseMessage): """Deliver response. Destination is based on topmost Via header.""" destVia = parseViaHeader(responseMessage.headers["via"][0]) # XXX we don't do multicast yet host = destVia.received or destVia.host port = destVia.rport or destVia.port or self.PORT destAddr = URL(host=host, port=port) self.sendMessage(destAddr, responseMessage) def responseFromRequest(self, code, request): """Create a response to a request message.""" response = Response(code) for name in ("via", "to", "from", "call-id", "cseq"): response.headers[name] = request.headers.get(name, [])[:] return response def handle_response(self, message, addr): """Default response handler.""" v = parseViaHeader(message.headers["via"][0]) if (v.host, v.port) != (self.host, self.port): # we got a message not intended for us? 
# XXX note this check breaks if we have multiple external IPs # yay for suck protocols log.msg("Dropping incorrectly addressed message") return del message.headers["via"][0] if not message.headers["via"]: # this message is addressed to us self.gotResponse(message, addr) return self.deliverResponse(message) def gotResponse(self, message, addr): """Called with responses that are addressed at this server.""" pass class IAuthorizer(Interface): def getChallenge(peer): """Generate a challenge the client may respond to. @type peer: C{tuple} @param peer: The client's address @rtype: C{str} @return: The challenge string """ def decode(response): """Create a credentials object from the given response. @type response: C{str} """ class BasicAuthorizer: """Authorizer for insecure Basic (base64-encoded plaintext) authentication. This form of authentication is broken and insecure. Do not use it. """ implements(IAuthorizer) def __init__(self): """ This method exists solely to issue a deprecation warning. 
""" warnings.warn( "twisted.protocols.sip.BasicAuthorizer was deprecated " "in Twisted 9.0.0", category=DeprecationWarning, stacklevel=2) def getChallenge(self, peer): return None def decode(self, response): # At least one SIP client improperly pads its Base64 encoded messages for i in range(3): try: creds = (response + ('=' * i)).decode('base64') except: pass else: break else: # Totally bogus raise SIPError(400) p = creds.split(':', 1) if len(p) == 2: return UsernamePassword(*p) raise SIPError(400) class DigestedCredentials(UsernameHashedPassword): """Yet Another Simple Digest-MD5 authentication scheme""" def __init__(self, username, fields, challenges): warnings.warn( "twisted.protocols.sip.DigestedCredentials was deprecated " "in Twisted 9.0.0", category=DeprecationWarning, stacklevel=2) self.username = username self.fields = fields self.challenges = challenges def checkPassword(self, password): method = 'REGISTER' response = self.fields.get('response') uri = self.fields.get('uri') nonce = self.fields.get('nonce') cnonce = self.fields.get('cnonce') nc = self.fields.get('nc') algo = self.fields.get('algorithm', 'MD5') qop = self.fields.get('qop-options', 'auth') opaque = self.fields.get('opaque') if opaque not in self.challenges: return False del self.challenges[opaque] user, domain = self.username.split('@', 1) if uri is None: uri = 'sip:' + domain expected = DigestCalcResponse( DigestCalcHA1(algo, user, domain, password, nonce, cnonce), nonce, nc, cnonce, qop, method, uri, None, ) return expected == response class DigestAuthorizer: CHALLENGE_LIFETIME = 15 implements(IAuthorizer) def __init__(self): warnings.warn( "twisted.protocols.sip.DigestAuthorizer was deprecated " "in Twisted 9.0.0", category=DeprecationWarning, stacklevel=2) self.outstanding = {} def generateNonce(self): c = tuple([random.randrange(sys.maxint) for _ in range(3)]) c = '%d%d%d' % c return c def generateOpaque(self): return str(random.randrange(sys.maxint)) def getChallenge(self, peer): c = 
self.generateNonce() o = self.generateOpaque() self.outstanding[o] = c return ','.join(( 'nonce="%s"' % c, 'opaque="%s"' % o, 'qop-options="auth"', 'algorithm="MD5"', )) def decode(self, response): response = ' '.join(response.splitlines()) parts = response.split(',') auth = dict([(k.strip(), unq(v.strip())) for (k, v) in [p.split('=', 1) for p in parts]]) try: username = auth['username'] except KeyError: raise SIPError(401) try: return DigestedCredentials(username, auth, self.outstanding) except: raise SIPError(400) class RegisterProxy(Proxy): """A proxy that allows registration for a specific domain. Unregistered users won't be handled. """ portal = None registry = None # should implement IRegistry authorizers = {} def __init__(self, *args, **kw): Proxy.__init__(self, *args, **kw) self.liveChallenges = {} if "digest" not in self.authorizers: self.authorizers["digest"] = DigestAuthorizer() def handle_ACK_request(self, message, (host, port)): # XXX # ACKs are a client's way of indicating they got the last message # Responding to them is not a good idea. # However, we should keep track of terminal messages and re-transmit # if no ACK is received. pass def handle_REGISTER_request(self, message, (host, port)): """Handle a registration request. Currently registration is not proxied. """ if self.portal is None: # There is no portal. Let anyone in. self.register(message, host, port) else: # There is a portal. Check for credentials. 
if not message.headers.has_key("authorization"): return self.unauthorized(message, host, port) else: return self.login(message, host, port) def unauthorized(self, message, host, port): m = self.responseFromRequest(401, message) for (scheme, auth) in self.authorizers.iteritems(): chal = auth.getChallenge((host, port)) if chal is None: value = '%s realm="%s"' % (scheme.title(), self.host) else: value = '%s %s,realm="%s"' % (scheme.title(), chal, self.host) m.headers.setdefault('www-authenticate', []).append(value) self.deliverResponse(m) def login(self, message, host, port): parts = message.headers['authorization'][0].split(None, 1) a = self.authorizers.get(parts[0].lower()) if a: try: c = a.decode(parts[1]) except SIPError: raise except: log.err() self.deliverResponse(self.responseFromRequest(500, message)) else: c.username += '@' + self.host self.portal.login(c, None, IContact ).addCallback(self._cbLogin, message, host, port ).addErrback(self._ebLogin, message, host, port ).addErrback(log.err ) else: self.deliverResponse(self.responseFromRequest(501, message)) def _cbLogin(self, (i, a, l), message, host, port): # It's stateless, matey. What a joke. self.register(message, host, port) def _ebLogin(self, failure, message, host, port): failure.trap(cred.error.UnauthorizedLogin) self.unauthorized(message, host, port) def register(self, message, host, port): """Allow all users to register""" name, toURL, params = parseAddress(message.headers["to"][0], clean=1) contact = None if message.headers.has_key("contact"): contact = message.headers["contact"][0] if message.headers.get("expires", [None])[0] == "0": self.unregister(message, toURL, contact) else: # XXX Check expires on appropriate URL, and pass it to registry # instead of having registry hardcode it. 
if contact is not None: name, contactURL, params = parseAddress(contact, host=host, port=port) d = self.registry.registerAddress(message.uri, toURL, contactURL) else: d = self.registry.getRegistrationInfo(toURL) d.addCallbacks(self._cbRegister, self._ebRegister, callbackArgs=(message,), errbackArgs=(message,) ) def _cbRegister(self, registration, message): response = self.responseFromRequest(200, message) if registration.contactURL != None: response.addHeader("contact", registration.contactURL.toString()) response.addHeader("expires", "%d" % registration.secondsToExpiry) response.addHeader("content-length", "0") self.deliverResponse(response) def _ebRegister(self, error, message): error.trap(RegistrationError, LookupError) # XXX return error message, and alter tests to deal with # this, currently tests assume no message sent on failure def unregister(self, message, toURL, contact): try: expires = int(message.headers["expires"][0]) except ValueError: self.deliverResponse(self.responseFromRequest(400, message)) else: if expires == 0: if contact == "*": contactURL = "*" else: name, contactURL, params = parseAddress(contact) d = self.registry.unregisterAddress(message.uri, toURL, contactURL) d.addCallback(self._cbUnregister, message ).addErrback(self._ebUnregister, message ) def _cbUnregister(self, registration, message): msg = self.responseFromRequest(200, message) msg.headers.setdefault('contact', []).append(registration.contactURL.toString()) msg.addHeader("expires", "0") self.deliverResponse(msg) def _ebUnregister(self, registration, message): pass class InMemoryRegistry: """A simplistic registry for a specific domain.""" implements(IRegistry, ILocator) def __init__(self, domain): self.domain = domain # the domain we handle registration for self.users = {} # map username to (IDelayedCall for expiry, address URI) def getAddress(self, userURI): if userURI.host != self.domain: return defer.fail(LookupError("unknown domain")) if self.users.has_key(userURI.username): 
dc, url = self.users[userURI.username] return defer.succeed(url) else: return defer.fail(LookupError("no such user")) def getRegistrationInfo(self, userURI): if userURI.host != self.domain: return defer.fail(LookupError("unknown domain")) if self.users.has_key(userURI.username): dc, url = self.users[userURI.username] return defer.succeed(Registration(int(dc.getTime() - time.time()), url)) else: return defer.fail(LookupError("no such user")) def _expireRegistration(self, username): try: dc, url = self.users[username] except KeyError: return defer.fail(LookupError("no such user")) else: dc.cancel() del self.users[username] return defer.succeed(Registration(0, url)) def registerAddress(self, domainURL, logicalURL, physicalURL): if domainURL.host != self.domain: log.msg("Registration for domain we don't handle.") return defer.fail(RegistrationError(404)) if logicalURL.host != self.domain: log.msg("Registration for domain we don't handle.") return defer.fail(RegistrationError(404)) if self.users.has_key(logicalURL.username): dc, old = self.users[logicalURL.username] dc.reset(3600) else: dc = reactor.callLater(3600, self._expireRegistration, logicalURL.username) log.msg("Registered %s at %s" % (logicalURL.toString(), physicalURL.toString())) self.users[logicalURL.username] = (dc, physicalURL) return defer.succeed(Registration(int(dc.getTime() - time.time()), physicalURL)) def unregisterAddress(self, domainURL, logicalURL, physicalURL): return self._expireRegistration(logicalURL.username)
apache-2.0
pgmillon/ansible
test/units/modules/storage/netapp/test_netapp_e_auditlog.py
68
10758
# (c) 2018, NetApp Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args __metaclass__ = type from units.compat import mock class AuditLogTests(ModuleTestCase): REQUIRED_PARAMS = {'api_username': 'rw', 'api_password': 'password', 'api_url': 'http://localhost', 'ssid': '1'} REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request' MAX_RECORDS_MAXIMUM = 50000 MAX_RECORDS_MINIMUM = 100 def _set_args(self, **kwargs): module_args = self.REQUIRED_PARAMS.copy() if kwargs is not None: module_args.update(kwargs) set_module_args(module_args) def test_max_records_argument_pass(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM) for max_records in max_records_set: initial["max_records"] = max_records self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})): audit_log = AuditLog() self.assertTrue(audit_log.max_records == max_records) def test_max_records_argument_fail(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1) for max_records in max_records_set: with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"): initial["max_records"] = max_records self._set_args(**initial) AuditLog() def test_threshold_argument_pass(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, 
"log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} threshold_set = (60, 75, 90) for threshold in threshold_set: initial["threshold"] = threshold self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})): audit_log = AuditLog() self.assertTrue(audit_log.threshold == threshold) def test_threshold_argument_fail(self): """Verify AuditLog arument's max_records and threshold upper and lower boundaries.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} threshold_set = (59, 91) for threshold in threshold_set: with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"): initial["threshold"] = threshold self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})): AuditLog() def test_is_proxy_pass(self): """Verify that True is returned when proxy is used to communicate with storage.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90, "api_url": "https://10.1.1.10/devmgr/v2"} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): self.assertTrue(audit_log.is_proxy()) def test_is_proxy_fail(self): """Verify that AnsibleJsonFail exception is thrown when exception occurs.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"): with mock.patch(self.REQ_FUNC, return_value=Exception()): audit_log.is_proxy() def test_get_configuration_pass(self): """Validate get configuration does not throw exception 
when normal request is returned.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} expected = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, return_value=(200, expected)): body = audit_log.get_configuration() self.assertTrue(body == expected) def test_get_configuration_fail(self): """Verify AnsibleJsonFail exception is thrown.""" initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"): with mock.patch(self.REQ_FUNC, return_value=Exception()): audit_log.get_configuration() def test_build_configuration_pass(self): """Validate configuration changes will force an update.""" response = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} initial = {"max_records": 1000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} changes = [{"max_records": 50000}, {"log_level": "all"}, {"full_policy": "preventSystemAccess"}, {"threshold": 75}] for change in changes: initial_with_changes = initial.copy() initial_with_changes.update(change) self._set_args(**initial_with_changes) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, return_value=(200, response)): update = audit_log.build_configuration() self.assertTrue(update) def test_delete_log_messages_fail(self): """Verify AnsibleJsonFail exception is thrown.""" initial = {"max_records": 1000, 
"log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"): with mock.patch(self.REQ_FUNC, return_value=Exception()): audit_log.delete_log_messages() def test_update_configuration_delete_pass(self): """Verify 422 and force successfully returns True.""" body = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} initial = {"max_records": 2000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90, "force": True} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with mock.patch(self.REQ_FUNC, side_effect=[(200, body), (422, {u"invalidFieldsIfKnown": None, u"errorMessage": u"Configuration change...", u"localizedMessage": u"Configuration change...", u"retcode": u"auditLogImmediateFullCondition", u"codeType": u"devicemgrerror"}), (200, None), (200, None)]): self.assertTrue(audit_log.update_configuration()) def test_update_configuration_delete_skip_fail(self): """Verify 422 and no force results in AnsibleJsonFail exception.""" body = {"auditLogMaxRecords": 1000, "auditLogLevel": "writeOnly", "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90} initial = {"max_records": 2000, "log_level": "writeOnly", "full_policy": "overWrite", "threshold": 90, "force": False} self._set_args(**initial) with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})): audit_log = AuditLog() with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"): with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}), (200, None), (200, None)]): audit_log.update_configuration()
gpl-3.0
AntonChankin/thefuck
tests/test_conf.py
13
4054
import pytest import six from mock import Mock from thefuck import conf @pytest.fixture def load_source(mocker): return mocker.patch('thefuck.conf.load_source') @pytest.fixture def environ(monkeypatch): data = {} monkeypatch.setattr('thefuck.conf.os.environ', data) return data @pytest.mark.usefixture('environ') def test_settings_defaults(load_source, settings): load_source.return_value = object() settings.init() for key, val in conf.DEFAULT_SETTINGS.items(): assert getattr(settings, key) == val @pytest.mark.usefixture('environ') class TestSettingsFromFile(object): def test_from_file(self, load_source, settings): load_source.return_value = Mock(rules=['test'], wait_command=10, require_confirmation=True, no_colors=True, priority={'vim': 100}, exclude_rules=['git']) settings.init() assert settings.rules == ['test'] assert settings.wait_command == 10 assert settings.require_confirmation is True assert settings.no_colors is True assert settings.priority == {'vim': 100} assert settings.exclude_rules == ['git'] def test_from_file_with_DEFAULT(self, load_source, settings): load_source.return_value = Mock(rules=conf.DEFAULT_RULES + ['test'], wait_command=10, exclude_rules=[], require_confirmation=True, no_colors=True) settings.init() assert settings.rules == conf.DEFAULT_RULES + ['test'] @pytest.mark.usefixture('load_source') class TestSettingsFromEnv(object): def test_from_env(self, environ, settings): environ.update({'THEFUCK_RULES': 'bash:lisp', 'THEFUCK_EXCLUDE_RULES': 'git:vim', 'THEFUCK_WAIT_COMMAND': '55', 'THEFUCK_REQUIRE_CONFIRMATION': 'true', 'THEFUCK_NO_COLORS': 'false', 'THEFUCK_PRIORITY': 'bash=10:lisp=wrong:vim=15'}) settings.init() assert settings.rules == ['bash', 'lisp'] assert settings.exclude_rules == ['git', 'vim'] assert settings.wait_command == 55 assert settings.require_confirmation is True assert settings.no_colors is False assert settings.priority == {'bash': 10, 'vim': 15} def test_from_env_with_DEFAULT(self, environ, settings): 
environ.update({'THEFUCK_RULES': 'DEFAULT_RULES:bash:lisp'}) settings.init() assert settings.rules == conf.DEFAULT_RULES + ['bash', 'lisp'] class TestInitializeSettingsFile(object): def test_ignore_if_exists(self, settings): settings_path_mock = Mock(is_file=Mock(return_value=True), open=Mock()) settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock)) settings._init_settings_file() assert settings_path_mock.is_file.call_count == 1 assert not settings_path_mock.open.called def test_create_if_doesnt_exists(self, settings): settings_file = six.StringIO() settings_path_mock = Mock( is_file=Mock(return_value=False), open=Mock(return_value=Mock( __exit__=lambda *args: None, __enter__=lambda *args: settings_file))) settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock)) settings._init_settings_file() settings_file_contents = settings_file.getvalue() assert settings_path_mock.is_file.call_count == 1 assert settings_path_mock.open.call_count == 1 assert conf.SETTINGS_HEADER in settings_file_contents for setting in conf.DEFAULT_SETTINGS.items(): assert '# {} = {}\n'.format(*setting) in settings_file_contents settings_file.close()
mit