prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
[0, 0.3, 0.6]}) cut = {'front': 3, 'back': 4} ts_pvrow = TsPVRow.from_raw_inputs( xy_center, width, df_inputs.rotation_vec, cut, df_inputs.shaded_length_front, df_inputs.shaded_length_back) # Plot it at ts 0 f, ax = plt.subplots() ts_pvrow.plot_at_idx(0, ax) plt.show() # Plot it at ts 1 f, ax = plt.subplots() ts_pvrow.plot_at_idx(1, ax) plt.show() # Plot it at ts 2: flat case f, ax = plt.subplots() ts_pvrow.plot_at_idx(2, ax) plt.show() def test_ts_pvrow_to_geometry(): """Check that the geometries are created correctly""" xy_center = (0, 2) width = 2. df_inputs = pd.DataFrame({ 'rotation_vec': [20., -30., 0.], 'shaded_length_front': [1.3, 0., 1.9], 'shaded_length_back': [0, 0.3, 0.6]}) cut = {'front': 3, 'back': 4} param_names = ['test1', 'test2'] ts_pvrow = TsPVRow.from_raw_inputs( xy_center, width, df_inputs.rotation_vec, cut, df_inputs.shaded_length_front, df_inputs.shaded_length_back, param_names=param_names) pvrow = ts_pvrow.at(0) # Check classes of geometries assert isinstance(pvrow, PVRow) assert isinstance(pvrow.front, BaseSide) assert isinstance(pvrow.back, BaseSide) assert isinstance(pvrow.front.list_segments[0], PVSegment) assert isinstance(pvrow.back.list_segments[0].illum_collection, ShadeCollection) assert isinstance(pvrow.front.list_segments[1].illum_collection .list_surfaces[0], PVSurface) # Check some values np.testing.assert_allclose(pvrow.front.shaded_length, 1.3) front_surface = (pvrow.front.list_segments[1].illum_collection .list_surfaces[0]) back_surface = (pvrow.back.list_segments[1].illum_collection .list_surfaces[0]) n_vector_front = front_surface.n_vector n_vector_back = back_surface.n_vector expected_n_vec_front = np.array([-0.68404029, 1.87938524]) np.testing.assert_allclose(n_vector_front, expected_n_vec_front) np.testing.assert_allclose(n_vector_back, - expected_n_vec_front) assert front_surface.param_names == param_names assert back_surface.param_names == param_names def test_ts_ground_from_ts_pvrow(): """Check that ground geometries 
are created correctly from ts pvrow""" # Create a ts pv row xy_center = (0, 2) width = 2. df_inputs = pd.DataFrame({ 'rotation_vec': [20., -90., 0.], 'shaded_length_front': [1.3, 0., 1.9], 'shaded_length_back': [0, 0.3, 0.6]}) cut = {'front': 3, 'back': 4} param_names = ['test1', 'test2'] ts_pvrow = TsPVRow.from_raw_inputs( xy_center, width, df_inputs.rotation_vec, cut, df_inputs.shaded_length_front, df_inputs.shaded_length_back, param_names=param_names) # Create ground from it alpha_vec = np.deg2rad([80., 90., 70.]) ts_ground = TsGround.from_ts_pvrows_and_angles( [ts_pvrow], alpha_vec, df_inputs.rotation_vec, param_names=param_names) assert len(ts_ground.shadow_elements) == 1 # Check at specific times ground_0 = ts_ground.at(0) assert ground_0.n_surfaces == 4 assert ground_0.list_segments[0].shaded_collection.n_surfaces == 1 ground_1 = ts_ground.at(1) # vertical, sun above assert ground_1.n_surfaces == 2 # only 2 illuminated surfaces assert ground_1.list_segments[0].shaded_collection.n_surfaces == 0 assert ground_1.shaded_length == 0 # no shadow (since shadow length 0ish) np.testing.assert_allclose(ground_0.shaded_length, 1.7587704831436) np.testing.assert_allclose(ts_ground.at(2).shaded_length, width) # flat # Check that all have surface params for surf in ground_0.all_surfaces: assert surf.param_names == param_names def test_ts_ground_overlap(): shadow_coords = np.array([ [[[0, 0], [0, 0]], [[2, 1], [0, 0]]], [[[1, 2], [0, 0]], [[5, 5], [0, 0]]] ]) overlap = [True, False] # Test without overlap ts_ground = TsGround.from_ordered_shadows_coords(shadow_coords) np.testing.assert_allclose(ts_ground.shadow_elements[0].b2.x, [2, 1]) # Test with overlap ts_ground = TsGround.from_ordered_shadows_coords(shadow_coords, flag_overlap=overlap) np.testing.assert_allclose(ts_ground.shadow_elements[0].b2.x, [1, 1]) def test_ts_ground_to_geometry(): # There should be an overlap shadow_coords = np.array([ [[[0, 0], [0, 0]], [[2, 1], [0, 0]]], [[[1, 2], [0, 0]], [[5, 5], [0, 0]]] 
]) overlap = [True, False] cut_point_coords = [TsPointCoords.from_array(np.array([[2, 2], [0, 0]]))] # Test with overlap ts_ground = TsGround.from_ordered_shadows_coords( shadow_coords, flag_overlap=overlap, cut_point_coords=cut_point_coords) # Run some checks for index 0 pvground = ts_ground.at(0, merge_if_flag_overlap=False, with_cut_points=False) assert pvground.n_surfaces == 4 assert pvground.list_segments[0].illum_collection.n_surfaces == 2 assert pvground.list_segments[0].shaded_collection.n_surfaces == 2 assert
pvground.list_segme
nts[0].shaded_collection.length == 5 np.testing.assert_allclose(pvground.shaded_length, 5) # Run some checks for index 1 pvground = ts_ground.at(1, with_cut_points=False) assert pvground.n_surfaces == 5 assert pvground.list_segments[0].illum_collection.n_surfaces == 3 assert pvground.list_segments[0].shaded_collection.n_surfaces == 2 assert pvground.list_segments[0].shaded_collection.length == 4 np.testing.assert_allclose(pvground.shaded_length, 4) # Run some checks for index 0, when merging pvground = ts_ground.at(0, merge_if_flag_overlap=True, with_cut_points=False) assert pvground.n_surfaces == 3 assert pvground.list_segments[0].illum_collection.n_surfaces == 2 assert pvground.list_segments[0].shaded_collection.n_surfaces == 1 assert pvground.list_segments[0].shaded_collection.length == 5 np.testing.assert_allclose(pvground.shaded_length, 5) # Run some checks for index 0, when merging and with cut points pvground = ts_ground.at(0, merge_if_flag_overlap=True, with_cut_points=True) assert pvground.n_surfaces == 4 assert pvground.list_segments[0].illum_collection.n_surfaces == 2 assert pvground.list_segments[0].shaded_collection.n_surfaces == 2 assert pvground.list_segments[0].shaded_collection.length == 5 np.testing.assert_allclose(pvground.shaded_length, 5) def test_shadows_coords_left_right_of_cut_point(): """Test that coords left and right of cut point are created correctly""" # Ground inputs shadow_coords = np.array([ [[[0], [0]], [[2], [0]]], [[[3], [0]], [[5], [0]]] ], dtype=float) overlap = [False] # --- Create timeseries ground cut_point = TsPointCoords([2.5], [0]) ts_ground = TsGround.from_ordered_shadows_coords( shadow_coords, flag_overlap=overlap, cut_point_coords=[cut_point]) # Get left and right shadows shadows_left = ts_ground.shadow_coords_left_of_cut_point(0) shadows_right = ts_ground.shadow_coords_right_of_cut_point(0) # Reformat for testing shadows_left = [shadow.as_array for shadow in shadows_left] shadows_right = [shadow.as_array for shadow in 
shadows_right] expected_shadows_left = [shadow_coords[0], [cut_point.as_array, cut_point.as_array]] expected_shadows_right = [[cut_point.as_array, cut_point.as_array], shadow_coords[1]] # Test that correct np.testing.assert_allclose(shadows_left, expected_shadows_left) np.testing.assert_allclose(shadows_right, expected_shadows_right) # --- Case where pv rows are flat, cut point are inf cut_point = TsPointCoords([np.inf], [0]) ts_ground = TsGround.from_ordered_shadows_coords( shadow_coords, flag_overlap=overlap
# # (c) 2020 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by #
the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, #
but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from io import StringIO import pytest from units.compat import unittest from ansible.plugins.connection import local from ansible.playbook.play_context import PlayContext class TestLocalConnectionClass(unittest.TestCase): def test_local_connection_module(self): play_context = PlayContext() play_context.prompt = ( '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: ' ) in_stream = StringIO() self.assertIsInstance(local.Connection(play_context, in_stream), local.Connection)
#!/usr/bin/python # _*_ coding: utf-8 _*_ import zlib s = b'witch which has which witches wrist
watch' print len(s) t = zlib.compress(s) print len(t) print t print zlib.decompress(t) print zlib.crc32(s)
# UrbanFootprint v1.5 # Copyright (C) 2017 Calthorpe Analytics # # This file is part of UrbanFootprint version 1.5 # # UrbanFootprint is distributed under the terms of the GNU General # Public License version 3, as published by the Free Software Foundation. This # code is distributed WITHOUT ANY WARRANTY, without implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License v3 for more details; see <http://www.gnu.org/licenses/>. import pwd import shlex import subprocess from optparse import make_option import os from distutils import spawn from django.conf import settings from django.core.management.base import BaseCommand, CommandError from footprint.utils.postgres_utils import build_postgres_conn_string, postgres_env_password_loaded class Command(BaseCommand): args = '<destination_folder> (optional - if not specified use settings.py option)' help = 'Creates a data dump' # I hate having to use optparse. We should be using argparse. # When https://code.djangoproject.com/ticket/19973 gets fixed, we can # use the new way of parsing (which will likely use argparse instead). 
# In the meantime we'll stick with the documented way of doing this option_list = BaseCommand.option_list + ( make_option('--destination-folder', action='store', type='string', dest='destination_folder', default=getattr(settings, 'CALTHORPE_DATA_DUMP_LOCATION', ''), help='output folder for daily dump'), ) def handle(self, *args, **options): rsync = spawn.find_executable('rsync') if rsync is None: raise CommandError('rsync not found') pg_dump = spawn.find_executable('pg_dump') if pg_dump is None: raise CommandError('pg_dump not found') if options['destination_folder'] == '': raise CommandError('--destination-folder not specified in command line nor settings.py') # make sure destination folder exists if not os.path.exists(options['destination_folder']): try: os.makedirs(options['destination_folder']) except Exception, e: raise Exception("Cannot create directory with user %s. Exception %s" % ( pwd.getpwuid(os.getuid())[0], e.message)) pg_output_file_name = os.path.join(options['destination_folder'], 'pg_dump.dmp') media_output_copy_folder = os.path.join(options['destination_folder'], 'media') # make sure destination daily media folder also exists if not os.path.exists(media_output_copy_folder): os.makedirs(media_output_copy_folder) ################# #rsync folder rsync += ' -rapthzvO {extra} {src} {dest}'.format(extra=settings.CALTHORPE_DAILY_DUMP_RSYNC_EXTRA_PARAMS, src=settings.MEDIA_ROOT, dest=media_output_copy_folder) self.stdout.write(rsync + '\n') output = self.exec_cmd(rsync) self.stdout.write(output) ################# #do database dump print settings.DATABASES['default'] with postgres_env_password_loaded(): pg_dump += ' {pg_conn_string} -Fc -f
{output_file_name}'.format( pg_conn_string=build_postgres_conn_string(settings.DATABASES['default']), output_file_name=pg_output_file_name) output = self.exec_cmd(pg_dump) self.stdout.write(output)
self.stdout.write('Wrote ' + pg_output_file_name + '\n') def exec_cmd(self, cmd): p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, err = p.communicate() if p.returncode != 0: raise CommandError('Error Executing "{cmd}\n{output}\n"'.format(cmd=cmd, output=out)) return out
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack.api_version_request \ import MAX_IMAGE_META_PROXY_API_VERSION from nova.api.openstack.api_ver
sion_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack.api_version_request \ import MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION from nova.api.openstack.api_version_request \ import MIN_WITHOUT_PROXY_API_SUPPORT_VERSION from nova.api.openstack.compute.schemas import limits from nova.api.openstack.compute.views import limits as limits_views from nova.api.openstack import wsgi from nova.api import validation from nova.policies import limits a
s limits_policies from nova import quota QUOTAS = quota.QUOTAS # This is a list of limits which needs to filter out from the API response. # This is due to the deprecation of network related proxy APIs, the related # limit should be removed from the API also. FILTERED_LIMITS_2_36 = ['floating_ips', 'security_groups', 'security_group_rules'] FILTERED_LIMITS_2_57 = list(FILTERED_LIMITS_2_36) FILTERED_LIMITS_2_57.extend(['injected_files', 'injected_file_content_bytes']) class LimitsController(wsgi.Controller): """Controller for accessing limits in the OpenStack API.""" @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(limits.limits_query_schema) def index(self, req): return self._index(req) @wsgi.Controller.api_version(MIN_WITHOUT_PROXY_API_SUPPORT_VERSION, # noqa MAX_IMAGE_META_PROXY_API_VERSION) # noqa @wsgi.expected_errors(()) @validation.query_schema(limits.limits_query_schema) def index(self, req): return self._index(req, FILTERED_LIMITS_2_36) @wsgi.Controller.api_version( # noqa MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION, '2.56') # noqa @wsgi.expected_errors(()) @validation.query_schema(limits.limits_query_schema) def index(self, req): return self._index(req, FILTERED_LIMITS_2_36, max_image_meta=False) @wsgi.Controller.api_version('2.57') # noqa @wsgi.expected_errors(()) @validation.query_schema(limits.limits_query_schema_275, '2.75') @validation.query_schema(limits.limits_query_schema, '2.57', '2.74') def index(self, req): return self._index(req, FILTERED_LIMITS_2_57, max_image_meta=False) def _index(self, req, filtered_limits=None, max_image_meta=True): """Return all global limit information.""" context = req.environ['nova.context'] context.can(limits_policies.BASE_POLICY_NAME) project_id = req.params.get('tenant_id', context.project_id) quotas = QUOTAS.get_project_quotas(context, project_id, usages=True) builder = limits_views.ViewBuilder() return builder.build(req, quotas, 
filtered_limits=filtered_limits, max_image_meta=max_image_meta)
"""Config flow for OpenWeatherMap.""" from pyowm import OWM from pyowm.exceptions.api_call_error import APICallError from pyowm.exceptions.api_response_error import UnauthorizedError import voluptuous as vol from homeassistant import config_entries from homeassistant.const import ( CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_MODE, CONF_NAME, ) from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from .const import ( CONF_LANGUAGE, DEFAULT_FORECAST_MODE, DEFAULT_LANGUAGE, DEFAULT_NAME, FORECAST_MODES, LANGUAGES, ) from .const import DOMAIN # pylint:disable=unused-import SCHEMA = vol.Schema( { vol.Required(CONF_API_KEY): str, vol.Optional(CONF_NAME, default=DEFAULT_NAME): str, vol.Optional(CONF_LATITUDE): cv.latitude, vol.Optional(CONF_LONGITUDE): cv.longitude, vol.Optional(CONF_MODE, default=DEFAULT_FORECAST_MODE): vol.In(FORECAST_MODES), vol.Optional(CONF_LANGUAGE, default=DEFAULT_LANGUAGE): vol.In(LANGUAGES), } ) class OpenWeatherMapConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Config flow for OpenWeatherMap.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" return OpenWeatherMapOptionsFlow(config_entry) async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" errors = {} if user_input is not None: latitude = user_input[CONF_LATITUDE] longitude = user_input[CONF_LONGITUDE] await self.async_set_unique_id(f"{latitude}-{longitude}") self._abort_if_unique_id_configured() try: api_online = await _is_owm_api_online( self.hass, user_input[CONF_API_KEY] ) if not api_online: errors["base"] = "invalid_api_key" except UnauthorizedError: errors["base"] = "invalid_api_key" except APICallError: errors["base"] = "cannot_connect" if not errors: return self.async_create_entry( title=user_input[CONF_NAME], data=user_input ) return 
self.async_show_form(step_id="user", data_schema=SCHEMA, errors=errors) async def async_step_import(self, import_input=None): """Set the config entry up from yaml.""" config = import_input.copy() if CONF_NAME not in config: config[CONF_NAME] = DEFAULT_NAME if CONF_LATITUDE not in config: config[CONF_LATITUDE] = self.hass.config.latitude if CONF_LONGITUDE not in config: config[CONF_LONGITUDE] = self.hass.config.longitude if CONF_MODE not in config: config[CONF_MODE] = DEFAULT_FORECAST_MODE if CONF_LANGUAGE not in config: config[CONF_LANGUAGE] = DEFAULT_LANGUAGE return await self.async_step_user(config) class OpenWeatherMapOptionsFlow(config_entries.OptionsFlow): """Handle options.""" def __init__(self, config_entry): """Initialize options flow.""" self.config_entry = config_entry async def async_step_init(self, user_input=None): """Manage the options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) return self.async_show_form( step_id="init", data_schema=self._get_options_schema(), ) def _get_options_schema(self): return vol.Schema( { vol.Optional( CONF_MODE, default=self.config_entry.options.get( CONF_MODE, DEFAULT_FORECAST_MODE ), ): vol.In(FORECAST_MODES), vol.Optional( CONF_LANGUAGE, default=self.config_entry.options.get( CONF_LANGUAGE, DEFAULT_LANGUAGE ), ): vol.In(LANGUAGES),
} ) async def _is_owm_api_online(hass, api_key): owm = OWM(api
_key) return await hass.async_add_executor_job(owm.is_API_online)
twork interface. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_interface_name: The name of the network interface. :type network_interface_name: str :param expand: Expands referenced resources. :type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: NetworkInterface, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_07_01.models.NetworkInterface :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-07-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise 
HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('NetworkInterface', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, network_interface_name: str, parameters: "_models.NetworkInterface", **kwargs: Any ) -> "_models.NetworkInterface": cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-07-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._ser
ialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs
= {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'NetworkInterface') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('NetworkInterface', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('NetworkInterface', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, network_interface_name: str, parameters: "_models.NetworkInterface", **kwargs: Any ) -> AsyncLROPoller["_models.NetworkInterface"]: """Creates or updates a network interface. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_interface_name: The name of the network interface. :type network_interface_name: str :param parameters: Parameters supplied to the create or update network interface operation. :type parameters: ~azure.mgmt.network.v2019_07_01.models.NetworkInterface :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. 
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.NetworkInterface] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, network_interface_name=network_interface_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('NetworkInte
# SPDX-License-Identifier: GPL-2.0+ # Copyright (c) 2016 Google, Inc # Written by Simon Glass <sjg@chromium.org> # # Entry-type module for the 16-bit x86 reset code for U-Boot # from binman.entry import Entry from binman.etype.blob import Entry_blob class Entry_x86_reset16(Entry_blob): """x86 16-bit reset code for U-Boot Properties / Entry arguments: - filename: Filename of u-boot-x86-reset16.bin (default 'u-boot-x86-reset16.bin') x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code must be placed at a particular address. This entry holds that code. It is typically placed at offset CONFIG_RESET_VEC_LOC. The c
ode
is responsible for jumping to the x86-start16 code, which continues execution. For 64-bit U-Boot, the 'x86_reset16_spl' entry type is used instead. """ def __init__(self, section, etype, node): super().__init__(section, etype, node) def GetDefaultFilename(self): return 'u-boot-x86-reset16.bin'
name_plural = '视频地区' def __str__(self): return self.name class VideoCategory(models.Model): name = models.CharField(max_length=128, verbose_name='分类名称') type = models.CharField(max_length=128, choices=TYPES, default='common', verbose_name='类型') isSecret = models.BooleanField(default=False, verbose_name='是否加密') level = models.IntegerField(null=False, blank=False, default=1, choices=((1, '一级分类'), (2, '二级分类')), verbose_name='分类等级') subset = models.ManyToManyField('self', blank=True, verbose_name='分类关系') class Meta: verbose_name = '视频分类' verbose_name_plural = '视频分类管理' def __str__(self): base_name = self.name + str(' (level %d)' % (self.level)) if self.subset.first() and self.level == 2: return '--'.join([self.subset.first().name, base_name]) else: return base_name def save(self, *args, **kwargs): super(VideoCategory, self).save(*args, **kwargs) def colored_level(self): color_code = 'red' if self.level == 1 else 'green' return format_html( '<span style="color:{};">{}</span>', color_code, self.get_level_display() ) colored_level.short_description = '分级' # --------------------------------------------------------------------- class MultipleUpload(models.Model): files = ModelAdminResumableMultiFileField(null=True, blank=True, storage=VodStorage(), verbose_name='文件') save_path = models.CharField(max_length=128, blank=False, null=True, verbose_name='保存路径') category = models.ForeignKey(VideoCategory, null=True, verbose_name='分类') class Meta: verbose_name = '批量上传' verbose_name_plural = '批量上传管理' # --------------------------------------------------------------------- # TODO(hhy): Please Leave This Model Here. It Will Be Use In The Future. 
# class VideoTag(models.Model): # name = models.CharField(max_length=200, null=False, blank=False) # # def __str__(self): # return self.name class Restore(models.Model): txt_file = models.FileField(blank=True, null=True, verbose_name='备份配置文件') zip_file = ModelAdminResumableRestoreFileField(null=True, blank=True, storage=VodStorage(), verbose_name='压缩包') save_path = models.CharField(max_length=128, blank=False, null=True) # ,default=FileDirectory.objects.first()) class Meta: verbose_name = '视频导入' verbose_name_plural = '视频导入' def save(self, force_insert=False, force_update=False, using=None, update_fields=None): result = super(Restore, self).save() file_path = self.txt_file.path call_command('loaddata', file_path) return result class Vod(models.Model): title = models.CharField(max_length=120, verbose_name='标题') # image = models.ImageField(upload_to=upload_image_location, null=True, blank=True) # video = models.FileField(null=True,blank=True,storage=VodStorage()) image = ModelAdminResumableImageField(null=True, blank=True, storage=VodStorage(), max_length=1000, verbose_name='缩略图') video = ModelAdminResumableFileField(null=True, blank=True, storage=VodStorage(), max_length=1000, verbose_name='视频') duration = models.CharField(max_length=50, blank=True, null=True, verbose_name='时长') local_video = models.FilePathField(path=settings.LOCAL_MEDIA_ROOT, blank=True, recursive=True) definition = models.CharField(max_length=10, choices=VIDEO_QUALITY, blank=False, default='H', verbose_name='清晰度') category = models.ForeignKey(VideoCategory, null=True, blank=True, verbose_name='分类') save_path = models.CharField(max_length=128, blank=False, null=True, default='default', verbose_name='保存路径') # ,default=FileDirectory.objects.first()) year = models.CharField(max_length=10, blank=False, null=True, default=datetime.datetime.now().year, verbose_name='年份') region = models.ForeignKey(VideoRegion, to_field='name', null=True, blank=True, on_delete=models.SET_NULL, verbose_name='地区') file_size 
= models.CharField(max_length=128, default='0B', editable=False, verbose_name='文件大小') view_count = models.IntegerField(default=0, verbose_name='观看次数') view_count_temp = 0 creator = models.ForeignKey(User, null=True, blank=False, editable=False) description = models.TextField(blank=True, verbose_name='简介') select_name = models.CharField(max_length=100, blank=False, verbose_name='选集名称', default='1') updated = models.DateTimeField(auto_now=True, auto_now_add=False) timestamp = models.DateTimeField(auto_now=False, auto_now_add=True, verbose_name='创建时间') # The first time added slug = models.SlugField(unique=True, blank=True) search_word = models.CharField(max_length=10000, null=True, blank=True) # tags = models.ManyToManyField(VideoTag, blank=True) video_list = SortedManyToManyField('self', blank=True) # video_list = models.ManyToManyField('self', blank=True, symmetrical=False) active = models.IntegerField(null=True, blank=False, default=0, choices=((1, 'Yes'), (0, 'No'))) progress = models.IntegerField(null=True, blank=True, default=0) objects = VodManager() class Meta: verbose_name = '视频' verbose_name_plural = '视频列表' ordering = ["-timestamp", "-updated"] def save(self, without_valid=False, *args, **kwargs): logging.debug('==== 保存点播节目 %s ====' % self.title) p = Pinyin() full_pinyin = p.get_pinyin(smart_str(self.title), '') first_pinyin = p.get_initials(smart_str(self.title), '').lower() self.search_word = " ".join([full_pinyin, first_pinyin]) logging.debug("video path:", self.video) if self.description is None or self.description == "": self.description = default_description(self) if self.local_video != '' and self.local_video is not None: basename = Path(self.local_video).relative_to(Path(settings.LOCAL_MEDIA_ROOT)) self.video.name = str(Path(settings.LOCAL_MEDIA_URL) / basename) logging.debug("save local_video to filefield done") if without_valid: ret = super(Vod, self).save(*args, **kwargs) return ret super(Vod, self).save(*args, **kwargs) try: if self.video != None 
and self.video != '': relative_path = Path(self.video.name).relative_to(settings.MEDIA_URL) # Djan%20go.mp4 rel_name = uri_to_iri(relative_path) # Djan go.mp4 # Make sure the self
.video.name is not in the LOCAL_FOLDER if not self.video.name.startswith(settings.LOCAL_FOLDER_NAME) and \ not self.video.name.startswith(setting
s.RECORD_MEDIA_FOLDER): self.video.name = str(rel_name) logging.debug('save_path:', self.save_path) logging.debug('video.name:', self.video.name) logging.debug('size:', self.video.file.size) self.file_size = humanfriendly.format_size(self.video.file.size) # duration = VideoFileClip(self.video.path).duration # self.duration = time_formate(duration) else: print("video file is None") except: pass try: if self.image: self.image.name = str(uri_to_iri(Path(self.image.name).relative_to(settings.MEDIA_URL))) except: pass return super(Vod, self).save(*args, **kwargs) def __unicode__(self): return self.title def __str__(self): return self.title def image_tag(self): if self.image is not None and str(self.image) != "": if os.path.exists(self.image.path): return mark_safe('<img src="%s" width="160" height="90" />' % (self.image.url)) else: return mark_safe('<img src="#" width="160" height="90" />') else: return mark_safe('<img src="%s" width="160" height="90" />' % (settings.DEFAULT_IMAGE_SRC))
# coding=utf-8 """Request handler for authentication.""" from __future__ import unicode_literals import logging import random import string import time from builtins import range import jwt from medusa import app, helpers, notifiers from medusa.logger.adapters.style import BraceAdapter from medusa.server.api.v2.base import BaseRequestHandler from six import text_type from tornado.escape import json_decode log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) class AuthHandler(BaseRequestHandler): """Auth request handler.""" #: resource name name = 'authenticate' #: allowed HTTP methods allowed_methods = ('POST', ) def _check_authentication(self): """Override authentication check for the authentication endpoint.""" return None def post(self, *args, **kwargs): """Request JWT.""" username = app.WEB_USERNAME password = app.WEB_PASSWORD # If the user hasn't set a username and/or password just let them login if not username.strip() or not password.strip(): return self._login() if not self.request.body: return self._failed_login(error='No Credentials Pro
vided') if self.request.headers['content-type'] != 'application/json': return self._failed_login(error='Incorrect content-type') request_body = json_decode(self.request.body) submitted_username = request_bod
y.get('username') submitted_password = request_body.get('password') submitted_exp = request_body.get('exp', 86400) if username != submitted_username or password != submitted_password: return self._failed_login(error='Invalid credentials') return self._login(submitted_exp) def _login(self, exp=86400): self.set_header('Content-Type', 'application/json') if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip): notifiers.notify_login(self.request.remote_ip) log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME}) time_now = int(time.time()) return self._ok(data={ 'token': jwt.encode({ 'iss': 'Medusa ' + text_type(app.APP_VERSION), 'iat': time_now, # @TODO: The jti should be saved so we can revoke tokens 'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)), 'exp': time_now + int(exp), 'username': app.WEB_USERNAME, 'apiKey': app.API_KEY }, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8') }) def _failed_login(self, error=None): log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', { 'user': app.WEB_USERNAME, 'ip': self.request.remote_ip }) return self._unauthorized(error=error)
_modules(modules, options, sender, tags): """Reloads any changed modules from the 'etc' directory. Args: cdir: The path to the 'collectors' directory. modules: A dict of path -> (module, timestamp). Returns: whether or not anything has changed. """ etcdir = os.path.join(options.cdir, 'etc') current_modules = set(list_config_modules(etcdir)) current_paths = set(os.path.join(etcdir, name) for name in current_modules) changed = False # Reload any module that has changed. for path, (module, timestamp) in modules.iteritems(): if path not in current_paths: # Module was removed. continue mtime = os.path.getmtime(path) if mtime > timestamp: LOG.info('Reloading %s, file has changed', path) module = load_config_module(module, options, tags) modules[path] = (module, mtime) changed = True # Remove any module that has been removed. for path in set(modules).difference(current_paths): LOG.info('%s has been removed, tcollector should be restarted', path) del modules[path] changed = True # Check for any modules that may have been added. 
for name in current_modules: path = os.path.join(etcdir, name) if path not in modules: module = load_config_module(name, options, tags) modules[path] = (module, os.path.getmtime(path)) changed = True return changed def write_pid(pidfile): """Write our pid to a pidfile.""" f = open(pidfile, "w") try: f.write(str(os.getpid())) finally: f.close() def all_collectors(): """Generator to return all collectors.""" return COLLECTORS.itervalues() # collectors that are not marked dead def all_valid_collectors(): """Generator to return all defined collectors that haven't been marked dead in the past hour, allowing temporarily broken collectors a chance at redemption.""" now = int(time.time()) for col in all_collectors(): if not col.dead or (now - col.lastspawn > 3600): yield col # collectors that have a process attached (currenty alive) def all_living_collectors(): """Generator to return all defined collectors that have an active process.""" for col in all_collectors(): if col.proc is not None: yield col def shutdown_signal(signum, frame): """Called when we get a signal and need to terminate.""" LOG.warning("shutting down, got signal %d", signum) shutdown() def kill(proc, signum=signal.SIGTERM): os.killpg(proc.pid, signum) def shutdown(): """Called by atexit and when we receive a signal, this ensures we properly terminate any outstanding children.""" global ALIVE # prevent repeated calls if not ALIVE: return # notify threads of program termination ALIVE = False LOG.info('shutting down children') # tell everyone to die for col in all_living_collectors(): col.shutdown() LOG.info('exiting') sys.exit(1) def reap_children(): """When a child process dies, we have to determine why it died and whether or not we need to restart it. This method manages that logic.""" for col in all_living_collectors(): now = int(time.time()) # FIXME: this is not robust. the asyncproc module joins on the # reader threads when you wait if that process has died. 
this can cause # slow dying processes to hold up the main loop. good for now though. status = col.proc.poll() if status is None: continue col.proc = None # behavior based on status. a code 0 is normal termination, code 13 # is used to indicate that we don't want to restart this collector. # any other status code is an error and is logged. if status == 13: LOG.info('removing %s from the list of collectors (by request)', col.name) col.dead = True elif status != 0: LOG.warning('collector %s terminated after %d seconds with ' 'status code %d, marking dead', col.name, now - col.lastspawn, status) col.dead = True else: register_collector(Collector(col.name, col.interval, col.filename, col.mtime, col.lastspawn)) def check_children(): """When a child process hasn't received a datapoint in a while, assume it's died in some fashion and restart it.""" for col in all_living_collectors(): now = int(time.time()) if col.last_datapoint < (now - ALLOWED_INACTIVI
TY_TIME): # It's too old, kill it LOG.warning('Terminating collector %s after %d seconds of inactivity', col.name, now - col.last_datapoint) col.shutdown() register_collector(Collector(col.na
me, col.interval, col.filename, col.mtime, col.lastspawn)) def set_nonblocking(fd): """Sets the given file descriptor to non-blocking mode.""" fl = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK fcntl.fcntl(fd, fcntl.F_SETFL, fl) def spawn_collector(col): """Takes a Collector object and creates a process for it.""" LOG.info('%s (interval=%d) needs to be spawned', col.name, col.interval) # FIXME: do custom integration of Python scripts into memory/threads # if re.search('\.py$', col.name) is not None: # ... load the py module directly instead of using a subprocess ... try: col.proc = subprocess.Popen(col.filename, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, preexec_fn=os.setsid) except OSError, e: LOG.error('Failed to spawn collector %s: %s' % (col.filename, e)) return # The following line needs to move below this line because it is used in # other logic and it makes no sense to update the last spawn time if the # collector didn't actually start. col.lastspawn = int(time.time()) set_nonblocking(col.proc.stdout.fileno()) set_nonblocking(col.proc.stderr.fileno()) if col.proc.pid > 0: col.dead = False LOG.info('spawned %s (pid=%d)', col.name, col.proc.pid) return # FIXME: handle errors better LOG.error('failed to spawn collector: %s', col.filename) def spawn_children(): """Iterates over our defined collectors and performs the logic to determine if we need to spawn, kill, or otherwise take some action on them.""" if not ALIVE: return for col in all_valid_collectors(): now = int(time.time()) if col.interval == 0: if col.proc is None: spawn_collector(col) elif col.interval <= now - col.lastspawn: if col.proc is None: spawn_collector(col) continue # I'm not very satisfied with this path. It seems fragile and # overly complex, maybe we should just reply on the asyncproc # terminate method, but that would make the main tcollector # block until it dies... 
:| if col.nextkill > now: continue if col.killstate == 0: LOG.warning('warning: %s (interval=%d, pid=%d) overstayed ' 'its welcome, SIGTERM sent', col.name, col.interval, col.proc.pid) kill(col.proc) col.nextkill = now + 5 col.killstate = 1 elif col.killstate == 1: LOG.error('error: %s (interval=%d, pid=%d) still not dead, ' 'SIGKILL sent', col.name, col.interval, col.proc.pid) kill(col.proc, signal.SIGKILL) col.nextkill = now + 5 col.killstate = 2 else:
#!/usr/bin/env python

import numpy as np
import os
import shutil
import mss
import matplotlib
matplotlib.use('TkAgg')

from datetime import datetime
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as FigCanvas
from PIL import ImageTk, Image

import sys
PY3_OR_LATER = sys.version_info[0] >= 3

if PY3_OR_LATER:
    # Python 3 specific definitions
    import tkinter as tk
    import tkinter.ttk as ttk
    import tkinter.messagebox as tkMessageBox
else:
    # Python 2 specific definitions
    import Tkinter as tk
    import ttk
    import tkMessageBox

from utils import Screenshot, XboxController

# Saved frame size, polling intervals in milliseconds (slow while idle, fast
# while recording), and the on-disk image format.
IMAGE_SIZE = (320, 240)
IDLE_SAMPLE_RATE = 1500
SAMPLE_RATE = 200
IMAGE_TYPE = ".png"

class MainWindow():
    """ Main frame of the application

    Captures screenshots and Xbox-controller state on a Tk timer and, while
    recording, writes each frame plus a CSV row of controller values into a
    user-chosen output directory.
    """

    def __init__(self):
        self.root = tk.Tk()
        self.sct = mss.mss()

        self.root.title('Data Acquisition')
        self.root.geometry("660x325")
        self.root.resizable(False, False)

        # Init controller
        self.controller = XboxController()

        # Create GUI
        self.create_main_panel()

        # Timer
        # rate: current poll interval; switches between idle_rate and
        # sample_rate when recording toggles.  t counts saved frames.
        self.rate = IDLE_SAMPLE_RATE
        self.sample_rate = SAMPLE_RATE
        self.idle_rate = IDLE_SAMPLE_RATE
        self.recording = False
        self.t = 0
        # pause_timer stops on_timer from re-scheduling itself while the
        # record button handler is reconfiguring state.
        self.pause_timer = False
        self.on_timer()

        self.root.mainloop()

    def create_main_panel(self):
        """Build the static widget layout: image preview, joystick plot,
        output-directory entry and the Record button."""
        # Panels
        top_half = tk.Frame(self.root)
        top_half.pack(side=tk.TOP, expand=True, padx=5, pady=5)
        message = tk.Label(self.root, text="(Note: UI updates are disabled while recording)")
        message.pack(side=tk.TOP, padx=5)
        bottom_half = tk.Frame(self.root)
        bottom_half.pack(side=tk.LEFT, padx=5, pady=10)

        # Images
        self.img_panel = tk.Label(top_half, image=ImageTk.PhotoImage("RGB", size=IMAGE_SIZE))  # Placeholder
        self.img_panel.pack(side = tk.LEFT, expand=False, padx=5)

        # Joystick
        self.init_plot()
        self.PlotCanvas = FigCanvas(figure=self.fig, master=top_half)
        self.PlotCanvas.get_tk_widget().pack(side=tk.RIGHT, expand=False, padx=5)

        # Recording
        textframe = tk.Frame(bottom_half, width=332, height=15, padx=5)
        textframe.pack(side=tk.LEFT)
        textframe.pack_propagate(0)
        self.outputDirStrVar = tk.StringVar()
        self.txt_outputDir = tk.Entry(textframe, textvariable=self.outputDirStrVar, width=100)
        self.txt_outputDir.pack(side=tk.LEFT)
        # Default output dir is timestamped so repeated sessions don't clash.
        self.outputDirStrVar.set("samples/" + datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
        self.record_button = ttk.Button(bottom_half, text="Record", command=self.on_btn_record)
        self.record_button.pack(side = tk.LEFT, padx=5)

    def init_plot(self):
        """Set up the rolling controller plot and its fixed-length buffer."""
        self.plotMem = 50  # how much data to keep on the plot
        self.plotData = [[0] * (5)] * self.plotMem  # mem storage for plot
        self.fig = Figure(figsize=(4,3), dpi=80)    # 320,240
        self.axes = self.fig.add_subplot(111)

    def on_timer(self):
        """Periodic tick: poll inputs, optionally redraw, and re-schedule."""
        self.poll()

        # stop drawing if recording to avoid slow downs
        if self.recording == False:
            self.draw()

        if not self.pause_timer:
            self.root.after(self.rate, self.on_timer)

    def poll(self):
        """Grab one screenshot + controller sample; persist it if recording."""
        self.img = self.take_screenshot()
        self.controller_data = self.controller.read()
        self.update_plot()

        if self.recording == True:
            self.save_data()
            self.t += 1

    def take_screenshot(self):
        # Get raw pixels from the screen
        sct_img = self.sct.grab({
            "top": Screenshot.OFFSET_Y,
            "left": Screenshot.OFFSET_X,
            "width": Screenshot.SRC_W,
            "height": Screenshot.SRC_H})

        # Create the Image
        return Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')

    def update_plot(self):
        # Keep a fixed-length rolling window of controller samples.
        self.plotData.append(self.controller_data)  # adds to the end of the list
        self.plotData.pop(0)  # remove the first item in the list, ie the oldest

    def save_data(self):
        """Write the current frame to disk and append a CSV row for it."""
        image_file = self.outputDir+'/'+'img_'+str(self.t)+IMAGE_TYPE
        self.img.save(image_file)

        # write csv line
        self.outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' )

    def draw(self):
        """Refresh the screenshot preview and the 5-axis controller plot."""
        # Image
        self.img.thumbnail(IMAGE_SIZE, Image.ANTIALIAS)  # Resize
        # NOTE: keep a reference on the widget so the PhotoImage isn't GC'd.
        self.img_panel.img = ImageTk.PhotoImage(self.img)
        self.img_panel['image'] = self.img_panel.img

        # Joystick
        x = np.asarray(self.plotData)
        self.axes.clear()
        self.axes.plot(range(0,self.plotMem), x[:,0], 'r')
        self.axes.plot(range(0,self.plotMem), x[:,1], 'b')
        self.axes.plot(range(0,self.plotMem), x[:,2], 'g')
        self.axes.plot(range(0,self.plotMem), x[:,3], 'k')
        self.axes.plot(range(0,self.plotMem), x[:,4], 'y')
        self.PlotCanvas.draw()

    def on_btn_record(self):
        """Toggle recording on/off, switching poll rate and the CSV file."""
        # pause timer
        self.pause_timer = True

        if self.recording:
            self.recording = False
        else:
            self.start_recording()

        if self.recording:
            self.t = 0  # Reset our counter for the new recording
            self.record_button["text"] = "Stop"
            self.rate = self.sample_rate
            # make / open outfile (append mode: resuming into an existing dir
            # keeps earlier rows)
            self.outfile = open(self.outputDir+'/'+'data.csv', 'a')
        else:
            self.record_button["text"] = "Record"
            self.rate = self.idle_rate
            self.outfile.close()

        # un pause timer
        self.pause_timer = False
        self.on_timer()

    def start_recording(self):
        """Validate/prepare the output directory, then arm recording."""
        should_record = True

        # check that a dir has been specified
        if not self.outputDirStrVar.get():
            tkMessageBox.showerror(title='Error', message='Specify the Output Directory', parent=self.root)
            should_record = False

        else:  # a directory was specified
            self.outputDir = self.outputDirStrVar.get()

            # check if path exists - i.e. may be saving over data
            if os.path.exists(self.outputDir):

                # overwrite the data, yes/no?
                if tkMessageBox.askyesno(title='Warning!', message='Output Directory Exists - Overwrite Data?', parent=self.root):
                    # delete & re-make the dir:
                    shutil.rmtree(self.outputDir)
                    os.mkdir(self.outputDir)

                # answer was 'no', so do not overwrite the data
                else:
                    should_record = False
                    self.txt_outputDir.focus_set()

            # directory doesn't exist, so make one
            else:
                os.mkdir(self.outputDir)

        self.recording = should_record

if __name__ == '__main__':
    app = MainWindow()
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURP
OSE. See the GNU Gen
eral Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members from __future__ import absolute_import from __future__ import print_function def trycmd(config): from buildbot.clients import tryclient t = tryclient.Try(config) t.run() return 0
from hypothesis import given
from ppb_vector import Vector
from utils import floats, vectors


# Property-based tests: hypothesis draws random floats/vectors from the
# strategies defined in utils and checks the accessor invariants below.

@given(x=floats(), y=floats())
def test_class_member_access(x: float, y: float):
    """A Vector exposes its constructor arguments as .x and .y."""
    v = Vector(x, y)
    assert v.x == x
    assert v.y == y


@given(v=vectors())
def test_index_access(v: Vector):
    """Integer indexing mirrors attribute access: [0] is x, [1] is y."""
    assert v[0] == v.x
    assert v[1] == v.y


@given(v=vectors())
def test_key_access(v: Vector):
    """String indexing mirrors attribute access: ["x"] is x, ["y"] is y."""
    assert v["x"] == v.x
    assert v["y"] == v.y
atol and btol. = 2 means x approximately solves the least-squares problem according to atol. = 3 means COND(A) seems to be greater than CONLIM. = 4 is the same as 1 with atol = btol = eps (machine precision) = 5 is the same as 2 with atol = eps. = 6 is the same as 3 with CONLIM = 1/eps. = 7 means ITN reached maxiter before the other stopping conditions were satisfied. itn : int Number of iterations used. normr : float ``norm(b-Ax)`` normar : float ``norm(A^T (b - Ax))`` norma : float ``norm(A)`` conda : float Condition number of A. normx : float ``norm(x)`` Notes ----- .. versionadded:: 0.11.0 References ---------- .. [1] D. C.-L. Fong and M. A. Saunders, "LSMR: An iterative algorithm for sparse least-squares problems", SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011. http://arxiv.org/abs/1006.0758 .. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/ Examples -------- >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import lsmr >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float) The first example has the trivial solution `[0, 0]` >>> b = np.array([0., 0., 0.], dtype=float) >>> x, istop, itn, normr = lsmr(A, b)[:4] >>> istop 0 >>> x array([ 0., 0.]) The stopping code `istop=0` returned indicates that a vector of zeros was found as a solution. The returned solution `x` indeed contains `[0., 0.]`. The next example has a non-trivial solution: >>> b = np.array([1., 0., -1.], dtype=float) >>> x, istop, itn, normr = lsmr(A, b)[:4] >>> istop 1 >>> x array([ 1., -1.]) >>> itn 1 >>> normr 4.440892098500627e-16 As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance limits. The given solution `[1., -1.]` obviously solves the equation. The remaining return values include information about the number of iterations (`itn=1`) and the remaining difference of left and right side of the solved equation. 
The final example demonstrates the behavior in the case where there is no solution for the equation: >>> b = np.array([1., 0.01, -1.], dtype=float) >>> x, istop, itn, normr = lsmr(A, b)[:4] >>> istop 2 >>> x array([ 1.00333333, -0.99666667]) >>> A.dot(x)-b array([ 0.00333333, -0.00333333, 0.00333333]) >>> normr 0.005773502691896255 `istop` indicates that the system is inconsistent and thus `x` is rather an approximate solution to the corresponding least-squares problem. `normr` contains the minimal distance that was found. """ A = aslinearoperator(A) b = atleast_1d(b) if b.ndim > 1: b = b.squeeze() msg = ('The exact solution is x = 0, or x = x0, if x0 was given ', 'Ax - b is small enough, given atol, btol ', 'The least-squares solution is good enough, given atol ', 'The estimate of cond(Abar) has exceeded conlim ', 'Ax - b is small enough for this machine ', 'The least-squares solution is good enough for this machine', 'Cond(Abar) seems to be too large for this machine ', 'The iteration limit has been reached ') hdg1 = ' itn x(1) norm r norm A''r' hdg2 = ' compatible LS norm A cond A' pfreq = 20 # print frequency (for repeating the heading) pcount = 0 # print counter m, n = A.shape # stores the num of singular values minDim = min([m, n]) if maxiter is None: maxiter = minDim if show: print(' ') print('LSMR Least-squares solution of Ax = b\n') print('The matrix A has %8g rows and %8g cols' % (m, n)) print('damp = %20.14e\n' % (damp)) print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim)) print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter)) u = b normb = norm(b) if x0 is None: x = zeros(n) beta = normb.copy() else: x = atleast_1d(x0) u = u - A.matvec(x) beta = norm(u) if beta > 0: u = (1 / beta) * u v = A.rmatvec(u) alpha = norm(v) else: v = zeros(n) alpha = 0 if alpha > 0: v = (1 / alpha) * v # Initialize variables for 1st iteration. itn = 0 zetabar = alpha * beta alphabar = alpha rho = 1 rhobar = 1 cbar = 1 sbar = 0 h = v.copy() hbar = zeros(n) # Initialize var
iables for estimation of ||r||.
betadd = beta betad = 0 rhodold = 1 tautildeold = 0 thetatilde = 0 zeta = 0 d = 0 # Initialize variables for estimation of ||A|| and cond(A) normA2 = alpha * alpha maxrbar = 0 minrbar = 1e+100 normA = sqrt(normA2) condA = 1 normx = 0 # Items for use in stopping rules, normb set earlier istop = 0 ctol = 0 if conlim > 0: ctol = 1 / conlim normr = beta # Reverse the order here from the original matlab code because # there was an error on return when arnorm==0 normar = alpha * beta if normar == 0: if show: print(msg[0]) return x, istop, itn, normr, normar, normA, condA, normx if show: print(' ') print(hdg1, hdg2) test1 = 1 test2 = alpha / beta str1 = '%6g %12.5e' % (itn, x[0]) str2 = ' %10.3e %10.3e' % (normr, normar) str3 = ' %8.1e %8.1e' % (test1, test2) print(''.join([str1, str2, str3])) # Main iteration loop. while itn < maxiter: itn = itn + 1 # Perform the next step of the bidiagonalization to obtain the # next beta, u, alpha, v. These satisfy the relations # beta*u = a*v - alpha*u, # alpha*v = A'*u - beta*v. u = A.matvec(v) - alpha * u beta = norm(u) if beta > 0: u = (1 / beta) * u v = A.rmatvec(u) - beta * v alpha = norm(v) if alpha > 0: v = (1 / alpha) * v # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}. # Construct rotation Qhat_{k,2k+1}. chat, shat, alphahat = _sym_ortho(alphabar, damp) # Use a plane rotation (Q_i) to turn B_i to R_i rhoold = rho c, s, rho = _sym_ortho(alphahat, beta) thetanew = s*alpha alphabar = c*alpha # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar rhobarold = rhobar zetaold = zeta thetabar = sbar * rho rhotemp = cbar * rho cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew) zeta = cbar * zetabar zetabar = - sbar * zetabar # Update h, h_hat, x. hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar x = x + (zeta / (rho * rhobar)) * hbar h = v - (thetanew / rho) * h # Estimate of ||r||. # Apply rotation Qhat_{k,2k+1}. betaacute = chat * betadd betacheck = -shat * betadd # Apply rotation Q_{k,k+1}. 
betahat = c * betaacute betadd = -s * betaacute # Apply rotation Qtilde_{k-1}. # betad = betad_{k-1} here. thetatildeold = thetatilde ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar) thetatilde = stildeold * rhobar rhodold = ctildeold * rhobar betad = - stildeold * betad + ctildeold * betahat # betad = betad_k here. # rhodold = rhod_k here. tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold taud = (zeta - thetatilde * tautildeold) / rhodold d = d + betacheck * betacheck normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
from datetime import datetime

from django.core.files import storage
from django.contrib.staticfiles.storage import CachedStaticFilesStorage


class DummyStorage(storage.Storage):
    """
    A minimal test storage backend with a fixed modification date.

    Only the methods exercised by the staticfiles machinery are implemented;
    everything else falls back to ``storage.Storage``.
    """

    def _save(self, name, content):
        # Pretend the file was stored; the returned name is irrelevant here.
        return 'dummy'

    def delete(self, name):
        pass

    def exists(self, name):
        pass

    def modified_time(self, name):
        # BUGFIX: the original returned ``datetime.date(1970, 1, 1)``.
        # With ``from datetime import datetime`` in scope, ``datetime.date``
        # is the *instance method* of the datetime class, not the ``date``
        # constructor, so that call raised TypeError.  Return the intended
        # fixed date instead.
        return datetime(1970, 1, 1).date()


class SimpleCachedStaticFilesStorage(CachedStaticFilesStorage):
    """Cached storage whose content hashes are constant, making hashed
    filenames predictable in tests."""

    def file_hash(self, name, content=None):
        return 'deploy12345'
for agent in stmt['authority']['member']: if 'account' in agent: if not 'oauth' in agent['account']['homePage'].lower(): err_msg = "Statements cannot have a non-Oauth group as the authority" raise ParamError(err_msg) # No members contain an account so that means it's not an Oauth group else: err_msg = "Statements cannot have a non-Oauth group as the authority" raise ParamError(err_msg) else: return True else: return True # Retrieve JSON data from ID def get_act_def_data(act_data): act_url_data = {} # See if id resolves try: req = urllib2.Request(act_data['id']) req.add_header('Accept', 'application/json, */*') act_resp = urllib2.urlopen(req, timeout=settings.ACTIVITY_ID_RESOLVE_TIMEOUT) except Exception: # Doesn't resolve-hopefully data is in payload pass else: # If it resolves then try parsing JSON from it try: act_url_data = json.loads(act_resp.read()) except Exception: # Resolves but no data to retrieve - this is OK pass # If there was data from the URL and a defintion in received JSON already if act_url_data and 'definition' in act_data: act_data['definition'] = dict(act_url_data.items() + act_data['definition'].items()) # If there was data from the URL and no definition in the JSON elif act_url_data and not 'definition' in act_data: act_data['definition'] = act_url_data def server_validation(stmt_set, auth, payload_sha2s): auth_validated = False if type(stmt_set) is list: for stmt in stmt_set: server_validation(stmt, auth, payload_sha2s) else: if 'id' in stmt_set: statement_id = stmt_set['id'] if check_for_existing_statementId(statement_id): err_msg = "A statement with ID %s already exists" % statement_id raise ParamConflict(err_msg) server_validate_statement_object(stmt_set['object'], auth) if stmt_set['verb']['id'] == 'http://adlnet.gov/expapi/verbs/voided': validate_void_statement(stmt_set['object']['id']) if not 'objectType' in stmt_set['object'] or stmt_set['object']['objectType'] == 'Activity': get_act_def_data(stmt_set['object']) try: validator = 
StatementValidator() validator.validate_activity(stmt_set['object']) except Exception, e: raise BadRequest(e.message) except ParamError, e: raise ParamError(e.message) auth_validated = validate_stmt_authority(stmt_set, auth, auth_validated) if 'attachments' in stmt_set: attachment_data = stmt_set['attachments'] validate_attachments(attachment_data, payload_sha2s) @auth def statements_post(req_dict): if req_dict['params'].keys(): raise ParamError("The post statements request contained unexpected parameters: %s" % ", ".join(req_dict['params'].keys())) if isinstance(req_dict['body'], basestring): req_dict['body'] = convert_to_dict(req_dict['body']) try: validator = StatementValidator(req_dict['body']) validator.validate() except Exception, e: raise BadRequest(e.message) except ParamError, e: raise ParamError(e.message) server_validation(req_dict['body'], req_dict['auth'], req_dict.get('payload_sha2s', None)) return req_dict @auth def statements_more_get(req_dict): if not 'more_id' in req_dict: err_msg = "Missing more_id while trying to hit /more endpoint" raise ParamError(err_msg) return req_dict def validate_statementId(req_dict): if 'statementId' in req_dict['params'] and 'voidedStatementId' in req_dict['params']: err_msg = "Cannot have both statementId and voidedStatementId in a GET request" raise ParamError(err_msg) elif 'statementId' in req_dict['params']: statementId = req_dict['params']['statementId'] voided = False else: statementId = req_dict['params']['voidedStatementId'] voided = True not_allowed = ["agent", "verb", "activity", "registration", "related_activities", "related_agents", "since", "until", "limit", "ascending"] bad_keys = set(not_allowed) & set(req_dict['params'].keys()) if bad_keys: err_msg = "Cannot have %s in a GET request only 'format' and/or 'attachments' are allowed with 'statementId' and 'voidedStatementId'" % ', '.join(bad_keys) raise ParamError(err_msg) # Try to retrieve stmt, if DNE then return empty else return stmt info try: st = 
Statement.objects.get(statement_id=statementId) except Statement.DoesNotExist: err_msg = 'There is no statement associated with the id: %s' % statementId raise IDNotFoundError(err_msg) auth = req_dict.get('auth', None) mine_only = auth and 'statements_mine_only' in auth if auth['authority']: if mine_only and st.authority.id != auth['authority'].id: err_msg = "Incorrect permissions to view statements" raise Forbidden(err_msg) if st.voided != voided: if st.voided: err_msg = 'The requested statement (%s) is voided. Use the "voidedStatementId" parameter to retrieve your statement.' % statementId else: err_msg = 'The requested statement (%s) is not voided. Use the "statementId" parameter to retrieve your statement.' % statementId raise IDNotFoundError(err_msg) return statementId @auth def statements_get(req_dict): rogueparams = set(req_dict['params']) - set(["statementId","voidedStatementId","agent", "verb
", "activity", "registration", "related_activities", "related_agents", "since", "until", "limit", "format", "attachments", "ascending"]) if rogueparams:
raise ParamError("The get statements request contained unexpected parameters: %s" % ", ".join(rogueparams)) formats = ['exact', 'canonical', 'ids'] if 'params' in req_dict and 'format' in req_dict['params']: if req_dict['params']['format'] not in formats: raise ParamError("The format filter value (%s) was not one of the known values: %s" % (req_dict['params']['format'], ','.join(formats))) else: req_dict['params']['format'] = 'exact' # StatementId could be for voided statement as well if 'params' in req_dict and ('statementId' in req_dict['params'] or 'voidedStatementId' in req_dict['params']): req_dict['statementId'] = validate_statementId(req_dict) if 'since' in req_dict['params']: try: parse_datetime(req_dict['params']['since']) except (Exception, ISO8601Error): raise ParamError("Since parameter was not a valid ISO8601 timestamp") if 'until' in req_dict['params']: try: parse_datetime(req_dict['params']['until']) except (Exception, ISO8601Error): raise ParamError("Until parameter was not a valid ISO8601 timestamp") # Django converts all query values to string - make boolean depending on if client wants attachments or not # Only need to do this in GET b/c GET/more will have it saved in pickle information if 'params' in req_dict and 'attachments' in req_dict['params']: if req_dict['params']['attachments'].lower() == 'true': req_dict['params']['attachments'] = True else: req_dict['params']['attachments'] = False else: req_dict['params']['attachments'] = False return req_dict @auth def statements_put(req_dict): # Find any unexpected parameters rogueparams = set(req_dict['params']) - set(["statementId"]) if rogueparams
#============================================================================ # This library is free software; you can redistribute it and/or # modify it under the terms of version 2.1 of the GNU Lesser General Public # License as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #============================================================================ # Copyright (C) 2006 XenSource Ltd. #===
=============================
============================================ from xen.xend.XendAPIConstants import * from xen.util import auxbin # # Shutdown codes and reasons. # DOMAIN_POWEROFF = 0 DOMAIN_REBOOT = 1 DOMAIN_SUSPEND = 2 DOMAIN_CRASH = 3 DOMAIN_HALT = 4 DOMAIN_SHUTDOWN_REASONS = { DOMAIN_POWEROFF: "poweroff", DOMAIN_REBOOT : "reboot", DOMAIN_SUSPEND : "suspend", DOMAIN_CRASH : "crash", DOMAIN_HALT : "halt" } REVERSE_DOMAIN_SHUTDOWN_REASONS = \ dict([(y, x) for x, y in DOMAIN_SHUTDOWN_REASONS.items()]) HVM_PARAM_CALLBACK_IRQ = 0 HVM_PARAM_STORE_PFN = 1 HVM_PARAM_STORE_EVTCHN = 2 HVM_PARAM_PAE_ENABLED = 4 HVM_PARAM_IOREQ_PFN = 5 HVM_PARAM_BUFIOREQ_PFN = 6 HVM_PARAM_NVRAM_FD = 7 # ia64 HVM_PARAM_VHPT_SIZE = 8 # ia64 HVM_PARAM_BUFPIOREQ_PFN = 9 # ia64 HVM_PARAM_VIRIDIAN = 9 # x86 HVM_PARAM_TIMER_MODE = 10 HVM_PARAM_HPET_ENABLED = 11 HVM_PARAM_ACPI_S_STATE = 14 HVM_PARAM_VPT_ALIGN = 16 restart_modes = [ "restart", "destroy", "preserve", "rename-restart", "coredump-destroy", "coredump-restart" ] DOM_STATES = [ 'halted', 'paused', 'running', 'suspended', 'shutdown', 'crashed', 'unknown', ] DOM_STATE_HALTED = XEN_API_VM_POWER_STATE_HALTED DOM_STATE_PAUSED = XEN_API_VM_POWER_STATE_PAUSED DOM_STATE_RUNNING = XEN_API_VM_POWER_STATE_RUNNING DOM_STATE_SUSPENDED = XEN_API_VM_POWER_STATE_SUSPENDED DOM_STATE_SHUTDOWN = XEN_API_VM_POWER_STATE_SHUTTINGDOWN DOM_STATE_CRASHED = XEN_API_VM_POWER_STATE_CRASHED DOM_STATE_UNKNOWN = XEN_API_VM_POWER_STATE_UNKNOWN DOM_STATES_OLD = [ 'running', 'blocked', 'paused', 'shutdown', 'crashed', 'dying' ] SHUTDOWN_TIMEOUT = (60.0 * 5) """Minimum time between domain restarts in seconds.""" MINIMUM_RESTART_TIME = 60 RESTART_IN_PROGRESS = 'xend/restart_in_progress' DUMPCORE_IN_PROGRESS = 'xend/dumpcore_in_progress' LAST_SHUTDOWN_REASON = 'xend/last_shutdown_reason' TRIGGER_NMI = 0 TRIGGER_RESET = 1 TRIGGER_INIT = 2 TRIGGER_POWER = 3 TRIGGER_S3RESUME = 4 TRIGGER_TYPE = { "nmi" : TRIGGER_NMI, "reset" : TRIGGER_RESET, "init" : TRIGGER_INIT, "s3resume": TRIGGER_S3RESUME, 
"power": TRIGGER_POWER } # # Device migration stages (eg. XendDomainInfo, XendCheckpoint, server.tpmif) # DEV_MIGRATE_TEST = 0 DEV_MIGRATE_STEP1 = 1 DEV_MIGRATE_STEP2 = 2 DEV_MIGRATE_STEP3 = 3 # # VTPM-related constants # VTPM_DELETE_SCRIPT = auxbin.scripts_dir() + '/vtpm-delete' # # Xenstore Constants # XS_VMROOT = "/vm/" NR_PCI_FUNC = 8 NR_PCI_DEV = 32 NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV AUTO_PHP_SLOT = 0x100 # # tmem # TMEM_CONTROL = 0 TMEM_NEW_POOL = 1 TMEM_DESTROY_POOL = 2 TMEM_NEW_PAGE = 3 TMEM_PUT_PAGE = 4 TMEM_GET_PAGE = 5 TMEM_FLUSH_PAGE = 6 TMEM_FLUSH_OBJECT = 7 TMEM_READ = 8 TMEM_WRITE = 9 TMEM_XCHG = 10 TMEMC_THAW = 0 TMEMC_FREEZE = 1 TMEMC_FLUSH = 2 TMEMC_DESTROY = 3 TMEMC_LIST = 4 TMEMC_SET_WEIGHT = 5 TMEMC_SET_CAP = 6 TMEMC_SET_COMPRESS = 7
# Copyright 2013 Ken Pepple <ken@pepple.info> # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOU
T # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import alfred_utils as utils
import requests

# Base URL of the Prowl public API; endpoint names ("verify", "add") are
# appended to it.
PROWL_URL = "https://api.prowlapp.com/publicapi/"
DEFAULT_PRIORITY = 0
# Priorities the Prowl API accepts (-2 = very low ... 2 = emergency).
VALID_PRIORITIES = [-2, -1, 0, 1, 2]


def get_api_key():
    """Return the Prowl API key stored in the workflow configuration.

    Propagates whatever utils.get_config raises when no key is stored.
    """
    return utils.get_config('apikey')


def get_priority_key():
    """Return the configured priority, or DEFAULT_PRIORITY when the
    setting is missing, unreadable, or not a valid Prowl priority."""
    try:
        p = utils.get_config('priority')
        if p not in VALID_PRIORITIES:
            p = DEFAULT_PRIORITY
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any config failure still falls back
        # to the default priority.
        p = DEFAULT_PRIORITY
    return p


def verify_apikey(apikey):
    """Ask the Prowl API whether `apikey` is valid; True on HTTP success."""
    parameters = {'apikey': apikey}
    r = requests.post(PROWL_URL + "verify", params=parameters)
    return r.ok


def save_api_key(apikey):
    """Persist the Prowl API key in the workflow configuration."""
    utils.save_config('apikey', apikey)


def send_prowl(description, application="Alfred", event="event", priority=0):
    """Send a Prowl notification; return True when the API accepts it.

    Raises Exception when no API key has been configured.
    """
    try:
        apikey = get_api_key()
    except Exception:
        # Narrowed from a bare `except:`.  Single-argument print() behaves
        # identically under Python 2 and 3 (changed from the Python-2-only
        # `print` statement); the message text is unchanged.
        print("No APIKEY. Please configure by holding down the cmd key and pasting in prowl APIKEY.")
        raise Exception("No APIKEY. Please configure by holding down the cmd key and pasting in prowl APIKEY.")
    parameters = {'apikey': apikey,
                  'event': event,
                  'application': application,
                  'priority': priority,
                  'description': description}
    r = requests.post(PROWL_URL + "add", params=parameters)
    return r.ok
# Copyright(c)2015 NTT corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.object_storage import test_container_sync from tempest import config from tempest import test CONF = config.CONF # This test can be quite long to run due to its # dependency on container-sync process running interval. # You can obviously reduce the container-sync interval in th
e # container-server configuration. class ContainerSyncMiddlewareTest(test_container_sync.ContainerSyncTest): @classmethod def resource_setup(cls): super(ContainerSyncMiddlewareTest, cls).resource_setup() # Set container-sync-realms.conf info cls.realm_name = CONF.object_storage.realm_name cls.key = 'sync_key' cls.cluster_name = CONF.object_storage.cluster_name @test.attr(type='slow') @test.requires_ext(extension='container_sync', service='object') def test_container_synchronization(self): def make_headers(cont, cont_client): # tell first container to synchronize to a second account_name = cont_client.base_url.split('/')[-1] headers = {'X-Container-Sync-Key': "%s" % (self.key), 'X-Container-Sync-To': "//%s/%s/%s/%s" % (self.realm_name, self.cluster_name, str(account_name), str(cont))} return headers self._test_container_synchronization(make_headers)
from __future__ import absolute_import #
Copyright (c) 2010-2015 openpyxl from .formatting import ConditionalFormatting fro
m .rule import Rule
import json
import time

import pytest

from anchore_engine.auth.common import (
    get_creds_by_registry,
    get_docker_registry_userpw,
    registry_record_matches,
)

# Shared credential values used by all fake registry records below.
_test_username = "tonystark"
_test_password = "potts"
# For awsecr records the credentials are embedded in registry_meta as a
# JSON blob whose authorizationToken is "user:password".
_test_registry_meta = {
    "authorizationToken": "{}:{}".format(_test_username, _test_password)
}
# ECR-style record: credentials come from the JSON registry_meta.
_record_ecr = {
    "registry_type": "awsecr",
    "registry_meta": json.dumps(_test_registry_meta),
}
# Non-ECR record: credentials are stored directly on the record.
_record_not_ecr = {
    "registry_type": "other-registry",
    "registry_user": _test_username,
    "registry_pass": _test_password,
}
# Inactive record with no record_state_val; per the test below this is
# still treated as usable by get_creds_by_registry.
_record_ecr_inactive = {
    "registry": "docker.io",
    "record_state_key": "inactive",
    "registry_type": "awsecr",
    "registry_meta": json.dumps(_test_registry_meta),
    "registry_verify": True,
}
# Inactive record with a fresh record_state_val timestamp; expected to make
# get_creds_by_registry raise (registry temporarily unavailable).
_record_ecr_unavailable = {
    "registry": "docker.io",
    "record_state_key": "inactive",
    "record_state_val": time.time(),  # note: technically this could yield nondeterministic results
    "registry_type": "awsecr",
    "registry_meta": json.dumps(_test_registry_meta),
    "registry_verify": True,
}


@pytest.mark.parametrize("registry_record", [_record_ecr, _record_not_ecr])
def test_get_docker_registry_userpw(registry_record):
    # Both record flavors must resolve to the same (user, password) pair.
    result = get_docker_registry_userpw(registry_record)
    assert result == (_test_username, _test_password)


def test_get_docker_registry_userpw_bad_json():
    # Malformed registry_meta on an awsecr record must raise, not return junk.
    record_ecr_bad_json = {
        "registry_type": "awsecr",
        "registry_meta": "this-is-not-valid-json!}",
    }
    with pytest.raises(Exception):
        get_docker_registry_userpw(record_ecr_bad_json)


@pytest.mark.parametrize(
    "registry,repository,registry_creds,expected",
    [
        # No credential records at all -> anonymous triple.
        ("docker.io", "library/node", None, (None, None, None)),
        # Inactive-but-available record -> creds plus its verify flag.
        (
            "docker.io",
            "library/node",
            [_record_ecr_inactive],
            (_test_username, _test_password, True),
        ),
    ],
)
def test_get_creds_by_registry(registry, repository, registry_creds, expected):
    result = get_creds_by_registry(registry, repository, registry_creds)
    assert result == expected


def test_get_creds_by_registry_unavailable():
    # A recently-marked-inactive record must surface as an error.
    with pytest.raises(Exception):
        get_creds_by_registry("docker.io", "library/node", [_record_ecr_unavailable])


@pytest.mark.parametrize(
    "registry_record_str,registry,repository",
    [
        ("docker.io/library/centos", "docker.io", "library/centos"),
        ("docker.io", "docker.io", "centos"),
        ("docker.io", "docker.io", "myuser/myrepo"),
    ],
)
def test_registry_record_matches_exact(registry_record_str, registry, repository):
    # Exact registry (optionally registry/repo) strings match.
    assert registry_record_matches(registry_record_str, registry, repository)


@pytest.mark.parametrize(
    "registry_record_str,registry,repository",
    [
        ("docker.io/library/*", "docker.io", "library/centos"),
        ("docker.io/*", "docker.io", "library/centos"),
        ("gcr.io/myproject/*", "gcr.io", "myproject/myuser/myrepo"),
    ],
)
def test_registry_record_matches_wildcard(registry_record_str, registry, repository):
    # Trailing-* records match any repository under the given prefix.
    assert registry_record_matches(registry_record_str, registry, repository)


@pytest.mark.parametrize(
    "registry_record_str,registry,repository",
    [
        ("docker.io", "gcr.io", "myproject/myuser"),
        ("docker.io/*", "gcr.io", "myproject/myuser"),
        ("docker.io/library/*", "docker.io", "myuser/myrepo"),
        ("docker.io/myuser/myrepo", "docker.io", "myuser/myrepo2"),
    ],
)
def test_registry_record_matches_non(registry_record_str, registry, repository):
    # Mismatched registry or repository prefix must not match.
    assert not registry_record_matches(registry_record_str, registry, repository)
#!/usr/bin/env python3
"""
sumton.py : compute the sum of 0 through N

Copyright (C) Simon D. Levy 2016

This file is part of ISCPP.

ISCPP is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.

This code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this code.  If not, see <http://www.gnu.org/licenses/>.
"""


def sumToN(n):
    """Return the sum of the integers 0 through n, inclusive.

    Uses the closed-form n*(n+1)/2 in O(1) instead of the original O(n)
    accumulation loop.  For n < 0 the original loop ranged over an empty
    sequence and returned 0; the guard preserves that behavior (the bare
    formula would not, e.g. it yields 1 for n == -2).
    """
    if n < 0:
        return 0
    return n * (n + 1) // 2


if __name__ == "__main__":
    """ Example """
    print(sumToN(5))
#!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.

# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>

# Client script: moves a DIP generated from an AIP into the uploadDIP
# watched directory, placing the original METS file inside it, and makes
# sure the originating SIP and a placeholder Job exist in the dashboard DB.
from __future__ import print_function
import os
import sys
import shutil
import django
django.setup()
# dashboard
from main.models import Job, SIP
# archivematicaCommon
from custom_handlers import get_script_logger
from databaseFunctions import createSIP

if __name__ == '__main__':
    logger = get_script_logger("archivematica.mcp.client.generateDIPFromAIPGenerateDIP")
    # COPY THE METS FILE
    # Move the DIP Directory
    # argv[1] (fauxUUID) and argv[3] (date) are read but never used below.
    fauxUUID = sys.argv[1]
    # unitPath is expected to end with a path separator (stripped below).
    unitPath = sys.argv[2]
    date = sys.argv[3]
    basename = os.path.basename(unitPath[:-1])
    uuidLen = 36
    # The unit directory name carries two "-<36-char-uuid>" suffixes;
    # the SIP name is everything before them and the original SIP UUID is
    # the second-to-last one.  NOTE(review): assumes the
    # "<name>-<origUUID>-<uuid>" layout -- confirm against the caller.
    originalSIPName = basename[:-(uuidLen+1)*2]
    originalSIPUUID = basename[:-(uuidLen+1)][-uuidLen:]
    METSPath = os.path.join(unitPath, "metadata/submissionDocumentation/data/", "METS.%s.xml" % (originalSIPUUID))
    if not os.path.isfile(METSPath):
        print("Mets file not found: ", METSPath, file=sys.stderr)
        exit(-1)

    # move mets to DIP
    src = METSPath
    dst = os.path.join(unitPath, "DIP", os.path.basename(METSPath))
    shutil.move(src, dst)

    # Move DIP
    # Hand the DIP over to the uploadDIP watched directory, named after
    # the original SIP.
    src = os.path.join(unitPath, "DIP")
    dst = os.path.join("/var/archivematica/sharedDirectory/watchedDirectories/uploadDIP/", originalSIPName + "-" + originalSIPUUID)
    shutil.move(src, dst)

    try:
        SIP.objects.get(uuid=originalSIPUUID)
    except SIP.DoesNotExist:
        # otherwise doesn't appear in dashboard
        createSIP(unitPath, UUID=originalSIPUUID)

    # Placeholder job record so the DIP upload shows up in the dashboard UI.
    Job.objects.create(jobtype="Hack to make DIP Jobs appear", directory=unitPath, sip_id=originalSIPUUID, currentstep="Completed successfully", unittype="unitSIP", microservicegroup="Upload DIP")
rent_exp_end_date = frappe.db.get_value("Task", self.parent_task, "exp_end_date") if parent_exp_end_date and getdate(self.get("exp_end_date")) > getdate(parent_exp_end_date): frappe.throw(_("Expected End Date should be less than or equal to parent task's Expected End Date {0}.").format(getdate(parent_exp_end_date))) def validate_parent_project_dates(self): if not self.project or frappe.flags.in_test: return expected_end_date = frappe.db.get_value("Project", self.project, "expected_end_date") if expected_end_date: validate_project_dates(getdate(expected_end_date), self, "exp_start_date", "exp_end_date", "Expected") validate_project_dates(getdate(expected_end_date), self, "act_start_date", "act_end_date", "Actual") def validate_status(self): if self.is_template and self.status != "Template": self.status = "Template" if self.status!=self.get_db_value("status") and self.status == "Completed": for d in self.depends_on: if frappe.db.get_value("Task", d.task, "status") not in ("Completed", "Cancelled"): frappe.throw(_("Cannot complete task {0} as its dependant task {1} are not ccompleted / cancelled.").format(frappe.bold(self.name), frappe.bold(d.task))) close_all_assignments(self.doctype, self.name) def validate_progress(self): if flt(self.progress or 0) > 100: frappe.throw(_("Progress % for a task cannot be more than 100.")) if flt(self.progress) == 100: self.status = 'Completed' if self.status == 'Completed': self.progress = 100 def validate_dependencies_for_template_task(self): if self.is_template: self.validate_parent_template_task() self.validate_depends_on_tasks() def validate_parent_template_task(self): if self.parent_task: if not frappe.db.get_value("Task", self.parent_task, "is_template"): parent_task_format = """<a href="#Form/Task/{0}">{0}</a>""".format(self.parent_task) frappe.throw(_("Parent Task {0} is not a Template Task").format(parent_task_format)) def validate_depends_on_tasks(self): if self.depends_on: for task in self.depends_on: if not 
frappe.db.get_value("Task", task.task, "is_template"): dependent_task_format = """<a href="#Form/Task/{0}">{0}</a>""".format(task.task) frappe.throw(_("Dependent Task {0} is not a Template Task").format(dependent_task_format)) def update_depends_on(self): depends_on_tasks = self.depends_on_tasks or "" for d in self.depends_on: if d.task and d.task not in depends_on_tasks: depends_on_tasks += d.task + "," self.depends_on_tasks = depends_on_tasks def update_nsm_model(self): frappe.utils.nestedset.update_nsm(self) def on_update(self): self.update_nsm_model() self.check_recursion() self.reschedule_dependent_tasks() self.update_project() self.unassign_todo() self.populate_depends_on() def unassign_todo(self): if self.status == "Completed": close_all_assignments(self.doctype, self.name) if self.status == "Cancelled": clear(self.doctype, self.name) def update_total_expense_claim(self): self.total_expense_claim = frappe.db.sql("""select sum(total_sanctioned_amount) from `tabExpense Claim` where project = %s and task = %s and docstatus=1""",(self.project, self.name))[0][0] def update_time_and_costing(self): tl = frappe.db.sql("""select min(from_time) as start_date, max(to_time) as end_date, sum(billing_amount) as total_billing_amount, sum(costing_amount) as total_costing_amount, sum(hours) as time from `tabTimesheet Detail` where task = %s and docstatus=1""" ,self.name, as_dict=1)[0] if self.status == "Open": self.status = "Working" self.total_costing_amount= tl.total_costing_amount self.total_billing_amount= tl.total_billing_amount self.actual_time= tl.time self.act_start_date= tl.start_date self.act_end_date= tl.end_date def update_project(self): if self.project and not self.flags.from_project: frappe.get_cached_doc("Project", self.project).update_project() def check_recursion(self): if self.flags.ignore_recursion_check: return check_list = [['task', 'parent'], ['parent', 'task']] for d in check_list: task_list, count = [self.name], 0 while (len(task_list) > count ): 
tasks = frappe.db.sql(" select %s from `tabTask Depends On` where %s = %s " % (d[0], d[1], '%s'), cstr(task_list[count])) count = count + 1 for b in tasks: if b[0] == self.name: frappe.throw(_("Circular Reference Error"), CircularReferenceError) if b[0]: task_list.append(b[0]) if count == 15: break def reschedule_dependent_tasks(self): end_date = self.exp_end_date or self.act_end_date if end_date: for task_name in frappe.db.sql(""" select name from `tabTask` as parent where parent.project = %(project)s and parent.name in ( select parent from `tabTask Depends On` as child where child.task = %(task)s and child.project = %(project)s) """, {'project': self.project, 'task':self.name }, as_dict=1): task = frappe.get_doc("Task", task_name.name) if task.exp_start_date and task.exp_end_date and task.exp_start_date < getdate(end_date) and task.status == "Open": task_duration = date_diff(task.exp_end_date, task.exp_start_date) task.exp_start_date = add_days(end_date, 1) task.exp_end_date = add_days(task.exp_start_date, task_duration) task.flags.ignore_recursion_check = True task.save() def has_webform_permission(self): project_user = frappe.db.get_value("Project User", {"parent": self.project, "user":frappe.session.user} , "user") if project_user: return True def populate_depends_on(self): if self.parent_task: parent = frappe.get_doc('Task', self.parent_task) if self.name not in [row.task for row in parent.depends_on]: parent.append("depends_on", { "doctype": "Task Depends On", "task": self.name, "subject": self.subject }) parent.save() def on_trash(self): if check_if_child_exists(self.name): throw(_("Child Task exists for this Task. 
You can not delete this Task.")) self.update_nsm_model() def after_delete(self): self.update_project() def update_status(self): if self.status not in ('Cancelled', 'Completed') and self.exp_end_date: from datetime import datetime if self.exp_end_date < datetime.now().date(): self.db_set('status', 'Overdue', update_modified=False) self.update_project() @frap
pe.whitelist() def check_if_child_exists(name): child_tasks = frappe.get_all("Task", filters={"parent_task": name}) child_tasks = [get_link_to_form("Task", task.name) for task in child_tasks] return child_tasks @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs def get_project(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond meta = frappe.get_meta(doctype) searchfields = meta.get_
search_fields() search_columns = ", " + ", ".join(searchfields) if searchfields else '' search_cond = " or " + " or ".join([field + " like %(txt)s" for field in searchfields]) return frappe.db.sql(""" select name {search_columns} from `tabProject` where %(key)s like %(txt)s %(mcond)s {search_condition} order by name limit %(start)s, %(page_len)s""".format(search_columns = search_columns, search_condition=search_cond), { 'key': searchfield, 'txt': '%' + txt + '%', 'mcond':get_match_cond(doctype), 'start': start, 'page_len': page_len }) @frappe.whitelist() def set_multiple_status(names, status): names = json.loads(names) for name in names: task = frappe.get_doc("Task", name) task.status = status task.save() def set_tasks_as_overdue(): tasks = frappe.get_all("Task", filters={"status": ["not in", ["Cancelled", "Completed"]]}, fields=["name", "status", "review_date"]) for task in tasks: if task.status == "Pending Review": if getdate(task.review_date) > getdate(today()): continue frappe.get_doc("Task", task.name).update_status() @frappe.whitelist() def make_timesheet(source_name, target_doc=None, ignore_permissions=False): def set_missing_values(source, target): target.append("time_logs", { "hours": source.actual_time, "comple
#!/usr/bin/env python
# encoding: utf-8
"""
Show how to use `dur` and `delay` parameters of play() and out() methods
to sequence events over time.
"""
from pyo import *
import random

s = Server(duplex=0).boot()

# --- Part 1: a cloud of 70 sine loops -----------------------------------
# Each voice has a random frequency and starts 0.5 s after the previous
# one (start1 is a per-voice list of delays); each lives for 5 s.
num = 70
freqs = [random.uniform(100, 1000) for i in range(num)]
start1 = [i * 0.5 for i in range(num)]
# Per-voice amplitude envelope: 1 s attack, 5 s total, low overall gain.
fade1 = Fader([1] * num, 1, 5, mul=0.03).play(dur=5, delay=start1)
a = SineLoop(freqs, feedback=0.05, mul=fade1).out(dur=5, delay=start1)

# --- Part 2: beat-driven sample playback, 30 s in, for 40 s -------------
start2 = 30
dur2 = 40
snds = [
    "../snds/alum1.wav",
    "../snds/alum2.wav",
    "../snds/alum3.wav",
    "../snds/alum4.wav",
]
tabs = SndTable(snds)
fade2 = Fader(0.05, 10, dur2, mul=0.7).play(dur=dur2, delay=start2)
# Algorithmic beat pattern; w1..w3 are the per-position weight lists.
b = Beat(time=0.125, w1=[90, 30, 30, 20], w2=[30, 90, 50, 40], w3=[0, 30, 30, 40], poly=1).play(dur=dur2, delay=start2)
# Trigger a sample envelope on each beat, scaled by the beat amplitude
# and the section fader.
out = TrigEnv(b, tabs, b["dur"], mul=b["amp"] * fade2).out(dur=dur2, delay=start2)

# --- Part 3: slow FM drone, 45 s in, for 30 s ---------------------------
start3 = 45
dur3 = 30
fade3 = Fader(15, 15, dur3, mul=0.02).play(dur=dur3, delay=start3)
fm = FM(carrier=[149, 100, 151, 50] * 3, ratio=[0.2499, 0.501, 0.75003], index=10, mul=fade3).out(
    dur=dur3, delay=start3
)

s.gui(locals())
# -*- coding: utf-8 -*- from Headset import Heads
et import logging import time puerto = 'COM3' headset = Headset(logging.INFO) try: headset.connect(puerto, 115200) except Exception, e: raise e print "Is conected? " + str(headset.isConnected()) print "-
----------------------------------------" headset.startReading(persist_data=True) time.sleep(5) headset.stopReading() headset.closePort() print "-----------------------------------------" print "Is conected? " + str(headset.isConnected()) print headset.getStatus()
import sublime
import sublime_plugin
import re
import os
import datetime

# Subdirectory (inside the package, or inside Packages/User/<package>)
# holding the *.tmpl template files.
TMLP_DIR = 'templates'
KEY_SYNTAX = 'syntax'
KEY_FILE_EXT = 'extension'
# Sublime Text 3+ exposes templates through the resource API instead of
# the filesystem; several code paths branch on this.
IS_GTE_ST3 = int(sublime.version()) >= 3000
PACKAGE_NAME = 'new-file-pro'
PACKAGES_PATH = sublime.packages_path()
BASE_PATH = os.path.abspath(os.path.dirname(__file__))


class NewFileBase(sublime_plugin.WindowCommand):
    """Base window command: loads a file template, expands its
    ${...} placeholders, and offers filename-extension helpers."""

    def __init__(self, window):
        super(NewFileBase, self).__init__(window)

    def appendFileExtension(self, name, t):
        """Append the extension mapped to template type `t` unless `name`
        already ends with it; unknown types leave `name` unchanged."""
        tmp = name.split('.')
        length = len(tmp)
        # Last dot-separated component; equals `name` itself when there is
        # no dot (length == 1).
        s_ext = tmp[length - 1]
        # NOTE(review): 'python' -> 'python' and 'ruby' -> 'ruby' produce
        # files like "foo.python"; conventional extensions would be 'py'
        # and 'rb' -- confirm whether this is intentional.
        exts = {'css': 'css', 'html': 'html',
                'js': 'js', 'json': 'json', 'php': 'php',
                'php-class': 'php', 'php-interface': 'php',
                'xml': 'xml', 'python': 'python', 'ruby': 'ruby'}
        try:
            t_ext = exts[t]
            # Append when the name has no dot at all, or its suffix is not
            # already the target extension.
            if (s_ext == t_ext and length == 1) or s_ext != t_ext:
                return name + '.' + t_ext
        except KeyError:
            # Unknown template type: no extension handling.
            pass
        return name;

    def appendPHPExtension(self, name):
        """Ensure `name` carries a .php suffix."""
        t = name.split('.')
        length = len(t)
        ext = t[length - 1]
        if ext != "php":
            return name + '.php'
        return name;

    def get_code(self, type='text'):
        """Load the template for `type` (user override first, then the
        packaged one) and return it with placeholders expanded."""
        code = ''
        file_name = "%s.tmpl" % type
        isIOError = False
        if IS_GTE_ST3:
            # ST3 resource paths are forward-slash "Packages/..." strings.
            tmpl_dir = 'Packages/' + PACKAGE_NAME + '/' + TMLP_DIR + '/'
            user_tmpl_dir = 'Packages/User/' + PACKAGE_NAME + '/' + TMLP_DIR + '/'
        else:
            tmpl_dir = os.path.join(PACKAGES_PATH, PACKAGE_NAME, TMLP_DIR)
            user_tmpl_dir = os.path.join(PACKAGES_PATH, 'User', PACKAGE_NAME, TMLP_DIR)

        self.user_tmpl_path = os.path.join(user_tmpl_dir, file_name)
        self.tmpl_path = os.path.join(tmpl_dir, file_name)

        if IS_GTE_ST3:
            # Prefer the user's template; fall back to the packaged one.
            try:
                code = sublime.load_resource(self.user_tmpl_path)
            except IOError:
                try:
                    code = sublime.load_resource(self.tmpl_path)
                except IOError:
                    isIOError = True
        else:
            if os.path.isfile(self.user_tmpl_path):
                code = self.open_file(self.user_tmpl_path)
            elif os.path.isfile(self.tmpl_path):
                code = self.open_file(self.tmpl_path)
            else:
                isIOError = True

        if isIOError:
            sublime.message_dialog('[Warning] No such file: ' + self.tmpl_path + ' or ' + self.user_tmpl_path)

        return self.format_tag(code)

    def format_tag(self, code):
        """Expand ${date}, settings-defined ${attr} keys and (optionally)
        project variables in `code`; escape remaining non-numeric ${."""
        win = sublime.active_window()
        code = code.replace('\r', '')  # replace \r\n -> \n
        # format
        settings = self.get_settings()
        # NOTE: `format` shadows the builtin; kept as-is (local only).
        format = settings.get('date_format', '%Y-%m-%d')
        date = datetime.datetime.now().strftime(format)
        if not IS_GTE_ST3:
            code = code.decode('utf8')  # for st2 && Chinese characters
        code = code.replace('${date}', date)
        attr = settings.get('attr', {})
        for key in attr:
            code = code.replace('${%s}' % key, attr.get(key, ''))
        if settings.get('enable_project_variables', False) and hasattr(win, 'extract_variables'):
            variables = win.extract_variables()
            for key in ['project_base_name', 'project_path', 'platform']:
                code = code.replace('${%s}' % key, variables.get(key, ''))
        # Escape any remaining unescaped ${ not followed by a digit, so
        # snippet insertion does not treat it as a field.
        code = re.sub(r"(?<!\\)\${(?!\d)", '\${', code)
        return code

    def open_file(self, path, mode='r'):
        """Read and return the contents of `path` (ST2 filesystem path)."""
        fp = open(path, mode)
        code = fp.read()
        fp.close()
        return code

    def get_settings(self, type=None):
        """Return the package settings object, or the value stored under
        `type` (defaulting to []) when a key is given."""
        settings = sublime.load_settings(PACKAGE_NAME + '.sublime-settings')
        if not type:
            return settings

        opts = settings.get(type, [])
        return opts
Move SCU application. For sending Query/Retrieve (QR) C-MOVE requests to a QR Move SCP. """ import argparse import sys from pynetdicom import ( AE, evt, QueryRetrievePresentationContexts, AllStoragePresentationContexts, ) from pynetdicom.apps.common import setup_logging, create_dataset, handle_store from pynetdicom._globals import ALL_TRANSFER_SYNTAXES, DEFAULT_MAX_LENGTH from pynetdicom.pdu_primitives import SOPClassExtendedNegotiation from pynetdicom.sop_class import ( PatientRootQueryRetrieveInformationModelMove, StudyRootQueryRetrieveInformationModelMove, PatientStudyOnlyQueryRetrieveInformationModelMove, ) __version__ = "0.4.0" def _setup_argparser(): """Setup the command line arguments""" # Description parser = argparse.Argu
mentParser( description=( "The movescu application implements a Service Class User (SCU) " "for the Query/Retrieve (QR) Service Class and (optionally) a " "Storage SCP for the Storage Service Class. movescu supports " "retrieve functionality using the C-MOVE message. It sends query " "keys to an SCP and waits for a response. It will accept " "associations for the
purpose of receiving images sent as a " "result of the C-MOVE request. movescu can initiate the transfer " "of images to a third party or can retrieve images to itself " "(note: the use of the term 'move' is a misnomer, the C-MOVE " "operation performs a SOP Instance copy only)" ), usage="movescu [options] addr port", ) # Parameters req_opts = parser.add_argument_group("Parameters") req_opts.add_argument( "addr", help="TCP/IP address or hostname of DICOM peer", type=str ) req_opts.add_argument("port", help="TCP/IP port number of peer", type=int) # General Options gen_opts = parser.add_argument_group("General Options") gen_opts.add_argument( "--version", help="print version information and exit", action="store_true" ) output = gen_opts.add_mutually_exclusive_group() output.add_argument( "-q", "--quiet", help="quiet mode, print no warnings and errors", action="store_const", dest="log_type", const="q", ) output.add_argument( "-v", "--verbose", help="verbose mode, print processing details", action="store_const", dest="log_type", const="v", ) output.add_argument( "-d", "--debug", help="debug mode, print debug information", action="store_const", dest="log_type", const="d", ) gen_opts.add_argument( "-ll", "--log-level", metavar="[l]", help=("use level l for the logger (critical, error, warn, info, debug)"), type=str, choices=["critical", "error", "warn", "info", "debug"], ) parser.set_defaults(log_type="v") # Network Options net_opts = parser.add_argument_group("Network Options") net_opts.add_argument( "-aet", "--calling-aet", metavar="[a]etitle", help="set my calling AE title (default: MOVESCU)", type=str, default="MOVESCU", ) net_opts.add_argument( "-aec", "--called-aet", metavar="[a]etitle", help="set called AE title of peer (default: ANY-SCP)", type=str, default="ANY-SCP", ) net_opts.add_argument( "-aem", "--move-aet", metavar="[a]etitle", help="set move destination AE title (default: STORESCP)", type=str, default="STORESCP", ) net_opts.add_argument( "-ta", 
"--acse-timeout", metavar="[s]econds", help="timeout for ACSE messages (default: 30 s)", type=float, default=30, ) net_opts.add_argument( "-td", "--dimse-timeout", metavar="[s]econds", help="timeout for DIMSE messages (default: 30 s)", type=float, default=30, ) net_opts.add_argument( "-tn", "--network-timeout", metavar="[s]econds", help="timeout for the network (default: 30 s)", type=float, default=30, ) net_opts.add_argument( "-pdu", "--max-pdu", metavar="[n]umber of bytes", help=( f"set max receive pdu to n bytes (0 for unlimited, " f"default: {DEFAULT_MAX_LENGTH})" ), type=int, default=DEFAULT_MAX_LENGTH, ) # Query information model choices qr_group = parser.add_argument_group("Query Information Model Options") qr_model = qr_group.add_mutually_exclusive_group() qr_model.add_argument( "-P", "--patient", help="use patient root information model (default)", action="store_true", ) qr_model.add_argument( "-S", "--study", help="use study root information model", action="store_true" ) qr_model.add_argument( "-O", "--psonly", help="use patient/study only information model", action="store_true", ) # Query Options qr_query = parser.add_argument_group("Query Options") qr_query.add_argument( "-k", "--keyword", metavar="[k]eyword: (gggg,eeee)=str, keyword=str", help=( "add or override a query element using either an element tag as " "(group,element) or the element's keyword (such as PatientName)" ), type=str, action="append", ) qr_query.add_argument( "-f", "--file", metavar="path to [f]ile", help=( "use a DICOM file as the query dataset, if " "used with -k then the elements will be added to or overwrite " "those present in the file" ), type=str, ) # Store SCP options store_group = parser.add_argument_group("Storage SCP Options") store_group.add_argument( "--store", help="start a Storage SCP that can be used as the move destination", action="store_true", default=False, ) store_group.add_argument( "--store-port", metavar="[p]ort", help="the port number to use for the Storage 
SCP", type=int, default=11113, ) store_group.add_argument( "--store-aet", metavar="[a]etitle", help="the AE title to use for the Storage SCP", type=str, default="STORESCP", ) # Extended Negotiation Options ext_neg = parser.add_argument_group("Extended Negotiation Options") ext_neg.add_argument( "--relational-retrieval", help="request the use of relational retrieval", action="store_true", ) ext_neg.add_argument( "--enhanced-conversion", help="request the use of enhanced multi-frame image conversion", action="store_true", ) # Output Options out_opts = parser.add_argument_group("Output Options") out_opts.add_argument( "-od", "--output-directory", metavar="[d]irectory", help="write received objects to directory d", type=str, ) out_opts.add_argument( "--ignore", help="receive data but don't store it", action="store_true" ) ns = parser.parse_args() if ns.version: pass elif not bool(ns.file) and not bool(ns.keyword): parser.error("-f and/or -k must be specified") return ns def main(args=None): """Run the application.""" if args is not None: sys.argv = args args = _setup_argparser() if args.version: print(f"movescu.py v{__version__}") sys.exit() APP_LOGGER = setup_logging(args, "movescu") APP_LOGGER.debug(f"movescu.py v{__version__}") APP_LOGGER.debug("") # Create query (identifier) dataset try: # If you're looking at this to see how QR Move works then `identifer` # is a pydicom Dataset instance with your query keys, e.g.: # identifier = D
"""A tiny text adventure: escape the castle of the cake-obsessed Princess.

Each room function prints its scene, reads a command from stdin, and
returns the key of the next room to visit; ``runner`` drives the loop
forever (``death`` terminates the process with exit code 1).

Modernized from Python 2 (``print`` statements, ``raw_input``) to Python 3.
"""

import random
import sys


def death():
    """Print a random taunt and terminate the game with exit code 1."""
    quips = ["You died. You kinda suck at this.",
             "Your mom would be proud. If she were smarter.",
             "Such a luser.",
             "I have a small puppy that's better at this."]
    # random.choice replaces the hand-rolled quips[randint(0, len-1)] idiom.
    print(random.choice(quips))
    sys.exit(1)


def princess_lives_here():
    """Opening room. Only cramming the cake into the Princess advances."""
    print("You see a beautiful Princess with a shiny crown.")
    print("She offers you some cake.")

    eat_it = input("> ")

    if eat_it == "eat it":
        print("You explode like a pinata full of frogs.")
        print("The Princess cackles and eats the frogs. Yum!")
        return 'death'
    elif eat_it == "do not eat it":
        # typo fix: "curs off" -> "cuts off"
        print("She throws the cake at you and it cuts off your head.")
        print("The last thing you see is her munching on your torso. Yum!")
        return 'death'
    elif eat_it == "make her eat it":
        print("The Princess screams as you cram the cake in her mouth.")
        print("Then she smiles and cries and thanks you for saving her.")
        print("She points to a tiny door and says, 'The Koi needs cake too.'")
        # typo fix: "ahoves" -> "shoves"
        print("She gives you the very last bit of cake and shoves you in.")
        return 'gold_koi_pond'
    else:
        print("The princess looks at you confused and just points at the cake.")
        return 'princess_lives_here'


def gold_koi_pond():
    """Second room. Throwing the cake in (not 'feed it') advances."""
    print("There is a garden with a koi pond in the center.")
    print("You walk close and see a massive fin pole out.")
    print("You peek in and a creepy looking huge Koi stares at you.")
    print("It opens its mouth waiting for food.")

    feed_it = input("> ")

    if feed_it == "feed it":
        print("The Koi jumps up and rather than eating the cake, eats your arm.")
        # typo fix: "shruge than" -> "shrugs, then"
        print("You fall in and the Koi shrugs, then eats you.")
        print("You are then pooped out sometime later.")
        return 'death'
    elif feed_it == "do not feed it":
        print("The Koi grimaces, then thrashes around for a second.")
        print("It rushes to the other end of the pond, braces against the wall...")
        print("then it *lunges* out of the water, up in the air and over your")
        print("entire body, cake and all.")
        # typo fix: "poped" -> "pooped"
        print("You are then pooped out a week later.")
        return 'death'
    elif feed_it == "throw it in":
        print("The Koi wiggles, then leaps into the air to eat the cake.")
        print("You can see it's happy, it then grunts, thrashes...")
        print("and finally rolls over and poops a magic diamond into the air")
        print("at your feet.")
        return 'bear_with_sword'
    else:
        print("The Koi gets annoyed and wiggles a bit.")
        return 'gold_koi_pond'


def bear_with_sword():
    """Third room. Refusing the bear advances to the gate."""
    print("Puzzled, you are about to pick up the fish poop diamond when")
    print("a bear bearing a load bearing sword walks in.")
    # typo fix: "That\' my" -> "That's my"
    print('"Hey! That\'s my diamond! Where\'d you get that!?"')
    print("It holds its paw out and looks at you.")

    give_it = input("> ")

    if give_it == "give it":
        print("The bear swipes at your hand to grab the diamond and")
        print("rips your hand off in the process. It then looks at")
        print('your bloody stump and says, "Oh crap, sorry about that."')
        print("It tries to put your hand back on, but you collapse.")
        print("The last thing you see is the bear shrug and eat you.")
        return 'death'
    elif give_it in ("say no", "say_no"):
        # Accept "say no" (consistent with the other rooms' space-separated
        # commands) while staying backward compatible with the old "say_no".
        print("The bear looks shocked. Nobody ever told a bear")
        print("with a broadsword 'no'. It asks, ")
        print('"Is it because it\'s not a katana? I could go get one!"')
        print("It then runs off and now you notice a big iron gate.")
        print('"Where the hell did that come from?" You say.')
        return 'big_iron_gate'
    else:
        # typo fix: "look" -> "looks"
        print("The bear looks puzzled as to why you'd do that.")
        return "bear_with_sword"


def big_iron_gate():
    """Final room; opening the gate still gets you katana'd."""
    print("You walk up to the big iron gate and see there's a handle.")

    open_it = input("> ")

    if open_it == 'open it':
        print("You open it and you are free!")
        print("There are mountains. And berries! And...")
        print("Oh, but then the bear comes with his katana and stabs you.")
        print('"Who\'s laughing now!? Love this katana."')
        return 'death'
    else:
        print("That doesn't seem sensible. I mean, the door's right there.")
        return 'big_iron_gate'


# Room-key -> room-function dispatch table used by runner().
ROOMS = {
    'death': death,
    'princess_lives_here': princess_lives_here,
    'gold_koi_pond': gold_koi_pond,
    'big_iron_gate': big_iron_gate,
    'bear_with_sword': bear_with_sword,
}


def runner(map, start):
    """Run rooms starting at key `start`, following each room's returned key.

    Loops forever; the game only ends when a room calls sys.exit (death).
    """
    current = start
    while True:
        room = map[current]
        print("\n------------")
        current = room()


# Guarded so importing this module (e.g. for testing) does not start the
# interactive loop; running it as a script behaves exactly as before.
if __name__ == '__main__':
    runner(ROOMS, 'princess_lives_here')
quota_project_id=client_options.quota_project_id,
            client_info=client_info,
            # NOTE(review): self-signed JWT access appears to be enabled only
            # for the two gRPC transports — confirm against the generator.
            always_use_jwt_access=(
                Transport == type(self).get_transport_class("grpc")
                or Transport == type(self).get_transport_class("grpc_asyncio")
            ),
        )

    def create_specialist_pool(
        self,
        request: specialist_pool_service.CreateSpecialistPoolRequest = None,
        *,
        parent: str = None,
        specialist_pool: gca_specialist_pool.SpecialistPool = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> gac_operation.Operation:
        r"""Creates a SpecialistPool.

        Either ``request`` or the flattened fields (``parent``,
        ``specialist_pool``) may be supplied, but not both.

        Args:
            request (google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest):
                The request object. Request message for
                [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
            parent (str):
                Required. The parent Project name for the new
                SpecialistPool. The form is
                ``projects/{project}/locations/{location}``.
                This corresponds to the ``parent`` field on the ``request``
                instance; if ``request`` is provided, this should not be set.
            specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
                Required. The SpecialistPool to create.
                This corresponds to the ``specialist_pool`` field on the
                ``request`` instance; if ``request`` is provided, this should
                not be set.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation. The result
                type for the operation will be
                :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`.
                SpecialistPool represents customers' own workforce to work on
                their data labeling jobs.

        Raises:
            ValueError: If both ``request`` and any flattened field are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, specialist_pool])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a specialist_pool_service.CreateSpecialistPoolRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest):
            request = specialist_pool_service.CreateSpecialistPoolRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if specialist_pool is not None:
                request.specialist_pool = specialist_pool
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool]
        # Certain fields should be provided within the metadata header;
        # add these here (routing header lets the backend route by parent).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            gca_specialist_pool.SpecialistPool,
            metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
        )
        # Done; return the response.
        return response

    def get_specialist_pool(
        self,
        request: specialist_pool_service.GetSpecialistPoolRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> specialist_pool.SpecialistPool:
        r"""Gets a SpecialistPool.

        Either ``request`` or the flattened ``name`` field may be supplied,
        but not both.

        Args:
            request (google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest):
                The request object. Request message for
                [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool].
            name (str):
                Required. The name of the SpecialistPool resource. The form is
                ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
                This corresponds to the ``name`` field on the ``request``
                instance; if ``request`` is provided, this should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.types.SpecialistPool:
                SpecialistPool represents customers' own workforce to work on
                their data labeling jobs. It includes a group of specialist
                managers who are responsible for managing the labelers in this
                pool as well as customers' data labeling jobs associated with
                this pool.

        Raises:
            ValueError: If both ``request`` and ``name`` are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a specialist_pool_service.GetSpecialistPoolRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest):
            request = specialist_pool_service.GetSpecialistPoolRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and
# -*- coding: utf-8 -*-
# Odoo/OpenERP addon manifest for the OFX bank statement importer.
{
    'name': 'Import OFX Bank Statement',
    'category': 'Banking addons',
    # Version scheme: <Odoo series>.<module major.minor.patch>.
    'version': '8.0.1.0.1',
    'license': 'AGPL-3',
    # Implicit string concatenation: authors end up comma-separated
    # in a single string, per OCA convention.
    'author': 'OpenERP SA,'
              'Odoo Community Association (OCA)',
    'website': 'https://github.com/OCA/bank-statement-import',
    # Builds on the generic bank-statement import framework module.
    'depends': [
        'account_bank_statement_import'
    ],
    'demo': [
        'demo/demo_data.xml',
    ],
    # Needs the `ofxparse` Python library at runtime to parse OFX files.
    'external_dependencies': {
        'python': ['ofxparse'],
    },
    'auto_install': False,
    'installable': True,
}
r[int(Singleton.state.spinner_frame/20)])
            self.mw.widget.update()
        else:
            self.mw.progress_label.set_text("No task running")
            self.mw.widget.update()

    def update_settings(self, args):
        """Apply a changed setting widget value to its Setting object.

        args[0][0] is the Setting; for float settings args[0][1][0] is the
        input widget whose value is read. Pushes an undo state only for
        value-carrying (float) settings.
        """
        dbgfname()
        debug(" settings update: "+str(args))
        setting = args[0][0]
        if setting.type == TOSTypes.float:
            new_value = args[0][1][0].get_value()
            setting.set_value(new_value)
            project.push_state(Singleton.state, "update_settings")
        elif setting.type == TOSTypes.button:
            # Buttons carry no value; set_value(None) triggers their action.
            setting.set_value(None)
        else:
            warning(" Unknown setting type: %s"%(setting.type,))
        self.mw.widget.update()

    def tool_operation_up_click(self, args):
        """Move the selected tool operation one position earlier in the list."""
        dbgfname()
        debug(" tool operation up")
        if self.selected_tool_operation==None:
            return
        if len(Singleton.state.tool_operations)==0:
            return
        cur_idx = Singleton.state.tool_operations.index(self.selected_tool_operation)
        debug(" cur idx: "+str(cur_idx))
        if cur_idx == 0:
            # Already first; nothing to do.
            return
        temp = self.selected_tool_operation
        Singleton.state.tool_operations.remove(self.selected_tool_operation)
        Singleton.state.tool_operations.insert(cur_idx-1, temp)
        self.push_event(self.ee.update_tool_operations_list, {"selection": cur_idx-1})
        project.push_state(Singleton.state, "tool_operation_up_click")

    def tool_operation_down_click(self, args):
        """Move the selected tool operation one position later in the list."""
        dbgfname()
        debug(" tool operation down")
        if self.selected_tool_operation==None:
            return
        if len(Singleton.state.tool_operations)==0:
            return
        cur_idx = Singleton.state.tool_operations.index(self.selected_tool_operation)
        debug(" cur idx: "+str(cur_idx))
        if cur_idx == len(Singleton.state.tool_operations)-1:
            # Already last; nothing to do.
            return
        temp = self.selected_tool_operation
        Singleton.state.tool_operations.remove(self.selected_tool_operation)
        Singleton.state.tool_operations.insert(cur_idx+1, temp)
        self.push_event(self.ee.update_tool_operations_list, {"selection": cur_idx+1})
        project.push_state(Singleton.state, "tool_operation_down_click")

    def scroll_up(self, args):
        """Mouse-wheel up: shift+wheel pans vertically, ctrl+wheel pans
        horizontally, plain wheel zooms in around the pointer position.

        Zoom steps by +0.1 below scale 0.01, otherwise multiplies by 1.5.
        """
        dbgfname()
        debug(" scroll up")
        if self.shift_pressed:
            offset = Singleton.state.get_base_offset()
            Singleton.mw.widget_vscroll.set_value(-(offset[1]+10*Singleton.state.scale[0]))
        elif self.ctrl_pressed:
            offset = Singleton.state.get_base_offset()
            Singleton.mw.widget_hscroll.set_value(-(offset[0]+10*Singleton.state.scale[0]))
        else:
            osx, osy = Singleton.state.scale
            if Singleton.state.scale[0]<=0.01:
                Singleton.state.scale = (Singleton.state.scale[0]+0.1, Singleton.state.scale[1]+0.1)
            else:
                Singleton.state.scale = (Singleton.state.scale[0]*1.5, Singleton.state.scale[1]*1.5)
            tx, ty = Singleton.state.get_offset()
            sx, sy = Singleton.state.get_screen_size()
            px, py = self.pointer_position
            nsx, nsy = Singleton.state.scale
            debug(" Old px, py: %f, %f"%(px, py))
            debug(" Screen size: %s"%((sx, sy),))
            # Re-anchor the view so the point under the cursor stays put
            # after the zoom (note the y axis is negated).
            Singleton.state.set_base_offset((px-px*nsx, -(py-py*nsy)))
            debug(" New px, py: %f, %f"%((-(px-px/nsx), (py-py/nsy))))
            debug(" New scale: %s"%((nsx, nsy),))
            self.mw.cursor_pos_label.set_text("cur: %.3f:%.3f"%(px, py))
            self.mw.widget.update()

    def scroll_down(self, args):
        """Mouse-wheel down: pans with shift/ctrl like scroll_up, otherwise
        zooms out (-0.1 steps at scale <= 1, otherwise divide by 1.5),
        never going below scale 0.1."""
        dbgfname()
        debug(" scroll down")
        if self.shift_pressed:
            offset = Singleton.state.get_base_offset()
            Singleton.mw.widget_vscroll.set_value(-(offset[1]-10*Singleton.state.scale[0]))
        elif self.ctrl_pressed:
            offset = Singleton.state.get_base_offset()
            Singleton.mw.widget_hscroll.set_value(-(offset[0]-10*Singleton.state.scale[0]))
        else:
            if Singleton.state.scale[0]>0.1:
                if Singleton.state.scale[0]<=1:
                    Singleton.state.scale = (Singleton.state.scale[0]-0.1, Singleton.state.scale[1]-0.1)
                else:
                    Singleton.state.scale = (Singleton.state.scale[0]/1.5, Singleton.state.scale[1]/1.5)
            px, py = self.pointer_position
            nsx, nsy = Singleton.state.scale
            # NOTE(review): unlike scroll_up this does not keep the cursor
            # point fixed — looks asymmetric; confirm whether intentional.
            Singleton.state.set_base_offset((-px*nsx, py*nsy))
            self.mw.widget.update()

    def hscroll(self, args):
        """Horizontal scrollbar moved: args[0][0] is the scrollbar widget."""
        dbgfname()
        debug(" hscroll: "+str(args))
        debug(" "+str(args[0][0].get_value()))
        offset = Singleton.state.get_base_offset()
        Singleton.state.set_base_offset((-args[0][0].get_value(), offset[1]))
        self.mw.widget.update()

    def vscroll(self, args):
        """Vertical scrollbar moved: args[0][0] is the scrollbar widget."""
        dbgfname()
        debug(" vscroll: "+str(args))
        debug(" "+str(args[0][0].get_value()))
        offset = Singleton.state.get_base_offset()
        Singleton.state.set_base_offset((offset[0], -args[0][0].get_value()))
        self.mw.widget.update()

    def tool_paths_check_button_click(self, args):
        """Toggle visibility of the tool operation named in args[0][0]."""
        name = args[0][0]
        for o in Singleton.state.tool_operations:
            if o.display_name == name:
                o.display = not o.display
                break
        self.mw.widget.update()

    def paths_check_button_click(self, args):
        """Toggle visibility of the path named in args[0][0]."""
        name = args[0][0]
        for p in Singleton.state.paths:
            if p.name == name:
                p.display = not p.display
                break
        self.mw.widget.update()

    def path_delete_button_click(self, args):
        """Delete the currently selected path and push an undo state."""
        if self.selected_path in Singleton.state.paths:
            Singleton.state.paths.remove(self.selected_path)
            self.selected_path = None
            self.push_event(self.ee.update_paths_list, (None))
            project.push_state(Singleton.state, "path_delete_button_click")
        self.mw.widget.update()

    def tool_operation_delete_button_click(self, args):
        """Delete the currently selected tool operation and push an undo state."""
        if self.selected_tool_operation in Singleton.state.tool_operations:
            Singleton.state.tool_operations.remove(self.selected_tool_operation)
            self.selected_tool_operation = None
            self.push_event(self.ee.update_tool_operations_list, (None))
            project.push_state(Singleton.state, "tool_operation_delete_button_click")
        self.mw.widget.update()

    def undo_click(self, args):
        """Step the project history back and refresh both list panes."""
        dbgfname()
        debug(" steps("+str(len(project.steps))+") before: "+str(project.steps))
        project.step_back()
        debug(" steps("+str(len(project.steps))+") after: "+str(project.steps))
        self.push_event(self.ee.update_tool_operations_list, (None))
        self.push_event(self.ee.update_paths_list, (None))
        self.mw.widget.update()

    def redo_click(self, args):
        """Step the project history forward and refresh both list panes."""
        dbgfname()
        debug(" steps("+str(len(project.steps))+") before: "+str(project.steps))
        project.step_forward()
        debug(" steps("+str(len(project.steps))+") after: "+str(project.steps))
        self.push_event(self.ee.update_tool_operations_list, (None))
        self.push_event(self.ee.update_paths_list, (None))
        self.mw.widget.update()

    def main_start(self, args):
        """Application start: discover and load plugin modules.

        Walks Singleton.plugins_dir and imp.load_source()s every *.py file
        found at the top level (plugins self-register on import).
        """
        Singleton.plugins = []
        if Singleton.plugins_dir != None:
            for dirname, subdirs, files in os.walk(Singleton.plugins_dir):
                debug('Found directory: %s' % dirname)
                for fname in files:
                    if os.path.splitext(fname)[1] == ".py":
                        debug('F\t%s' % fname)
                        plugin_path = os.path.abspath(os.path.join(Singleton.plugins_dir, fname))
                        plugin_mod_name = os.path.splitext(fname)[0]
                        debug("Loading module with spec %s:%s"%(plugin_mod_name, plugin_path))
                        # NOTE(review): imp.load_source executes arbitrary
                        # plugin code; imp is also deprecated — consider
                        # importlib when the codebase moves to Python 3.
                        imp.load_source(plugin_mod_name, plugin_path)
                for dname in subdirs:
                    debug('D\t%s' % dname)
        debug("Registering plugins"
from zope.interface import Interface, Attribute
from zope import schema

# NOTE(review): Attribute, schema and the mf/_ message factory appear unused
# in this module — possibly kept for boilerplate parity; verify before removing.
from uwosh.emergency.client.config import mf as _


class IUWOshEmergencyClientLayer(Interface):
    """Marker interface that defines a browser layer."""
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ducktape.cluster.remoteaccount import RemoteCommandError
from ducktape.utils.util import wait_until


class JmxMixin(object):
    """This mixin helps existing service subclasses start JmxTool on their
    worker nodes and collect jmx stats.

    A couple things worth noting:
    - this is not a service in its own right.
    - we assume the service using JmxMixin also uses KafkaPathResolverMixin
    """
    def __init__(self, num_nodes, jmx_object_names=None, jmx_attributes=None):
        self.jmx_object_names = jmx_object_names
        self.jmx_attributes = jmx_attributes or []
        self.jmx_port = 9192

        # Per-node bookkeeping, indexed by (node idx - 1).
        self.started = [False] * num_nodes
        self.jmx_stats = [{} for _ in range(num_nodes)]
        self.maximum_jmx_value = {}  # map from object_attribute_name to maximum value observed over time
        self.average_jmx_value = {}  # map from object_attribute_name to average value observed over time

        self.jmx_tool_log = "/mnt/jmx_tool.log"
        self.jmx_tool_err_log = "/mnt/jmx_tool.err.log"

    def clean_node(self, node):
        """Kill any running JmxTool on `node` and remove its log files."""
        node.account.kill_process("jmx", clean_shutdown=False, allow_fail=True)
        # FIX: also remove the stderr log. start_jmx_tool appends (>>) to both
        # logs, so a stale err log would otherwise survive across runs.
        node.account.ssh("rm -rf %s %s" % (self.jmx_tool_log, self.jmx_tool_err_log),
                         allow_fail=False)

    def start_jmx_tool(self, idx, node):
        """Launch JmxTool in the background on `node`, polling the configured
        object names/attributes every second, and wait until it produces output.

        No-op if no jmx objects are configured or the tool was already started
        on this node.
        """
        if self.jmx_object_names is None:
            self.logger.debug("%s: Not starting jmx tool because no jmx objects are defined" % node.account)
            return

        if self.started[idx-1]:
            self.logger.debug("%s: jmx tool has been started already on this node" % node.account)
            return

        cmd = "%s kafka.tools.JmxTool " % self.path.script("kafka-run-class.sh", node)
        cmd += "--reporting-interval 1000 --jmx-url service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi" % self.jmx_port
        for jmx_object_name in self.jmx_object_names:
            cmd += " --object-name %s" % jmx_object_name
        for jmx_attribute in self.jmx_attributes:
            cmd += " --attributes %s" % jmx_attribute
        cmd += " 1>> %s" % self.jmx_tool_log
        cmd += " 2>> %s &" % self.jmx_tool_err_log

        self.logger.debug("%s: Start JmxTool %d command: %s" % (node.account, idx, cmd))
        node.account.ssh(cmd, allow_fail=False)
        wait_until(lambda: self._jmx_has_output(node), timeout_sec=10, backoff_sec=.5,
                   err_msg="%s: Jmx tool took too long to start" % node.account)
        self.started[idx-1] = True

    def _jmx_has_output(self, node):
        """Helper used as a proxy to determine whether jmx is running by that
        jmx_tool_log contains output."""
        try:
            # `test -z` succeeds only while the log is still empty.
            node.account.ssh("test -z \"$(cat %s)\"" % self.jmx_tool_log, allow_fail=False)
            return False
        except RemoteCommandError:
            return True

    def read_jmx_output(self, idx, node):
        """Parse JmxTool's CSV output from `node` into self.jmx_stats.

        Once output from *all* nodes has been read, also computes the
        per-attribute average and maximum of the values summed across nodes.
        """
        if not self.started[idx-1]:
            return

        object_attribute_names = []

        cmd = "cat %s" % self.jmx_tool_log
        self.logger.debug("Read jmx output %d command: %s", idx, cmd)
        lines = [line for line in node.account.ssh_capture(cmd, allow_fail=False)]
        assert len(lines) > 1, "There don't appear to be any samples in the jmx tool log: %s" % lines

        for line in lines:
            if "time" in line:
                # Header row: quoted column names; column 0 is the timestamp.
                object_attribute_names = line.strip()[1:-1].split("\",\"")[1:]
                continue
            stats = [float(field) for field in line.split(',')]
            time_sec = int(stats[0]/1000)
            self.jmx_stats[idx-1][time_sec] = {name: stats[i+1] for i, name in enumerate(object_attribute_names)}

        # do not calculate average and maximum of jmx stats until we have read output from all nodes
        # If the service is multithreaded, this means that the results will be aggregated only when the last
        # service finishes
        if any(len(time_to_stats) == 0 for time_to_stats in self.jmx_stats):
            return

        start_time_sec = min([min(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
        end_time_sec = max([max(time_to_stats.keys()) for time_to_stats in self.jmx_stats])

        for name in object_attribute_names:
            aggregates_per_time = []
            # FIX: was `xrange` — inconsistent with the `range` used in
            # __init__ and a NameError on Python 3; `range` behaves the same.
            for time_sec in range(start_time_sec, end_time_sec + 1):
                # assume that value is 0 if it is not read by jmx tool at the given time. This is appropriate for metrics such as bandwidth
                values_per_node = [time_to_stats.get(time_sec, {}).get(name, 0) for time_to_stats in self.jmx_stats]
                # assume that value is aggregated across nodes by sum. This is appropriate for metrics such as bandwidth
                aggregates_per_time.append(sum(values_per_node))
            self.average_jmx_value[name] = sum(aggregates_per_time) / len(aggregates_per_time)
            self.maximum_jmx_value[name] = max(aggregates_per_time)

    def read_jmx_output_all_nodes(self):
        """Read and aggregate JmxTool output from every node of the service."""
        for node in self.nodes:
            self.read_jmx_output(self.idx(node), node)
ding: utf-8 -*- # # textract documentation build configuration file, created by # sphinx-quickstart on Fri Jul 4 11:09:09 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(os.path.abspath('.'), '..')) import textract # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # Gener
al information about the project. project = u'textract' copyright = u'2014, Dean Malmgren' # The version info for the project you're documenting,
acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = textract.VERSION # The full version, including alpha/beta/rc tags. release = textract.VERSION # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
#html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['.static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. 
Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'textractdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'textract.tex', u'textract Documentation', u'Dean Malmgren', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'textract', u'textract Documentation', [u'Dean Malmgren'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'textract', u'textract Documentation', u'Dean Malmgren', 'textract', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If tr
import random


class ImageQueryParser:
    """Turns a raw query string into an ImageQuery.

    A trailing token starting with "-" is treated as a modifier:
    "-r" (case-insensitive) requests random results, "-<n>" sets the
    starting index. Anything else stays part of the query text.
    """

    def __init__(self):
        pass

    def parse(self, query_string):
        """Parse `query_string` and return the corresponding ImageQuery."""
        tokens = query_string.split(" ")
        flag = tokens[-1].lower()
        wants_random = False
        start_index = 0

        if flag.startswith("-"):
            if flag == "-r":
                wants_random = True
                del tokens[-1]
            else:
                try:
                    start_index = int(flag[1:])
                except ValueError:
                    # Not a number: leave the token in the query text.
                    pass
                else:
                    del tokens[-1]

        return ImageQuery(" ".join(tokens), wants_random, start_index)


class ImageQuery:
    """A parsed image query: text plus either random or sequential indexing."""

    def __init__(self, query, is_random, index):
        self.__query = query
        self.__is_random = is_random
        self.__index = index

    def query(self):
        """Return the query text with any modifier token stripped."""
        return self.__query

    def is_random(self):
        """Return True when results should be picked at random."""
        return self.__is_random

    def next_index(self):
        """Return the next result index: random in [0, 100) when random mode
        is on, otherwise the current index (post-incremented)."""
        if self.__is_random:
            return random.randrange(0, 100)
        current = self.__index
        self.__index = current + 1
        return current
class Parameter(object):
    """A single named parameter parsed from source metadata."""

    def __init__(self, name):
        self.name = name


class Vehicle(object):
    """A vehicle whose source tree (``path``) contributes parameters."""

    def __init__(self, name, path):
        self.name = name
        self.path = path
        # Parameter objects collected for this vehicle.
        self.params = []


class Library(object):
    """A shared parameter library referenced by vehicles."""

    def __init__(self, name):
        self.name = name
        # Parameter objects collected for this library.
        self.params = []


# All metadata field names a @Param block may carry.
known_param_fields = [
             'Description',
             'DisplayName',
             'Values',
             'Range',
             'Units',
             'Increment',
             'User',
             'RebootRequired',
             'Bitmask',
             'Volatile',
             'ReadOnly',
             ]

# Follow SI units conventions from:
# http://physics.nist.gov/cuu/Units/units.html
# http://physics.nist.gov/cuu/Units/outside.html
# and
# http://physics.nist.gov/cuu/Units/checklist.html
# http://www.bipm.org/en/publications/si-brochure/
# http://www1.bipm.org/en/CGPM/db/3/2/  (g_n unit for G-force)
# One further constraint is that only printable (7bit) ASCII characters are allowed.
#
# "metre" is the SI unit name; "meter" is the American spelling used below.
known_units = {
    # abbreviation : full-text (used in .html .rst and .wiki files)
    # time
    's'       : 'seconds'                       ,
    'ds'      : 'deciseconds'                   ,
    'cs'      : 'centiseconds'                  ,
    'ms'      : 'milliseconds'                  ,
    'PWM'     : 'PWM in microseconds'           , # should be microseconds; NOT an SI unit, but follows https://github.com/ArduPilot/ardupilot/pull/5538#issuecomment-271943061
    'Hz'      : 'hertz'                         ,
    # distance
    'km'      : 'kilometers'                    ,
    'm'       : 'meters'                        ,
    'm/s'     : 'meters per second'             ,
    'm/s/s'   : 'meters per square second'      ,
    'm/s/s/s' : 'meters per cubic second'       ,
    'cm'      : 'centimeters'                   ,
    'cm/s'    : 'centimeters per second'        ,
    'cm/s/s'  : 'centimeters per square second' ,
    'cm/s/s/s': 'centimeters per cubic second'  ,
    'mm'      : 'millimeters'                   ,
    # temperature
    'degC'    : 'degrees Celsius'               , # Not SI, but Kelvin is too cumbersome for most users
    # angle
    'deg'     : 'degrees'                       , # Not SI, but in some situations more user-friendly than radians
    'deg/s'   : 'degrees per second'            ,
    'cdeg'    : 'centidegrees'                  ,
    'cdeg/s'  : 'centidegrees per second'       ,
    'cdeg/s/s': 'centidegrees per square second',
    'rad'     : 'radians'                       ,
    'rad/s'   : 'radians per second'            ,
    'rad/s/s' : 'radians per square second'     ,
    # electricity
    'A'       : 'ampere'                        ,
    'V'       : 'volt'                          ,
    'W'       : 'watt'                          ,
    # magnetism
    'Gauss'   : 'gauss'                         , # Gauss is not an SI unit, but 1 tesla = 10000 gauss so a simple replacement is not possible here
    'Gauss/s' : 'gauss per second'              ,
    'mGauss'  : 'milligauss'                    ,
    # pressure
    'Pa'      : 'pascal'                        ,
    'mbar'    : 'millibar'                      ,
    # ratio
    '%'       : 'percent'                       ,
    '%/s'     : 'percent per second'            ,
    'd%'      : 'decipercent'                   , # decipercent is strange, but "per-mille" is even more exotic
    # compound
    'm.m/s/s' : 'square meter per square second',
    'deg/m/s' : 'degrees per meter per second'  ,
    'm/s/m'   : 'meters per second per meter'   , # Why not use Hz here ????
    'mGauss/A': 'milligauss per ampere'         ,
    'mA.h'    : 'milliampere hour'              ,
    'A/V'     : 'ampere per volt'               ,
    'm/V'     : 'meters per volt'               ,
    'gravities': 'standard acceleration due to gravity' , # g_n would be a more correct unit, but IMHO no one understands what g_n means
}

# Subset of known_param_fields that every @Param block must define.
required_param_fields = [
             'Description',
             'DisplayName',
             'User',
             ]

# Metadata field names a @Group block may carry.
known_group_fields = [
                      'Path',
                      ]
# coding=utf-8

# MySQL connection settings for the application.
# NOTE(review): credentials are hard-coded here; confirm this file is not
# committed with production secrets (consider environment variables).
HOSTNAME = 'localhost'   # database server host
DATABASE = 'r'           # database (schema) name
USERNAME = 'web'         # database user
PASSWORD = 'web'         # database password

# SQLAlchemy-style MySQL connection URI assembled from the values above:
# mysql://<user>:<password>@<host>/<database>
DB_URI = 'mysql://{}:{}@{}/{}'.format(
    USERNAME, PASSWORD, HOSTNAME, DATABASE)
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)

# Django views implementing django-tinymce helper endpoints: editor
# configuration JS, the TinyMCE spellchecker protocol, preview HTML,
# link/image list JS variables, and the gzip compressor.
# NOTE(review): this module targets an old Django version —
# `request.raw_post_data`, `django.utils.simplejson` and
# `django.core.urlresolvers` are all deprecated/removed in modern Django.

import logging

from django.core import urlresolvers
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext, loader
from django.utils import simplejson
from django.utils.translation import ugettext as _

from tinymce.compressor import gzip_compressor
from tinymce.widgets import get_language_config

from django.views.decorators.csrf import csrf_exempt


def textareas_js(request, name, lang=None):
    """
    Returns a HttpResponse whose content is a Javscript file. The template
    is loaded from 'tinymce/<name>_textareas.js' or
    '<name>/tinymce_textareas.js'. Optionally, the lang argument sets the
    content language.
    """
    template_files = (
        'tinymce/%s_textareas.js' % name,
        '%s/tinymce_textareas.js' % name,
    )
    template = loader.select_template(template_files)
    # `vars` shadows the builtin of the same name — kept as-is for a
    # behavior-preserving documentation pass.
    vars = get_language_config(lang)
    vars['content_language'] = lang
    context = RequestContext(request, vars)
    return HttpResponse(template.render(context),
                        content_type="application/x-javascript")


@csrf_exempt
def spell_check(request):
    """
    Returns a HttpResponse that implements the TinyMCE spellchecker
    protocol.

    The request body is a JSON-RPC-style payload:
    ``{"id": ..., "method": "checkWords"|"getSuggestions", "params": [lang, arg]}``.
    Any failure (missing enchant dict, bad payload, ...) is logged and
    reported as a plain-text error response rather than raised.
    """
    try:
        # Imported lazily so the rest of the module works without pyenchant.
        import enchant

        raw = request.raw_post_data
        input = simplejson.loads(raw)
        id = input['id']
        method = input['method']
        params = input['params']
        lang = params[0]
        arg = params[1]

        if not enchant.dict_exists(str(lang)):
            raise RuntimeError("dictionary not found for language '%s'" % lang)

        checker = enchant.Dict(str(lang))

        if method == 'checkWords':
            # Return only the words the dictionary does NOT accept.
            result = [word for word in arg if not checker.check(word)]
        elif method == 'getSuggestions':
            result = checker.suggest(arg)
        else:
            # NOTE(review): "Unkown" is a typo in this user-visible message;
            # left unchanged because it is a runtime string.
            raise RuntimeError("Unkown spellcheck method: '%s'" % method)

        output = {
            'id': id,
            'result': result,
            'error': None,
        }
    except Exception:
        logging.exception("Error running spellchecker")
        return HttpResponse(_("Error running spellchecker"))
    return HttpResponse(simplejson.dumps(output),
                        content_type='application/json')


def preview(request, name):
    """
    Returns a HttpResponse whose content is an HTML file that is used
    by the TinyMCE preview plugin. The template is loaded from
    'tinymce/<name>_preview.html' or '<name>/tinymce_preview.html'.
    """
    template_files = (
        'tinymce/%s_preview.html' % name,
        '%s/tinymce_preview.html' % name,
    )
    template = loader.select_template(template_files)
    return HttpResponse(template.render(RequestContext(request)),
                        content_type="text/html")


def flatpages_link_list(request):
    """
    Returns a HttpResponse whose content is a Javscript file representing a
    list of links to flatpages.
    """
    from django.contrib.flatpages.models import FlatPage
    link_list = [(page.title, page.url) for page in FlatPage.objects.all()]
    return render_to_link_list(link_list)


def compressor(request):
    """
    Returns a GZip-compressed response.
    """
    return gzip_compressor(request)


def render_to_link_list(link_list):
    """
    Returns a HttpResponse whose content is a Javscript file representing a
    list of links suitable for use wit the TinyMCE external_link_list_url
    configuration option. The link_list parameter must be a list of 2-tuples.
    """
    return render_to_js_vardef('tinyMCELinkList', link_list)


def render_to_image_list(image_list):
    """
    Returns a HttpResponse whose content is a Javscript file representing a
    list of images suitable for use wit the TinyMCE external_image_list_url
    configuration option. The image_list parameter must be a list of
    2-tuples.
    """
    return render_to_js_vardef('tinyMCEImageList', image_list)


def render_to_js_vardef(var_name, var_value):
    # Emit `var <name> = <json-value>` as a JS response body.
    output = "var %s = %s" % (var_name, simplejson.dumps(var_value))
    return HttpResponse(output, content_type='application/x-javascript')


def filebrowser(request):
    # Render the filebrowser JS shim pointed at django-filebrowser's
    # browse view.
    fb_url = urlresolvers.reverse('filebrowser.views.browse')
    return render_to_response('tinymce/filebrowser.js', {'fb_url': fb_url},
                              context_instance=RequestContext(request))
"""Reference table for ``open()`` modes plus a minimal write/read demo.

mode    | description
r or rt | read, text mode
w or wt | write, text mode
a or at | append to end of file, text mode
rb      | read, binary mode
wb      | write, binary mode
ab      | append to end of file, binary mode
"""

# Write a small sample file. Using ``with`` guarantees the handle is
# closed even if write() raises (the original explicit close() calls
# would leak the handle on error).
with open("./py200_sample.txt", "w") as f:
    f.write("abcd")

# Read the first line back and print it after a visual separator.
with open("./py200_sample.txt", "r") as r:
    print("-" * 60)
    print(r.readline())
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# RUN: %p/structured_output | FileCheck %s

# pylint: disable=missing-docstring,line-too-long

# This is a FileCheck-driven lit test: the CHECK comments below are test
# directives matched against the generated MLIR, so their text, order and
# placement are significant and must not be edited casually.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common


class TestModule(tf.Module):
  # The fNNNN name prefixes in this file are such that the sorted order of the
  # functions in the resulting MLIR output match the order in the source file,
  # allowing us to conveniently co-locate the CHECK's with the code they are
  # checking.
  #
  # Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.

  # Check index paths for results.
  #
  # CHECK:      func {{@[a-zA-Z_0-9]+}}() -> (
  # CHECK-SAME:   tensor<1xf32> {tf_saved_model.index_path = []})
  # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_single_return"]
  @tf.function(input_signature=[])
  def f0000_single_return(self):
    return tf.constant(1.0, shape=[1])

  # Check index paths for results with multiple return values.
  # Note that semantically in Python, multiple return values are equivalent
  # to returning a tuple/list.
  #
  # CHECK:      func {{@[a-zA-Z_0-9]+}}() -> (
  # CHECK-SAME:   tensor<1xf32> {tf_saved_model.index_path = [0]},
  # CHECK-SAME:   tensor<2xf32> {tf_saved_model.index_path = [1]})
  # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_multiple_results_no_punctuation"]
  @tf.function(input_signature=[])
  def f0001_multiple_results_no_punctuation(self):
    return tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])

  # Check index paths for results written explicitly with parentheses.
  # This is semantically equivalent to the earlier test without parentheses,
  # but this test serves as documentation of this behavior for the purposes
  # of tf_saved_model users.
  #
  # CHECK:      func {{@[a-zA-Z_0-9]+}}() -> (
  # CHECK-SAME:   tensor<1xf32> {tf_saved_model.index_path = [0]},
  # CHECK-SAME:   tensor<2xf32> {tf_saved_model.index_path = [1]})
  # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_multiple_results_parentheses"]
  @tf.function(input_signature=[])
  def f0002_multiple_results_parentheses(self):
    return (tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2]))

  # Check index paths for results written explicitly with brackets.
  # This is semantically equivalent to the earlier test without parentheses,
  # but this test serves as documentation of this behavior for the purposes
  # of tf_saved_model users.
  #
  # CHECK:      func {{@[a-zA-Z_0-9]+}}() -> (
  # CHECK-SAME:   tensor<1xf32> {tf_saved_model.index_path = [0]},
  # CHECK-SAME:   tensor<2xf32> {tf_saved_model.index_path = [1]})
  # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_multiple_results_brackets"]
  @tf.function(input_signature=[])
  def f0003_multiple_results_brackets(self):
    return [tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]

  # Check index paths for lists.
  #
  # CHECK:      func {{@[a-zA-Z_0-9]+}}() -> (
  # CHECK-SAME:   tensor<1xf32> {tf_saved_model.index_path = [0, 0]},
  # CHECK-SAME:   tensor<2xf32> {tf_saved_model.index_path = [0, 1]})
  # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_list_2_elements"]
  @tf.function(input_signature=[])
  def f0004_list_2_elements(self):
    return [[tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]]

  # Check index paths for dicts.
  # Keys are linearized in sorted order, matching `tf.nest.flatten`.
  # More thorough testing of this is in structured_input.py. The underlying code
  # path for linearization is shared, so no need to replicate that testing here.
  #
  # CHECK:      func {{@[a-zA-Z_0-9]+}}() -> (
  # CHECK-SAME:   tensor<1xf32> {tf_saved_model.index_path = ["x"]},
  # CHECK-SAME:   tensor<2xf32> {tf_saved_model.index_path = ["y"]})
  # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_dict_2_keys"]
  @tf.function(input_signature=[])
  def f0005_dict_2_keys(self):
    return {
        'x': tf.constant(1.0, shape=[1]),
        'y': tf.constant(1.0, shape=[2]),
    }

  # Check index paths for outputs are correctly handled in the presence of
  # multiple return statements.
  #
  # CHECK:      func {{@[a-zA-Z_0-9]+}}(
  # CHECK-SAME:   %arg0: tensor<f32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]}
  # CHECK-SAME: ) -> (
  # CHECK-SAME:   tensor<1xf32> {tf_saved_model.index_path = ["x"]})
  # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0006_multiple_return_statements"]
  @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
  def f0006_multiple_return_statements(self, x):
    if x > 3.:
      return {'x': tf.constant(1.0, shape=[1])}
    else:
      return {'x': tf.constant(1.0, shape=[1])}


if __name__ == '__main__':
  common.do_test(TestModule)
import unittest

from series import slices

# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0


class SeriesTest(unittest.TestCase):
    """Exercise ``series.slices`` against the canonical data set."""

    def test_slices_of_one_from_one(self):
        """A one-digit series has a single slice of length one."""
        self.assertEqual(slices("1", 1), ["1"])

    def test_slices_of_one_from_two(self):
        """Length-1 slices enumerate every digit in order."""
        self.assertEqual(slices("12", 1), ["1", "2"])

    def test_slices_of_two(self):
        """A two-digit series has exactly one slice of length two."""
        self.assertEqual(slices("35", 2), ["35"])

    def test_slices_of_two_overlap(self):
        """Consecutive slices overlap by all but one digit."""
        expected = ["91", "14", "42"]
        self.assertEqual(slices("9142", 2), expected)

    def test_slices_can_include_duplicates(self):
        """Repeated digits produce repeated (but distinct) slices."""
        expected = ["777", "777", "777", "777"]
        self.assertEqual(slices("777777", 3), expected)

    def test_slices_of_a_long_series(self):
        """A longer series yields len(series) - n + 1 slices."""
        expected = [
            "91849", "18493", "84939", "49390",
            "93904", "39042", "90424", "04243",
        ]
        self.assertEqual(slices("918493904243", 5), expected)

    def test_slice_length_is_too_large(self):
        """Asking for slices longer than the series is an error."""
        with self.assertRaisesWithMessage(ValueError):
            slices("12345", 6)

    def test_slice_length_cannot_be_zero(self):
        with self.assertRaisesWithMessage(ValueError):
            slices("12345", 0)

    def test_slice_length_cannot_be_negative(self):
        with self.assertRaisesWithMessage(ValueError):
            slices("123", -1)

    def test_empty_series_is_invalid(self):
        with self.assertRaisesWithMessage(ValueError):
            slices("", 1)

    # Utility functions
    def setUp(self):
        # Python 2's unittest only provides assertRaisesRegexp; alias it
        # so the tests can use the Python 3 spelling uniformly.
        try:
            self.assertRaisesRegex
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp

    def assertRaisesWithMessage(self, exception):
        # Accept any non-empty message; only the exception type matters.
        return self.assertRaisesRegex(exception, r".+")


if __name__ == "__main__":
    unittest.main()
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import time

from datetime import datetime, timedelta

from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils._text import to_native

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class TimedOutException(Exception):
    # Raised by do_until_success_or_timeout when the deadline passes.
    pass


class ActionModule(ActionBase):
    """Action plugin implementing win_reboot: reboot a Windows host and
    wait until it is reachable and responsive again."""

    TRANSFERS_FILES = False

    DEFAULT_REBOOT_TIMEOUT = 600
    DEFAULT_CONNECT_TIMEOUT = 5
    DEFAULT_PRE_REBOOT_DELAY = 2
    DEFAULT_POST_REBOOT_DELAY = 0
    DEFAULT_TEST_COMMAND = 'whoami'
    DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible.'

    def get_system_uptime(self):
        """Return the remote host's last boot time string (via WMI).

        Raises Exception when the remote command exits non-zero.
        """
        uptime_command = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
        (rc, stdout, stderr) = self._connection.exec_command(uptime_command)

        if rc != 0:
            raise Exception("win_reboot: failed to get host uptime info, rc: %d, stdout: %s, stderr: %s"
                            % (rc, stdout, stderr))

        return stdout

    def do_until_success_or_timeout(self, what, timeout, what_desc, fail_sleep=1):
        """Call ``what()`` repeatedly until it returns without raising,
        sleeping ``fail_sleep`` seconds between attempts.

        Raises TimedOutException after ``timeout`` seconds; the message
        includes the last exception seen.
        """
        max_end_time = datetime.utcnow() + timedelta(seconds=timeout)

        exc = ""
        while datetime.utcnow() < max_end_time:
            try:
                what()
                if what_desc:
                    display.debug("win_reboot: %s success" % what_desc)
                return
            except Exception as e:
                exc = e
                if what_desc:
                    display.debug("win_reboot: %s fail (expected), retrying in %d seconds..." % (what_desc, fail_sleep))
                time.sleep(fail_sleep)

        raise TimedOutException("timed out waiting for %s: %s" % (what_desc, exc))

    def run(self, tmp=None, task_vars=None):
        """Initiate the reboot, wait for the uptime to change, then wait
        for the post-reboot test command to succeed."""
        self._supports_check_mode = True
        self._supports_async = True

        # In check mode, pretend the reboot happened instantly.
        if self._play_context.check_mode:
            return dict(changed=True, elapsed=0, rebooted=True)

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        if result.get('skipped', False) or result.get('failed', False):
            return result

        # Handle timeout parameters and its alias
        # NOTE(review): the %s placeholders receive (arg, version) — the
        # message reads "Since Ansible <arg>, <version> is no longer used";
        # the arguments look swapped. Confirm against upstream.
        deprecated_args = {
            'shutdown_timeout': '2.5',
            'shutdown_timeout_sec': '2.5',
        }
        for arg, version in deprecated_args.items():
            if self._task.args.get(arg) is not None:
                display.warning("Since Ansible %s, %s is no longer used with win_reboot" % (arg, version))

        # Each option prefers the plain name and falls back to its *_sec alias.
        if self._task.args.get('connect_timeout') is not None:
            connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
        else:
            connect_timeout = int(self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))

        if self._task.args.get('reboot_timeout') is not None:
            reboot_timeout = int(self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT))
        else:
            reboot_timeout = int(self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT))

        if self._task.args.get('pre_reboot_delay') is not None:
            pre_reboot_delay = int(self._task.args.get('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY))
        else:
            pre_reboot_delay = int(self._task.args.get('pre_reboot_delay_sec', self.DEFAULT_PRE_REBOOT_DELAY))

        if self._task.args.get('post_reboot_delay') is not None:
            post_reboot_delay = int(self._task.args.get('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY))
        else:
            post_reboot_delay = int(self._task.args.get('post_reboot_delay_sec', self.DEFAULT_POST_REBOOT_DELAY))

        test_command = str(self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND))
        msg = str(self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE))

        # Get current uptime
        try:
            before_uptime = self.get_system_uptime()
        except Exception as e:
            result['failed'] = True
            # NOTE(review): every other path sets 'rebooted'; this key looks
            # like a typo for result['rebooted'] — confirm before relying on it.
            result['reboot'] = False
            result['msg'] = to_native(e)
            return result

        # Initiate reboot
        display.vvv("rebooting server")
        (rc, stdout, stderr) = self._connection.exec_command('shutdown /r /t %d /c "%s"' % (pre_reboot_delay, msg))

        # Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
        if rc == 1190:
            display.warning('A scheduled reboot was pre-empted by Ansible.')

            # Try to abort (this may fail if it was already aborted)
            (rc, stdout1, stderr1) = self._connection.exec_command('shutdown /a')

            # Initiate reboot again
            (rc, stdout2, stderr2) = self._connection.exec_command('shutdown /r /t %d' % pre_reboot_delay)
            stdout += stdout1 + stdout2
            stderr += stderr1 + stderr2

        if rc != 0:
            result['failed'] = True
            result['rebooted'] = False
            result['msg'] = "Shutdown command failed, error text was %s" % stderr
            return result

        start = datetime.now()

        # Get the original connection_timeout option var so it can be reset after
        connection_timeout_orig = None
        try:
            connection_timeout_orig = self._connection.get_option('connection_timeout')
        except AnsibleError:
            display.debug("win_reboot: connection_timeout connection option has not been set")

        try:
            # keep on checking system uptime with short connection responses
            def check_uptime():
                display.vvv("attempting to get system uptime")

                # override connection timeout from defaults to custom value
                try:
                    self._connection.set_options(direct={"connection_timeout": connect_timeout})
                    self._connection._reset()
                except AttributeError:
                    display.warning("Connection plugin does not allow the connection timeout to be overridden")

                # try and get uptime
                try:
                    current_uptime = self.get_system_uptime()
                except Exception as e:
                    raise e

                # The host has rebooted exactly when LastBootUpTime changes.
                if current_uptime == before_uptime:
                    raise Exception("uptime has not changed")

            self.do_until_success_or_timeout(check_uptime, reboot_timeout, what_desc="reboot uptime check success")

            # reset the connection to clear the custom connection timeout
            try:
                self._connection.set_options(direct={"connection_timeout": connection_timeout_orig})
                self._connection._reset()
            except (AnsibleError, AttributeError):
                display.debug("Failed to reset connection_timeout back to default")

            # finally run test command to ensure everything is working
            def run_test_command():
                display.vvv("attempting post-reboot test command '%s'" % test_command)
                (rc, stdout, stderr) = self._connection.exec_command(test_command)

                if rc != 0:
                    raise Exception('test command failed')

            # FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
            self.do_until_success_or_timeout(run_test_command, reboot_timeout, what_desc="post-reboot test command success")

            result['rebooted'] = True
            result['changed'] = True

        except TimedOutException as toex:
            result['failed'] = True
            result['rebooted'] = True
            result['msg'] = to_native(toex)

        if post_reboot_delay != 0:
            display.vvv("win_reboot: waiting an additional %d seconds" % post_reboot_delay)
            time.sleep(post_reboot_delay)

        elapsed = datetime.now() - start
        result['elapsed'] = elapsed.seconds
LD_ORDER,) = range(3)

# How often (in items) progress callbacks fire during the prepare phase.
JOB_REFRESH_RATE = 100


def getwords(s):
    """Split ``s`` into lowercase alphanumeric words, stripping accents
    and punctuation."""
    # We decompose the string so that ascii letters with accents can be part of the word.
    s = normalize("NFD", s)
    s = multi_replace(s, "-_&+():;\\[]{}.,<>/?~!@#$*", " ").lower()
    s = "".join(
        c for c in s if c in string.ascii_letters + string.digits + string.whitespace
    )
    return [_f for _f in s.split(" ") if _f]  # remove empty elements


def getfields(s):
    """Split ``s`` on ' - ' separators and return the non-empty word
    lists of each field."""
    fields = [getwords(field) for field in s.split(" - ")]
    return [_f for _f in fields if _f]


def unpack_fields(fields):
    """Flatten one level of nesting: a list of fields (lists of words)
    becomes a flat list of words."""
    result = []
    for field in fields:
        if isinstance(field, list):
            result += field
        else:
            result.append(field)
    return result


def compare(first, second, flags=()):
    """Returns the % of words that match between ``first`` and ``second``

    The result is a ``int`` in the range 0..100.
    ``first`` and ``second`` can be either a string or a list (of words).
    """
    if not (first and second):
        return 0
    # A nested list means we were given fields, not plain words.
    if any(isinstance(element, list) for element in first):
        return compare_fields(first, second, flags)
    second = second[:]  # We must use a copy of second because we remove items from it
    match_similar = MATCH_SIMILAR_WORDS in flags
    weight_words = WEIGHT_WORDS in flags
    joined = first + second
    total_count = sum(len(word) for word in joined) if weight_words else len(joined)
    match_count = 0
    in_order = True
    for word in first:
        if match_similar and (word not in second):
            similar = difflib.get_close_matches(word, second, 1, 0.8)
            if similar:
                word = similar[0]
        if word in second:
            if second[0] != word:
                in_order = False
            second.remove(word)
            match_count += len(word) if weight_words else 1
    result = round(((match_count * 2) / total_count) * 100)
    if (result == 100) and (not in_order):
        result = 99  # We cannot consider a match exact unless the ordering is the same
    return result


def compare_fields(first, second, flags=()):
    """Returns the score for the lowest matching :ref:`fields`.

    ``first`` and ``second`` must be lists of lists of string. Each
    sub-list is then compared with :func:`compare`.
    """
    if len(first) != len(second):
        return 0
    if NO_FIELD_ORDER in flags:
        results = []
        # We don't want to remove field directly in the list. We must work on a copy.
        second = second[:]
        for field1 in first:
            # NOTE(review): ``max`` shadows the builtin inside this loop.
            max = 0
            matched_field = None
            for field2 in second:
                r = compare(field1, field2, flags)
                if r > max:
                    max = r
                    matched_field = field2
            results.append(max)
            if matched_field:
                second.remove(matched_field)
    else:
        results = [
            compare(field1, field2, flags) for field1, field2 in zip(first, second)
        ]
    return min(results) if results else 0


def build_word_dict(objects, j=job.nulljob):
    """Returns a dict of objects mapped by their words.

    objects must have a ``words`` attribute being a list of strings or a
    list of lists of strings (:ref:`fields`).

    The result will be a dict with words as keys, lists of objects as
    values.
    """
    result = defaultdict(set)
    for object in j.iter_with_progress(
        objects, "Prepared %d/%d files", JOB_REFRESH_RATE
    ):
        for word in unpack_fields(object.words):
            result[word].add(object)
    return result


def merge_similar_words(word_dict):
    """Take all keys in ``word_dict`` that are similar, and merge them together.

    ``word_dict`` has been built with :func:`build_word_dict`. Similarity is
    computed with Python's ``difflib.get_close_matches()``, which computes the
    number of edits that are necessary to make a word equal to the other.
    """
    keys = list(word_dict.keys())
    keys.sort(key=len)  # we want the shortest word to stay
    while keys:
        key = keys.pop(0)
        similars = difflib.get_close_matches(key, keys, 100, 0.8)
        if not similars:
            continue
        # Fold every similar word's object set into the surviving key.
        objects = word_dict[key]
        for similar in similars:
            objects |= word_dict[similar]
            del word_dict[similar]
            keys.remove(similar)


def reduce_common_words(word_dict, threshold):
    """Remove all objects from ``word_dict`` values where the object count >= ``threshold``

    ``word_dict`` has been built with :func:`build_word_dict`.

    The exception to this removal are the objects where all the words of the object are common.
    Because if we remove them, we will miss some duplicates!
    """
    uncommon_words = set(
        word for word, objects in word_dict.items() if len(objects) < threshold
    )
    # Iterate over a snapshot because entries may be replaced or deleted.
    for word, objects in list(word_dict.items()):
        if len(objects) < threshold:
            continue
        reduced = set()
        for o in objects:
            if not any(w in uncommon_words for w in unpack_fields(o.words)):
                reduced.add(o)
        if reduced:
            word_dict[word] = reduced
        else:
            del word_dict[word]


# Writing docstrings in a namedtuple is tricky. From Python 3.3, it's possible to set __doc__, but
# some research allowed me to find a more elegant solution, which is what is done here. See
# http://stackoverflow.com/questions/1606436/adding-docstrings-to-namedtuples-in-python
class Match(namedtuple("Match", "first second percentage")):
    """Represents a match between two :class:`~core.fs.File`.

    Regarless of the matching method, when two files are determined to match,
    a Match pair is created, which holds, of course, the two matched files,
    but also their match "level".

    .. attribute:: first

        first file of the pair.

    .. attribute:: second

        second file of the pair.

    .. attribute:: percentage

        their match level according to the scan method which found the match.
        int from 1 to 100. For exact scan methods, such as Contents scans,
        this will always be 100.
    """

    __slots__ = ()


def get_match(first, second, flags=()):
    # it is assumed here that first and second both have a "words" attribute
    percentage = compare(first.words, second.words, flags)
    return Match(first, second, percentage)


def getmatches(
    objects,
    min_match_percentage=0,
    match_similar_words=False,
    weight_words=False,
    no_field_order=False,
    j=job.nulljob,
):
    """Returns a list of :class:`Match` within ``objects`` after fuzzily matching their words.

    :param objects: List of :class:`~core.fs.File` to match.
    :param int min_match_percentage: minimum % of words that have to match.
    :param bool match_similar_words: make similar words (see :func:`merge_similar_words`) match.
    :param bool weight_words: longer words are worth more in match % computations.
    :param bool no_field_order: match :ref:`fields` regardless of their order.
    :param j: A :ref:`job progress instance <jobs>`.
    """
    COMMON_WORD_THRESHOLD = 50
    # NOTE(review): LIMIT is defined but not used in the visible portion of
    # this function — presumably a result-count cap further down; confirm.
    LIMIT = 5000000
    j = j.start_subjob(2)
    sj = j.start_subjob(2)
    for o in objects:
        if not hasattr(o, "words"):
            o.words = getwords(o.name)
    word_dict = build_word_dict(objects, sj)
    reduce_common_words(word_dict, COMMON_WORD_THRESHOLD)
    if match_similar_words:
        merge_similar_words(word_dict)
    match_flags = []
    if weight_words:
        match_flags.append(WEIGHT_WORDS)
    if match_similar_words:
        match_flags.append(MATCH_SIMILAR_WORDS)
    if no_field_order:
        match_flags.append(NO_FIELD_ORDER)
    j.start_job(len(word_dict), tr("0 matches found"))
    compared = defaultdict(set)
    result = []
    try:
        # This whole 'popping' thing is there to avoid taking too much memory at the same time.
        while word_dict:
            items = word_dict.popitem()[1]
            while items:
                ref = items.pop()
                compared_already = compared[ref]
                to_compare = items - compared_already
#!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-09-01

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
"""
# ========================
# =   try_new_things.py  =
# ========================
#
# enter a few of your favorite artists and create a playlist of new music that
# you might like.
#

import sys, os, logging
import xml.sax.saxutils as saxutils
from optparse import OptionParser
from pyechonest import artist, playlist

# set your api key here if it's not set in the environment
# config.ECHO_NEST_API_KEY = "XXXXXXXXXXXXXXXXX"

logger = logging.getLogger(__name__)


class XmlWriter(object):
    """
    Minimal streaming XML writer used to emit XSPF playlists.

    code from: http://users.musicbrainz.org/~matt/xspf/m3u2xspf
    Copyright (c) 2006, Matthias Friedrich <matt@mafr.de>
    """

    def __init__(self, outStream, indentAmount='  '):
        self._out = outStream
        self._indentAmount = indentAmount
        # Stack of currently-open element names; depth drives indentation.
        self._stack = []

    def prolog(self, encoding='UTF-8', version='1.0'):
        """Write the XML declaration."""
        pi = '<?xml version="%s" encoding="%s"?>' % (version, encoding)
        self._out.write(pi + '\n')

    def start(self, name, attrs={}):
        """Open element ``name`` (closed later by a matching end())."""
        indent = self._getIndention()
        self._stack.append(name)
        self._out.write(indent + self._makeTag(name, attrs) + '\n')

    def end(self):
        """Close the most recently opened element."""
        name = self._stack.pop()
        indent = self._getIndention()
        self._out.write('%s</%s>\n' % (indent, name))

    def elem(self, name, value, attrs=None):
        """Write a complete element with text ``value`` and ``attrs``.

        Elements with no value and no (set) attributes are omitted
        entirely; an empty value with attributes produces a self-closing
        tag.
        """
        # BUGFIX: the original deleted unset attributes from ``attrs``
        # while iterating over it — a RuntimeError on Python 3 — and it
        # mutated the caller's dict (and a shared mutable default).
        # Build a filtered copy instead; observable output is unchanged.
        attrs = {k: v for (k, v) in (attrs or {}).items()
                 if v is not None and v != ''}
        if value is None or value == '':
            if len(attrs) == 0:
                return
            self._out.write(self._getIndention())
            self._out.write(self._makeTag(name, attrs, True) + '\n')
        else:
            escValue = saxutils.escape(value or '')
            self._out.write(self._getIndention())
            self._out.write(self._makeTag(name, attrs))
            self._out.write(escValue)
            self._out.write('</%s>\n' % name)

    def _getIndention(self):
        return self._indentAmount * len(self._stack)

    def _makeTag(self, name, attrs={}, close=False):
        ret = '<' + name
        # BUGFIX: was ``attrs.iteritems()``, which only exists on Python 2;
        # ``items()`` behaves identically on both versions here.
        for (k, v) in attrs.items():
            if v is not None:
                v = saxutils.quoteattr(str(v))
                ret += ' %s=%s' % (k, v)
        if close:
            return ret + '/>'
        else:
            return ret + '>'


def write_xspf(f, tuples):
    """send me a list of (artist,title,mp3_url)"""
    xml = XmlWriter(f, indentAmount='  ')
    xml.prolog()
    xml.start('playlist', {'xmlns': 'http://xspf.org/ns/0/', 'version': '1'})
    xml.start('trackList')
    for tupe in tuples:
        xml.start('track')
        xml.elem('creator', tupe[0])
        xml.elem('title', tupe[1])
        xml.elem('location', tupe[2])
        xml.end()
    xml.end()
    xml.end()
    # NOTE(review): this also closes sys.stdout when no output file was
    # given; kept for backward compatibility since the script exits
    # immediately afterwards.
    f.close()


def lookup_seeds(seed_artist_names):
    """Resolve artist names to Echo Nest seed ids (skipping unknowns)."""
    seed_ids = []
    for artist_name in seed_artist_names:
        try:
            seed_ids.append("-%s" % (artist.Artist(artist_name).id,))
        except Exception:
            logger.info('artist "%s" not found.' % (artist_name,))
            # we could try to do full artist search here
            # and let them choose the right artist
    logger.info('seed_ids: %s' % (seed_ids,))
    return seed_ids


def find_playlist(seed_artist_ids, playable=False):
    """Build a static artist-radio playlist; include audio buckets when
    ``playable`` is True."""
    if playable:
        logger.info("finding playlist with audio...")
        p = playlist.static(type='artist-radio', artist_id=seed_artist_ids,
                            variety=1, buckets=['id:7digital', 'tracks'],
                            limit=True)
    else:
        logger.info("finding playlist without audio...")
        p = playlist.static(type='artist-radio', artist_id=seed_artist_ids,
                            variety=1)
    return p


if __name__ == "__main__":
    usage = 'usage: %prog [options] "artist 1" "artist 2" ... "artist N"\n\n' \
            'example:\n' \
            '\t ./%prog "arcade fire" "feist" "broken social scene" -x -f arcade_feist_scene.xspf\n' \
            '\t ./%prog "justice" "four tet" "bitshifter" -v\n'
    parser = OptionParser(usage=usage)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="say what you're doing")
    parser.add_option("-a", "--audio", action="store_true", dest="audio",
                      default=False, help="fetch sample audio for songs")
    parser.add_option("-x", "--xspf", action="store_true", dest="xspf",
                      default=False, help="output an xspf format playlist")
    parser.add_option("-f", "--filename", metavar="FILE",
                      help="write output to FILE")
    (options, args) = parser.parse_args()

    if len(args) < 1:
        parser.error("you must provide at least 1 seed artist!")

    # handle verbose logging
    log_level = logging.ERROR
    if options.verbose:
        log_level = logging.INFO
    logging.basicConfig(level=log_level)
    logger.setLevel(log_level)

    # make sure output file doesn't already exist
    if options.filename and os.path.exists(options.filename):
        logger.error("The file path: %s already exists." % (options.filename,))
        sys.exit(1)

    # resolve seed artists
    seed_ids = lookup_seeds(args)

    # find playlist
    raw_plist = find_playlist(seed_ids, playable=(options.audio or options.xspf))
    tuple_plist = []
    for s in raw_plist:
        name = s.artist_name
        title = s.title
        url = ""
        if options.audio:
            url = s.get_tracks('7digital', [{}])[0].get('preview_url')
        tuple_plist.append((name, title, url))

    # write to stdout or file specified
    fout = open(options.filename, 'w') if options.filename else sys.stdout
    if options.xspf:
        write_xspf(fout, tuple_plist)
    else:
        for tupe in tuple_plist:
            fout.write("%s - %s \t %s\n" % tupe)

    logger.info("all done!")
    sys.exit(0)
# -*- coding: utf-8 -*-

def main():
    """Squish GUI test for the sasview fitting panel's Resolution tab.

    Verifies that the smearing combo box offers only "None" before any
    data is loaded, gains the custom smearing options once 1-D data is
    sent to fitting, and that the Custom Pinhole / Custom Slit choices
    expose the expected input labels and editable fields.
    """
    startApplication("sasview")
    # Before loading data: only the "None" smearing option should exist.
    clickTab(waitForObject(":FittingWidgetUI.tabFitting_QTabWidget_2"), "Resolution")
    test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentIndex, 0)
    test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "None")
    test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").count, 1)
    # Load the 1-D test data set cyl_400_20.txt and send it to fitting.
    clickTab(waitForObject(":FittingWidgetUI.tabFitting_QTabWidget_2"), "Model")
    clickButton(waitForObject(":groupBox.cmdLoad_QPushButton"))
    waitForObjectItem(":stackedWidget.listView_QListView", "test")
    doubleClickItem(":stackedWidget.listView_QListView", "test", 36, 4, 0, Qt.LeftButton)
    waitForObjectItem(":stackedWidget.listView_QListView", "1d\\_data")
    doubleClickItem(":stackedWidget.listView_QListView", "1d\\_data", 30, 10, 0, Qt.LeftButton)
    waitForObjectItem(":stackedWidget.listView_QListView", "cyl\\_400\\_20\\.txt")
    doubleClickItem(":stackedWidget.listView_QListView", "cyl\\_400\\_20\\.txt", 72, 3, 0, Qt.LeftButton)
    clickButton(waitForObject(":groupBox.cmdSendTo_QPushButton"))
    # Select the Cylinder model category.
    mouseClick(waitForObject(":groupBox_6.cbCategory_QComboBox_2"), 136, 8, 0, Qt.LeftButton)
    mouseClick(waitForObjectItem(":groupBox_6.cbCategory_QComboBox_2", "Cylinder"), 129, 9, 0, Qt.LeftButton)
    # With 1-D data loaded: three smearing options should now be available.
    clickTab(waitForObject(":FittingWidgetUI.tabFitting_QTabWidget_2"), "Resolution")
    test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentIndex, 0)
    test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "None")
    test.compare(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").count, 3)
    # Custom Pinhole Smear: dQ_low / dQ_high labels with enabled, empty inputs.
    mouseClick(waitForObject(":groupBox_4.cbSmearing_QComboBox"), 117, 7, 0, Qt.LeftButton)
    mouseClick(waitForObjectItem(":groupBox_4.cbSmearing_QComboBox", "Custom Pinhole Smear"), 113, 6, 0, Qt.LeftButton)
    test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "Custom Pinhole Smear")
    test.compare(str(waitForObjectExists(":groupBox_4.lblSmearUp_QLabel").text), "<html><head/><body><p>dQ<span style=\" vertical-align:sub;\">low</span></p></body></html>")
    test.compare(str(waitForObjectExists(":groupBox_4.lblSmearDown_QLabel").text), "<html><head/><body><p>dQ<span style=\" vertical-align:sub;\">high</span></p></body></html>")
    test.compare(str(waitForObjectExists(":groupBox_4.txtSmearUp_QLineEdit").text), "")
    test.compare(waitForObjectExists(":groupBox_4.txtSmearUp_QLineEdit").enabled, True)
    test.compare(str(waitForObjectExists(":groupBox_4.txtSmearDown_QLineEdit").text), "")
    test.compare(waitForObjectExists(":groupBox_4.txtSmearDown_QLineEdit").enabled, True)
    # Custom Slit Smear: labels switch to slit height / slit width.
    mouseClick(waitForObject(":groupBox_4.cbSmearing_QComboBox"), 117, 15, 0, Qt.LeftButton)
    mouseClick(waitForObjectItem(":groupBox_4.cbSmearing_QComboBox", "Custom Slit Smear"), 89, 5, 0, Qt.LeftButton)
    test.compare(str(waitForObjectExists(":groupBox_4.cbSmearing_QComboBox").currentText), "Custom Slit Smear")
    test.compare(waitForObjectExists(":groupBox_4.lblSmearUp_QLabel").visible, True)
    test.compare(str(waitForObjectExists(":groupBox_4.lblSmearUp_QLabel").text), "Slit height")
    test.compare(str(waitForObjectExists(":groupBox_4.lblSmearDown_QLabel").text), "Slit width")
    test.compare(waitForObjectExists(":groupBox_4.lblSmearDown_QLabel").visible, True)
from __future__ import absolute_import, division

import time
import os

# Python 2/3 compatibility: `unicode` does not exist on Python 3.
try:
    unicode
except NameError:
    unicode = str

from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked


class SQLiteLockFile(LockBase):
    "Demonstrate SQL-based locking."

    # Path of the shared scratch SQLite database.  Created lazily by the
    # first instance and removed at interpreter exit.
    testdb = None

    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = SQLiteLockFile('somefile')
        >>> lock = SQLiteLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        self.lock_file = unicode(self.lock_file)
        self.unique_name = unicode(self.unique_name)

        if SQLiteLockFile.testdb is None:
            import tempfile
            _fd, testdb = tempfile.mkstemp()
            os.close(_fd)
            # Remove the empty temp file; sqlite3.connect recreates it.
            os.unlink(testdb)
            del _fd, tempfile
            SQLiteLockFile.testdb = testdb

        import sqlite3
        self.connection = sqlite3.connect(SQLiteLockFile.testdb)

        c = self.connection.cursor()
        try:
            c.execute("create table locks"
                      "("
                      "   lock_file varchar(32),"
                      "   unique_name varchar(32)"
                      ")")
        except sqlite3.OperationalError:
            # Table already exists -- another instance created it first.
            pass
        else:
            self.connection.commit()
            import atexit
            atexit.register(os.unlink, SQLiteLockFile.testdb)

    def acquire(self, timeout=None):
        """Acquire the lock, waiting up to `timeout` seconds.

        timeout > 0  -- poll until acquired or LockTimeout is raised.
        timeout <= 0 -- raise AlreadyLocked immediately if unavailable.
        timeout None -- fall back to self.timeout (wait forever if that
                        is also None).

        Raises LockTimeout or AlreadyLocked on failure.
        """
        # BUGFIX: the original `timeout is not None and timeout or
        # self.timeout` replaced an explicit timeout of 0 with
        # self.timeout, so acquire(0) could block.  A conditional
        # expression preserves 0 as "do not wait".
        timeout = timeout if timeout is not None else self.timeout

        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        # Poll interval: a tenth of the timeout, or 0.1s for "wait forever".
        if timeout is None:
            wait = 0.1
        elif timeout <= 0:
            wait = 0
        else:
            wait = timeout / 10

        cursor = self.connection.cursor()

        while True:
            if not self.is_locked():
                # Not locked.  Try to lock it.
                cursor.execute("insert into locks"
                               "  (lock_file, unique_name)"
                               "  values"
                               "  (?, ?)",
                               (self.lock_file, self.unique_name))
                self.connection.commit()

                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               "  where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) > 1:
                    # Nope.  Someone else got there.  Remove our lock.
                    cursor.execute("delete from locks"
                                   "  where unique_name = ?",
                                   (self.unique_name,))
                    self.connection.commit()
                else:
                    # Yup.  We're done, so go home.
                    return
            else:
                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               "  where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) == 1:
                    # We're the locker, so go home.
                    return

            # Maybe we should wait a bit longer.
            if timeout is not None and time.time() > end_time:
                if timeout > 0:
                    # No more waiting.
                    raise LockTimeout("Timeout waiting to acquire"
                                      " lock for %s" %
                                      self.path)
                else:
                    # Someone else has the lock and we are impatient..
                    raise AlreadyLocked("%s is already locked" % self.path)

            # Well, okay.  We'll give it a bit longer.
            time.sleep(wait)

    def release(self):
        """Release the lock.

        Raises NotLocked if nobody holds it and NotMyLock if another
        unique_name holds it.
        """
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me (by %s)" %
                            (self.unique_name, self._who_is_locking()))
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       "  where unique_name = ?",
                       (self.unique_name,))
        self.connection.commit()

    def _who_is_locking(self):
        # Return the unique_name of whichever process/thread holds the lock.
        cursor = self.connection.cursor()
        cursor.execute("select unique_name from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        return cursor.fetchone()[0]

    def is_locked(self):
        """Return True if any holder has a row for this lock file."""
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        rows = cursor.fetchall()
        return not not rows

    def i_am_locking(self):
        """Return True if this instance's unique_name holds the lock."""
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       "  where lock_file = ?"
                       "    and unique_name = ?",
                       (self.lock_file, self.unique_name))
        return not not cursor.fetchall()

    def break_lock(self):
        """Forcibly remove every holder's row for this lock file."""
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        self.connection.commit()
#!/usr/bin/python
"""Batch-convert autogen run directories into per-directory record.json files.

For each ``*_metadata.json`` path given on the command line, read the
corresponding run directory with ``data_processing.read_dir_autogen`` and
dump the result as JSON next to it.
"""
import json
import sys
import data_processing as dp
from mython import NumpyToListEncoder
from subprocess import check_output
from imp import reload
# NOTE(review): reload(dp) looks like an interactive-development artifact;
# it is a no-op for a fresh script run -- confirm before removing.
reload(dp)

# Neat way of calling:
# find . -name '*_metadata.json' > rootlist
# python gen_json.py $(< rootlist) &> gen_json.out

# Strip the metadata suffix to recover each run's root prefix.
files = sys.argv[1:]
roots = [f.replace('_metadata.json','') for f in files]
for root in roots:
    data = dp.read_dir_autogen(root,gosling='/home/busemey2/bin/gosling')
    # Write record.json into the directory containing the root.
    loc = '/'.join(root.split('/')[:-1])
    outfn = loc+"/record.json"
    print("Outputting to %s..."%outfn)
    with open(outfn,'w') as outf:
        # NumpyToListEncoder converts numpy arrays to plain lists for JSON.
        json.dump(data,outf,cls=NumpyToListEncoder)
import numpy as np

class lemketableau:
    """Tableau for Lemke's complementary-pivoting algorithm.

    Columns of T are [I | -M | -1 (artificial driver y) | q]; Tind labels
    each column with a (variable type, variable index) pair so pivots can
    track which variable occupies which column.
    """
    def __init__(self,M,q,maxIter = 100):
        # Accept any array-like M/q (lists, tuples, int arrays): work on
        # float copies so reshape and the in-place row operations during
        # pivoting behave correctly.
        M = np.asarray(M, dtype=float)
        q = np.asarray(q, dtype=float)
        n = len(q)
        self.T = np.hstack((np.eye(n),-M,-np.ones((n,1)),q.reshape((n,1))))
        self.n = n
        # Current column position of each w_i / z_i variable in the tableau.
        self.wPos = np.arange(n)
        self.zPos = np.arange(n,2*n)
        # Variable-type tags used in the first row of Tind.
        self.W = 0
        self.Z = 1
        self.Y = 2
        self.Q = 3
        TbInd = np.vstack((self.W*np.ones(n,dtype=int),
                           np.arange(n,dtype=int)))
        TnbInd = np.vstack((self.Z*np.ones(n,dtype=int),
                            np.arange(n,dtype=int)))
        DriveInd = np.array([[self.Y],[0]])
        QInd = np.array([[self.Q],[0]])
        self.Tind = np.hstack((TbInd,TnbInd,DriveInd,QInd))
        self.maxIter = maxIter

    def lemkeAlgorithm(self):
        """Run Lemke's algorithm; return (z, exit_code, exit_string)."""
        initVal = self.initialize()
        if not initVal:
            # q >= 0: z = 0 already solves the LCP.
            return np.zeros(self.n),0,'Solution Found'

        for k in range(self.maxIter):
            stepVal = self.step()
            if self.Tind[0,-2] == self.Y:
                # The artificial variable y has left the basis: done.
                z = self.extractSolution()
                return z,0,'Solution Found'
            elif not stepVal:
                return None,1,'Secondary ray found'
        return None,2,'Max Iterations Exceeded'

    def initialize(self):
        """Bring y into the basis at the most negative q row.

        Returns False when q >= 0 (no pivoting needed).
        """
        q = self.T[:,-1]
        minQ = np.min(q)
        if minQ < 0:
            ind = np.argmin(q)
            self.clearDriverColumn(ind)
            self.pivot(ind)
            return True
        else:
            return False

    def step(self):
        """One complementary pivot via the minimum-ratio test.

        Returns False when no positive entry exists in the driver column
        (secondary ray).
        """
        q = self.T[:,-1]
        a = self.T[:,-2]
        # `ind` stays None when the ratio test finds no candidate row.
        ind = None
        minRatio = np.inf
        for i in range(self.n):
            if a[i] > 0:
                newRatio = q[i] / a[i]
                if newRatio < minRatio:
                    ind = i
                    minRatio = newRatio

        if minRatio < np.inf:
            self.clearDriverColumn(ind)
            self.pivot(ind)
            return True
        else:
            return False

    def extractSolution(self):
        """Read z off the basic columns of the final tableau."""
        z = np.zeros(self.n)
        q = self.T[:,-1]
        for i in range(self.n):
            if self.Tind[0,i] == self.Z:
                z[self.Tind[1,i]] = q[i]
        return z

    def partnerPos(self,pos):
        """Column of the complementary variable, or None for y/q columns."""
        v,ind = self.Tind[:,pos]
        if v == self.W:
            ppos = self.zPos[ind]
        elif v == self.Z:
            ppos = self.wPos[ind]
        else:
            ppos = None
        return ppos

    def pivot(self,pos):
        """Swap the leaving variable out of the basis.

        The complement of the leaving variable becomes the new driver; the
        old driver enters the basis at column `pos`.  Returns False when the
        leaving variable has no complement (i.e. it is y).
        """
        ppos = self.partnerPos(pos)
        if ppos is not None:
            self.swapColumns(pos,ppos)
            self.swapColumns(pos,-2)
            return True
        else:
            self.swapColumns(pos,-2)
            return False

    def swapMatColumns(self,M,i,j):
        # Swap columns i and j of M in place (copies avoid aliasing).
        Mi = np.array(M[:,i],copy=True)
        Mj = np.array(M[:,j],copy=True)
        M[:,i] = Mj
        M[:,j] = Mi
        return M

    def swapPos(self,v,ind,newPos):
        # Record a variable's new column; modulo maps negative indices
        # (e.g. -2 for the driver column) to their positive equivalents.
        if v == self.W:
            self.wPos[ind] = newPos % (2*self.n+2)
        elif v == self.Z:
            self.zPos[ind] = newPos % (2*self.n+2)

    def swapColumns(self,i,j):
        """Swap tableau columns i and j, updating the position bookkeeping."""
        iInd = self.Tind[:,i]
        jInd = self.Tind[:,j]
        v,ind = iInd
        self.swapPos(v,ind,j)
        v,ind = jInd
        self.swapPos(v,ind,i)
        self.Tind = self.swapMatColumns(self.Tind,i,j)
        self.T = self.swapMatColumns(self.T,i,j)

    def clearDriverColumn(self,ind):
        # Gauss-Jordan step: make the driver column the unit vector e_ind.
        a = self.T[ind,-2]
        self.T[ind] /= a
        for i in range(self.n):
            if i != ind:
                b = self.T[i,-2]
                self.T[i] -= b * self.T[ind]

    def ind2str(self,indvec):
        """Human-readable label for a (type, index) column descriptor."""
        v,pos = indvec
        if v == self.W:
            s = 'w%d' % pos
        elif v == self.Z:
            s = 'z%d' % pos
        elif v == self.Y:
            s = 'y'
        else:
            s = 'q'
        return s

    def indexStringArray(self):
        indstr = np.array([self.ind2str(indvec) for indvec in self.Tind.T],dtype=object)
        return indstr

    def indexedTableau(self):
        # Tableau with a header row of column labels, for display.
        indstr = self.indexStringArray()
        return np.vstack((indstr,self.T))

    def __repr__(self):
        IT = self.indexedTableau()
        return IT.__repr__()

    def __str__(self):
        IT = self.indexedTableau()
        return IT.__str__()

def lemkelcp(M,q,maxIter=100):
    """
    sol = lemkelcp(M,q,maxIter)

    Uses Lemke's algorithm to copute a solution to the
    linear complementarity problem:

    Mz + q >= 0
    z >= 0
    z'(Mz+q) = 0

    The inputs are given by:

    M - an nxn numpy array
    q - a length n numpy array
    maxIter - an optional number of pivot iterations. Set to 100 by default

    The solution is a tuple of the form:

    z,exit_code,exit_string = sol

    The entries are summaries in the table below:

    |z                | exit_code | exit_string               |
    -----------------------------------------------------------
    | solution to LCP |     0     | 'Solution Found'          |
    | None            |     1     | 'Secondary ray found'     |
    | None            |     2     | 'Max Iterations Exceeded' |
    """
    tableau = lemketableau(M,q,maxIter)
    return tableau.lemkeAlgorithm()
import logging

# --- MySQL connection settings ---
MYSQL_HOST = '127.0.0.1'
MYSQL_PORT = 3306
MYSQL_USER = 'root'
MYSQL_PASS = 'oppzk'
MYSQL_DB = 'SSMM'

# --- Management interface ---
MANAGE_PASS = 'passwd'
# To manage from another server, set this to a globally reachable IP.
MANAGE_BIND_IP = '127.0.0.1'
# Make sure this port is not already in use.
MANAGE_PORT = 10001

# --- Panel / API settings ---
PANEL_VERSION = 'V2'            # V2 or V3. V2 does not support the API.
API_URL = 'http://domain/mu'
API_PASS = 'mupass'
NODE_ID = '1'

# --- Timers (seconds) ---
CHECKTIME = 30 # interval between service health checks
SYNCTIME = 300 # interval between traffic synchronizations
RESETTIME = 300 # interval between traffic resets

# --- Shadowsocks bind address ---
# '[::]'    -- bind both IPv4 and IPv6
# '0.0.0.0' -- bind all IPv4 interfaces
# '4.4.4.4' -- bind one specific interface only
SS_BIND_IP = '0.0.0.0'
SS_METHOD = 'rc4-md5'

# --- Logging ---
LOG_ENABLE = False
LOG_LEVEL = logging.DEBUG
LOG_FILE = '/var/log/shadowsocks.log'
#!/usr/bin/env python
"""Dump the fields of a particle restart file to results_test.dat.

Reads the restart file named on the command line (or a default fixture)
and writes each property in a fixed scientific-notation format so the
output can be diffed against a reference file.
"""

import sys

# import particle restart
sys.path.append('../../src/utils')
import particle_restart as pr

# read in particle restart file (optional path on the command line,
# otherwise fall back to the bundled fixture)
if len(sys.argv) > 1:
    p = pr.Particle(sys.argv[1])
else:
    p = pr.Particle('particle_12_842.binary')

# set up output string
outstr = ''

# write out properties
# NOTE(review): integer-valued fields (batch, gen, id, run mode) are
# deliberately written with the same 12.6E float format as the physical
# quantities -- the reference results file expects this exact layout.
outstr += 'current batch:\n'
outstr += "{0:12.6E}\n".format(p.current_batch)
outstr += 'current gen:\n'
outstr += "{0:12.6E}\n".format(p.current_gen)
outstr += 'particle id:\n'
outstr += "{0:12.6E}\n".format(p.id)
outstr += 'run mode:\n'
outstr += "{0:12.6E}\n".format(p.run_mode)
outstr += 'particle weight:\n'
outstr += "{0:12.6E}\n".format(p.weight)
outstr += 'particle energy:\n'
outstr += "{0:12.6E}\n".format(p.energy)
outstr += 'particle xyz:\n'
outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.xyz[0],p.xyz[1],p.xyz[2])
outstr += 'particle uvw:\n'
outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.uvw[0],p.uvw[1],p.uvw[2])

# write results to file
with open('results_test.dat','w') as fh:
    fh.write(outstr)
__version__ = '$Id$'

from Acquisition import aq_inner
from Products.Five.browser import BrowserView
from Products.CMFCore.utils import getToolByName
from DateTime import DateTime


class LastZorionagurrak(BrowserView):
    """Browser view returning the latest published Zorionagurra items."""

    def getLastZorionagurrak(self, num=5):
        """Return up to `num` published Zorionagurra catalog brains.

        Today's items (by getDate) come first; when fewer than `num`
        exist, the remainder is filled with items dated after today.
        """
        context = aq_inner(self.context)
        # Bounds of "today": earliest and latest times of the current day.
        today = DateTime().earliestTime()
        todayend = DateTime().latestTime()
        # NOTE(review): `tomorrow` is computed but never used -- candidate
        # for removal once confirmed dead.
        tomorrow = today + 1
        pcal = getToolByName(context, 'portal_catalog')
        # Items whose date falls within today.
        todaybrains = pcal(portal_type='Zorionagurra',
                           review_state='published',
                           getDate={'query':(today, todayend),
                                    'range':'min:max'},
                           sort_on='getDate',
                           sort_limit=num)
        todaybrainnumber = len(todaybrains)
        if todaybrainnumber >= num:
            return todaybrains
        else:
            # Top up with future-dated items (getDate >= todayend).
            tomorrowbrainnumber = num - todaybrainnumber
            tomorrowbrains = pcal(portal_type='Zorionagurra',
                                  review_state='published',
                                  getDate={'query':(todayend,),
                                           'range':'min'},
                                  sort_on='getDate',
                                  sort_limit=tomorrowbrainnumber)
            return todaybrains + tomorrowbrains
import unittest
import os

import hiframe
import hipubiface_test_basic_plugin._hiframe

# Resolve plugin search paths relative to this test file: the test's own
# directory (for the test plugin) and the package's src directory.
MY_ABSOLUTE_PATH = os.path.abspath(__file__)
MY_ABSOLUTE_PARENT = os.path.dirname(MY_ABSOLUTE_PATH)
HIPUBIFACE_PATH = os.path.dirname(os.path.dirname(os.path.dirname(MY_ABSOLUTE_PARENT)))
HIPUBIFACE_SRC_PATH = HIPUBIFACE_PATH+"/src"

class HipubifaceTestBasic(unittest.TestCase):
    """Basic call-dispatch tests for the hipubiface plugin interface."""

    # NOTE(review): this class previously carried several tests for the
    # legacy module-level `hipubiface.call(...)` API (guest_ping pass/fail,
    # list_cmd, hellofile/hellofile2), kept inline as commented-out code.
    # They are dead code for the current HiFrame-based API; recover them
    # from version-control history if the legacy API returns.

    def test_call_noarg(self):
        # A call with no arguments returns the plugin's plain value.
        hf=hiframe.HiFrame(plugin_path_list=[MY_ABSOLUTE_PARENT,HIPUBIFACE_SRC_PATH])
        hf.start()
        me=hf.plugin_D["hipubiface"]
        ret = me.call("hipubiface_test_basic_plugin","helloworld")
        self.assertEqual(ret, "helloworld")

    def test_call_arg(self):
        # Keyword-style arguments are forwarded to the plugin command.
        hf=hiframe.HiFrame(plugin_path_list=[MY_ABSOLUTE_PARENT,HIPUBIFACE_SRC_PATH])
        hf.start()
        me=hf.plugin_D["hipubiface"]
        ret = me.call("hipubiface_test_basic_plugin","uppercase",{"txt_a":"asdf"})
        self.assertEqual(ret, "ASDF")

    def test_call_exception(self):
        # Exceptions raised inside a plugin command must propagate to the
        # caller with their original type.
        hf=hiframe.HiFrame(plugin_path_list=[MY_ABSOLUTE_PARENT,HIPUBIFACE_SRC_PATH])
        hf.start()
        me=hf.plugin_D["hipubiface"]
        try:
            me.call("hipubiface_test_basic_plugin","hello_exception")
            self.fail()
        except hipubiface_test_basic_plugin._hiframe.TestException:
            pass
        except:
            self.fail()
#!/usr/bin/env python
"""Run a Kolmogorov-Smirnov goodness-of-fit test per input group.

Each group's sample column is tested against a reference distribution
(from scipy.stats, or an arbitrary lambda given on the command line).
Written for Python 2: `map(...)` results are used as lists.
"""

import os
import sys
import argparse
import traceback

sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir))
from toollib.group import Group,UnsortedInputGrouper

import scipy.stats as ss

class KSGroup(Group):
    """Accumulates one group's samples and emits its KS statistic/p-value."""

    def __init__(self, tup):
        super(KSGroup, self).__init__(tup)
        self.samples = []

    def add(self, chunks):
        # Collect the configured column as a float sample.
        self.samples.append(float(chunks[args.column]))

    def done(self):
        # Emit: [group key columns] KS-statistic p-value
        jdelim = args.delimiter if args.delimiter != None else ' '
        if len(self.tup) > 0:
            args.outfile.write(jdelim.join(self.tup) + jdelim)
        args.outfile.write(jdelim.join(map(str, ss.kstest(self.samples, args.distf, args=args.params))) + '\n')

if __name__ == "__main__":
    # set up command line args
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
                                     description='Compare the request distributions of all clients')
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
    parser.add_argument('-s', '--source', default='scipy.stats', choices=['scipy.stats', 'lambda'], help='source of the distribution to fit')
    parser.add_argument('-i', '--dist', default='paretoLomax')
    parser.add_argument('-p', '--params', default='', help='initial parameters')
    parser.add_argument('-c', '--column', type=int, default=0)
    parser.add_argument('-g', '--group', nargs='+', type=int, default=[])
    parser.add_argument('-d', '--delimiter', default=None)
    args = parser.parse_args()

    # Parse the distribution parameters into floats.
    args.params = map(float, args.params.split(args.delimiter))
    if args.source == 'scipy.stats':
        args.source = ss
    else:
        args.source = None

    if args.source:
        # Resolve a dotted name (e.g. "expon") inside scipy.stats.
        mod = args.source
        for c in args.dist.split('.'):
            mod = getattr(mod, c)
        args.distf = mod
    else:
        # NOTE(review): eval of a command-line string -- acceptable only
        # because this is an operator-run analysis tool, never expose to
        # untrusted input.
        args.distf = eval(args.dist)

    grouper = UnsortedInputGrouper(args.infile, KSGroup, args.group, args.delimiter)
    grouper.group()
from django.db import models
from django.template.defaultfilters import slugify
from datetime import datetime

from redactor.fields import RedactorField

from management.post_tweet import post_tweet


###     News
####################################################################################################

class News(models.Model):
    """A news piece, optionally announced on Twitter when saved."""

    # NOTE(review): this field shadows the imported post_tweet() function
    # as an instance attribute; inside save() the module-level function is
    # still the one resolved -- confirm this is intentional.
    post_tweet = models.BooleanField(
        default=False,
    )

    # Extra handles appended to the tweet (presumably "cc @..." text --
    # verify against management.post_tweet).
    tweet_cc = models.CharField(
        max_length=70,
        blank=True,
        null=True,
    )

    title = models.CharField(
        max_length=250,
    )

    # Derived from title on every save (see save()).
    slug = models.SlugField(
        max_length=250,
        blank=True,
        unique=True,
    )

    # Rich-text body edited via django-redactor.
    content = RedactorField()

    created = models.DateTimeField(
        default=datetime.now,
        blank=True,
        null=True,
    )

    city = models.ForeignKey(
        'utils.City',
        blank=True,
        null=True,
    )

    country = models.ForeignKey(
        'utils.Country',
        blank=True,
        null=True,
    )

    tags = models.ManyToManyField(
        'utils.Tag',
        through='NewsTag',
        related_name='news',
    )

    projects = models.ManyToManyField(
        'projects.Project',
        through='ProjectRelatedToNews',
        related_name='news',
    )

    publications = models.ManyToManyField(
        'publications.Publication',
        through='PublicationRelatedToNews',
        related_name='news',
    )

    persons = models.ManyToManyField(
        'persons.Person',
        through='PersonRelatedToNews',
        related_name='news',
    )

    class Meta:
        ordering = ('-created',)
        verbose_name = u'News piece'
        verbose_name_plural = u'News pieces'

    def __unicode__(self):
        return u'%s' % self.title

    def save(self, *args, **kwargs):
        # NOTE(review): the slug is regenerated on *every* save, so renaming
        # a published piece changes its URL; and the tweet is posted before
        # the row is committed and on every save while post_tweet is True --
        # both look like candidates for tightening.
        self.slug = slugify(self.title)
        if self.post_tweet:
            post_tweet(self)
        # Make embedded images responsive in the rendered content.
        self.content = self.content.replace("<img src=", "<img class='img-responsive' src=")
        super(News, self).save(*args, **kwargs)


###     NewsTag
####################################################################################################

class NewsTag(models.Model):
    # Through model linking News to utils.Tag.
    tag = models.ForeignKey('utils.Tag')
    news = models.ForeignKey('News')

    class Meta:
        verbose_name = u'News Tag'
        verbose_name_plural = u'News Tags'


###     ProjectRelatedToNews
####################################################################################################

class ProjectRelatedToNews(models.Model):
    # Through model linking News to projects.Project.
    project = models.ForeignKey('projects.Project')
    news = models.ForeignKey('News')

    class Meta:
        verbose_name = u'Project related to News piece'
        verbose_name_plural = u'Projects related to News pieces'


###     PublicationRelatedToNews
####################################################################################################

class PublicationRelatedToNews(models.Model):
    # Through model linking News to publications.Publication.
    publication = models.ForeignKey('publications.Publication')
    news = models.ForeignKey('News')

    class Meta:
        verbose_name = u'Publication related to News piece'
        verbose_name_plural = u'Publications related to News pieces'


###     PersonRelatedToNews
####################################################################################################

class PersonRelatedToNews(models.Model):
    # Through model linking News to persons.Person.
    person = models.ForeignKey('persons.Person')
    news = models.ForeignKey('News')

    class Meta:
        verbose_name = u'Person related to News piece'
        verbose_name_plural = u'People related to News pieces'


###     EventRelatedToNews
####################################################################################################

class EventRelatedToNews(models.Model):
    # Relates News to events.Event (no ManyToManyField on News uses this
    # through model -- presumably queried directly).
    event = models.ForeignKey('events.Event')
    news = models.ForeignKey('News')

    class Meta:
        verbose_name = u'Event related to News piece'
        verbose_name_plural = u'Events related to News pieces'
from PIL import Image
import stripe
import datetime
from django.shortcuts import render, redirect
from django.views.generic import TemplateView, View, FormView
from django.core.urlresolvers import reverse_lazy, reverse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.conf import settings
from paypal.standard.forms import PayPalPaymentsForm
from picture.models import Picture, Settings, Pixel, PaymentNote
from picture.forms import PaymentNoteForm
from paypal.standard.models import ST_PP_COMPLETED
from paypal.standard.ipn.signals import valid_ipn_received, invalid_ipn_received, payment_was_flagged

# Create your views here.

class PictureIndexView(FormView):
    """Landing page: shows the current picture and collects a payment note."""
    template_name = 'picture/index.html'
    form_class = PaymentNoteForm

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['picture'] = Settings.objects.first().picture
        # Timestamp used as a cache-buster in the template (presumably --
        # confirm against index.html).
        context['random'] = datetime.datetime.now()
        # Serialize existing notes plus up to 50 of their revealed pixels.
        context['payment_notes'] = [{
            'name': note.name,
            'url': note.url,
            'number': note.number,
            'pixels': [{
                'r': pixel.r,
                'g': pixel.g,
                'b': pixel.b,
            } for pixel in note.pixels.all()[:50]]
        } for note in PaymentNote.objects.filter(picture=self.picture).order_by('-number')]
        return context

    def form_valid(self, form):
        # Stash the (unsaved) note in the session; it is only persisted
        # after a successful payment (see create_payment_note).
        note = form.save(commit=False)
        self.request.session['payment_note'] = {
            'name': note.name,
            'url': note.url,
            'number': note.number,
        }
        return super().form_valid(form)

    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): assumes a Settings row always exists; .first() is
        # None on an empty table and this would raise AttributeError.
        self.picture = Settings.objects.first().picture
        return super().dispatch(request, *args, **kwargs)

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['picture'] = self.picture
        return kwargs

    def get_success_url(self):
        # NO_PAYMENTS short-circuits payment entirely (dev/test mode).
        if getattr(settings,'NO_PAYMENTS', False) == True:
            create_payment_note(self.request.session['payment_note'])
            return reverse('picture-payment-success')
        else:
            return reverse('picture-payment')


class PaymentView(TemplateView):
    """Payment chooser: builds PayPal form + Stripe options from the session."""
    template_name = 'picture/payment.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['picture'] = Settings.objects.first().picture
        context['paypal_form'] = self.paypal_form
        context['stripe'] = self.stripe_options
        context['amount'] = self.request.session.get('payment_note').get('number')
        return context

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): .get('payment_note').get(...) raises AttributeError
        # when the session entry is missing (direct navigation here).
        self.picture = Settings.objects.first().picture
        business = settings.PAYPAL_EMAIL
        # The note fields ride along in PayPal's invoice/custom fields so
        # the IPN handler can reconstruct them (see handle_payment).
        paypal_options = {
            "business": business,
            "amount": request.session.get('payment_note').get('number'),
            "invoice": request.session.get('payment_note').get('url'),
            "custom": request.session.get('payment_note').get('name'),
            "item_name": "Pixel Reveal",
            # "invoice": "unique-invoice-id",
            "notify_url": request.build_absolute_uri(reverse('paypal-ipn')),
            "return_url": request.build_absolute_uri(reverse('picture-paypal-payment-success')),
            "cancel_return": request.build_absolute_uri(reverse('picture-index')),
        }
        self.paypal_form = PayPalPaymentsForm(initial=paypal_options)
        #STRIPE stuff -- amount is in cents.
        self.stripe_options = {
            'p_key': settings.STRIPE_PUBLISH,
            'amount': request.session.get('payment_note').get('number') * 100,
            'name': 'Calvin Collins',
            'description': 'Pixel Reveal',
        }
        return super().dispatch(request, *args, **kwargs)


class PaymentSuccessView(TemplateView):
    template_name = 'picture/payment_success.html'


class PaypalPaymentSuccessView(TemplateView):
    template_name = 'picture/paypal_payment_success.html'


class PaymentErrorView(TemplateView):
    template_name = 'picture/payment_error.html'


class StripeView(View):
    """Charges the session's payment amount via Stripe Checkout token."""

    def post(self, request, *args, **kwargs):
        self.picture = Settings.objects.first().picture
        stripe.api_key = settings.STRIPE_SECRET
        token = request.POST['stripeToken']
        try:
            charge = stripe.Charge.create(
                amount = request.session.get('payment_note').get('number') * 100,
                currency="usd",
                source=token,
                description="Pixel Reveal"
            )
        except stripe.error.CardError as e:
            # The card has been declined
            return redirect(reverse('picture-payment-error'))
        else:
            # Payment succeeded: persist the note and reveal pixels.
            create_payment_note(self.request.session['payment_note'])
            return redirect(reverse('picture-payment-success'))


def create_payment_note(note_info):
    """Persist a paid note and attach up to 50 newly revealed pixels."""
    form = PaymentNoteForm(note_info, picture=Settings.objects.first().picture)
    if form.is_valid():
        note = form.save(commit=False)
        note.picture = Settings.objects.first().picture
        note.save()
        # Reveal `number` pixels and snapshot their colors on the note.
        coords = note.picture.uncover_line(note.number)
        img = note.picture.pillow_image.convert('RGB')
        for i, coord in enumerate(coords):
            if i > 50:
                break
            r, g, b = img.getpixel((coord['x'], coord['y']))
            note.pixels.add(Pixel.objects.create(
                x = coord['x'],
                y = coord['y'],
                r = r,
                g = g,
                b = b
            ))
        note.save()


def handle_payment(sender, **kwargs):
    """valid_ipn_received handler: persist the note once PayPal confirms."""
    ipn_obj = sender
    if ipn_obj.payment_status == ST_PP_COMPLETED:
        # WARNING !
        # Check that the receiver email is the same we previously
        # set on the business field request. (The user could tamper
        # with those fields on payment form before send it to PayPal)
        if ipn_obj.receiver_email != settings.PAYPAL_EMAIL:
            # Not a valid payment
            return
        # Rebuild the note from the fields round-tripped through PayPal.
        note_info = {
            'name': ipn_obj.custom,
            'url': ipn_obj.invoice,
            'number': ipn_obj.mc_gross,
        }
        create_payment_note(note_info)

valid_ipn_received.connect(handle_payment)
import startbot, stats, os, re, random, sys
import utils

# Order of the Markov chain (words of history per prediction).
MARKOV_LENGTH = 2

#majority of the code taken from https://github.com/hrs/markov-sentence-generator
#changes made: allowed it to hook up from the text gotten directly from messages
#changed it to be encompassed in a class structure. Made minor changes to make it Py3.X compatible
class markov():
    """Markov-chain sentence generator trained on a group's message log."""

    # NOTE(review): these are *class-level* mutable attributes -- every
    # markov instance shares the same tempMapping/mapping/starts, so two
    # bots trained on different groups would pollute each other's model.
    # Confirm only one instance is ever created before relying on this.

    # These mappings can get fairly large -- they're stored globally to
    # save copying time.
    # (tuple of words) -> {dict: word -> number of times the word appears following the tuple}
    # Example entry:
    #    ('eyes', 'turned') => {'to': 2.0, 'from': 1.0}
    # Used briefly while first constructing the normalized mapping
    tempMapping = {}
    # (tuple of words) -> {dict: word -> *normalized* number of times the word appears following the tuple}
    # Example entry:
    #    ('eyes', 'turned') => {'to': 0.66666666, 'from': 0.33333333}
    mapping = {}
    # Contains the set of words that can start sentences
    starts = []

    # Name of the bot; stripped from prompts in genSentence2.
    m_botName = None

    def __init__(self, groupObj, groupName, bot):
        self.m_botName = bot.name
        self.train(groupObj, groupName)

    def train(self, groupObj, groupName):
        # stats.getAllText dumps the group's messages to the cache file
        # that wordlist() then reads.
        stats.getAllText(groupObj, groupName, self.m_botName)
        self.buildMapping(self.wordlist('..{1}cache{1}messages-{0}.txt'.format(groupName, os.path.sep)), MARKOV_LENGTH)
        utils.showOutput("bot successfully trained.")

    def talk(self, message, bot, groupName):
        # NOTE(review): bare except -- any failure in genSentence2 (e.g.
        # the bot name not being in the message) silently falls back to an
        # unprompted sentence, but it also hides real bugs.
        try:
            bot.post(self.genSentence2(message, MARKOV_LENGTH))
        except:
            bot.post(self.genSentence(MARKOV_LENGTH))

    # We want to be able to compare words independent of their capitalization.
    def fixCaps(self, word):
        # Ex: "FOO" -> "foo"
        if word.isupper() and word != "I":
            word = word.lower()
        # Ex: "LaTeX" => "Latex"
        elif word [0].isupper():
            word = word.lower().capitalize()
        # Ex: "wOOt" -> "woot"
        else:
            word = word.lower()
        return word

    # Tuples can be hashed; lists can't. We need hashable values for dict keys.
    # This looks like a hack (and it is, a little) but in practice it doesn't
    # affect processing time too negatively.
    def toHashKey(self, lst):
        return tuple(lst)

    # Returns the contents of the file, split into a list of words and
    # (some) punctuation.
    def wordlist(self, filename):
        f = open(filename, 'r', encoding='utf-8')
        wordlist = [self.fixCaps(w) for w in re.findall(r"[\w']+|[.,!?;]", f.read())]
        f.close()
        return wordlist

    # Self-explanatory -- adds "word" to the "tempMapping" dict under "history".
    # tempMapping (and mapping) both match each word to a list of possible next
    # words.
    # Given history = ["the", "rain", "in"] and word = "Spain", we add "Spain" to
    # the entries for ["the", "rain", "in"], ["rain", "in"], and ["in"].
    def addItemToTempMapping(self, history, word):
        while len(history) > 0:
            first = self.toHashKey(history)
            if first in self.tempMapping:
                if word in self.tempMapping[first]:
                    self.tempMapping[first][word] += 1.0
                else:
                    self.tempMapping[first][word] = 1.0
            else:
                self.tempMapping[first] = {}
                self.tempMapping[first][word] = 1.0
            history = history[1:]

    # Building and normalizing the mapping.
    def buildMapping(self, wordlist, markovLength):
        self.starts.append(wordlist [0])
        for i in range(1, len(wordlist) - 1):
            if i <= markovLength:
                history = wordlist[: i + 1]
            else:
                history = wordlist[i - markovLength + 1 : i + 1]
            follow = wordlist[i + 1]
            # if the last elt was a period, add the next word to the start list
            if history[-1] == "." and follow not in ".,!?;":
                self.starts.append(follow)
            self.addItemToTempMapping(history, follow)
        # Normalize the values in tempMapping, put them into mapping
        for first, followset in self.tempMapping.items():
            total = sum(followset.values())
            # Normalizing here:
            self.mapping[first] = dict([(k, v / total) for k, v in followset.items()])

    # Returns the next word in the sentence (chosen randomly),
    # given the previous ones.
    def next(self, prevList):
        sum = 0.0
        retval = ""
        index = random.random()
        # Shorten prevList until it's in mapping
        while self.toHashKey(prevList) not in self.mapping:
            prevList.pop(0)
        # Get a random word from the mapping, given prevList
        # (inverse-CDF sampling over the normalized probabilities).
        for k, v in self.mapping[self.toHashKey(prevList)].items():
            sum += v
            if sum >= index and retval == "":
                retval = k
        return retval

    def genSentence2(self, message, markovLength):
        #attempts to use input sentence material to construct a sentence
        # Start with a random "starting word" from the input message
        splitmessage = message.lower().split()
        # Drop the leading "<botname>," address token from the prompt.
        splitmessage.remove('{0},'.format(self.m_botName.lower()))
        if len(splitmessage) == 0:
            curr = random.choice(self.starts)
        else:
            curr = random.choice(splitmessage)
        sent = curr.capitalize()
        prevList = [curr]
        # Keep adding words until we hit a period
        while (curr not in "."):
            curr = self.next(prevList)
            prevList.append(curr)
            # if the prevList has gotten too long, trim it
            if len(prevList) > markovLength:
                prevList.pop(0)
            if (curr not in ".,!?;"):
                sent += " " # Add spaces between words (but not punctuation)
            sent += curr
        return sent

    def genSentence(self, markovLength):
        # Start with a random "starting word"
        curr = random.choice(self.starts)
        sent = curr.capitalize()
        prevList = [curr]
        # Keep adding words until we hit a period
        while (curr not in "."):
            curr = self.next(prevList)
            prevList.append(curr)
            # if the prevList has gotten too long, trim it
            if len(prevList) > markovLength:
                prevList.pop(0)
            if (curr not in ".,!?;"):
                sent += " " # Add spaces between words (but not punctuation)
            sent += curr
        return sent
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2018-12-04 15:13
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Initial migration for the survey app: creates the Question, Survey and
    # SurveyResult tables, then wires up Question.survey once both tables
    # exist. Auto-generated by Django -- do not hand-edit the operations.

    initial = True

    dependencies = [
        ('anagrafica', '0049_auto_20181028_1639'),
    ]

    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('required', models.BooleanField(default=True, verbose_name='Obbligatorio')),
            ],
            options={
                'verbose_name': 'Domanda',
                'verbose_name_plural': 'Domande',
            },
        ),
        migrations.CreateModel(
            name='Survey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True)),
                ('text', models.CharField(max_length=255)),
            ],
            options={
                'verbose_name': 'Questionario di gradimento',
                'verbose_name_plural': 'Questionari di gradimento',
            },
        ),
        migrations.CreateModel(
            name='SurveyResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('response', models.TextField(blank=True, max_length=1000, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Question')),
                ('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Survey')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='anagrafica.Persona')),
            ],
            options={
                'verbose_name': "Risposta dell'utente",
                'verbose_name_plural': 'Risposte degli utenti',
            },
        ),
        # Added after the fact because Question is created before Survey above.
        migrations.AddField(
            model_name='question',
            name='survey',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Survey'),
        ),
    ]
#-*- coding: utf-8 -*-
import unittest

from top.WordTableModel import WordTableModel


class WordTableModelTestsTestCase(unittest.TestCase):
    """Tests for WordTableModel loading, sorting, export and import.

    Requires the fixture files "dotestow.pkl" and "dotestow.txt" in the
    working directory.
    """

    def setUp(self):
        self.model = WordTableModel()
        self.model.load("dotestow.pkl")

    def testLoading(self):
        # Bug fix: the original concatenated an int into the message
        # ("got: " + len(...)), which raised TypeError instead of showing
        # the intended message whenever the assertion actually failed.
        assert len(self.model.words) == 5, \
            "incorrect number of loaded words " + \
            "got: " + str(len(self.model.words)) + ", but: 5 was expected"
        # Renamed from `list`, which shadowed the builtin.
        loaded = [word.word for word in self.model.words]
        expected = ["sibilant sound", "aberration", "acrid", "adjourn",
                    "ambience"]
        msg = "failed while loading the words with number: "
        for i, want in enumerate(expected):
            assert loaded[i] == want, msg + str(i)

    def testSorting(self):
        self.model.sortByWord()
        assert self.model.words[0].word == "aberration", \
            "incorrect sorting by word " + \
            "got: " + self.model.words[0].word + ", but: 'aberration' was expected"
        self.model.sortByDifficulty()
        assert self.model.words[0].word == "adjourn", \
            "incorrect sorting by word " + \
            "got: " + self.model.words[0].word + ", but: 'adjourn' was expected"
        self.model.reversedDiffSort = True
        self.model.sortByDifficulty()
        assert self.model.words[0].word == "ambience", \
            "incorrect sorting by word " + \
            "got: " + self.model.words[0].word + ", but: 'ambience' was expected"

    def testExport(self):
        import os
        self.model.exportWords("exportTest.txt")
        try:
            # Context managers close the handles even when the comparison
            # fails (the original leaked both files on failure), and the
            # temp file is removed regardless of the outcome.
            with open("dotestow.txt") as modelFh, \
                    open("exportTest.txt") as testFh:
                assert modelFh.read() == testFh.read(), "incorrect export"
        finally:
            os.remove("exportTest.txt")

    def testImport(self):
        self.model.words.clearWords()
        self.model.importWords("dotestow.txt")
        # Re-use the loading assertions to validate the imported content.
        self.testLoading()


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python2
# Print out the 2^n case variations of a word with the length n.
# (Digit-only strings have no case, so their permutations are used instead.)
import unittest
from itertools import product, permutations


def word_variations(s):
    """Return the set of case variations of `s`, or None for empty input.

    For alphabetic input this is the cartesian product of each character's
    lower/upper form (2^n strings). Input without case distinction (e.g.
    digit strings) would collapse to a single product, so the permutations
    of its characters are used instead.
    """
    try:
        if not len(s):
            return
        lower, upper = s.lower(), s.upper()
    except (TypeError, AttributeError):
        # Narrowed from a bare `except:`: only "not string-like" inputs
        # mirror the empty-input behavior; anything else propagates.
        return
    # Since number strings won't produce cartesian values with lower/upper,
    # we use itertools.permutations.
    if lower == upper:
        pairs = permutations(lower)
    else:
        pairs = product(*zip(lower, upper))
    result = {''.join(pair) for pair in pairs}  # set literal notation
    # print() calls run on both Python 2 and 3; the original used a
    # Python 2 print statement, a SyntaxError on Python 3.
    print(result)
    print(len(result))
    return result


word_variations("abc")


class WordTest(unittest.TestCase):

    def _test(self, s, expected):
        result = word_variations(s)
        self.assertEqual(len(result), expected)

    def test_basecase(self):
        self._test("hello", 32)

    def test_int(self):
        self._test("123", 6)

    def test_empty(self):
        self.assertEqual(word_variations(""), None)
self.assertEqual(F2.is_before(O3), None) class TestMigrationDependencies(Monkeypatcher): installed_apps = ['deps_a', 'deps_b', 'deps_c'] def setUp(self): super(TestMigrationDependencies, self).setUp() self.deps_a = Migrations('deps_a') self.deps_b = Migrations('deps_b') self.deps_c = Migrations('deps_c') Migrations.calculate_dependencies(force=True) def test_dependencies(self): self.assertEqual( [ set([]), set([self.deps_a['0001_a']]), set([self.deps_a['0002_a']]), set([ self.deps_a['0003_a'], self.deps_b['0003_b'], ]), set([self.deps_a['0004_a']]), ], [m.dependencies for m in self.deps_a], ) self.assertEqual( [ set([]), set([ self.deps_b['0001_b'], self.deps_a['0002_a'] ]), set([ self.deps_b['0002_b'], self.deps_a['0003_a'] ]), set([self.deps_b['0003_b']]), set([self.deps_b['0004_b']]), ], [m.dependencies for m in self.deps_b], ) self.assertEqual( [ set([]), set([self.deps_c['0001_c']]), set([self.deps_c['0002_c']]), set([self.deps_c['0003_c']]), set([ self.deps_c['0004_c'], self.deps_a['0002_a'] ]), ], [m.dependencies for m in self.deps_c], ) def test_dependents(self): self.assertEqual([set([self.deps_a['0002_a']]), set([self.deps_c['0005_c'], self.deps_b['0002_b'], self.deps_a['0003_a']]), set([self.deps_b['0003_b'], self.deps_a['0004_a']]), set([self.deps_a['0005_a']]), set([])], [m.dependents for m in self.deps_a]) self.assertEqual([set([self.deps_b['0002_b']]), set([self.deps_b['0003_b']]), set([self.deps_b['0004_b'], self.deps_a['0004_a']]), set([self.deps_b['0005_b']]), set([])], [m.dependents for m in self.deps_b]) self.assertEqual([set([self.deps_c['0002_c']]), set([self.deps_c['0003_c']]), set([self.deps_c['0004_c']]), set([self.deps_c['0005_c']]), set([])], [m.dependents for m in self.deps_c]) def test_forwards_plan(self): self.assertEqual([[self.deps_a['0001_a']], [self.deps_a['0001_a'], self.deps_a['0002_a']], [self.deps_a['0001_a'], self.deps_a['0002_a'], self.deps_a['0003_a']], [self.deps_b['0001_b'], self.deps_a['0001_a'], self.deps_a['0002_a'], 
self.deps_b['0002_b'], self.deps_a['0003_a'], self.deps_b['0003_b'], self.deps_a['0004_a']], [self.deps_b['0001_b'], self.deps_a['0001_a'], self.deps_a['0002_a'], self.deps_b['0002_b'], self.deps_a['0003_a'], self.deps_b['0003_b'], self.deps_a['0004_a'], self.deps_a['0005_a']]], [m.forwards_plan() for m in self.deps_a]) self.assertEqual([[self.deps_b['0001_b']], [self.deps_b['0001_b'], self.deps_a['0001_a'], self.deps_a['0002_a'], self.deps_b['0002_b']], [self.deps_b['0001_b'], self.deps_a['0001_a'], self.deps_a['0002_a'], self.deps_b['0002_b'], self.deps_a['0003_a'], self.deps_b['0003_b']], [self.dep
s_b['0001_b'], self.deps_a['0001_a'], self.deps_a['0002_a'], self.deps_b['0002_b'], self.deps_a['0003_a'], self.deps_b['0003_b'], self.deps_b['0004_b']], [self.deps_b['0001_b'], self.deps_a['0001_a'],
self.deps_a['0002_a'], self.deps_b['0002_b'], self.deps_a['0003_a'], self.deps_b['0003_b'], self.deps_b['0004_b'], self.deps_b['0005_b']]], [m.forwards_plan() for m in self.deps_b]) self.assertEqual([[self.deps_c['0001_c']], [self.deps_c['0001_c'], self.deps_c['0002_c']], [self.deps_c['0001_c'], self.deps_c['0002_c'], self.deps_c['0003_c']], [self.deps_c['0001_c'], self.deps_c['0002_c'], self.deps_c['0003_c'], self.deps_c['0004_c']], [self.deps_c['0001_c'], self.deps_c['0002_c'], self.deps_c['0003_c'], self.deps_c['0004_c'], self.deps_a['0001_a'], self.deps_a['0002_a'], self.deps_c['0005_c']]], [m.forwards_plan() for m in self.deps_c]) def test_backwards_plan(self): self.assertEqual([ [ self.deps_c['0005_c'], self.deps_b['0005_b'], self.deps_b['0004_b'], self.deps_a['0005_a'], self.deps_a['0004_a'], self.deps_b['0003_b'], self.deps_b['0002_b'], self.deps_a['0003_a'], self.deps_a['0002_a'], self.deps_a['0001_a'], ], [ self.deps_c['0005_c'], self.deps_b['0005_b'], self.deps_b['0004_b'], self.deps_a['0005_a'], self.deps_a['0004_a'], self.deps_b['0003_b'], self.deps_b['0002_b'], self.deps_a['0003_a'], self.deps_a['0002_a'], ], [ self.deps_b['0005_b'], self.deps_b['0004_b'], self.deps_a['0005_a'], self.deps_a['0004_a'], self.deps_b['0003_b'], self.deps_a['0003_a'], ], [ self.deps_a['0005_a'], self.deps_a['0004_a'], ], [ self.deps_a['0005_a'], ] ], [m.backwards_plan() for m in self.deps_a]) self.assertEqual([ [ self.deps_b['0005_b'], self.deps_b['0004_b'], self.deps_a['0005_a'], self.deps_a['0004_a'], self.deps_b['0003_b'], self.deps_b['0002_b'], self.deps_b['0001_b'], ], [ self.deps_b['0005_b'], self.deps_b
from __future__ import unicode_literals

from django.contrib.syndication import views
from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone

from .models import Article, Entry


class TestRss2Feed(views.Feed):
    # Baseline RSS 2.0 feed; most other test feeds subclass this one and
    # override a single aspect.
    title = 'My blog'
    description = 'A more thorough description of my blog.'
    link = '/blog/'
    feed_guid = '/foo/bar/1234'
    author_name = 'Sally Smith'
    author_email = 'test@example.com'
    author_link = 'http://www.example.com/'
    categories = ('python', 'django')
    feed_copyright = 'Copyright (c) 2007, Sally Smith'
    ttl = 600

    def items(self):
        return Entry.objects.all()

    def item_description(self, item):
        return "Overridden description: %s" % item

    def item_pubdate(self, item):
        return item.published

    def item_updateddate(self, item):
        return item.updated

    item_author_name = 'Sally Smith'
    item_author_email = 'test@example.com'
    item_author_link = 'http://www.example.com/'
    item_categories = ('python', 'testing')
    item_copyright = 'Copyright (c) 2007, Sally Smith'


class TestRss2FeedWithGuidIsPermaLinkTrue(TestRss2Feed):
    def item_guid_is_permalink(self, item):
        return True


class TestRss2FeedWithGuidIsPermaLinkFalse(TestRss2Feed):
    def item_guid(self, item):
        return str(item.pk)

    def item_guid_is_permalink(self, item):
        return False


class TestRss091Feed(TestRss2Feed):
    feed_type = feedgenerator.RssUserland091Feed


class TestNoPubdateFeed(views.Feed):
    # Minimal feed with no pubdate/updateddate hooks at all.
    title = 'Test feed'
    link = '/feed/'

    def items(self):
        return Entry.objects.all()


class TestAtomFeed(TestRss2Feed):
    feed_type = feedgenerator.Atom1Feed
    subtitle = TestRss2Feed.description


class TestLatestFeed(TestRss2Feed):
    """
    A feed where the latest entry date is an `updated` element.
    """
    feed_type = feedgenerator.Atom1Feed
    subtitle = TestRss2Feed.description

    def items(self):
        return Entry.objects.exclude(pk=5)


class ArticlesFeed(TestRss2Feed):
    """
    A feed to test no link being defined. Articles have no get_absolute_url()
    method, and item_link() is not defined.
    """
    def items(self):
        return Article.objects.all()


class TestSingleEnclosureRSSFeed(TestRss2Feed):
    """
    A feed to test that RSS feeds work with a single enclosure.
    """
    def item_enclosure_url(self, item):
        return 'http://example.com'

    def item_enclosure_size(self, item):
        return 0

    def item_mime_type(self, item):
        return 'image/png'


class TestMultipleEnclosureRSSFeed(TestRss2Feed):
    """
    A feed to test that RSS feeds raise an exception with multiple enclosures.
    """
    def item_enclosures(self, item):
        return [
            feedgenerator.Enclosure('http://example.com/hello.png', 0, 'image/png'),
            feedgenerator.Enclosure('http://example.com/goodbye.png', 0, 'image/png'),
        ]


class TemplateFeed(TestRss2Feed):
    """
    A feed to test defining item titles and descriptions with templates.
    """
    title_template = 'syndication/title.html'
    description_template = 'syndication/description.html'

    # Defining a template overrides any item_title definition
    def item_title(self):
        return "Not in a template"


class TemplateContextFeed(TestRss2Feed):
    """
    A feed to test custom context data in templates for title or description.
    """
    title_template = 'syndication/title_context.html'
    description_template = 'syndication/description_context.html'

    def get_context_data(self, **kwargs):
        context = super(TemplateContextFeed, self).get_context_data(**kwargs)
        context['foo'] = 'bar'
        return context


class NaiveDatesFeed(TestAtomFeed):
    """
    A feed with naive (non-timezone-aware) dates.
    """
    def item_pubdate(self, item):
        return item.published


class TZAwareDatesFeed(TestAtomFeed):
    """
    A feed with timezone-aware dates.
    """
    def item_pubdate(self, item):
        # Provide a weird offset so that the test can know it's getting this
        # specific offset and not accidentally getting one from
        # settings.TIME_ZONE.
        return item.published.replace(tzinfo=get_fixed_timezone(42))


class TestFeedUrlFeed(TestAtomFeed):
    feed_url = 'http://example.com/customfeedurl/'


class MyCustomAtom1Feed(feedgenerator.Atom1Feed):
    """
    Test of a custom feed generator class.
    """
    def root_attributes(self):
        attrs = super(MyCustomAtom1Feed, self).root_attributes()
        attrs['django'] = 'rocks'
        return attrs

    def add_root_elements(self, handler):
        super(MyCustomAtom1Feed, self).add_root_elements(handler)
        handler.addQuickElement('spam', 'eggs')

    def item_attributes(self, item):
        attrs = super(MyCustomAtom1Feed, self).item_attributes(item)
        attrs['bacon'] = 'yum'
        return attrs

    def add_item_elements(self, handler, item):
        super(MyCustomAtom1Feed, self).add_item_elements(handler, item)
        handler.addQuickElement('ministry', 'silly walks')


class TestCustomFeed(TestAtomFeed):
    feed_type = MyCustomAtom1Feed


class TestSingleEnclosureAtomFeed(TestAtomFeed):
    """
    A feed to test that Atom feeds work with a single enclosure.
    """
    def item_enclosure_url(self, item):
        return 'http://example.com'

    def item_enclosure_size(self, item):
        return 0

    def item_mime_type(self, item):
        return 'image/png'


class TestMultipleEnclosureAtomFeed(TestAtomFeed):
    """
    A feed to test that Atom feeds work with multiple enclosures.
    """
    def item_enclosures(self, item):
        return [
            feedgenerator.Enclosure('http://example.com/hello.png', '0', 'image/png'),
            feedgenerator.Enclosure('http://example.com/goodbye.png', '0', 'image/png'),
        ]
def safe_add(fn): """A wrapper for adding commands in a safe manner.""" def checked_add(*args): # Wrappers aren't bound methods so they can't reference 'self' # directly. However, 'self' will be provided as the first parameter # when the wrapped method is called. _self = args[0] msg_len = len(_self._msg) global_params_types_len = len(_self._global_params_types) local_params_byte_count = _self._local_params_byte_count global_params_byte_count = _self._global_params_byte_count fn(*args) if ((MAX_CMD_LEN < len(_self._msg)) or (MAX_CMD_LEN < _self._global_params_byte_count) or (MAX_LOCAL_VARIABLE_BYTES < _self._local_params_byte_count)): del (_self._msg[msg_len:]) del (_self._global_params_types[global_params_types_len:]) _self._local_params_byte_count = local_params_byte_count _self._global_params_byte_count = global_params_byte_count raise DirectCommandError('Not enough space to add the ' + 'given func.') return checked_add @safe_add def add_timer_wait(self, milliseconds): """Causes the thread to sleep for the specified number of milliseconds. """ local_var_tuple = self._allocate_local_param(DataFormat.DATA32) self._msg.append(Opcode.TIMER_WAIT) self._append_local_constant(milliseconds) self._append_param(*local_var_tuple) self._msg.append(Opcode.TIMER_READY) self._append_param(*local_var_tuple) @safe_add def add_ui_draw_update(self): """Updates the screen (applies whatever drawing commands have been issued since the last update). """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.UPDATE) @s
afe_add def add_ui_draw_clean(self): """Fills the screen with LCDColor.BACKGROUND.""" self._msg.append(Opcode.UI_DRAW
) self._msg.append(UIDrawSubcode.CLEAN) @safe_add def add_ui_draw_fillwindow(self, lcd_color, start_y, count): """Fills the window with count rows of the given LCDColor starting at row start_y. NOTE: Starting at 0 with a size of 0 will clear the window. This seems to be the way the CLEAN command is implemented. """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.FILLWINDOW) self._append_param(lcd_color) self._append_param(start_y, ParamType.LC2) self._append_param(count, ParamType.LC2) @safe_add def add_ui_draw_pixel(self, lcd_color, xy): """Draws a pixel at the given (x, y).""" self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.PIXEL) self._append_param(lcd_color) self._append_param(xy[0], ParamType.LC2) self._append_param(xy[1], ParamType.LC2) @safe_add def add_ui_draw_line(self, lcd_color, start_xy, end_xy): """Draws a line from the start (x, y) to the end (x, y).""" self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.LINE) self._append_param(lcd_color) self._append_param(start_xy[0], ParamType.LC2) self._append_param(start_xy[1], ParamType.LC2) self._append_param(end_xy[0], ParamType.LC2) self._append_param(end_xy[1], ParamType.LC2) @safe_add def add_ui_draw_dotline(self, lcd_color, start_xy, end_xy, on_pixels, off_pixels): """Draws a line from the start (x, y) to the end (x, y). The line will be composed of a repeating pattern consisting of on_pixels followed by off_pixels. """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.DOTLINE) self._append_param(lcd_color) self._append_param(start_xy[0], ParamType.LC2) self._append_param(start_xy[1], ParamType.LC2) self._append_param(end_xy[0], ParamType.LC2) self._append_param(end_xy[1], ParamType.LC2) self._append_param(on_pixels, ParamType.LC2) self._append_param(off_pixels, ParamType.LC2) @safe_add def add_ui_draw_rect(self, lcd_color, xy, width, height): """Draws a rectangle with (x, y) as the top-left corner and with width and height dimensions. 
""" self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.RECT) self._append_param(lcd_color) self._append_param(xy[0], ParamType.LC2) self._append_param(xy[1], ParamType.LC2) self._append_param(width, ParamType.LC2) self._append_param(height, ParamType.LC2) @safe_add def add_ui_draw_fillrect(self, lcd_color, xy, width, height): """Draws a filled rectangle with (x, y) as the top-left corner and with width and height dimensions. """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.FILLRECT) self._append_param(lcd_color) self._append_param(xy[0], ParamType.LC2) self._append_param(xy[1], ParamType.LC2) self._append_param(width, ParamType.LC2) self._append_param(height, ParamType.LC2) @safe_add def add_ui_draw_inverserect(self, xy, width, height): """Draws a rectangle with (x, y) as the top-left corner and with width and height dimensions. Any pixel that this rectangle overlaps will have its color flipped. """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.INVERSERECT) self._append_param(xy[0], ParamType.LC2) self._append_param(xy[1], ParamType.LC2) self._append_param(width, ParamType.LC2) self._append_param(height, ParamType.LC2) @safe_add def add_ui_draw_circle(self, lcd_color, xy, radius): """Draws a circle centered at (x, y) with the specified radius.""" self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.CIRCLE) self._append_param(lcd_color) self._append_param(xy[0], ParamType.LC2) self._append_param(xy[1], ParamType.LC2) self._append_param(radius, ParamType.LC2) @safe_add def add_ui_draw_fillcircle(self, lcd_color, xy, radius): """Draws a filled circle centered at (x, y) with the specified radius. 
""" self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.FILLCIRCLE) self._append_param(lcd_color) self._append_param(xy[0], ParamType.LC2) self._append_param(xy[1], ParamType.LC2) self._append_param(radius, ParamType.LC2) @safe_add def add_ui_draw_selectfont(self, font_type): """Selects the FontType that will be used by following calls to add_ui_draw_text. """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.SELECT_FONT) self._append_param(font_type) @safe_add def add_ui_draw_text(self, lcd_color, xy, text_str): """Draws the given text with (x, y) as the top-left corner of the bounding box. Use add_ui_draw_selectfont to select the font. """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.TEXT) self._append_param(lcd_color) self._append_param(xy[0], ParamType.LC2) self._append_param(xy[1], ParamType.LC2) self._append_param(text_str, ParamType.LCS) @safe_add def add_ui_draw_topline(self, topline_enabled): """Enables or disables the display of the menu bar at the top of the screen that normally displays status icons such as the battery indicator. """ self._msg.append(Opcode.UI_DRAW) self._msg.append(UIDrawSubcode.TOPLINE) self._append_param(int(topline_enabled)) @safe_add def ad
from vectore
s_oo import Vector x = input('vector U componente X= ') y = input('vector U componente X= ') U = Vector(x,y) m = input('vector V magnitud= ') a = input('vector V angulo= ') V = Vector(m=m, a=a) E = input('Escalar= ') print "U=%s" % U print "V=%s" % V print 'UxE=%s' % U.x_escalar(E) print 'VxE=%s' % V.x_escalar(E) print 'U+V=%s' % U.Suma(V) print 'U.V=%s' % U.ProductoPunto(V) print '|UxV|=%s' % U.Modulo_ProductoCruz(V)
# -*- coding: utf-8 -*- import os import pygame from pygame.locals import * class Sprite(pygame.sprite.Sprite): def __init__(self,SpriteName): pygame.sprite.Sprite.__init__(self) self.Name = SpriteName self.rect = 0 self.image = 0 def getRect(self): return self.rect def getImg(self): return self.image def load_image(self, name, colorkey=None): #fullname = os.path.join('data', 'images') fullname = name + '.png' try: image = pygame.image.load(fullname) except pygame.error, message: print 'Cannot load image:', fullname raise SystemExit, message image = image.convert() if colorkey is not None: if colorkey is -1: colorkey = image.get_at((0,0)) image.set_colorkey(colorkey, RLEACCEL) return image, image.get_rect() class spritesheet(object): def __init__(self, filename): try: self.sheet = pygame.image.load(filename).convert() except pygame.error, message: print 'Unable to load spritesheet image:', filename raise SystemExit, message # Load a specific image from a specific rectangle def image_at(self, rectangle, colorkey = None): "Loads image from x,y,x+offset,y+offset" rect = pygame.Rect(rectangle) image = pygame.Surface(rect.size).convert() image.blit(self.sheet, (0, 0), rect) if colorkey is not None: if colorkey is -1: col
orkey = image.get_at((0,0)) image.set_colorkey(colorkey, pygame.RLEACCEL) return image, rect # Load a whole bunch of images and return them as a list def images_at(self, rects): "Loads multiple images, supply a list of coordinates" return [self.image_at(rect) fo
r rect in rects], rect # Load a whole strip of images def load_strip(self, rect, image_count, colorkey = None): "Loads a strip of images and returns them as a list" tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3]) for x in range(image_count)] return self.images_at(tups, colorkey)
from validx import Dict, List

from .protos import DataObject
from .palette import Palette
from .utils.parser import BinaryParser
from .utils.validator import UInt8
from .utils.types import Remappings, Remapping

# Fixed on-disk layout: 19 remap tables of 256 bytes each.
NUM_REMAPS = 19
REMAP_SIZE = 256


class PaletteMapping(DataObject):
    """A color palette plus its 19 color-remapping tables."""

    __slots__ = (
        "colors",
        "remaps",
    )

    schema = Dict({"colors": Palette.schema, "remaps": List(List(UInt8))})

    def __init__(self):
        self.colors: Palette = Palette()
        self.remaps: Remappings = []

    def remap(self, remap_id: int) -> Palette:
        """Return the palette with remap table `remap_id` applied."""
        return self.colors.remap(self.remaps[remap_id])

    def read(self, parser: BinaryParser) -> "PaletteMapping":
        """Read the palette and remap tables from `parser`; returns self."""
        self.colors = Palette().read(parser)
        # Bug fix: reset first so re-reading into the same object does not
        # accumulate tables (the old code only ever appended).
        self.remaps = []
        for _ in range(NUM_REMAPS):
            remap: Remapping = [parser.get_uint8() for _ in range(REMAP_SIZE)]
            self.remaps.append(remap)
        return self

    def write(self, parser) -> None:
        """Write the palette and remap tables to `parser`."""
        self.colors.write(parser)
        for remap in self.remaps:
            for value in remap:
                parser.put_uint8(value)

    def serialize(self) -> dict:
        return {"colors": self.colors.serialize(), "remaps": self.remaps}

    def unserialize(self, data: dict) -> "PaletteMapping":
        self.colors = Palette().unserialize(data["colors"])
        self.remaps = data["remaps"]
        return self
from pyroute2.netlink import nlmsg


class errmsg(nlmsg):
    '''
    Custom message type

    Error ersatz-message
    '''
    # Single field: 'i' is a signed 32-bit integer holding the error code.
    fields = (('code', 'i'), )
import src


class Chemical(src.items.Item):
    """A chemical item whose 10-byte composition is shuffled until it "wins"."""

    type = "Chemical"

    def __init__(self):
        super().__init__(display=src.canvas.displayChars.fireCrystals)
        self.name = "chemical"
        # 10-byte composition that mix()/shift() rearrange.
        self.composition = b"cccccggggg"

    def apply(self, character):
        """Randomly mix/shift the composition until its hash is a "win".

        Each round applies one random operation and hashes the first 9 bytes
        of the composition; the loop stops once the last digest byte is
        below 15, reporting the digest and how many rounds it took.
        """
        import hashlib
        import random  # bug fix: `random` was never imported -> NameError

        counter = 0
        while 1:
            # The original also had an unreachable "switch" branch --
            # random.choice never returns "switch" -- so only the two real
            # operations remain.
            if random.choice(["mix", "shift"]) == "mix":
                self.mix(character)
            else:
                self.shift()
            test = hashlib.sha256(self.composition[0:9])
            character.addMessage(counter)
            result = int(test.digest()[-1])
            result2 = int(test.digest()[-2])
            if result < 15:
                character.addMessage(test.digest())
                character.addMessage(result)
                character.addMessage(result2)
                break
            counter += 1

    def shift(self):
        """Rotate the composition left by one byte."""
        self.composition = self.composition[1:] + self.composition[0:1]

    def mix(self, character):
        """Riffle-interleave the two 5-byte halves of the composition."""
        part1 = self.composition[0:5]
        part2 = self.composition[5:10]
        self.composition = (
            part1[0:1] + part2[0:1]
            + part1[1:2] + part2[1:2]
            + part1[2:3] + part2[2:3]
            + part1[3:4] + part2[3:4]
            + part1[4:5] + part2[4:5]
        )


src.items.addType(Chemical)
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
    ZMQ example using python3's asyncio

    Bitcoin should be started with the command line arguments:
        GuldenD -testnet -daemon \
                -zmqpubhashblock=tcp://127.0.0.1:28332 \
                -zmqpubrawtx=tcp://127.0.0.1:28332 \
                -zmqpubhashtx=tcp://127.0.0.1:28332 \
                -zmqpubhashblock=tcp://127.0.0.1:28332

    We use the asyncio library here.  `self.handle()` installs itself as a
    future at the end of the function.  Since it never returns with the event
    loop having an empty stack of futures, this creates an infinite loop.  An
    alternative is to wrap the contents of `handle` inside `while True`.

    The `@asyncio.coroutine` decorator and the `yield from` syntax found here
    was introduced in python 3.4 and has been deprecated in favor of the `async`
    and `await` keywords respectively.

    A blocking example using python 2.7 can be obtained from the git history:
    https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys

# Bug fix: the old check (`major >= 3 and minor >= 4`) wrongly rejected any
# hypothetical 4.0-4.3 interpreter; a tuple comparison expresses "3.4+".
if sys.version_info < (3, 4):
    print("This example only works with Python 3.4 and greater")
    exit(1)

port = 28332


class ZMQHandler():
    """Subscribes to the node's ZMQ notifications and prints each message."""

    def __init__(self):
        self.loop = zmq.asyncio.install()
        self.zmqContext = zmq.asyncio.Context()

        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)

    @asyncio.coroutine
    def handle(self):
        """Receive one multipart notification, print it, reschedule itself."""
        msg = yield from self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        sequence = "Unknown"
        # The final frame carries a 4-byte little-endian sequence number.
        if len(msg[-1]) == 4:
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            sequence = str(msgSequence)
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"hashtx":
            print('- HASH TX ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"rawblock":
            print('- RAW BLOCK HEADER ('+sequence+') -')
            print(binascii.hexlify(body[:80]))
        elif topic == b"rawtx":
            print('- RAW TX ('+sequence+') -')
            print(binascii.hexlify(body))
        # schedule ourselves to receive the next message
        asyncio.ensure_future(self.handle())

    def start(self):
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

    def stop(self):
        self.loop.stop()
        self.zmqContext.destroy()


daemon = ZMQHandler()
daemon.start()
# Copyright 2014,
Doug Wiegley, A10 Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR COND
ITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

from acos_client import errors as acos_errors
from acos_client.v21 import base


class BasePersistence(base.BaseV21):
    """Base class for A10 slb persistence-template API resources.

    Subclasses must set `self.pers_type` *before* calling this __init__,
    since the API method prefix is derived from it.
    """

    def __init__(self, client):
        super(BasePersistence, self).__init__(client)
        self.prefix = "slb.template.%s_persistence" % self.pers_type

    def get(self, name, **kwargs):
        # Searches for the template by name; the underlying call raises
        # acos_errors.NotFound when no such template exists.
        return self._post(("%s.search" % self.prefix), {'name': name}, **kwargs)

    def exists(self, name, **kwargs):
        """Return True if a persistence template named `name` exists."""
        try:
            self.get(name, **kwargs)
            return True
        except acos_errors.NotFound:
            return False

    def create(self, name, **kwargs):
        # `get_params` is supplied by the concrete subclass.
        self._post(("%s.create" % self.prefix), self.get_params(name), **kwargs)

    def delete(self, name, **kwargs):
        self._post(("%s.delete" % self.prefix), {'name': name}, **kwargs)


class CookiePersistence(BasePersistence):
    """Cookie-based persistence template resource."""

    def __init__(self, client):
        # pers_type must be set before the base __init__ builds the prefix.
        self.pers_type = 'cookie'
        super(CookiePersistence, self).__init__(client)

    def get_params(self, name):
        return {
            "cookie_persistence_template": {
                "name": name
            }
        }


class SourceIpPersistence(BasePersistence):
    """Source-IP based persistence template resource."""

    def __init__(self, client):
        # pers_type must be set before the base __init__ builds the prefix.
        self.pers_type = 'src_ip'
        super(SourceIpPersistence, self).__init__(client)

    def get_params(self, name):
        return {
            "src_ip_persistence_template": {
                "name": name
            }
        }
# tsuserver3, an Attorney Online server
#
# Copyright (C) 2016 argoneus <argoneuscze@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import ipaddress
import json

import yaml

from server.exceptions import ServerError


class BanManager:
    """Maintains the persistent ban list (keyed by 12-digit ipid) and the
    HDID exemption mapping loaded from config."""

    def __init__(self):
        self.bans = {}
        self.load_banlist()
        self.hdid_exempt = {}
        self.load_hdidexceptions()

    def load_banlist(self):
        """Load storage/banlist.json, creating an empty one if missing."""
        try:
            with open('storage/banlist.json', 'r') as banlist_file:
                self.bans = json.load(banlist_file)
        except FileNotFoundError:
            # First run: create an empty ban list on disk.
            with open('storage/banlist.json', 'w') as banlist_file:
                json.dump({}, banlist_file)

    def write_banlist(self):
        """Persist the in-memory ban list to disk."""
        with open('storage/banlist.json', 'w') as banlist_file:
            json.dump(self.bans, banlist_file)

    def add_ban(self, ip):
        """Ban a 12-digit ipid.

        Raises:
            ServerError: if the argument has no length or is not 12 chars long.
        """
        try:
            length = len(ip)
        except (TypeError, AttributeError):
            # Fixed: len() raises TypeError (not AttributeError) for unsized
            # objects such as ints, so the old handler never fired and the
            # command crashed instead of reporting the error.
            raise ServerError('Argument must be a 12-digit number.')
        if length != 12:
            # Fixed: a wrong-length argument used to be silently ignored.
            raise ServerError('Argument must be a 12-digit number.')
        self.bans[ip] = True
        self.write_banlist()

    def remove_ban(self, client, ip):
        """Unban by ipid (12-digit value) or by IP address.

        An IP address argument is translated to its ipid via the server.
        """
        try:
            try:
                int(ip)
            except ValueError:
                # Not numeric: treat it as an IP address and map it to an ipid.
                ipaddress.ip_address(ip)
                ip = client.server.get_ipid(ip)
        except ValueError:
            # Neither a number nor a parseable IP address.
            if not len(ip) == 12:
                # Fixed: message used to say '10-digit' while the code checks
                # for 12, matching the ipid length used everywhere else.
                raise ServerError('Argument must be an IP address or 12-digit number.')
        # NOTE(review): raises KeyError when ip is not currently banned —
        # confirm callers expect that rather than a ServerError.
        del self.bans[ip]
        self.write_banlist()

    def is_banned(self, ipid):
        """Return the stored ban flag, or False when ipid is not banned."""
        try:
            return self.bans[ipid]
        except KeyError:
            return False

    def load_hdidexceptions(self):
        """Load the HDID exemption mapping from config."""
        with open('config/hdid_exceptions.yaml', 'r', encoding='utf-8') as hdid:
            # safe_load: plain yaml.load without a Loader is deprecated and can
            # construct arbitrary Python objects; this config file only holds
            # plain mappings, so safe_load is equivalent and safe.
            self.hdid_exempt = yaml.safe_load(hdid)
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os, tempfile from os.path import join, exists, basename from shutil import copytree, rmtree from workspace_tools.utils import mkdir from workspace_tools.export import uvision4, codesourcery, codered, gccarm, ds5_5, iar, emblocks, coide, kds from workspace_tools.export.exporters import zip_working_directory_and_clean_up, OldLibrariesException from workspace_tools.targets import EXPORT_MAP EXPORTERS = { 'uvision': uvision4.Uvision4, 'lpcxpresso': codered.CodeRed, 'codesourcery': codesourcery.CodeSourcery, 'gcc_arm': gccarm.GccArm, 'ds5_5': ds5_5.DS5_5, 'iar': iar.IAREmbeddedWorkbench, 'emblocks' : emblocks.IntermediateFile, 'coide' : coide.CoIDE, 'kds' : kds.KDS, } ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN = """ Sorry, the target %s is not currently supported on the %s toolchain. Please refer to <a href='/handbook/Exporting-to-offline-toolchains' target='_blank'>Exporting to offline toolchains</a> for more information. """ ERROR_MESSAGE_NOT_EXPORT_LIBS = """ To export this project please <a href='http://mbed.org/compiler/?import=http://mbed.org/users/mbed_official/code/mbed-export/k&mode=lib' target='_blank'>import the export version of the mbed library</a>. 
""" def online_build_url_resolver(url): # TODO: Retrieve the path and name of an online library build URL return {'path':'', 'name':''} def export(project_path, project_name, ide, target, destination='/tmp/', tempdir=None, clean=True, extra_symbols=None, build_url_resolver=online_build_url_resolver): # Convention: we are using capitals for toolchain and target names if target is not None: target = target.upper() if tempdir is None: tempdir = tempfile.mkdtemp() if ide is None: # Simply copy everything, no project files to be generated for d in ['src', 'lib']: os.system("cp -r %s/* %s" % (join(project_path, d), tempdir)) report = {'success': Tr
ue} else: report = {'success': False} if ide not in EXPORTERS: report['errormsg'] = "Unsupported toolchain" else: Exporter = EXPORTERS[ide] target = EXPORT_MAP.get(target, target) if target not in Exporter.TARGETS: report['errormsg'] = ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN % (target, ide) else: try: exporter = Exporter(target, tempdir, project_name, build
_url_resolver, extra_symbols=extra_symbols) exporter.scan_and_copy_resources(project_path, tempdir) exporter.generate() report['success'] = True except OldLibrariesException, e: report['errormsg'] = ERROR_MESSAGE_NOT_EXPORT_LIBS zip_path = None if report['success']: # add readme file to every offline export. open(os.path.join(temdir, 'README.html'),'w').write('<meta http-equiv="refresh" content="0; url=http://developer.mbed.org/handbook/ExportToOfflineToolchain#%s#%s"/>'% (target,ide)) zip_path = zip_working_directory_and_clean_up(tempdir, destination, project_name, clean) return zip_path, report ############################################################################### # Generate project folders following the online conventions ############################################################################### def copy_tree(src, dst, clean=True): if exists(dst): if clean: rmtree(dst) else: return copytree(src, dst) def setup_user_prj(user_dir, prj_path, lib_paths=None): """ Setup a project with the same directory structure of the mbed online IDE """ mkdir(user_dir) # Project Path copy_tree(prj_path, join(user_dir, "src")) # Project Libraries user_lib = join(user_dir, "lib") mkdir(user_lib) if lib_paths is not None: for lib_path in lib_paths: copy_tree(lib_path, join(user_lib, basename(lib_path)))
# -*- coding: UTF-8 -*-
# Copyright 2015-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
Defines a set of user roles and fills
:class:`lino.modlib.users.choicelists.UserTypes`.

This is used as the :attr:`user_types_module
<lino.core.site.Site.user_types_module>` for :ref:`noi`.
"""

from django.utils.translation import gettext_lazy as _

from lino.modlib.office.roles import OfficeStaff, OfficeUser
from lino.modlib.users.roles import Helper
# from lino.modlib.comments.roles import CommentsReader
from lino.modlib.comments.roles import CommentsUser, CommentsStaff, PrivateCommentsReader, CommentsReader
from lino.core.roles import SiteUser, SiteAdmin
from lino_xl.lib.excerpts.roles import ExcerptsUser, ExcerptsStaff
from lino_xl.lib.contacts.roles import ContactsUser, ContactsStaff
from lino_xl.lib.courses.roles import CoursesUser
from lino_xl.lib.tickets.roles import Reporter, Searcher, Triager, TicketsStaff
from lino_xl.lib.working.roles import Worker
from lino_xl.lib.cal.roles import CalendarReader
from lino_xl.lib.votes.roles import VotesStaff, VotesUser
from lino_xl.lib.products.roles import ProductsStaff
from lino_xl.lib.ledger.roles import LedgerStaff

from lino.modlib.users.choicelists import UserTypes


# Each class below is a pure "role marker": it combines a bundle of
# permission roles by inheritance and adds no behaviour of its own.

class Customer(SiteUser, OfficeUser, VotesUser, Searcher, Reporter, CommentsUser):
    """
    A **Customer** is somebody who uses our software and may report
    tickets, but won't work on them.

    Able to comment and view tickets on sites where they are contact people.
    Unable to see any client data other than orgs where they are a contact
    person and themselves.
    """
    pass


class Contributor(Customer, Searcher, Helper, Worker, ExcerptsUser, CoursesUser):
    """
    A **Contributor** is somebody who works on and see tickets of sites
    they are team members of.
    """
    pass


class Developer(Contributor, ContactsUser, Triager, ExcerptsStaff, CommentsStaff, TicketsStaff, PrivateCommentsReader):
    """
    A **Developer** is a trusted user who has signed an NDA, has access
    to client contacts. Is able to make service reports as well as
    manage tickets.
    """
    pass


# Deliberately shadows the imported lino.core.roles.SiteAdmin: the local
# class extends the core role with all noi-specific staff roles.
class SiteAdmin(SiteAdmin, Developer, OfficeStaff, VotesStaff, ContactsStaff, CommentsStaff, ProductsStaff, LedgerStaff):
    """
    Can do everything.
    """


# class Anonymous(CommentsReader, CalendarReader):
class Anonymous(CalendarReader, CommentsReader, Searcher):
    pass


# Rebuild the UserTypes choicelist from scratch using the roles above.
UserTypes.clear()
add = UserTypes.add_item
add('000', _("Anonymous"), Anonymous, 'anonymous',
    readonly=True, authenticated=False)
# NOTE(review): 'customer user' contains a space, unlike the single-token
# names below — confirm whether 'customer' alone was intended here.
add('100', _("Customer"), Customer, 'customer user')
add('200', _("Contributor"), Contributor, 'contributor')
add('400', _("Developer"), Developer, 'developer')
add('900', _("Administrator"), SiteAdmin, 'admin')

# UserTypes.user = UserTypes.customer

# from lino.core.merge import MergeAction
# from lino.api import rt
# lib = rt.models
# for m in (lib.contacts.Company, ):
#     m.define_action(merge_row=MergeAction(
#         m, required_roles=set([ContactsStaff])))
## # Copyright 2009-2015 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBuild support for SuiteSparse, implemented as an easyblock @author: Stijn De Weirdt (Ghent University) @author: Dries Verdegem (Ghent University) @author: Kenneth Hoste (Ghent University) @author: Pieter De Baets (Ghent University) @author: Jens T
immerman (Ghent University) """ import fileinput import re import os import shutil import sys from distutils.version import LooseVersion from easybuild.easyblocks.generic.configuremake import ConfigureMake from easybuild.tools.build_log import EasyBuildError from easybuild.tools.filetools import mkdir from easybuild.tools.modules import get_software_root class EB_SuiteSparse(ConfigureMake): """Support for building SuiteSparse.""" def __init__(self, *args, **kwargs): """Custom constructor for SuiteSparse easyblock, initialize custom class parameters.""" super(EB_SuiteSparse, self).__init__(*args, **kwargs) self.config_name = 'UNKNOWN' def configure_step(self): """Configure build by patching UFconfig.mk or SuiteSparse_config.mk.""" if LooseVersion(self.version) < LooseVersion('4.0'): self.config_name = 'UFconfig' else: self.config_name = 'SuiteSparse_config' cfgvars = { 'CC': os.getenv('MPICC'), 'CFLAGS': os.getenv('CFLAGS'), 'CXX': os.getenv('MPICXX'), 'F77': os.getenv('MPIF77'), 'F77FLAGS': os.getenv('F77FLAGS'), 'BLAS': os.getenv('LIBBLAS_MT'), 'LAPACK': os.getenv('LIBLAPACK_MT'), } metis = get_software_root('METIS') parmetis = get_software_root('ParMETIS') if parmetis: metis_path = parmetis metis_libs = ' '.join([ os.path.join(parmetis, 'lib', 'libparmetis.a'), os.path.join(parmetis, 'lib', 'metis.a'), ]) elif metis: metis_path = metis metis_libs = os.path.join(metis, 'lib', 'metis.a') else: raise EasyBuildError("Neither METIS or ParMETIS module loaded.") cfgvars.update({ 'METIS_PATH': metis_path, 'METIS': metis_libs, }) # patch file fp = os.path.join(self.cfg['start_dir'], self.config_name, '%s.mk' % self.config_name) try: for line in fileinput.input(fp, inplace=1, backup='.orig'): for (var, val) in cfgvars.items(): orig_line = line # for variables in cfgvars, substiture lines assignment # in the file, whatever they are, by assignments to the # values in cfgvars line = re.sub(r"^\s*(%s\s*=\s*).*$" % var, r"\1 %s # patched by EasyBuild" % val, line) if line != 
orig_line: cfgvars.pop(var) sys.stdout.write(line) except IOError, err: raise EasyBuildError("Failed to patch %s in: %s", fp, err) # add remaining entries at the end if cfgvars: try: f = open(fp, "a") f.write("# lines below added automatically by EasyBuild") for (var, val) in cfgvars.items(): f.write("%s = %s\n" % (var, val)) f.close() except IOError, err: raise EasyBuildError("Failed to complete %s: %s", fp, err) def install_step(self): """Install by copying the contents of the builddir to the installdir (preserving permissions)""" for x in os.listdir(self.cfg['start_dir']): src = os.path.join(self.cfg['start_dir'], x) dst = os.path.join(self.installdir, x) try: if os.path.isdir(src): shutil.copytree(src, dst) # symlink # - dst/Lib to dst/lib # - dst/Include to dst/include for c in ['Lib', 'Include']: nsrc = os.path.join(dst, c) ndst = os.path.join(dst, c.lower()) if os.path.exists(nsrc): os.symlink(nsrc, ndst) else: shutil.copy2(src, dst) except OSError, err: raise EasyBuildError("Copying src %s to dst %s failed: %s", src, dst, err) # some extra symlinks are necessary for UMFPACK to work. 
paths = [ os.path.join('AMD', 'include', 'amd.h'), os.path.join('AMD' ,'include' ,'amd_internal.h'), os.path.join(self.config_name, '%s.h' % self.config_name), os.path.join('AMD', 'lib', 'libamd.a') ] for path in paths: src = os.path.join(self.installdir, path) dn = path.split(os.path.sep)[-2] fn = path.split(os.path.sep)[-1] dstdir = os.path.join(self.installdir, 'UMFPACK', dn) mkdir(dstdir) if os.path.exists(src): try: os.symlink(src, os.path.join(dstdir, fn)) except OSError, err: raise EasyBuildError("Failed to make symbolic link from %s to %s: %s", src, dst, err) def make_module_req_guess(self): """Add config dir to CPATH so include file is found.""" guesses = super(EB_SuiteSparse, self).make_module_req_guess() guesses.update({'CPATH': [self.config_name]}) return guesses def sanity_check_step(self): """Custom sanity check for SuiteSparse.""" if LooseVersion(self.version) < LooseVersion('4.0'): csparse_dir = 'CSparse3' else: csparse_dir = 'CSparse' custom_paths = { 'files': [os.path.join(x, 'lib', 'lib%s.a' % x.lower()) for x in ["AMD", "BTF", "CAMD", "CCOLAMD", "CHOLMOD", "COLAMD", "CXSparse", "KLU", "LDL", "RBio", "SPQR", "UMFPACK"]] + [os.path.join(csparse_dir, 'lib', 'libcsparse.a')], 'dirs': ["MATLAB_Tools"], } super(EB_SuiteSparse, self).sanity_check_step(custom_paths=custom_paths)
from django.urls import path

from . import views

# Routes for controlling a GA run via the class-based API views.
# NOTE(review): "GA" is not expanded anywhere visible here (genetic
# algorithm? Google Analytics?) — confirm against the views module.
urlpatterns = [
    path('start-ga', views.StartGA.as_view()),  # start a GA run
    path('stop-ga', views.StopGA.as_view()),    # stop the running GA
    path('check-ga', views.CheckGA.as_view()),  # poll GA status
]
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.

import datetime
import sys
import unittest

import test_env
test_env.setup_test_env()

# From components/third_party/
import webtest

import webapp2

import stats
from components import stats_framework
from support import stats_framework_mock
from support import test_case

# pylint: disable=R0201


# Each fake handler below records exactly one stats entry, so a single
# request maps to one known delta in the aggregated snapshot.

class Store(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    stats.add_entry(stats.STORE, 2048, 'GS; inline')
    self.response.write('Yay')


class Return(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    stats.add_entry(stats.RETURN, 4096, 'memcache')
    self.response.write('Yay')


class Lookup(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    stats.add_entry(stats.LOOKUP, 200, 103)
    self.response.write('Yay')


class Dupe(webapp2.RequestHandler):
  def get(self):
    """Generates fake stats."""
    stats.add_entry(stats.DUPE, 1024, 'inline')
    self.response.write('Yay')


def to_str(now, delta):
  """Converts a datetime to unicode."""
  now = now + datetime.timedelta(seconds=delta)
  return unicode(now.strftime(stats.utils.DATETIME_FORMAT))


class StatsTest(test_case.TestCase, stats_framework_mock.MockMixIn):
  def setUp(self):
    super(StatsTest, self).setUp()
    fake_routes = [
        ('/store', Store),
        ('/return', Return),
        ('/lookup', Lookup),
        ('/dupe', Dupe),
    ]
    self.app = webtest.TestApp(
        webapp2.WSGIApplication(fake_routes, debug=True),
        extra_environ={'REMOTE_ADDR': 'fake-ip'})
    stats_framework_mock.configure(self)
    # Frozen "now" so the generated snapshot keys are deterministic.
    self.now = datetime.datetime(2010, 1, 2, 3, 4, 5, 6)
    self.mock_now(self.now, 0)

  def _test_handler(self, url, added_data):
    """Hits |url| once, rolls stats up one minute later and compares the
    minute snapshot against the zeroed baseline updated with |added_data|."""
    stats_framework_mock.reset_timestamp(stats.STATS_HANDLER, self.now)

    self.assertEqual('Yay', self.app.get(url).body)
    # The handler must have logged exactly one raw entry.
    self.assertEqual(1, len(list(stats_framework.yield_entries(None, None))))

    self.mock_now(self.now, 60)
    # NOTE(review): 10 is presumably the count generate_stats() reports for
    # processed items — confirm against the stats_framework implementation.
    self.assertEqual(10, stats.generate_stats())

    actual = stats_framework.get_stats(
        stats.STATS_HANDLER, 'minutes', self.now, 1, True)
    # Baseline snapshot: all counters zero except the request itself.
    expected = [
      {
        'contains_lookups': 0,
        'contains_requests': 0,
        'downloads': 0,
        'downloads_bytes': 0,
        'failures': 0,
        'key': datetime.datetime(2010, 1, 2, 3, 4),
        'other_requests': 0,
        'requests': 1,
        'uploads': 0,
        'uploads_bytes': 0,
      },
    ]
    expected[0].update(added_data)
    self.assertEqual(expected, actual)

  def test_store(self):
    expected = {
      'uploads': 1,
      'uploads_bytes': 2048,
    }
    self._test_handler('/store', expected)

  def test_return(self):
    expected = {
      'downloads': 1,
      'downloads_bytes': 4096,
    }
    self._test_handler('/return', expected)

  def test_lookup(self):
    expected = {
      'contains_lookups': 200,
      'contains_requests': 1,
    }
    self._test_handler('/lookup', expected)

  def test_dupe(self):
    expected = {
      'other_requests': 1,
    }
    self._test_handler('/dupe', expected)


if __name__ == '__main__':
  if '-v' in sys.argv:
    unittest.TestCase.maxDiff = None
  unittest.main()
""" Updated on 19.12.2009 @author: alen, pinda """ from django.conf import settings from django.conf.urls.defaults import * from socialregistration.utils import OpenID, OAuthClient, OAuthTwitter, OAuthLinkedin urlpatterns = patterns('', url('^setup/$', 'socialregistration.views.setup', name='socialregistration_setup'), ur
l('^logout/$', 'socialregistration.views.logout', name='social_logout'), ) # Setup Facebook URLs if there's an API key specified if getattr(settings, 'FACEBOOK_API_KEY', None) is not None:
urlpatterns = urlpatterns + patterns('', url('^facebook/login/$', 'socialregistration.views.facebook_login', name='facebook_login'), url('^facebook/connect/$', 'socialregistration.views.facebook_connect', name='facebook_connect'), url('^xd_receiver.htm', 'django.views.generic.simple.direct_to_template', {'template':'socialregistration/xd_receiver.html'}, name='facebook_xd_receiver'), ) #Setup Twitter URLs if there's an API key specified if getattr(settings, 'TWITTER_CONSUMER_KEY', None) is not None: urlpatterns = urlpatterns + patterns('', url('^twitter/redirect/$', 'socialregistration.views.oauth_redirect', dict( consumer_key=settings.TWITTER_CONSUMER_KEY, secret_key=settings.TWITTER_CONSUMER_SECRET_KEY, request_token_url=settings.TWITTER_REQUEST_TOKEN_URL, access_token_url=settings.TWITTER_ACCESS_TOKEN_URL, authorization_url=settings.TWITTER_AUTHORIZATION_URL, callback_url='twitter_callback', client_class = OAuthClient ), name='twitter_redirect'), url('^twitter/callback/$', 'socialregistration.views.oauth_callback', dict( consumer_key=settings.TWITTER_CONSUMER_KEY, secret_key=settings.TWITTER_CONSUMER_SECRET_KEY, request_token_url=settings.TWITTER_REQUEST_TOKEN_URL, access_token_url=settings.TWITTER_ACCESS_TOKEN_URL, authorization_url=settings.TWITTER_AUTHORIZATION_URL, callback_url='twitter', client_class = OAuthClient ), name='twitter_callback' ), url('^twitter/$', 'socialregistration.views.twitter', {'client_class': OAuthTwitter}, name='twitter'), ) #Setup Linkedin URLs if there's an API key specified if getattr(settings, 'LINKEDIN_CONSUMER_KEY', None) is not None: urlpatterns = urlpatterns + patterns('', url('^linkedin/redirect/$', 'socialregistration.views.oauth_redirect', dict( consumer_key=settings.LINKEDIN_CONSUMER_KEY, secret_key=settings.LINKEDIN_CONSUMER_SECRET_KEY, request_token_url=settings.LINKEDIN_REQUEST_TOKEN_URL, access_token_url=settings.LINKEDIN_ACCESS_TOKEN_URL, authorization_url=settings.LINKEDIN_AUTHORIZATION_URL, 
callback_url='linkedin_callback', client_class = OAuthClient ), name='linkedin_redirect'), url('^linkedin/callback/$', 'socialregistration.views.oauth_callback', dict( consumer_key=settings.LINKEDIN_CONSUMER_KEY, secret_key=settings.LINKEDIN_CONSUMER_SECRET_KEY, request_token_url=settings.LINKEDIN_REQUEST_TOKEN_URL, access_token_url=settings.LINKEDIN_ACCESS_TOKEN_URL, authorization_url=settings.LINKEDIN_AUTHORIZATION_URL, callback_url='linkedin', client_class = OAuthClient, parameters={'oauth_verifier':''} ), name='linkedin_callback' ), url('^linkedin/$', 'socialregistration.views.linkedin', {'client_class': OAuthLinkedin}, name='linkedin'), ) urlpatterns = urlpatterns + patterns('', url('^openid/redirect/$', 'socialregistration.views.openid_redirect', { 'client_class': OpenID}, name='openid_redirect'), url('^openid/callback/$', 'socialregistration.views.openid_callback', { 'client_class': OpenID}, name='openid_callback') )
# -*- coding: utf-8 -*-

"""
Python 2/3 compatibility helpers.
"""

import sys

is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_py33 = (sys.version_info[0] == 3 and sys.version_info[1] == 3)


# Prefer simplejson when available; SyntaxError can occur when an installed
# simplejson is incompatible with the running interpreter.
try:
    import simplejson as json
except (ImportError, SyntaxError):
    import json

if is_py2:
    from urllib import quote as urlquote, unquote as urlunquote
    from urlparse import urlparse, parse_qs, urlsplit

    def to_bytes(data):
        """If input is unicode, return it encoded as UTF-8 bytes; otherwise return it unchanged."""
        if isinstance(data, unicode):
            return data.encode('utf-8')
        else:
            return data

    def to_string(data):
        """Convert input to a str object."""
        return to_bytes(data)

    def to_unicode(data):
        """Convert input to unicode; input must be unicode or UTF-8 encoded bytes."""
        if isinstance(data, bytes):
            return data.decode('utf-8')
        else:
            return data

    def stringify(input):
        # Recursively encode every unicode value inside dicts/lists to UTF-8 str.
        if isinstance(input, dict):
            return dict([(stringify(key), stringify(value)) for key,value in input.iteritems()])
        elif isinstance(input, list):
            return [stringify(element) for element in input]
        elif isinstance(input, unicode):
            return input.encode('utf-8')
        else:
            return input

    # Rebind text/bytes names so importing modules can use them uniformly.
    builtin_str = str
    bytes = str
    str = unicode

elif is_py3:
    from urllib.parse import quote as urlquote, unquote as urlunquote
    from urllib.parse import urlparse, parse_qs, urlsplit

    def to_bytes(data):
        """If input is str (i.e. unicode), return it encoded as UTF-8 bytes; otherwise return it unchanged."""
        if isinstance(data, str):
            return data.encode(encoding='utf-8')
        else:
            return data

    def to_string(data):
        """If input is bytes, assume UTF-8 and return the decoded str."""
        if isinstance(data, bytes):
            return data.decode('utf-8')
        else:
            return data

    def to_unicode(data):
        """Convert input to unicode; input must be unicode or UTF-8 encoded bytes."""
        return to_string(data)

    def stringify(input):
        # On Python 3 text is already unicode; nothing to convert.
        return input

    # On Python 3 the builtin names are already what callers expect.
    builtin_str = str
    bytes = bytes
    str = str
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import re

# Todo this mapping REALLY needs a non-hardcoded home
# Maps a canonical slave type to the hostname regexes belonging to it.
_slave_type = {
    "bld-linux64-ec2": [
        re.compile("^bld-centos6-hp-"),
        re.compile("^bld-linux64-ec2-"),
        re.compile("^bld-linux64-ix-"),
        re.compile("^b-linux64-ix-"),
        re.compile("^bld-linux64-spot-"),
        re.compile("^b-linux64-hp-"),
        re.compile("^try-linux64-spot-"),
    ],
    "bld-lion-r5": [
        re.compile("^bld-lion-r5-"),
    ],
    "b-2008-ix": [
        re.compile("^b-2008-ix-"),
        re.compile("^b-2008-sm-"),
        re.compile("^w64-ix-"),
    ],
    "tst-linux64-ec2": [
        re.compile("^talos-linux64-ix-"),
        re.compile("^tst-linux64-spot-"),
        re.compile("^tst-linux64-ec2-"),
    ],
    "tst-linux32-ec2": [
        re.compile("^talos-linux32-ix-"),
        re.compile("^tst-linux32-spot-"),
        re.compile("^tst-linux32-ec2-"),
    ],
    "t-yosemite-r5": [
        re.compile("^t-yosemite-r5-"),
    ],
    "talos-mtnlion-r5": [
        re.compile("^talos-mtnlion-r5-"),
    ],
    "t-snow-r4": [
        re.compile("^t-snow-r4-"),
        re.compile("^talos-r4-snow-"),
    ],
    "t-w732-ix": [
        re.compile("^t-w732-ix-"),
    ],
    "t-w864-ix": [
        re.compile("^t-w864-ix-"),
    ],
    "t-xp32-ix": [
        re.compile("^t-xp32-ix-"),
    ],
}

# Slave types that require GPO treatment (Windows build/test machines).
_gpo_needed = [
    "b-2008-ix",
    "t-w732-ix",
    "t-w864-ix",
    "t-xp32-ix"
]


def slave_patterns():
    """Return {slave_type: sorted glob-like hostname patterns}.

    Each regex's leading '^' anchor is stripped and a trailing '*' is
    appended, e.g. '^bld-lion-r5-' becomes 'bld-lion-r5-*'.
    """
    ret = {}
    for key, regexes in _slave_type.items():
        ret[key] = sorted(regex.pattern[1:] + "*" for regex in regexes)
    return ret


def slave_to_slavetype(slave):
    """Map a slave hostname (or an exact type name) to its canonical type.

    Returns None when the name matches no known pattern.
    """
    if slave in _slave_type:
        return slave
    for key, regexes in _slave_type.items():
        for regex in regexes:
            if regex.match(slave):
                return key
    return None


def is_aws_serviceable(slave):
    """Return True when the slave's type runs in AWS (EC2)."""
    slaveclass = slave_to_slavetype(slave)
    # Fixed: slave_to_slavetype() returns None for unknown slaves, which
    # used to raise TypeError on the `in` membership test below.
    if slaveclass is None:
        return False
    return 'ec2' in slaveclass


def needs_gpo(slave):
    """Return True when the slave's type requires GPO."""
    slaveclass = slave_to_slavetype(slave)
    return slaveclass in _gpo_needed


def slave_filter(slave_class):
    """Return a predicate selecting dict items whose 'name' matches slave_class."""
    def _inner_slave_filter(item):
        for regex in _slave_type[slave_class]:
            if regex.match(item["name"]):
                return True
        # If we got here, no match
        return False
    return _inner_slave_filter


def slavetype_to_awsprefix(slave_class):
    """Return the AWS loaner-instance name prefix for an EC2 slave type.

    Raises:
        ValueError: for non-AWS slave types or types with no naming rule.
    """
    if not is_aws_serviceable(slave_class):
        raise ValueError("Unsupported Slave")
    basic_slave_prefix = slave_to_slavetype(slave_class)
    if basic_slave_prefix.startswith("bld"):
        # Build loaners are renamed dev-*.
        loan_prefix = basic_slave_prefix.replace("bld-", "dev-")
    elif basic_slave_prefix.startswith("tst"):
        loan_prefix = basic_slave_prefix
    else:
        raise ValueError("Unsure how to name this aws slave")
    return loan_prefix
# Time:  O(n)
# Space: O(1)

# Suppose you have a long flowerbed in which some of the plots are planted and some are not.
# However, flowers cannot be planted in adjacent plots - they would compete for water
# and both would die.
#
# Given a flowerbed (represented as an array containing 0 and 1,
# where 0 means empty and 1 means not empty), and a number n,
# return if n new flowers can be planted in it without violating the no-adjacent-flowers rule.
#
# Example 1:
# Input: flowerbed = [1,0,0,0,1], n = 1
# Output: True
# Example 2:
# Input: flowerbed = [1,0,0,0,1], n = 2
# Output: False
# Note:
# The input array won't violate no-adjacent-flowers rule.
# The input array size is in the range of [1, 20000].
# n is a non-negative integer which won't exceed the input array size.

class Solution(object):
    def canPlaceFlowers(self, flowerbed, n):
        """
        :type flowerbed: List[int]
        :type n: int
        :rtype: bool

        Greedy: plant in every empty plot whose neighbors (treating the
        array edges as empty) are also empty. Greedy is optimal here since
        skipping a plantable plot can never allow more plantings later.
        Note: mutates flowerbed in place, matching the original behavior.
        """
        for i in range(len(flowerbed)):
            if flowerbed[i] == 0 and (i == 0 or flowerbed[i-1] == 0) and \
                    (i == len(flowerbed)-1 or flowerbed[i+1] == 0):
                flowerbed[i] = 1
                n -= 1
                if n <= 0:
                    return True
        # Fixed: the original only returned True after planting a flower, so
        # n == 0 always yielded False even though zero flowers trivially fit.
        return n <= 0
ective """ import webnotes from webnotes.utils import cstr class DocType: def __init__(self, doc, doclist=[]): self.doc, self.doclist = doc, doclist self.doctype_properties = [ 'search_fields', 'default_print_format', 'read_only_onload', 'allow_print', 'allow_email', 'allow_copy', 'allow_attach', 'max_attachments' ] self.docfield_properties = [ 'idx', 'label', 'fieldtype', 'fieldname', 'options', 'permlevel', 'width', 'print_width', 'reqd', 'in_filter', 'in_list_view', 'hidden', 'print_hide', 'report_hide', 'allow_on_submit', 'depends_on', 'description', 'default', 'name' ] self.property_restrictions = { 'fieldtype': [['Currency', 'Float'], ['Small Text', 'Data'], ['Text', 'Text Editor', 'Code']], } self.forbidden_properties = ['idx'] def get(self): """ Gets DocFields applied with Property Setter customizations via Customize Form Field """ self.clear() if self.doc.doc_type: from webnotes.model.doc import addchild for d in self.get_ref_doclist(): if d.doctype=='DocField': new = addchild(self.doc, 'fields', 'Customize Form Field', self.doclist) self.set( { 'list': self.docfield_properties, 'doc' : d, 'doc_to_set': new } ) elif d.doctype=='DocType': self.set({ 'list': self.doctype_properties, 'doc': d }) def get_ref_doclist(
self): """ * Gets doclist of type self.doc.doc_type * Applies property setter properties on the doclist * returns the modified doclist """ from webnotes.model.doctype import get ref_doclist = get(self.doc.doc_type) ref_doclist = webnotes.doclist([ref_doclist[0]] + ref_doclist.get({"parent": self.doc.doc_type})) return ref_doclist def clear(sel
f): """ Clear fields in the doc """ # Clear table before adding new doctype's fields self.doclist = self.doc.clear_table(self.doclist, 'fields') self.set({ 'list': self.doctype_properties, 'value': None }) def set(self, args): """ Set a list of attributes of a doc to a value or to attribute values of a doc passed args can contain: * list --> list of attributes to set * doc_to_set --> defaults to self.doc * value --> to set all attributes to one value eg. None * doc --> copy attributes from doc to doc_to_set """ if not 'doc_to_set' in args: args['doc_to_set'] = self.doc if 'list' in args: if 'value' in args: for f in args['list']: args['doc_to_set'].fields[f] = None elif 'doc' in args: for f in args['list']: args['doc_to_set'].fields[f] = args['doc'].fields.get(f) else: webnotes.msgprint("Please specify args['list'] to set", raise_exception=1) def post(self): """ Save diff between Customize Form Bean and DocType Bean as property setter entries """ if self.doc.doc_type: from webnotes.model import doc from core.doctype.doctype.doctype import validate_fields_for_doctype this_doclist = webnotes.doclist([self.doc] + self.doclist) ref_doclist = self.get_ref_doclist() dt_doclist = doc.get('DocType', self.doc.doc_type) # get a list of property setter docs diff_list = self.diff(this_doclist, ref_doclist, dt_doclist) self.set_properties(diff_list) validate_fields_for_doctype(self.doc.doc_type) webnotes.clear_cache(doctype=self.doc.doc_type) webnotes.msgprint("Updated") def diff(self, new_dl, ref_dl, dt_dl): """ Get difference between new_dl doclist and ref_dl doclist then check how it differs from dt_dl i.e. 
default doclist """ import re self.defaults = self.get_defaults() diff_list = [] for new_d in new_dl: for ref_d in ref_dl: if ref_d.doctype == 'DocField' and new_d.name == ref_d.name: for prop in self.docfield_properties: # do not set forbidden properties like idx if prop in self.forbidden_properties: continue d = self.prepare_to_set(prop, new_d, ref_d, dt_dl) if d: diff_list.append(d) break elif ref_d.doctype == 'DocType' and new_d.doctype == 'Customize Form': for prop in self.doctype_properties: d = self.prepare_to_set(prop, new_d, ref_d, dt_dl) if d: diff_list.append(d) break return diff_list def get_defaults(self): """ Get fieldtype and default value for properties of a field """ df_defaults = webnotes.conn.sql(""" SELECT fieldname, fieldtype, `default`, label FROM `tabDocField` WHERE parent='DocField' or parent='DocType'""", as_dict=1) defaults = {} for d in df_defaults: defaults[d['fieldname']] = d defaults['idx'] = {'fieldname' : 'idx', 'fieldtype' : 'Int', 'default' : 1, 'label' : 'idx'} defaults['previous_field'] = {'fieldname' : 'previous_field', 'fieldtype' : 'Data', 'default' : None, 'label' : 'Previous Field'} return defaults def prepare_to_set(self, prop, new_d, ref_d, dt_doclist, delete=0): """ Prepares docs of property setter sets delete property if it is required to be deleted """ # Check if property has changed compared to when it was loaded if new_d.fields.get(prop) != ref_d.fields.get(prop) \ and not \ ( \ new_d.fields.get(prop) in [None, 0] \ and ref_d.fields.get(prop) in [None, 0] \ ) and not \ ( \ new_d.fields.get(prop) in [None, ''] \ and ref_d.fields.get(prop) in [None, ''] \ ): #webnotes.msgprint("new: " + str(new_d.fields[prop]) + " | old: " + str(ref_d.fields[prop])) # Check if the new property is same as that in original doctype # If yes, we need to delete the property setter entry for dt_d in dt_doclist: if dt_d.name == ref_d.name \ and (new_d.fields.get(prop) == dt_d.fields.get(prop) \ or \ ( \ new_d.fields.get(prop) in [None, 0] \ 
and dt_d.fields.get(prop) in [None, 0] \ ) or \ ( \ new_d.fields.get(prop) in [None, ''] \ and dt_d.fields.get(prop) in [None, ''] \ )): delete = 1 break value = new_d.fields.get(prop) if prop in self.property_restrictions: allow_change = False for restrict_list in self.property_restrictions.get(prop): if value in restrict_list and \ ref_d.fields.get(prop) in restrict_list: allow_change = True break if not allow_change: webnotes.msgprint("""\ You cannot change '%s' of '%s' from '%s' to '%s'. %s can only be changed among %s. <i>Ignoring this change and saving.</i>""" % \ (self.defaults.get(prop, {}).get("label") or prop, new_d.fields.get("label") or new_d.fields.get("idx"), ref_d.fields.get(prop), value, self.defaults.get(prop, {}).get("label") or prop, " -or- ".join([", ".join(r) for r in \ self.property_restrictions.get(prop)]))) return None # If the above conditions are fulfilled, # create a property setter doc, but dont save it yet. from webnotes.model.doc import Document d = Document('Property Setter') d.doctype_or_field = ref_d.doctype=='DocField' and 'DocField' or 'DocType' d.doc_type = self.doc.doc_type d.field_name = ref_d.fieldname d.property = prop d.value = value d.property_type = self.defaults[prop]['fieldtype'] #d.default_value = self.defaults[prop]['default'] if delete: d.delete = 1 if d.select_item: d.select_item = self.remove_forbidden(d.select_item) # return the property setter doc return d else: return None def set_properties(self, ps_doclist): """ * Delete a property setter entry + if it already exists + if marked for deletion * Save the property setter doc in the list """ for d in ps_doclist: # Delete existing property setter entry if not d.fields.get("field_name"): webnotes.conn.sql(""" DELETE FROM `tabProperty Setter` WHERE doc_type = %(doc_type)s AND property = %(property)s""", d.fields) else: webnotes.conn.sql(""" DELETE FROM `tabProperty Setter` WHERE doc_type = %(doc_type)s AND field_name = %(field_name)s AND property = %(property)s""", 
d.fields) # Save the property setter doc if not marked for deletion i.e. delete=0 if not d.delete:
he Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from typing import Callable, List import tensorflow as tf import tensorflow_hub as tfhub from discretezoo import attack_setup class EmbeddedCosineDistance: """EmbeddedCosineDistance calculates cosine distance in embedding space. Attributes: embeddings: A tensor containing an embedding vector for each index in vocab. <float32>[vocab_size, embedding_dimension] """ def __init__(self, embeddings: tf.Tensor): """Initializes EmbeddedCosineDistance with embeddings. Arguments: embeddings: A tensor containing an embedding for each index in vocab. <float32>[vocab_size, embedding_dimension] """ assert embeddings.ndim == 2, ( 'Embeddings are expected to have 2 dimensions' f' but you passed a tensor with {embeddings.ndim}.') self._embeddings = embeddings @tf.function def __call__(self, original_sentences: tf.Tensor, adversarial_sentences: tf.Tensor) -> tf.Tensor: r"""Calculates cosine distance between reduced embedded sentences. Sentences are embedded and then reduced by summing them up. Cosine similarity is then given by \frac{v_{original} \cdot v_{adversarial}} {|v_{original}| \times |v_{adversarial|}}. Cosine distance is defined as 1 - similarity. Arguments: original_sentences: A tensor of token indices in the original sentences. <int32>[batch_size, sentence_length] adversarial_sentences: A tensor of token indices in the adversarial sentences. 
<int32>[batch_size, sentence_length] Returns: A tensor <float32>[batch_size, 1] of cosine distances between original and adversarial sentences. Return values are in the range [0, 2] https://www.tensorflow.org/api_docs/python/tf/keras/losses/cosine_similarity is used internally, which computes negative similarity, and 1 is added. """ original_sentences_embedded = tf.nn.embedding_lookup( self._embeddings, original_sentences) adversarial_sentences_embedded = tf.nn.embedding_lookup( self._embeddings, adversarial_sentences) original_sentences_reduced = tf.math.reduce_sum(original_sentences_embedded, axis=1) adversarial_sentences_reduced = tf.math.reduce_sum( adversarial_sentences_embedded, axis=1) # Unintuitively, tf.keras.losses.cosine_similarity returns negative cosine # similarity. Adding 1 means that two vectors will have 0 as a minimum # distance instead of -1, which is helpful in later loss computation. distance = 1 + tf.keras.losses.cosine_similarity( original_sentences_reduced, adversarial_sentences_reduced) return tf.expand_dims(distance, 1) class EmbeddedEuclideanDistance: """EmbeddedEuclideanDistance calculates euclidean distance in embedding space. Attributes: embeddings: A tensor containing an embedding vector for each index in vocab. <float32>[vocab_size, embedding_dimension] reduce_mean: This is a boolean flag that signals how embedded sentences will be reduced to a single vector. True for mean, False for sum. """ def __init__(self, embeddings: tf.Tensor, reduce_mean: bool = True): """Initializes EmbeddedEuclideanDistance with embeddings and reduction type. Arguments: embeddings: A tensor containing an embedding for each index in vocab. <float32>[vocab_size, embedding_dimension] reduce_mean: This boolean flag signals how embedded sentences will be reduced to a single vector. True for mean, False for sum. 
""" assert embeddings.ndim == 2, ( 'Embeddings are expected to have 2 dimensions' f' but you passed a tensor with {embeddings.ndim}.') self._embeddings = embeddings self._reduce_mean = reduce_mean @tf.function def __call__(self, original_sentences: tf.Tensor, adversarial_sentences: tf.Tensor) -> tf.Tensor: """Calculates euclidean distances between reduced embedded sentences. Arguments: original_sentences: A tensor of token indices in the original sentences. <int32>[batch_size, sentence_length] adversarial_sentences: A tensor of token indices in the adversarial sentences. <int32>[batch_size, sentence_length] Returns: A tensor <float32>[batch_size, 1] of euclidean distances between original and adversarial sentences. """ original_sentences_embedded = tf.nn.embedding_lookup( self._embeddings, original_sentences) adversarial_sentences_embedded = tf.nn.embedding_lookup( self._embeddings, adversarial_sentences) if self._reduce_mean: original_sentences_reduced = tf.math.reduce_mean( original_sentences_embedded, axis=1) adversarial_sentences_reduced = tf.math.reduce_m
ean( adversarial_sentences_embedded, axi
s=1) else: original_sentences_reduced = tf.math.reduce_sum( original_sentences_embedded, axis=1) adversarial_sentences_reduced = tf.math.reduce_sum( adversarial_sentences_embedded, axis=1) difference_vector = tf.math.subtract(original_sentences_reduced, adversarial_sentences_reduced) distance = tf.norm(difference_vector, axis=-1, keepdims=True) return distance class UniversalSentenceEncoderDistance: """Wraps the Universal Sentence Encoder and converts tensors to strings. The Universal Sentence Encoder expects python strings as input and includes its own tokenizer. The attack functions on tensors, so we need to convert vocab indices to tokens and then detokenize the text back into strings. Attributes: detokenizer: Detokenizer accepts a list of tokens, joins them by whitespace, and then undoes the regexes used to tokenize text. vocab: A list of tokens in the vocabulary. padding_index: An integer indicating which vocab entry is the padding token. encoder: This is a tensorflow hub module corresponding to the Universal Sentence Encoder. """ def __init__( self, detokenizer: Callable[[List[str]], str], vocab: List[str], padding_index: int = 0, use_tfhub_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-large/5'): """Initializes the UniversalSentenceEncoderDistance class. Arguments: detokenizer: Detokenizer accepts a list of tokens, joins them by whitespace, and then undoes the regexes used to tokenize text. vocab: A list of tokens in the vocabulary. padding_index: An integer indicating which vocab entry is the padding token. use_tfhub_url: The URL to the Universal Sentence Encoder on the Tensorflow Hub. The default value corresponds to the Transformer based model, but Deep Averaging Networks and multilingual versions are also available. 
""" self._vocab = vocab self._padding_index = padding_index self._detokenizer = detokenizer self._encoder = tfhub.load(use_tfhub_url) def __call__(self, original_sentences: tf.Tensor, adversarial_sentences: tf.Tensor) -> tf.Tensor: """Converts tensors of vocabulary indices to strings and calls the encoder. Arguments: original_sentences: A tensor of token indices in the original sentences. <int32>[batch_size, sentence_length] adversarial_sentences: A tensor of token indices in the adversarial sentences. <int32>[batch_size, sentence_length] Returns: A tensor <float32>[batch_size, 1] of cosine distances between original and adversarial sentences encoded by the Universal Sentence Encoder. """ or
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from solar.orchestration.runner import app
from solar.system_log.operations import move_to_commited
from solar.system_log.operations import set_error

__all__ = ['error_logitem', 'commit_logitem']


def _bare_item_name(task_uuid):
    """Return the part of *task_uuid* after the last ':' separator."""
    return task_uuid.rsplit(':', 1)[-1]


@app.task(name='error_logitem')
def error_logitem(task_uuid):
    """Pass the bare item name parsed from *task_uuid* to ``set_error``."""
    return set_error(_bare_item_name(task_uuid))


@app.task(name='commit_logitem')
def commit_logitem(task_uuid):
    """Pass the bare item name parsed from *task_uuid* to ``move_to_commited``."""
    return move_to_commited(_bare_item_name(task_uuid))
import numpy

import data_generator


class Generator:
    """Renders AX.25 bit-stuffing lookup tables into generated C code.

    The table data comes from the ``data_generator`` module; this class
    only formats it into a C header and a C source file.
    """

    def __init__(self):
        # One table row per (numberOfOnes, byte) combination.
        self.combinations = data_generator.generateAllByteToAx25DataCombinations()
        # Encodings of the 0x7E frame separator for both possible values
        # of the previously transmitted bit.
        self.frameSeparatorOne = data_generator.calculateNewAx25DataFromOldImpl(1, 0, 0x7E, False)
        self.frameSeparatorZero = data_generator.calculateNewAx25DataFromOldImpl(0, 0, 0x7E, False)

    def generateDefinitionsHeader(self, filePath):
        """Write the C header (struct, extern table, macros) to *filePath*.

        BUGFIX: GET_LAST_BIT previously referenced an undefined identifier
        'pData' instead of its 'pAx25EncodedData' parameter, so the
        generated macro could not compile at any call site.
        """
        text = '''#pragma once

#include <stdint.h>

typedef struct AX25EncodedData_t
{
    uint16_t dataGivenThatPreviousBitWasZero;
    uint8_t dataBitsCount;
    uint8_t newNumberOfOnes;
} AX25EncodedData;

//
// To figure out what those values mean see ax25-utils Python project,
// code_generation_v2.py file
//

extern const AX25EncodedData byte2ax25EncodedData[];

#define FRAME_SEPARATOR_GIVEN_THAT_PREVIOUS_BIT_WAS_ZERO ''' + str(self.frameSeparatorZero[0]) + '''
#define FRAME_SEPARATOR_GIVEN_THAT_PREVIOUS_BIT_WAS_ONE ''' + str(self.frameSeparatorOne[0]) + '''

#define GET_VALUE_IF_LAST_BIT_IS_ONE(pAx25EncodedData) \\
    ((~(pAx25EncodedData)->dataGivenThatPreviousBitWasZero) & ((1 << ((pAx25EncodedData)->dataBitsCount)) - 1))

#define GET_VALUE_IF_LAST_BIT_IS_ZERO(pAx25EncodedData) \\
    ((pAx25EncodedData)->dataGivenThatPreviousBitWasZero)

#define GET_LAST_BIT(value, pAx25EncodedData) \\
    (((value) >> ((pAx25EncodedData)->dataBitsCount - 1)) & 1)

#define GENERATE_AX25_TABLE_INDEX(currentNumberOfOnes, byte) \\
    (((currentNumberOfOnes) << 8) + (byte))

#define GET_AX25_ENCODED_DATA_FOR_BYTE(currentNumberOfOnes, byte) \\
    &byte2ax25EncodedData[GENERATE_AX25_TABLE_INDEX((currentNumberOfOnes), (byte))];
'''
        # NOTE(review): GET_AX25_ENCODED_DATA_FOR_BYTE keeps its trailing ';'
        # inside the macro body; existing call sites may rely on it, so it is
        # left unchanged here.
        with open(filePath, 'w+') as f:
            f.write(text)

    def generateSource(self, filePath):
        """Write the C source containing the encoded-data table to *filePath*."""
        text = '''#include "ax25.h"

const AX25EncodedData byte2ax25EncodedData[] = {
'''
        # enumerate() replaces the previous manually maintained counter.
        for i, (oldNumberOfOnes, byte2Encode, newDataGiventLastBitWasZero,
                newLastBitGiventLastBitWasZero, newDataGiventLastBitWasOne,
                newLastBitGiventLastBitWasOne, newDataNumberOfBits,
                newNumberOfOnes) in enumerate(self.combinations):
            text += '    {' + '{:>3}'.format(newDataGiventLastBitWasZero) + ', ' \
                + '{:>2}'.format(newDataNumberOfBits) + ', ' \
                + '{:>2}'.format(newNumberOfOnes) + '}, ' \
                + '// idx = ' + '{:0>4}'.format(i) \
                + ', oldNumberOfOnes = ' + str(oldNumberOfOnes) \
                + ', byte2Encode = ' + '{:0>3}'.format(byte2Encode) + '\n'
        text += '''};
'''
        with open(filePath, 'w+') as f:
            f.write(text)


generator = Generator()
generator.generateDefinitionsHeader("../com-telemetry/src/aprs/generated/ax25.h")
generator.generateSource("../com-telemetry/src/aprs/generated/ax25.c")
from documents.models import Document
from categories.models import Category
import shutil
import os


def move_doc(doc_id, cat_id):
    """Move a document, and its page files on disk, into another category.

    Re-points ``doc.refer_category`` at the new category, relocates every
    page file into the new category's directory, and updates both
    categories' ``documents`` relations.
    """
    doc = Document.objects.get(pk=int(doc_id))
    old_cat = doc.refer_category
    new_cat = Category.objects.get(pk=int(cat_id))
    for page in doc.pages.all():
        # BUGFIX: the previous os.system("mv src dst") shell command broke
        # on paths containing spaces or shell metacharacters and silently
        # ignored failures. shutil.move handles arbitrary paths and raises
        # on error; a trailing "/" makes the destination an existing dir.
        shutil.move(page.get_absolute_path(),
                    new_cat.get_absolute_path() + "/")
    doc.refer_category = new_cat
    doc.save()
    old_cat.documents.remove(doc)
    new_cat.documents.add(doc)
from hypothesis import given
from hypothesis.strategies import binary
from msgpack import packb

from mitmproxy.contentviews import msgpack
from . import full_eval


def msgpack_encode(content):
    """Serialize *content* with msgpack, using the bin type for bytes."""
    return packb(content, use_bin_type=True)


def test_parse_msgpack():
    """parse_msgpack decodes valid payloads and flags undecodable input."""
    assert msgpack.parse_msgpack(msgpack_encode({"foo": 1}))
    assert msgpack.parse_msgpack(b"aoesuteoahu") is msgpack.PARSE_ERROR
    assert msgpack.parse_msgpack(msgpack_encode({"foo": "\xe4\xb8\x96\xe7\x95\x8c"}))


def test_format_msgpack():
    """Formatting a nested structure yields at least one output chunk."""
    nested = {"data": ["str", 42, True, False, None, {}, []]}
    assert list(msgpack.format_msgpack(nested))


def test_view_msgpack():
    """The view renders valid msgpack payloads and rejects garbage."""
    view = full_eval(msgpack.ViewMsgPack())
    assert not view(b"aoesuteoahu")
    payloads = ({}, [1, 2, 3, 4, 5], {"foo": 3}, {"foo": True, "nullvalue": None})
    for payload in payloads:
        assert view(msgpack_encode(payload))


@given(binary())
def test_view_msgpack_doesnt_crash(data):
    """Fuzz the view with arbitrary bytes; it must never raise."""
    view = full_eval(msgpack.ViewMsgPack())
    view(data)


def test_render_priority():
    """Only msgpack content types are claimed by the view."""
    view = msgpack.ViewMsgPack()
    for content_type in ("application/msgpack", "application/x-msgpack"):
        assert view.render_priority(b"data", content_type=content_type)
    assert not view.render_priority(b"data", content_type="text/plain")
# vim: set ts=2 expandtab:
'''
Module: read.py
Desc: unpack data from binary files
Author: John O'Neil
Email: oneil.john@gmail.com
DATE: Thursday, March 13th 2014
'''

import struct

DEBUG = False


class EOFError(Exception):
  """Custom exception raised when we read to EOF.

  NOTE: intentionally shadows the builtin EOFError within this module.
  """
  pass


def split_buffer(length, buf):
  '''Split the provided list, returning the first `length` items + remainder.

  If fewer than `length` items are available, nothing is consumed and an
  empty head is returned. `buf` is mutated in place via pop(0).
  '''
  head = []
  if len(buf) < length:
    return (head, buf)
  for _ in range(length):
    head.append(buf.pop(0))
  return (head, buf)


def dump_list(values):
  '''Print a sequence of integers as space-separated hex values.'''
  print(u' '.join(u'{:#x}'.format(x) for x in values))


def ucb(f):
  '''Read an unsigned char (1 byte) from a binary file or list.

  Raises EOFError (this module's) if no byte is available.
  '''
  if isinstance(f, list):
    if len(f) < 1:
      raise EOFError()
    b, f = split_buffer(1, f)
    return struct.unpack('B', ''.join(b))[0]
  else:
    _f = f.read(1)
    if len(_f) < 1:
      raise EOFError()
    return struct.unpack('B', _f)[0]


def usb(f):
  '''Read a big-endian unsigned short (2 bytes) from a binary file or list.'''
  if isinstance(f, list):
    n, f = split_buffer(2, f)
    return struct.unpack('>H', ''.join(n))[0]
  else:
    _f = f.read(2)
    # BUGFIX: the EOF check must precede the debug print; the old code
    # indexed _f[1] first, so a short read raised IndexError (with DEBUG
    # on) instead of the intended EOFError.
    if len(_f) < 2:
      raise EOFError()
    if DEBUG:
      print("usb: " + hex(ord(_f[0])) + ":" + hex(ord(_f[1])))
    return struct.unpack('>H', _f)[0]


def ui3b(f):
  '''Read a big-endian 3-byte unsigned integer from a binary file or list.'''
  if isinstance(f, list):
    n, f = split_buffer(3, f)
    return struct.unpack('>I', '\x00' + ''.join(n))[0]
  else:
    _f = f.read(3)
    if len(_f) < 3:
      raise EOFError()
    # b'\x00' pads to the 4 bytes '>I' needs; the bytes literal is
    # equivalent on Python 2 and also correct when _f is bytes (Python 3).
    return struct.unpack('>I', b'\x00' + _f)[0]


def uib(f):
  '''Read a big-endian unsigned 32-bit integer from a binary file or list.'''
  if isinstance(f, list):
    n, f = split_buffer(4, f)
    return struct.unpack('>L', ''.join(n))[0]
  else:
    _f = f.read(4)
    if len(_f) < 4:
      raise EOFError()
    return struct.unpack('>L', _f)[0]


def ulb(f):
  '''Read a big-endian unsigned long long (64-bit integer) from a binary file or list.'''
  if isinstance(f, list):
    n, f = split_buffer(8, f)
    return struct.unpack('>Q', ''.join(n))[0]
  else:
    _f = f.read(8)
    if len(_f) < 8:
      raise EOFError()
    return struct.unpack('>Q', _f)[0]


def buffer(f, size):
  '''Read N bytes from either a file or list.

  NOTE: intentionally shadows the (Python 2) builtin `buffer` name.
  '''
  if isinstance(f, list):
    n, f = split_buffer(size, f)
    return ''.join(n)
  else:
    _f = f.read(size)
    if len(_f) < size:
      raise EOFError()
    return _f
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

from tempest.api.compute import base
from tempest import exceptions
from tempest import test


class TenantUsagesNegativeTestJSON(base.BaseV2ComputeAdminTest):
    """Negative tests for the simple-tenant-usage admin API."""

    @classmethod
    def resource_setup(cls):
        super(TenantUsagesNegativeTestJSON, cls).resource_setup()
        cls.adm_client = cls.os_adm.tenant_usages_client
        cls.client = cls.os.tenant_usages_client
        cls.identity_client = cls._get_identity_admin_client()
        # A one-day window centred on "now" is used by all the tests below.
        reference = datetime.datetime.now()
        one_day = datetime.timedelta(days=1)
        cls.start = cls._parse_strtime(reference - one_day)
        cls.end = cls._parse_strtime(reference + one_day)

    @classmethod
    def _parse_strtime(cls, at):
        """Format a datetime the way the usage API expects."""
        return at.strftime('%Y-%m-%dT%H:%M:%S.%f')

    @test.attr(type=['negative', 'gate'])
    def test_get_usage_tenant_with_empty_tenant_id(self):
        """Requesting usage for an empty tenant id must raise NotFound."""
        window = {'start': self.start, 'end': self.end}
        self.assertRaises(exceptions.NotFound,
                          self.adm_client.get_tenant_usage,
                          '', window)

    @test.attr(type=['negative', 'gate'])
    def test_get_usage_tenant_with_invalid_date(self):
        """A start date later than the end date must raise BadRequest."""
        window = {'start': self.end, 'end': self.start}
        self.assertRaises(exceptions.BadRequest,
                          self.adm_client.get_tenant_usage,
                          self.client.tenant_id, window)

    @test.attr(type=['negative', 'gate'])
    def test_list_usage_all_tenants_with_non_admin_user(self):
        """Listing usage across all tenants requires admin privileges."""
        window = {'start': self.start,
                  'end': self.end,
                  'detailed': int(bool(True))}
        self.assertRaises(exceptions.Unauthorized,
                          self.client.list_tenant_usages, window)


class TenantUsagesNegativeTestXML(TenantUsagesNegativeTestJSON):
    _interface = 'xml'
"""Tests the surveytools.footprint module.""" import numpy as np from surveytools.footprint import VphasFootprint, VphasOffset def test_vphas_offset_coordinates(): """Test the offset pattern, which is expected to equal ra -0, dec +0 arcsec for the "a" pointing; ra -588, dec +660 arcsec for the "b" pointing; ra -300, dec +350 arcsec for the "c" pointing. """ vf = VphasFootprint() np.testing.assert_almost_equal(vf.offsets['0001a']['ra'], 97.2192513369) np.testing.assert_almost_equal(vf.offsets['0001a']['dec'], 0) np.testing.assert_almost_equal(vf.offsets['0001b']['ra'], 97.2192513369 - 588/3600.) np.testing.assert_almost_equal(vf.offsets['0001b']['dec'], 0 + 660/3600.) np.testing.assert_almost_equal(vf.offsets['0001c']['ra'], 97.2192513369 - 300/3600.) np.testing.assert_almost_equal(vf.offsets['0001c']['dec'], 0 + 350/3600.) def test_vphas_offset_pattern(): vf = VphasFootprint() for field in ['0500', '1000', '2000']: ra, dec = vf.offsets[field+'a']['ra'], vf.offsets[field+'a']['dec'] np.testing.assert_almost_equal(vf.offsets[field+'b']['ra'], ra - (588/3600.) / np.cos(np.radians(dec))) np.testing.assert_almost_equal(vf.offsets[field+'b']['dec'], dec + 660/3600.) def test_vphas_filenames():
"""Ensure the right filename is returned for a given band/offset.""" assert VphasOffset('1122a').image_filenames['ha'] == 'o20120330_00032.fit' assert VphasOffset('1122b').image_filenames['ha'] == 'o20120330_00034.fit' assert VphasOffset('1122c').image_filenames['ha'] == 'o20120330_00033.fit' assert VphasOffset('1842a').image_filenames['r'] == 'o20
130314_00061.fit' assert VphasOffset('1842b').image_filenames['r'] == 'o20130314_00062.fit' assert VphasOffset('0765a').image_filenames['g'] == 'o20130413_00024.fit' assert VphasOffset('0765b').image_filenames['g'] == 'o20130413_00026.fit' assert VphasOffset('0765c').image_filenames['g'] == 'o20130413_00025.fit' if __name__ == '__main__': test_vphas_filenames()
ata) def get_def(dictionary, key, default_value): if key not in dictionary or len(dictionary[key]) == 0: return default_value return dictionary[key] start_date = datetime.strptime("10/10/2015 10:10", "%d/%m/%Y %H:%M") if len(election["Start date time"]) > 0: try: start_date = datetime.strptime(election["Start date time"], "%d/%m/%Y %H:%M:%S") except: start_date = datetime.strptime(election["Start date time"], "%d/%m/%Y %H:%M") ret = { "id": int(election['Id']) + add_to_id, "authorities": config['authorities'], "director": config['director'], "title": election['Title'], "description": election['Description'], "layout": election.get('Layout', ''), "presentation": { "share_text": [ { "network": "Twitter", "button_text": "", "social_message": election.get('Share Text', '') }, { "network": "Facebook", "button_text": "", "social_message": "__URL__" } ], "theme": election.get('Theme', 'default'), "urls": [], "theme_css": "", "extra_options": parse_extra(election), "show_login_link_on_home": parse_bool(iget(election, 'login link on home', False)), }, "end_date": (start_date + timedelta(hours=int(get_def(election, 'Duration in hours', '24')))).isoformat() + ".001", "start_date": start_date.isoformat() + ".001", "questions": questions, "real": True } return ret def form_to_elections(path, separator, config, add_to_id): ''' Converts the google forms into election configurations ''' election_funcs = { "Título": lambda d: ["title", d], "Descripción": lambda d: ["description", d], "Comienzo": lambda d: ["start_date", datetime.strptime(d, "%m/%d/%Y %H:%M:%S").isoformat()+ ".001"], "Final": lambda d: ["end_date", datetime.strptime(d, "%m/%d/%Y %H:%M:%S").isoformat()+ ".001"], } census_key = "Censo" more_keys = { "¿Más preguntas?": lambda v: "No" not in v } auth_method = config['authapi']['event_config']['auth_method'] question_options_key = "Opciones" question_funcs = { "Título": lambda d: ["title", d], "Descripción": lambda d: ["description", d], "Número de ganadores": lambda d: 
["num_winners", int(d)], "Número máximo de opciones": lambda d: ["max", int(d)], "Número mínimo de opciones": lambda d: ["min", int(d)], "Orden aleatorio": lambda d: ["randomize_answer_order", d == "Aleatorio"], "Resultados": lambda d: ["answer_total_votes_percentage", "over-total-votes" if d == "Sobre votos totales" else "over-total-valid-votes"] } elections = [] base_election = copy.deepcopy(BASE_ELECTION) base_election['director'] = config['director'] base_election['authorities'] = config['authorities'] with open(path, mode='r', encoding="utf-8", errors='strict') as f: fcsv = csv.reader(f, delimiter=',', quotechar='"') keys = fcsv.__next__() for values in fcsv: if len(values) == 0: continue question_num = -1 election = copy.deepcopy(base_election) election['id'] = add_to_id + len(elections) question = None for key, value, index in zip(keys, values, range(len(values))): if question_num == -1 and key not in more_keys.keys() and key in election_funcs.keys(): dest_key, dest_value = election_funcs[key](value) election[dest_key] = dest_value elif key == census_key: if auth_method == "sms": election['census'] = [{"tlf": item} for item in value.split("\n")] else: # email election['census'] = [{"email": item} for item in value.split("\n")] question_num += 1 question = copy.deepcopy(BASE_QUESTION) elif question_num >= 0 and key in question_funcs.keys(): dest_key, dest_value = question_funcs[key](value) question[dest_key] = dest_value elif question_num >= 0 and key == question_options_key: options = value.strip().split("\n") question['answers'] = [{ "id": opt_id, "category": '', "details": '', "sort_order": opt_id, "urls": [], "text": opt } for opt, opt_id in zip(options, range(len(options)))] elif question_num >= 0 and key in more_keys.keys(): question_num += 1 election['questions'].append(question) question = copy.deepcopy(BASE_QUESTION) if not more_keys[key](value): elections.append(election) break return elections if __name__ == '__main__': parser = 
argparse.ArgumentParser(description='Converts a CSV into the json to create an election.') parser.add_argument('-c', '--config-path', help='default config for the election') parser.add_argument('-i', '--input-path', help='input file or directory') parser.add_argument('-o', '--output-path', help='output file or directory') parser.add_argument('-A', '--admin-format', help='use create format for agora-admin instead of agora-elections', action="store_true") parser.add_argument('-a', '--add-to-id', type=int, help='add an int number to the id', default=0) parser.add_argument( '-f', '--format', choices=['csv-blocks', 'tsv-blocks', 'csv-google-forms'], default="csv-blocks", help='output file or directory') args = parser.parse_args() if not os.access(args.input_path, os.R_OK): print("can't read %s" %
args.input_path) exit(2) if os.path.isdir(args.output_path) and not os.access(args.output_path, os.W_OK): print("can't write to %s" % args.output_path)
exit(2) if not os.access(args.config_path, os.R_OK): print("can't read %s" % args.config_path) exit(2) config = None with open(args.config_path, mode='r', encoding="utf-8", errors='strict') as f: config = json.loads(f.read()) try: if args.format == "csv-blocks" or args.format == "tsv-blocks": separator = { "csv-blocks": ",", "tsv-blocks": "\t" }[args.format] extension = { "csv-blocks": ".csv", "tsv-blocks": ".tsv" }[args.format] if os.path.isdir(args.input_path): if not os.path.exists(args.output_path): os.makedirs(args.output_path) i = 0 files = sorted([name for name in os.listdir(args.input_path) if os.path.isfile(os.path.join(args.input_path, name)) and name.endswith(extension)]) for name in files: print("importing %s" % name) file_path = os.path.join(args.input_path, name) blocks = csv_to_blocks(path=file_path, separator=separator) election = blocks_to_election(blocks, config, args.add_to_id) if str(election['id']) + extension != name: print("WARNING: election id %i doesn't match filename %s" % (election['id'], name)) if not args.admin_format: output_path = os.path.join(args.output_path, str(election['id']) + ".config.json") else: output_path = os.path.join(args.output_path, str(i) + ".json") auth_config_path = os.path.join(args.outp
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from StringIO import StringIO

from openerp.modules.module import get_module_resource
import openerp.modules.registry
from openerp.osv import osv
from openerp_sxw2rml import sxw2rml


class report_xml(osv.osv):
    _inherit = 'ir.actions.report.xml'

    # Maps an uploaded file type to the XSL stylesheet that normalizes it.
    _XSL_BY_FILE_TYPE = {
        'sxw': 'normalized_oo2rml.xsl',
        'odt': 'normalized_odt2rml.xsl',
    }

    def _read_normalization_xsl(self, file_type):
        '''
        Return the XSL stylesheet content used to convert file_type to RML.

        BUGFIX: the previous inline code left the stylesheet file handle
        open (resource leak) and raised UnboundLocalError for unsupported
        file types; this raises an explicit ValueError instead.
        '''
        if file_type not in self._XSL_BY_FILE_TYPE:
            raise ValueError("Unsupported report file type: %r" % (file_type,))
        xsl_path = get_module_resource(
            'base_report_designer', 'openerp_sxw2rml',
            self._XSL_BY_FILE_TYPE[file_type])
        with open(xsl_path, 'rb') as fp:
            return fp.read()

    def sxwtorml(self, cr, uid, file_sxw, file_type):
        '''
        The use of this function is to get rml file from sxw file.
        file_sxw is base64-encoded; file_type is 'sxw' or 'odt'.
        '''
        sxwval = StringIO(base64.decodestring(file_sxw))
        xsl = self._read_normalization_xsl(file_type)
        return {'report_rml_content': str(sxw2rml(sxwval, xsl=xsl))}

    def upload_report(self, cr, uid, report_id, file_sxw, file_type, context=None):
        '''
        Store the uploaded sxw/odt source and its RML conversion on report_id.
        '''
        sxwval = StringIO(base64.decodestring(file_sxw))
        xsl = self._read_normalization_xsl(file_type)
        self.pool['ir.actions.report.xml'].write(cr, uid, [report_id], {
            'report_sxw_content': base64.decodestring(file_sxw),
            'report_rml_content': str(sxw2rml(sxwval, xsl=xsl)),
        })
        return True

    def report_get(self, cr, uid, report_id, context=None):
        '''
        Return the base64-encoded sxw and rml content of report_id.
        '''
        if context is None:
            context = {}
        # skip osv.fields.sanitize_binary_value() because we want the raw
        # bytes in all cases
        context.update(bin_raw=True)
        report = self.browse(cr, uid, report_id, context=context)
        sxw_data = report.report_sxw_content
        rml_data = report.report_rml_content
        if isinstance(sxw_data, unicode):
            sxw_data = sxw_data.encode("iso-8859-1", "replace")
        if isinstance(rml_data, unicode):
            rml_data = rml_data.encode("iso-8859-1", "replace")
        return {
            'file_type': report.report_type,
            'report_sxw_content': sxw_data and base64.encodestring(sxw_data) or False,
            'report_rml_content': rml_data and base64.encodestring(rml_data) or False,
        }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
ort slugify, domain, markdown
from newsmeme.permissions import auth, moderator
from newsmeme.models.types import DenormalizedText
from newsmeme.models.users import User


class PostQuery(BaseQuery):
    """Custom query class for Post adding JSON export, access filtering
    and keyword search helpers."""

    def jsonify(self):
        # Lazily yield the JSON-safe dict of each post in the result set.
        for post in self.all():
            yield post.json

    def as_list(self):
        """
        Return restricted list of columns for list queries
        """
        # Defer heavy/sensitive columns so list pages load only what they show.
        deferred_cols = ("description",
                         "tags",
                         "author.email",
                         "author.password",
                         "author.activation_key",
                         "author.openid",
                         "author.date_joined",
                         "author.receive_email",
                         "author.email_alerts",
                         "author.followers",
                         "author.following")

        options = [db.defer(col) for col in deferred_cols]
        return self.options(*options)

    def deadpooled(self):
        # Posts voted down to zero or below.
        return self.filter(Post.score <= 0)

    def popular(self):
        # Posts with a positive score.
        return self.filter(Post.score > 0)

    def hottest(self):
        # Most-discussed first, then highest score, then newest.
        return self.order_by(Post.num_comments.desc(),
                             Post.score.desc(),
                             Post.id.desc())

    def public(self):
        return self.filter(Post.access == Post.PUBLIC)

    def restricted(self, user=None):
        """
        Returns posts filtered for a) public posts b) posts authored by
        the user or c) posts authored by friends
        """
        # Moderators see everything, unfiltered.
        if user and user.is_moderator:
            return self

        criteria = [Post.access == Post.PUBLIC]

        if user:
            criteria.append(Post.author_id == user.id)
            if user.friends:
                criteria.append(db.and_(Post.access == Post.FRIENDS,
                                        Post.author_id.in_(user.friends)))

        # OR all criteria together: any one grants visibility.
        return self.filter(reduce(db.or_, criteria))

    def search(self, keywords):
        # Each whitespace-separated keyword must match at least one of the
        # searched fields (title/description/link/tags/author username);
        # keywords are ANDed together.
        criteria = []
        for keyword in keywords.split():
            keyword = '%' + keyword + '%'
            criteria.append(db.or_(Post.title.ilike(keyword),
                                   Post.description.ilike(keyword),
                                   Post.link.ilike(keyword),
                                   Post.tags.ilike(keyword),
                                   User.username.ilike(keyword)))
        q = reduce(db.and_, criteria)
        return self.filter(q).join(User).distinct()


class Post(db.Model):
    """A submitted story/link with votes, tags, comments and access control."""

    __tablename__ = "posts"

    # Access levels (stored in the `access` column).
    PUBLIC = 100
    FRIENDS = 200
    PRIVATE = 300

    PER_PAGE = 40

    query_class = PostQuery

    id = db.Column(db.Integer, primary_key=True)

    author_id = db.Column(db.Integer,
                          db.ForeignKey(User.id, ondelete='CASCADE'),
                          nullable=False)

    title = db.Column(db.Unicode(200))
    description = db.Column(db.UnicodeText)
    link = db.Column(db.String(250))
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    score = db.Column(db.Integer, default=1)
    num_comments = db.Column(db.Integer, default=0)
    # Set of user ids that voted on this post (denormalized in one column).
    votes = db.Column(DenormalizedText)
    access = db.Column(db.Integer, default=PUBLIC)

    # Raw comma-separated tag string; exposed via the `tags` synonym below.
    _tags = db.Column("tags", db.UnicodeText)

    author = db.relation(User, innerjoin=True, lazy="joined")

    # Default ordering: newest posts first.
    # NOTE(review): mapper-level `order_by` was removed in SQLAlchemy 1.1+;
    # confirm the pinned SQLAlchemy version supports it.
    __mapper_args__ = {'order_by': id.desc()}

    class Permissions(object):
        """Per-instance permission objects for a post, built lazily."""

        def __init__(self, obj):
            self.obj = obj

        @cached_property
        def default(self):
            # Base permission combining the author's identity need with the
            # moderator permission.
            return Permission(UserNeed(self.obj.author_id)) & moderator

        @cached_property
        def view(self):
            # Public posts are viewable by anyone; FRIENDS posts also by the
            # author's friends; otherwise fall back to the default permission.
            if self.obj.access == Post.PUBLIC:
                return Permission()
            if self.obj.access == Post.FRIENDS:
                needs = [UserNeed(user_id) for user_id
                         in self.obj.author.friends]
                return self.default & Permission(*needs)
            return self.default

        @cached_property
        def edit(self):
            return self.default

        @cached_property
        def delete(self):
            return self.default

        @cached_property
        def vote(self):
            # Authenticated users may vote, except prior voters and the author.
            needs = [UserNeed(user_id) for user_id in self.obj.votes]
            needs.append(UserNeed(self.obj.author_id))
            return auth & Denial(*needs)

        @cached_property
        def comment(self):
            return auth

    def __init__(self, *args, **kwargs):
        super(Post, self).__init__(*args, **kwargs)
        # Normalize unset columns so methods below can rely on them.
        self.votes = self.votes or set()
        self.access = self.access or self.PUBLIC

    def __str__(self):
        return self.title

    def __repr__(self):
        return "<%s>" % self

    @cached_property
    def permissions(self):
        return self.Permissions(self)

    def vote(self, user):
        # Record the voter's id; callers are responsible for score updates.
        self.votes.add(user.id)

    def _get_tags(self):
        return self._tags

    def _set_tags(self, tags):
        # Rewrite the raw tag string and resync the Tag association table.
        self._tags = tags
        if self.id:
            # ensure existing tag references are removed
            d = db.delete(post_tags, post_tags.c.post_id == self.id)
            db.engine.execute(d)

        for tag in set(self.taglist):
            slug = slugify(tag)
            tag_obj = Tag.query.filter(Tag.slug == slug).first()
            if tag_obj is None:
                tag_obj = Tag(name=tag, slug=slug)
                db.session.add(tag_obj)
            if self not in tag_obj.posts:
                tag_obj.posts.append(self)

    tags = db.synonym("_tags", descriptor=property(_get_tags, _set_tags))

    @property
    def taglist(self):
        # Comma-separated tag string -> list of non-empty, stripped tags.
        if self.tags is None:
            return []

        tags = [t.strip() for t in self.tags.split(",")]
        return [t for t in tags if t]

    @cached_property
    def linked_taglist(self):
        """
        Returns the tags in the original order and format,
        with link to tag page
        """
        return [(tag, url_for('frontend.tag', slug=slugify(tag)))
                for tag in self.taglist]

    @cached_property
    def domain(self):
        # Display domain of the submitted link; '' for text-only posts.
        if not self.link:
            return ''
        return domain(self.link)

    @cached_property
    def json(self):
        """
        Returns dict of safe attributes for passing into
        a JSON request.
        """
        return dict(post_id=self.id,
                    score=self.score,
                    title=self.title,
                    link=self.link,
                    description=self.description,
                    num_comments=self.num_comments,
                    author=self.author.username)

    @cached_property
    def access_name(self):
        # Human-readable access level; unknown values fall back to "public".
        return {
            Post.PUBLIC: "public",
            Post.FRIENDS: "friends",
            Post.PRIVATE: "private"
        }.get(self.access, "public")

    def can_access(self, user=None):
        # Mirror of Permissions.view as a plain boolean check.
        if self.access == self.PUBLIC:
            return True
        if user is None:
            return False
        if user.is_moderator or user.id == self.author_id:
            return True
        return self.access == self.FRIENDS and self.author_id in user.friends

    @cached_property
    def comments(self):
        """
        Returns comments in tree. Each parent comment has a "comments"
        attribute appended and a "depth" attribute.
        """
        from newsmeme.models.comments import Comment

        comments = Comment.query.filter(Comment.post_id == self.id).all()

        def _get_comments(parent, depth):
            # Recursively attach children of `parent` from the flat list.
            parent.comments = []
            parent.depth = depth
            for comment in comments:
                if comment.parent_id == parent.id:
                    parent.comments.append(comment)
                    _get_comments(comment, depth + 1)

        parents = [c for c in comments if c.parent_id is None]

        for parent in parents:
            _get_comments(parent, 0)

        return parents

    def _url(self, _external=False):
        return url_for('post.view',
                       post_id=self.id,
                       slug=self.slug,
                       _external=_external)
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test for tf_agents.utils.random_py_policy."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf  # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.policies import random_py_policy
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step
from tf_agents.utils import test_utils


class RandomPyPolicyTest(test_utils.TestCase):
  """Unit tests for RandomPyPolicy: action sampling, batching and masking."""

  def setUp(self):
    """Build a minimal single-int observation spec and a matching time step."""
    super(RandomPyPolicyTest, self).setUp()
    self._time_step_spec = time_step.time_step_spec(
        observation_spec=array_spec.ArraySpec((1,), np.int32))
    self._time_step = time_step.restart(observation=np.array([1]))

  def testGeneratesActions(self):
    """Sampled actions match the spec structure and respect [-10, 10] bounds."""
    action_spec = [
        array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
        array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10)
    ]
    policy = random_py_policy.RandomPyPolicy(
        time_step_spec=self._time_step_spec, action_spec=action_spec)
    action_step = policy.action(self._time_step)
    tf.nest.assert_same_structure(action_spec, action_step.action)
    self.assertTrue(np.all(action_step.action[0] >= -10))
    self.assertTrue(np.all(action_step.action[0] <= 10))
    self.assertTrue(np.all(action_step.action[1] >= -10))
    self.assertTrue(np.all(action_step.action[1] <= 10))

  def testGeneratesBatchedActions(self):
    """With outer_dims=(3,), sampled actions gain a leading batch dim of 3."""
    action_spec = [
        array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
        array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10)
    ]
    policy = random_py_policy.RandomPyPolicy(
        time_step_spec=self._time_step_spec,
        action_spec=action_spec,
        outer_dims=(3,))
    action_step = policy.action(self._time_step)
    tf.nest.assert_same_structure(action_spec, action_step.action)
    self.assertEqual((3, 2, 3), action_step.action[0].shape)
    self.assertEqual((3, 1, 2), action_step.action[1].shape)
    self.assertTrue(np.all(action_step.action[0] >= -10))
    self.assertTrue(np.all(action_step.action[0] <= 10))
    self.assertTrue(np.all(action_step.action[1] >= -10))
    self.assertTrue(np.all(action_step.action[1] <= 10))

  def testGeneratesBatchedActionsWithoutSpecifyingOuterDims(self):
    """Batch size is inferred from a batched observation (3 rows here)."""
    action_spec = [
        array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
        array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10)
    ]
    time_step_spec = time_step.time_step_spec(
        observation_spec=array_spec.ArraySpec((1,), np.int32))
    policy = random_py_policy.RandomPyPolicy(
        time_step_spec=time_step_spec, action_spec=action_spec)
    action_step = policy.action(
        time_step.restart(np.array([[1], [2], [3]], dtype=np.int32)))
    tf.nest.assert_same_structure(action_spec, action_step.action)
    self.assertEqual((3, 2, 3), action_step.action[0].shape)
    self.assertEqual((3, 1, 2), action_step.action[1].shape)
    self.assertTrue(np.all(action_step.action[0] >= -10))
    self.assertTrue(np.all(action_step.action[0] <= 10))
    self.assertTrue(np.all(action_step.action[1] >= -10))
    self.assertTrue(np.all(action_step.action[1] <= 10))

  def testPolicyStateSpecIsEmpty(self):
    """A stateless random policy exposes an empty policy_state_spec."""
    policy = random_py_policy.RandomPyPolicy(
        time_step_spec=self._time_step_spec, action_spec=[])
    self.assertEqual(policy.policy_state_spec, ())

  def testMasking(self):
    """Masked-out actions are never sampled; all valid ones appear in 1000 draws."""
    batch_size = 1000
    time_step_spec = time_step.time_step_spec(
        observation_spec=array_spec.ArraySpec((1,), np.int32))
    action_spec = array_spec.BoundedArraySpec((), np.int64, -5, 5)

    # We create a fixed mask here for testing purposes. Normally the mask would
    # be part of the observation.
    mask = [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0]
    np_mask = np.array(mask)
    batched_mask = np.array([mask for _ in range(batch_size)])

    # The splitter ignores the real observation and always returns the fixed
    # batched mask alongside it.
    policy = random_py_policy.RandomPyPolicy(
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        observation_and_action_constraint_splitter=(
            lambda obs: (obs, batched_mask)))

    # NOTE(review): this passes the *spec* object as the observation to
    # restart(); the mask splitter discards it so the test still works, but
    # confirm this is intentional rather than a stand-in observation.
    my_time_step = time_step.restart(time_step_spec, batch_size)
    action_step = policy.action(my_time_step)

    tf.nest.assert_same_structure(action_spec, action_step.action)

    # Sample from the policy 1000 times, and ensure that actions considered
    # invalid according to the mask are never chosen.
    action_ = self.evaluate(action_step.action)
    self.assertTrue(np.all(action_ >= -5))
    self.assertTrue(np.all(action_ <= 5))
    # Indexing the mask by (action - minimum) must always land on a 1.
    self.assertAllEqual(np_mask[action_ - action_spec.minimum],
                        np.ones([batch_size]))

    # Ensure that all valid actions occur somewhere within the batch. Because we
    # sample 1000 times, the chance of this failing for any particular action is
    # (2/3)^1000, roughly 1e-176.
    for index in range(action_spec.minimum, action_spec.maximum + 1):
      if np_mask[index - action_spec.minimum]:
        self.assertIn(index, action_)


if __name__ == '__main__':
  test_utils.main()
.assertEqual( exception.annotation, ( u"+\n" u"^" ) ) def test_group(self): self.assertEqual( parse(u"(a)"), Group(Character(u"a")) ) def test_group_missing_begin(self): with self.assertRaises(ParserError) as context: parse(u"a)") exception = context.exception self.assertEqual( exception.reason, u"found unmatched )" ) self.assertEqual( exception.annotation, ( u"a)\n" u" ^" ) ) def test_group
_missing_end(self): with self.assertRaises(ParserError) as context: parse(u"(a") exception = context.exception self.assertEqual( exception.reason, u"unexpected end of string, expected ) corresponding to (" ) self.assertEqual( exception.annotation, ( u"(a\n" u"^
-^" ) ) def test_either(self): self.assertEqual( parse(u"[ab]"), Either(frozenset(map(Character, u"ab"))) ) def test_either_missing_begin(self): with self.assertRaises(ParserError) as context: parse(u"ab]") exception = context.exception self.assertEqual( exception.reason, u"found unmatched ]" ) self.assertEqual( exception.annotation, ( u"ab]\n" u" ^" ) ) def test_either_missing_end(self): with self.assertRaises(ParserError) as context: parse(u"[ab") exception = context.exception self.assertEqual( exception.reason, u"unexpected end of string, expected ] corresponding to [" ) self.assertEqual( exception.annotation, ( u"[ab\n" u"^--^" ) ) def test_neither(self): self.assertEqual( parse(u"[^ab]"), Neither(frozenset(map(Character, u"ab")), DEFAULT_ALPHABET) ) def test_range(self): self.assertEqual( parse(u"[a-c]"), Either(frozenset([Range( Character(u"a"), Character(u"c"), DEFAULT_ALPHABET )])) ) def test_range_missing_start(self): with self.assertRaises(ParserError) as context: parse(u"[-c]") exception = context.exception self.assertEqual(exception.reason, u"range is missing start") self.assertEqual( exception.annotation, ( u"[-c]\n" u"^" ) ) def test_range_missing_end(self): with self.assertRaises(ParserError) as context: parse(u"[a-]") exception = context.exception self.assertEqual( exception.reason, u"expected character, found instruction: ]" ) self.assertEqual( exception.annotation, ( u"[a-]\n" u" ^" ) ) def test_any(self): parser = Parser(DEFAULT_LANGUAGE, alphabet=frozenset(u"ab")) self.assertEqual( parser.parse(u"."), Any(frozenset(u"ab")) ) class RegexTestWrapper(object): def __init__(self, regex): self.regex = regex self.ast = parse(regex) @property def nfa(self): if not hasattr(self, "_nfa"): self._nfa = self.ast.to_nfa() return self._nfa @property def dfa(self): if not hasattr(self, "_dfa"): self._dfa = self.ast.to_dfa() return self._dfa @property def dfa_table(self): if not hasattr(self, "_dfa_table"): self._dfa_table = self.dfa.to_dfa_table() return 
self._dfa_table @property def matchers(self): if hasattr(self, "_matchers"): return self._matchers return self._iter_matchers() def _iter_matchers(self): self._matchers = [] matcher = lambda x: self._matchers.append(x) or x yield matcher(self.nfa) yield matcher(self.dfa) yield matcher(self.dfa_table) def assertMatches(self, string, expected_end): for matcher in self.matchers: end = matcher.match(string) assert end == expected_end, end def assertAllMatches(self, matches): for string, end in matches: self.assertMatches(string, end) def assertNotMatches(self, string): for matcher in self.matchers: end = matcher.match(string) assert end is None, end def assertNotMatchesAny(self, strings): for string in strings: self.assertNotMatches(string) def assertFindEqual(self, string, span): for matcher in self.matchers: find = matcher.find(string) assert find == Find(string, span), find def assertAllFinds(self, finds): for string, span in finds: self.assertFindEqual(string, span) def assertFindAllEqual(self, string, spans): for matcher in self.matchers: finds = matcher.find_all(string) for find, span in izip(finds, spans): assert find == Find(string, span), find try: find = finds.next() raise AssertionError("unexpected find: %r" % find) except StopIteration: pass def assertSub(self, string, sub, expected_result): for matcher in self.matchers: result = matcher.subn(string, sub) assert result == expected_result, result assert matcher.sub(string, sub) == expected_result[0] class TestMatcher(TestCase): compilers = ["to_nfa", "to_dfa", "to_dfa_table"] @contextmanager def regex(self, regex): yield RegexTestWrapper(regex) def test_epsilon(self): with self.regex(u"") as regex: regex.assertMatches(u"", 0) regex.assertNotMatches(u"a") regex.assertAllFinds([ (u"", Span(0, 0)), (u"a", Span(1, 1)) ]) regex.assertSub(u"", u"a", (u"a", 1)) def test_any(self): with self.regex(u".") as regex: regex.assertMatches(u"a", 1) regex.assertFindEqual(u"a", Span(0, 1)) regex.assertFindAllEqual(u"aa", [ 
Span(0, 1), Span(1, 2) ]) regex.assertSub(u"a", u"b", (u"b", 1)) regex.assertSub(u"aa", u"b", (u"bb", 2)) def test_character(self): with self.regex(u"a") as regex: regex.assertMatches(u"a", 1) regex.assertMatches(u"aa", 1) regex.assertAllFinds([ (u"a", Span(0, 1)), (u"ba", Span(1, 2)) ]) regex.assertFindAllEqual(u"aa", [ Span(0, 1), Span(1, 2) ]) regex.assertFindAllEqual(u"aba", [ Span(0, 1), Span(2, 3) ]) regex.assertSub(u"a", u"b", (u"b", 1)) regex.assertSub(u"ab", u"b", (u"bb", 1)) regex.assertSub(u"aa", u"b", (u"bb", 2)) regex.assertSub(u"bab", u"b", (u"bbb", 1)) def test_concatenation(self): with self.regex(u"ab") as regex: regex.assertMatches(u"ab", 2) regex.assertMatches(u"abab", 2) regex.assertAllFinds([ (u"ab", Span(0, 2)), (U"cab", Span(1, 3)) ]) regex.assertFindAllEqual(u"abab", [ Span(0, 2), Span(2, 4) ]) regex.assertFindAllEqual(u"abcab", [ Span(0, 2), Span(3, 5) ]) regex.assertSub(u"ab", u"c", (u"c", 1)) regex.a
namely, the type of request, the package and os required (if applies). :param: msg (string) the content of the email to be parsed. :return: (list) 3-tuple with the type of request, os and pt info. """ # by default we asume the request is asking for help req = {} req['type'] = 'help' req['os'] = None # core knows what OS are supported supported_os = self.core.get_supported_os() # search for OS or mirrors request # if nothing is found, help by default found_request = False words = re.split('\s+', msg.strip()) for word in words: if not found_request: # OS first for os in supported_os: if re.match(os, word, re.IGNORECASE): req['os'] = os req['type'] = 'links' found_request = True break # mirrors if re.match("mirrors?", word, re.IGNORECASE): req['type'] = 'mirrors' found_request = True else: break return req def _create_email(self, from_addr, to_addr, subject, msg): """Create an email object. This object will be used to construct the reply. :param: from_addr (string) the address of the sender. :param: to_addr (string) the address of the recipient. :param: subject (string) the subject of the email. :param: msg (string) the content of the email. :return: (object) the email object. """ email_obj = MIMEMultipart() email_obj.set_charset("utf-8") email_obj['Subject'] = subject email_obj['From'] = from_addr email_obj['To'] = to_addr msg_attach = MIMEText(msg, 'plain') email_obj.attach(msg_attach) return email_obj def _send_email(self, from_addr, to_addr, subject, msg, attach=None): """Send an email. Take a 'from' and 'to' addresses, a subject and the content, creates the email and send it. :param: from_addr (string) the address of the sender. :param: to_addr (string) the address of the recipient. :param: subject (string) the subject of the email. :param: msg (string) the content of the email. :param: attach (string) the path of the mirrors list. 
""" email_obj = self._create_email(from_addr, to_addr, subject, msg) if(attach): # for now, the only email with attachment is the one for mirrors try: part = MIMEBase('application', "octet-stream") part.set_payload(open(attach, "rb").read()) Encoders.encode_base64(part) part.add_header( 'Content-Disposition', 'attachment; filename="mirrors.txt"' ) email_obj.attach(part) except IOError as e: raise SendEmailError('Error with mirrors: %s' % str(e)) try: s = smtplib.SMTP("localhost") s.sendmail(from_addr, to_addr, email_obj.as_string()) s.quit() except smtplib.SMTPException as e: raise SendEmailError("Error with SMTP: %s" % str(e)) def _send_links(self, links, lc, os, from_addr, to_addr): """Send links to the user. Get the message in the proper language (according to the locale), replace variables and send the email. :param: links (string) the links to be sent. :param: lc (string) the locale. :param: os (string) the operating system. :param: from_addr (string) the address of the sender. :param: to_addr (string) the address of the recipient. """ # obtain the content in the proper language and send it try: links_subject = self._get_msg('links_subject', 'en') links_msg = self._get_msg('links_msg', 'en') links_msg = links_msg % (OS[os], links) self._send_email( from_addr, to_addr, links_subject, links_msg, None ) except ConfigError as e: raise InternalError("Error while getting message %s" % str(e)) except SendEmailError as e: raise InternalError("Error while sending links message") def _send_mirrors(self, lc, from_addr, to_addr): """Send mirrors message. Get the message in the proper language (according to the locale), replace variables (if any) and send the email. :param: lc (string) the locale. :param: from_addr (string) the address of the sender. :param: to_addr (string) the address of the recipient. 
""" # obtain the content in the proper language and send it try: mirrors_subject = self._get_msg('mirrors_subject', lc) mirrors_msg = self._get_msg('mirrors_msg', lc) self._send_email( from_addr, to_addr, mirrors_subject, mirrors_msg, self.mirrors ) except ConfigError as e: raise InternalError("Error while getting message %s" % str(e)) except SendEmailError as e: raise InternalError("Error while sending mirrors message") def _send_help(self, lc, from_addr, to_addr): """Send help message. Get the message in the proper language (according to the locale), replace variables (if any) and send the email. :param: lc (string) the locale. :param: from_addr (string) the address of the sender. :param: to_addr (string) the address of the recipient. """ # obtain the content in the proper language and send it try: help_subject = self._get_msg('help_subject', lc) help_msg = self._get_msg('help_msg', lc) self._send_email(from_addr, to_addr, help_subject, help_msg, None) except ConfigError as e: raise InternalError("Error while getting message %s" % str(e)) except SendEmailError as e: raise InternalError("Error while sending help message") def process_email(self, raw_msg): """Process the email received. Create an email object from the string received. The processing flow is as following: - check for blacklisted address. - parse the email. - check the type of request. - send reply. :param: raw_msg (string) the email received. :raise: InternalError if something goes wrong while asking for the links to the Core module. """ self.log.debug("Processing email") parsed_msg = e
mail.message_from_string(raw_msg) content = self._get_content(parsed_msg) from_addr = parsed_msg['From'] to_addr = parsed_msg['To'] bogus_request = False
status = '' req = None try: # two ways for a request to be bogus: address malformed or # blacklisted try: self.log.debug("Normalizing address...") norm_from_addr = self._get_normalized_address(from_addr) except AddressError as e: bogus_request = True self.log.info('invalid; none; none') if norm_from_addr: anon_addr = utils.get_sha256(norm_from_addr) if self._is_blacklisted(anon_addr): bogus_request = True self.log.info('blacklist; none; none') if not bogus_request: # try to figure out what the user is asking self.log.debug("Request seems legit; parsing it...") req = self._parse_email(content, to_addr) # our address should have the locale requested our_addr = "gettor+%s@%s" % (req['lc'], self.our_domain) # possible options: help, links, m
ad it and override the default options. Most tools in $ROOT/tools take a --cfg option to specify an override file. - See tools/{train,test}_net.py for example code that uses cfg_from_file() - See experiments/cfgs/*.yml for example YAML config override files """ import os import os.path as osp import numpy as np import math # `pip install easydict` if you don't have it from easydict import EasyDict as edict __C = edict() # Consumers can get config by: # from fast_rcnn_config import cfg cfg = __C # region proposal network (RPN) or not __C.IS_RPN = False __C.FLIP_X = False __C.INPUT = 'COLOR' # multiscale training and testing __C.IS_MULTISCALE = True __C.IS_EXTRAPOLATING = True # __C.REGION_PROPOSAL = 'RPN' __C.NET_NAME = 'CaffeNet' __C.SUBCLS_NAME = 'voxel_exemplars' # # Training options # __C.TRAIN = edict() __C.TRAIN.VISUALIZE = False __C.TRAIN.VERTEX_REG = False __C.TRAIN.GRID_SIZE = 256 __C.TRAIN.CHROMATIC = False # Scales to compute real features __C.TRAIN.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0) # The number of scales per octave in the image pyramid # An octave is the set of scales up to half of the initial scale __C.TRAIN.NUM_PER_OCTAVE = 4 # parameters for ROI generating __C.TRAIN.SPATIAL_SCALE = 0.0625 __C.TRAIN.KERNEL_SIZE = 5 # Aspect ratio to use during training __C.TRAIN.ASPECTS = (1, 0.75, 0.5, 0.25) # Images to use per minibatch __C.TRAIN.IMS_PER_BATCH = 2 # Minibatch size (number of regions of interest [ROIs]) __C.TRAIN.BATCH_SIZE = 128 # Fraction of minibatch that is labeled foreground (i.e. class > 0) __C.TRAIN.FG_FRACTION = 0.25 # Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH) __C.TRAIN.FG_THRESH = (0.5,) # Overlap threshold for a ROI to be considered background (class = 0 if # overlap in [LO, HI)) __C.TRAIN.BG_THRESH_HI = (0.5,) __C.TRAIN.BG_THRESH_LO = (0.1,) # Use horizontally-flipped images during training? 
__C.TRAIN.USE_FLIPPED = True # Train bounding-box regressors __C.TRAIN.BBOX_REG = True # Overlap required between a ROI and ground-truth box in order for that ROI to # be used as a bounding-box regression training example __C.TRAIN.BBOX_THRESH = (0.5,) # Iterations between snapshots __C.TRAIN.SNAPSHOT_ITERS = 10000 # solver.prototxt specifies the snapshot path prefix, this adds an optional # infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel __C.TRAIN.SNAPSHOT_INFIX = '' # Use a prefetch thread in roi_data_layer.layer # So far I haven't found this useful; likely more engineering work is required __C.TRAIN.USE_PREFETCH = False # Train using subclasses __C.TRAIN.SUBCLS = True # Train using viewpoint __C.TRAIN.VIEWPOINT = False # Threshold of ROIs in training RCNN __C.TRAIN.ROI_THRESHOLD = 0.1 # IOU
>= thresh: positive example __C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7 # IOU < thresh: negative example __C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3 # If an anchor statisfied by positive and negative conditions set to negative __C.TRAIN.RPN_CLOBBER_POSITIVES = False # Max number of foreground examples __C.TRAIN.RPN_FG_FRACTION = 0.5 # Total number of examples __C.TRAIN.RPN_BATCHSIZE = 256 # NMS threshold used on RPN proposals __C.TRAIN.RPN_NMS_THR
ESH = 0.7 # Number of top scoring boxes to keep before apply NMS to RPN proposals __C.TRAIN.RPN_PRE_NMS_TOP_N = 12000 # Number of top scoring boxes to keep after applying NMS to RPN proposals __C.TRAIN.RPN_POST_NMS_TOP_N = 2000 # Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale) __C.TRAIN.RPN_MIN_SIZE = 16 # Deprecated (outside weights) __C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0) # Give the positive RPN examples weight of p * 1 / {num positives} # and give negatives a weight of (1 - p) # Set to -1.0 to use uniform example weighting __C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0 __C.TRAIN.RPN_BASE_SIZE = 16 __C.TRAIN.RPN_ASPECTS = [0.25, 0.5, 0.75, 1, 1.5, 2, 3] # 7 aspects __C.TRAIN.RPN_SCALES = [2, 2.82842712, 4, 5.65685425, 8, 11.3137085, 16, 22.627417, 32, 45.254834] # 2**np.arange(1, 6, 0.5), 10 scales # # Testing options # __C.TEST = edict() __C.TEST.IS_PATCH = False; __C.TEST.VERTEX_REG = False __C.TEST.VISUALIZE = False # Scales to compute real features __C.TEST.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0) # The number of scales per octave in the image pyramid # An octave is the set of scales up to half of the initial scale __C.TEST.NUM_PER_OCTAVE = 4 # Aspect ratio to use during testing __C.TEST.ASPECTS = (1, 0.75, 0.5, 0.25) # parameters for ROI generating __C.TEST.SPATIAL_SCALE = 0.0625 __C.TEST.KERNEL_SIZE = 5 # Overlap threshold used for non-maximum suppression (suppress boxes with # IoU >= this threshold) __C.TEST.NMS = 0.5 # Experimental: treat the (K+1) units in the cls_score layer as linear # predictors (trained, eg, with one-vs-rest SVMs). 
__C.TEST.SVM = False # Test using bounding-box regressors __C.TEST.BBOX_REG = True # Test using subclass __C.TEST.SUBCLS = True # Train using viewpoint __C.TEST.VIEWPOINT = False # Threshold of ROIs in testing __C.TEST.ROI_THRESHOLD = 0.1 __C.TEST.ROI_THRESHOLD_NUM = 80000 __C.TEST.ROI_NUM = 2000 __C.TEST.DET_THRESHOLD = 0.0001 ## NMS threshold used on RPN proposals __C.TEST.RPN_NMS_THRESH = 0.7 ## Number of top scoring boxes to keep before apply NMS to RPN proposals __C.TEST.RPN_PRE_NMS_TOP_N = 6000 ## Number of top scoring boxes to keep after applying NMS to RPN proposals __C.TEST.RPN_POST_NMS_TOP_N = 300 # Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale) __C.TEST.RPN_MIN_SIZE = 16 # # MISC # # The mapping from image coordinates to feature map coordinates might cause # some boxes that are distinct in image space to become identical in feature # coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor # for identifying duplicate boxes. # 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16 __C.DEDUP_BOXES = 1./16. # Pixel mean values (BGR order) as a (1, 1, 3) array # These are the values originally used for training VGG16 __C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]]) # For reproducibility __C.RNG_SEED = 3 # A small number that's used many times __C.EPS = 1e-14 # Root directory of project __C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..')) # Place outputs under an experiments directory __C.EXP_DIR = 'default' # Use GPU implementation of non-maximum suppression __C.USE_GPU_NMS = True # Default GPU device id __C.GPU_ID = 0 def get_output_dir(imdb, net): """Return the directory where experimental artifacts are placed. A canonical path is built using the name from an imdb and a network (if not None). 
""" path = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name)) if net is None: return path else: return osp.join(path, net.name) def _add_more_info(is_train): # compute all the scales if is_train: scales_base = __C.TRAIN.SCALES_BASE num_per_octave = __C.TRAIN.NUM_PER_OCTAVE else: scales_base = __C.TEST.SCALES_BASE num_per_octave = __C.TEST.NUM_PER_OCTAVE num_scale_base = len(scales_base) num = (num_scale_base - 1) * num_per_octave + 1 scales = [] for i in xrange(num): index_scale_base = i / num_per_octave sbase = scales_base[index_scale_base] j = i % num_per_octave if j == 0: scales.append(sbase) else: sbase_next = scales_base[index_scale_base+1] step = (sbase_next - sbase) / num_per_octave scales.append(sbase + j * step) if is_train: __C.TRAIN.SCALES = scales else: __C.TEST.SCALES = scales print scales # map the scales to scales for RoI pooling of classification if is_train: kernel_size = __C.TRAIN.KERNEL_SIZE / __C.TRAIN.SPATIAL_SCALE else: kernel_size = __C.TEST.KERNEL_SIZE / __C.TEST.SPATIAL_SCALE area = kernel_size * kernel_size scales = np.array(scales) areas = np.repeat(area, num) / (scales ** 2) scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2) diff_areas = np.abs(scaled_
""" Parse spotify URLs """ from __future__ import unicode_literals import re import logging log = logging.getLogger('spotify') def handle_privmsg(bot, user, channel, args): """Grab Spotify URLs from the messages and handle them""" m = re.match(".*(http:\/\/open.spotify.com\/|spotify:)(?P<item>album|artist|track|user[:\/]\S+[:\/]playlist)[:\/](?P<id>[a-zA-Z0-9]+)\/?.*", args) if not m: return None spotify_id = m.group('id') item = m.group('item').replace(':', '/').split('/') item[0] += 's' if item[0] == 'users': # All playlists seem to return 401 at the time, even the public ones return None apiurl = "https://api.spotify.com/v1/%s/%s" % ('/'.join(item), spotify_id) r = bot.get_url(api
url) if r.status_code != 200: if r.status_code not in [401, 403]: log.warning('Spotify API returned %s while trying to fetch %s' % r.status_code, apiurl) return data = r.json() title = '[Spotify] ' if item[0] in ['albums', 'tracks']: artists = [] for artist in data['artists']: artists.append(artist['name']) title += ', '.join(artists) if item[0] == 'albums': title += ' - %s (%s)' % (data['name'], data['relea
se_date']) if item[0] == 'artists': title += data['name'] genres_n = len(data['genres']) if genres_n > 0: genitive = 's' if genres_n > 1 else '' genres = data['genres'][0:4] more = ' +%s more' % genres_n - 5 if genres_n > 4 else '' title += ' (Genre%s: %s%s)' % (genitive, ', '.join(genres), more) if item[0] == 'tracks': title += ' - %s - %s' % (data['album']['name'], data['name']) return bot.say(channel, title)
if tokens[3] != 'as': raise template.TemplateSyntaxError("Third argument in %r must be 'as'" % tokens[0]) return cls( object_expr = parser.compile_filter(tokens[2]), as_varname = tokens[4], ) # {% get_whatever for app.model pk as varname %} elif len(tokens) == 6: if tokens[4] != 'as': raise template.TemplateSyntaxError("Fourth argument in %r must be 'as'" % tokens[0]) return cls( ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]), object_pk_expr = parser.compile_filter(tokens[3]), as_varname = tokens[5] ) else: raise template.TemplateSyntaxError("%r tag requires 4 or 5 arguments" % tokens[0]) handle_token = classmethod(handle_token) #@staticmethod def lookup_content_type(token, tagname): try: app, model = token.split('.') return ContentType.objects.get(app_label=app, model=model) except ValueError: raise template.TemplateSyntaxError("Third argument in %r must be in the format 'app.model'" % tagname) except ContentType.DoesNotExist: raise template.TemplateSyntaxError("%r tag has non-existant content-type: '%s.%s'" % (tagname, app, model)) lookup_content_type = staticmethod(lookup_content_type) def __init__(self, ctype=None, object_pk_expr=None, object_expr=None, as_varname=None, comment=None): if ctype is None and object_expr is None: raise template.TemplateSyntaxError("Comment nodes must be given either a literal object or a ctype and object pk.") self.comment_model = comments.get_model() self.as_varname = as_varname self.ctype = ctype self.object_pk_expr = object_pk_expr self.object_expr = object_expr self.comment = comment def render(self, context): qs = self.get_query_set(context) context[self.as_varname] = self.get_context_value_from_queryset(context, qs) return '' def get_query_set(self, context): ctype, object_pk = self.get_target_ctype_pk(context) if not object_pk: return self.comment_model.objects.none() qs = self.comment_model.objects.filter( content_type = ctype, object_pk = smart_unicode(object_pk), site__pk = settings.SITE_ID, ) # The 
is_public and is_removed fields are implementation details of the # built-in comment model's spam filtering system, so they might not # be present on a custom comment model subclass. If they exist, we # should filter on them. field_names = [f.name for f in self.comment_model._meta.fields] if 'is_public' in field_names: qs = qs.filter(is_public=True) if getattr(settings, 'COMMENTS_HIDE_REMO
# NOTE(review): this chunk begins mid-statement — the line below is the tail of
# a settings lookup (presumably `getattr(settings, 'COMMENTS_HIDE_REMO...`;
# the leading part lies before this view — confirm against the full file).
# Removed comments are filtered out only when the model actually has an
# `is_removed` field.
VED', True) and 'is_removed' in field_names:
            qs = qs.filter(is_removed=False)
        return qs

    def get_target_ctype_pk(self, context):
        """Resolve the tag's target into a ``(ContentType, object_pk)`` pair.

        Returns ``(None, None)`` when the object expression cannot be
        resolved from the template context.
        """
        if self.object_expr:
            # {% ... for obj %} form: resolve the object from the context,
            # then derive its content type and primary key.
            try:
                obj = self.object_expr.resolve(context)
            except template.VariableDoesNotExist:
                return None, None
            return ContentType.objects.get_for_model(obj), obj.pk
        else:
            # {% ... for app.model pk %} form: the content type was looked up
            # at parse time; only the pk expression needs resolving here.
            # ignore_failures=True yields None instead of raising on a
            # missing variable.
            return self.ctype, self.object_pk_expr.resolve(context, ignore_failures=True)

    def get_context_value_from_queryset(self, context, qs):
        """Subclasses should override this."""
        raise NotImplementedError


class CommentListNode(BaseCommentNode):
    """Insert a list of comments into the context."""

    def get_context_value_from_queryset(self, context, qs):
        # Materialize the queryset so the template gets a plain list.
        return list(qs)


class CommentCountNode(BaseCommentNode):
    """Insert a count of comments into the context."""

    def get_context_value_from_queryset(self, context, qs):
        return qs.count()


class CommentFormNode(BaseCommentNode):
    """Insert a form for the comment model into the context."""

    def get_form(self, context):
        # Build a comment form bound to the target object, or None when the
        # target cannot be resolved from the context.
        ctype, object_pk = self.get_target_ctype_pk(context)
        if object_pk:
            return comments.get_form()(ctype.get_object_for_this_type(pk=object_pk))
        else:
            return None

    def render(self, context):
        # Store the form under the variable name given by the 'as' clause;
        # the tag itself renders nothing.
        context[self.as_varname] = self.get_form(context)
        return ''


class RenderCommentFormNode(CommentFormNode):
    """Render the comment form directly"""

    #@classmethod
    def handle_token(cls, parser, token):
        """Class method to parse render_comment_form and return a Node."""
        tokens = token.contents.split()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])

        # {% render_comment_form for obj %}
        if len(tokens) == 3:
            return cls(object_expr=parser.compile_filter(tokens[2]))

        # {% render_comment_form for app.models pk %}
        elif len(tokens) == 4:
            return cls(
                ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr = parser.compile_filter(tokens[3])
            )
    # Pre-decorator-syntax classmethod registration (kept for the old-Django
    # style used throughout this module).
    handle_token = classmethod(handle_token)

    def render(self, context):
        """Render the form template, most specific template first."""
        ctype, object_pk = self.get_target_ctype_pk(context)
        if object_pk:
            # Search order: per-model, per-app, then the global fallback.
            template_search_list = [
                "comments/%s/%s/form.html" % (ctype.app_label, ctype.model),
                "comments/%s/form.html" % ctype.app_label,
                "comments/form.html"
            ]
            # push/pop so the extra "form" variable doesn't leak into the
            # surrounding template context.
            context.push()
            formstr = render_to_string(template_search_list, {"form" : self.get_form(context)}, context)
            context.pop()
            return formstr
        else:
            return ''


class RenderCommentListNode(CommentListNode):
    """Render the comment list directly"""

    #@classmethod
    def handle_token(cls, parser, token):
        """Class method to parse render_comment_list and return a Node."""
        tokens = token.contents.split()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])

        # {% render_comment_list for obj %}
        if len(tokens) == 3:
            return cls(object_expr=parser.compile_filter(tokens[2]))

        # {% render_comment_list for app.models pk %}
        elif len(tokens) == 4:
            return cls(
                ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr = parser.compile_filter(tokens[3])
            )
    # Same pre-decorator classmethod registration as above.
    handle_token = classmethod(handle_token)

    def render(self, context):
        """Render the list template, most specific template first."""
        ctype, object_pk = self.get_target_ctype_pk(context)
        if object_pk:
            # Search order: per-model, per-app, then the global fallback.
            template_search_list = [
                "comments/%s/%s/list.html" % (ctype.app_label, ctype.model),
                "comments/%s/list.html" % ctype.app_label,
                "comments/list.html"
            ]
            qs = self.get_query_set(context)
            context.push()
            liststr = render_to_string(template_search_list, {
                "comment_list" : self.get_context_value_from_queryset(context, qs)
            }, context)
            context.pop()
            return liststr
        else:
            return ''

# We could just register each classmethod directly, but then we'd lose out on
# the automagic docstrings-into-admin-docs tricks. So each node gets a cute
# wrapper function that just exists to hold the docstring.

#@register.tag
def get_comment_count(parser, token):
    """
    Gets the comment count for the given params and populates the template
    context with a variable containing that value, whose name is defined by the
    'as' clause.

    Syntax::

        {% get_comment_count for [object] as [varname] %}
        {% get_comment_count for [app].[model] [object_id] as [varname] %}

    Ex
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from novel import serial, utils

# URL template for a novel's table-of-contents page, keyed by title id.
BASE_URL = 'http://www.quanben5.com/n/{}/xiaoshuo.html'


class Quanben5(serial.SerialNovel):
    """Serial-novel scraper for quanben5.com."""

    def __init__(self, tid):
        # Expand the TOC URL for this title id before handing everything to
        # the SerialNovel base class.
        toc_url = utils.base_to_url(BASE_URL, tid)
        super().__init__(
            toc_url,
            '#content',
            intro_sel='.description',
            chap_type=serial.ChapterType.path,
            chap_sel='.list li',
            tid=tid,
        )

    def get_title_and_author(self):
        """Return the (title, author) pair scraped from the parsed page."""
        return self.doc('h1').text(), self.doc('.author').text()