prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


"""Tools configuration plugin."""


import gettext
import os


def _(m):
    return gettext.dgettext(message=m, domain='ovirt-engine-setup')


from otopi import constants as otopicons
from otopi import util
from otopi import filetransaction
from otopi import plugin


from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common \
    import constants as oengcommcons


@util.export
class Plugin(plugin.PluginBase):
    """Tools configuration plugin.

    Rewrites the ``engine=`` line of each tool drop-in configuration file
    when the engine host is renamed.
    """

    # 'section' is not used here, left for reference - hopefully
    # one day the code will be merged with the generation code
    TOOLS_CONFIG = (
        {
            'dir': '{engine_sysconf}/isouploader.conf.d',
            'section': 'ISOUploader',
        },
        {
            'dir': '{engine_sysconf}/imageuploader.conf.d',
            'section': 'ImageUploader',
        },
        {
            'dir': '{engine_sysconf}/logcollector.conf.d',
            'section': 'LogCollector',
        },
    )

    def _entry_filename(self, entry):
        """Return the absolute path of the drop-in file for *entry*."""
        template = os.path.join(entry['dir'], '10-engine-setup.conf')
        return template.format(
            engine_sysconf=(
                oenginecons.FileLocations.OVIRT_ENGINE_SYSCONFDIR
            ),
        )

    def _content_with_renamed_fqdn(self, config):
        """Read *config* and rewrite every 'engine=' line to the new
        FQDN:port taken from the environment.

        Returns the file content as a list of lines without newlines.
        """
        engine_line = 'engine=%s:%s' % (
            self.environment[osetupcons.RenameEnv.FQDN],
            self.environment[oengcommcons.ConfigEnv.PUBLIC_HTTPS_PORT],
        )
        with open(config, 'r') as source:
            stripped = [raw.rstrip('\n') for raw in source]
        return [
            engine_line if entry.startswith('engine=') else entry
            for entry in stripped
        ]

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
    )
    def _setup(self):
        """Register every tool drop-in file as rename-modified."""
        targets = self.environment[
            osetupcons.RenameEnv.FILES_TO_BE_MODIFIED
        ]
        for entry in self.TOOLS_CONFIG:
            targets.append(self._entry_filename(entry))

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
    )
    def _misc(self):
        """Queue a file transaction per tool that rewrites its engine URL."""
        transaction = self.environment[otopicons.CoreEnv.MAIN_TRANSACTION]
        modified = self.environment[otopicons.CoreEnv.MODIFIED_FILES]
        for entry in self.TOOLS_CONFIG:
            name = self._entry_filename(entry)
            transaction.append(
                filetransaction.FileTransaction(
                    name=name,
                    content=self._content_with_renamed_fqdn(name),
                    modifiedList=modified,
                )
            )


# vim: expandtab tabstop=4 shiftwidth=4
#!/usr/bin/env python
#
#   email.py
#   TurboHvZ
#
#   Copyright (C) 2008 Ross Light
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

"""
Support for email

:Variables:
    cell_providers : dict
        Dictionary of cell phone supported providers.  Each value is a
        ``(name, sms_domain)`` pair.
"""

import turbogears
import turbomail
from turbomail.message import Message

__author__ = 'Ross Light'
__date__ = 'April 16, 2008'
__docformat__ = 'reStructuredText'
__all__ = ['GenshiMessage',
           'sendmail',
           'send_generic_mail',
           'send_sms',]

# NOTE(review): `_` is assumed to be the i18n function TurboGears installs
# into builtins at startup -- confirm before importing this module standalone.
cell_providers = \
{
    'att': (_("AT&T"), 'mms.att.net'),
    'nextel': (_("Nextel"), 'messaging.nextel.com'),
    'sprint': (_("Sprint"), 'messaging.sprintpcs.com'),
    't-mobile': (_("T-Mobile"), 'tmomail.net'),
    'verizon': (_("Verizon"), 'vtext.com'),
    'virgin': (_("Virgin Mobile"), 'vmobl.com'),
    'boost': (_("Boost"), 'myboostmobile.com'),
}

class GenshiMessage(Message):
    """A message created from a Genshi template."""
    def __init__(self, sender, recipient, subject, template, variables=None,
                 **kw):
        """
        Store the additional template and variable information.

        :Parameters:
            template : str
                A dot-path to a valid Genshi template.
            variables : dict
                A dictionary containing named variables to pass to the
                template engine.  ``None`` (the default) means no extra
                variables.
        """
        self.plain_only = kw.pop('plain_only', False)
        self._template = template
        self._variables = dict(sender=sender,
                               recipient=recipient,
                               subject=subject)
        # BUG FIX: the default used to be the mutable literal ``{}``, which
        # is shared between every call; use a None sentinel instead.
        if variables:
            self._variables.update(variables)
        super(GenshiMessage, self).__init__(sender, recipient, subject, **kw)

    def _process(self):
        """Automatically generate the plain and rich text content."""
        turbogears.view.base.load_engines()
        data = dict()
        # Callables are resolved lazily so values can be computed at send
        # time rather than at message-construction time.
        for (i, j) in self._variables.iteritems():
            if callable(j):
                data[i] = j()
            else:
                data[i] = j
        engine = turbogears.view.engines.get('genshi')
        encoding = turbogears.config.get('genshi.encoding', 'utf-8')
        # Render the plain-text part first; the template can branch on
        # ``email_format``.
        data['email_format'] = 'plain'
        self.plain = engine.render(data, template=self._template,
                                   format="text")
        self.plain = self._clean_plain(self.plain)
        self.plain = self.plain.decode(encoding)
        if not self.plain_only:
            data['email_format'] = 'rich'
            self.rich = engine.render(data, template=self._template)
            self.rich = self.rich.decode(encoding)
        return super(GenshiMessage, self)._process()

    @staticmethod
    def _clean_plain(text):
        """Collapse runs of blank lines and normalize line endings to CRLF."""
        text = text.strip()
        lines = []
        for line in text.splitlines():
            line = line.strip()
            try:
                last_line = lines[-1]
            except IndexError:
                last_line = None
            if line or last_line:
                # Only allow one blank line between text chunks
                lines.append(line)
        return '\r\n'.join(lines)

def sendmail(recipient, subject, template, variables=None, **kw):
    """
    Conveniently sends an email.

    This will immediately return if mail has been turned off.  The sender is
    set to the value of the configuration value ``hvz.webmaster_email``.

    :Returns: The newly created message
    :ReturnType: turbomail.message.Message
    """
    if not turbogears.config.get('mail.on', False):
        # Mail has been turned off, ignore it.
        return
    # BUG FIX: ``variables={}`` mutable default replaced with None sentinel;
    # the caller's dict is still never mutated.
    variables = dict(variables) if variables else {}
    variables.setdefault('message_format', 'email')
    from_address = turbogears.config.get('hvz.webmaster_email')
    new_message = GenshiMessage(from_address, recipient, subject, template,
                                variables, **kw)
    turbomail.enqueue(new_message)
    return new_message

def send_generic_mail(recipients, subject, message):
    """
    Conveniently sends a custom email.

    This will immediately return if mail has been turned off.  The sender is
    set to the value of the configuration value ``hvz.webmaster_email``.

    :Returns: The newly created message
    :ReturnType: turbomail.message.Message
    """
    return sendmail(recipients, subject, "hvz.templates.mail.generic",
                    dict(subject=subject,
                         content=message,))

def send_sms(numbers, subject, template, variables=None):
    """
    Sends a text message.

    :Parameters:
        numbers : tuple or list of tuple
            Numbers to send to.  Each item must be a ``(number, provider)``
            pair where number is a ten-digit US phone number.
        subject : unicode
            Subject to send with
        template : unicode
            Template to use
        variables : dict
            Variables to pass to template (``None`` for none)
    :Returns: The newly created message
    :ReturnType: turbomail.message.Message
    """
    def _make_address(item):
        number, provider = item
        if len(number) != 10:
            raise ValueError('Number is not a valid US phone number')
        provider_name, provider_domain = cell_providers[provider]
        return number + '@' + provider_domain
    if not turbogears.config.get('hvz.notify_sms', True):
        # SMS has been turned off, ignore it.
        return
    # A single (number, provider) pair is accepted as shorthand for a list.
    if isinstance(numbers, tuple):
        numbers = [numbers]
    addresses = [_make_address(item) for item in numbers]
    # BUG FIX: mutable default replaced with None sentinel.
    variables = dict(variables) if variables else {}
    variables.setdefault('message_format', 'sms')
    return sendmail(addresses, subject, template, variables, plain_only=True)
import unittest
from unittest import mock

from betfairlightweight import APIClient
from betfairlightweight import resources
from betfairlightweight.endpoints.scores import Scores
from betfairlightweight.exceptions import APIError

from tests.tools import create_mock_json


class ScoresInit(unittest.TestCase):
    def test_base_endpoint_init(self):
        """The endpoint exposes the client, error class, timeout and URI."""
        client = APIClient("username", "password", "app_key")
        scores = Scores(client)
        assert scores.connect_timeout == 3.05
        assert scores._error == APIError
        assert scores.client == client
        assert scores.URI == "ScoresAPING/v1.0/"


class ScoresTest(unittest.TestCase):
    # BUG FIX (whole class): the local fixture variable used to be named
    # ``mock``, shadowing the ``unittest.mock`` module imported above, so
    # ``mock.Mock()`` only worked by accident of MagicMock auto-attributes.
    # The fixture is now ``mock_json`` and ``mock.Mock()`` is the real thing.

    def setUp(self):
        client = APIClient("username", "password", "app_key", "UK")
        self.scores = Scores(client)

    @mock.patch("betfairlightweight.endpoints.scores.Scores.request")
    def test_list_race_details(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_race_details.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.5)

        response = self.scores.list_race_details()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with("ScoresAPING/v1.0/listRaceDetails", {}, None)
        assert isinstance(response[0], resources.RaceDetails)
        assert len(response) == 475

    @mock.patch("betfairlightweight.endpoints.scores.Scores.request")
    def test_list_available_events(self, mock_response):
        mock_json = create_mock_json("tests/resources/availableevents.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)

        response = self.scores.list_available_events()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "ScoresAPING/v1.0/listAvailableEvents", {}, None
        )
        assert all(isinstance(event, resources.AvailableEvent) for event in response)

    @mock.patch("betfairlightweight.endpoints.scores.Scores.request")
    def test_list_scores(self, mock_response):
        mock_json = create_mock_json("tests/resources/score.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        mock_update_keys = mock.Mock()

        response = self.scores.list_scores(mock_update_keys)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "ScoresAPING/v1.0/listScores", {"updateKeys": mock_update_keys}, None
        )
        assert all(isinstance(event, resources.Score) for event in response)

    @mock.patch("betfairlightweight.endpoints.scores.Scores.request")
    def test_list_incidents(self, mock_response):
        mock_json = create_mock_json("tests/resources/incidents.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        mock_update_keys = mock.Mock()

        response = self.scores.list_incidents(mock_update_keys)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "ScoresAPING/v1.0/listIncidents", {"updateKeys": mock_update_keys}, None
        )
        assert all(isinstance(event, resources.Incidents) for event in response)

    def test_url(self):
        assert self.scores.url == "%s%s" % (
            self.scores.client.api_uri,
            "scores/json-rpc/v1",
        )
c_uint32, POINTER(c_uint32), c_uint32) vmb_feature_info_query = _vimba_lib.VmbFeatureInfoQuery vmb_feature_info_query.restype = c_int32 vmb_feature_info_query.argtypes = (c_void_p, c_char_p, POINTER(VmbFeatureInfo), c_uint32) # todo VmbFeatureListAffected # todo VmbFeatureListSelected # todo VmbFeatureAccessQuery vmb_feature_int_get = _vimba_lib.VmbFeatureIntGet vmb_feature_int_get.restype = c_int32 vmb_feature_int_get.argtypes = (c_void_p, c_char_p, POINTER(c_int64)) vmb_feature_int_set = _vimba_lib.VmbFeatureIntSet vmb_feature_int_set.restype = c_int32 vmb_feature_int_set.argtypes = (c_void_p, c_char_p, c_int64) vmb_feature_int_range_query = _vimba_lib.VmbFeatureIntRangeQuery vmb_feature_int_range_query.restype = c_int32 vmb_feature_int_range_query.argtypes = (c_void_p, c_char_p, POINTER(c_int64), POINTER(c_int64)) # todo VmbFeatureIntIncrementQuery vmb_feature_float_get = _vimba_lib.VmbFeatureFloatGet vmb_feature_float_get.restype = c_int32 vmb_feature_float_get.argtypes = (c_void_p, c_char_p, POINTER(c_double)) vmb_feature_float_set = _vimba_lib.VmbFeatureFloatSet vmb_feature_float_set.restype = c_int32 vmb_feature_float_set.argtypes = (c_void_p, c_char_p, c_double) vmb_feature_float_range_query = _vimba_lib.VmbFeatureFloatRangeQuery vmb_feature_float_range_query.restype = c_int32 vmb_feature_float_range_query.argtypes = (c_void_p, c_char_p, POINTER(c_double), POINTER(c_double)) # todo VmbFeatureFloatIncrementQuery vmb_feature_enum_get = _vimba_lib.VmbFeatureEnumGet vmb_feature_enum_get.restype = c_int32 vmb_feature_enum_get.argtypes = (c_void_p, c_char_p, POINTER(c_char_p)) vmb_feature_enum_set = _vimba_lib.VmbFeatureEnumSet vmb_feature_enum_set.restype = c_int32 vmb_feature_enum_set.argtypes = (c_void_p, c_char_p, c_char_p) vmb_feature_enum_range_query = _vimba_lib.VmbFeatureEnumRangeQuery vmb_feature_enum_range_query.restype = c_int32 vmb_feature_enum_range_query.argtypes = (c_void_p, c_char_p, POINTER(c_char_p), c_uint32, POINTER(c_uint32)) # todo 
VmbFeatureEnumIsAvailable # todo VmbFeatureEnumAsInt # todo VmbFeatureEnumAsString # todo VmbFeatureEnumEntryGet vmb_feature_string_get = _vimba_lib.VmbFeatureStringGet vmb_feature_string_get.restype = c_int32 vmb_feature_string_get.argtypes = (c_void_p, c_char_p, c_char_p, c_uint32, POINTER(c_uint32)) vmb_feature_string_set = _vimba_lib.VmbFeatureStringSet vmb_feature_string_set.restype = c_int32 vmb_feature_string_set.argtypes = (c_void_p, c_char_p, c_char_p) # todo VmbFeatureStringMaxlengthQuery vmb_feature_bool_get = _vimba_lib.VmbFeatureBoolGet vmb_feature_bool_get.restype = c_int32 vmb_feature_bool_get.argtypes = (c_void_p, c_char_p, POINTER(c_bool)) vmb_feature_bool_set = _vimba_lib.VmbFeatureBoolSet vmb_feature_bool_set.restype = c_int32 vmb_feature_bool_set.argtypes = (c_void_p, c_char_p, c_bool) vmb_feature_command_run = _vimba_lib.VmbFeatureCommandRun vmb_feature_command_run.restype = c_int32 vmb_feature_command_run.argtypes = (c_void_p, c_char_p) vmb_feature_command_is_done = _vimba_lib.VmbFeatureCommandIsDone vmb_feature_command_is_done.restype = c_int32 vmb_feature_command_is_done.argtypes = (c_void_p, c_char_p, POINTER(c_bool)) # todo VmbFeatureRawGet # todo VmbFeatureRawSet # todo VmbFeatureRawLengthQuery vmb_feature_invalidation_register = _vimba_lib.VmbFeatureInvalidationRegister vmb_feature_invalidation_register.restype = c_int32 vmb_feature_invalidation_register.argtypes = (c_void_p, c_char_p, vmb_feature_invalidation_callback_fun, c_void_p) vmb_feature_invalidation_unregister = _vimba_lib.VmbFeatureInvalidationUnregister vmb_feature_invalidation_unregister.restype = c_int32 vmb_feature_invalidation_unregister.argtypes = (c_void_p, c_char_p, vmb_feature_invalidation_callback_fun) vmb_frame_announce = _vimba_lib.VmbFrameAnnounce vmb_frame_announce.restype = c_int32 vmb_frame_announce.argtypes = (c_void_p, POINTER(VmbFrame), c_uint32) vmb_frame_revoke = _vimba_lib.VmbFrameRevoke vmb_frame_revoke.restype = c_int32 vmb_frame_revoke.argtypes = 
(c_void_p, POINTER(VmbFrame)) vmb_frame_revoke_all = _vimba_lib.VmbFrameRevokeAll vmb_frame_revoke_all.restype = c_int32 vmb_frame_revoke_all.argtypes = (c_void_p,) vmb_capture_start = _vimba_lib.VmbCaptureStart vmb_capture_start.restype = c_int32 vmb_capture_start.argtypes = (c_void_p,) vmb_capture_end = _vimba_lib.VmbCaptureEnd vmb_capture_end.restype = c_int32 vmb_capture_end.argtypes = (c_void_p,) vmb_capture_frame_queue = _vimba_lib.VmbCaptureFrameQueue vmb_capture_frame_queue.restype = c_int32 vmb_capture_frame_queue.argtypes = (c_void_p, POINTER(VmbFrame), c_void_p) vmb_capture_frame_wait = _vimba_lib.VmbCaptureFrameWait vmb_capture_frame_wait.restype = c_int32 vmb_capture_frame_wait.argtypes = (c_void_p, POINTER(VmbFrame), c_uint32) vmb_capture_queue_flush = _vimba_lib.VmbCap
tureQueueFlush vmb_capture_queue_flush.restype = c_int32 vmb_capture_queue_flush.argtypes = (c_void_p,) vmb_interfaces_list = _vimba_lib.VmbInterfacesList vmb_interfaces_list.restype = c_int32 vmb_interfaces_list.argtypes = (POINTER(VmbInterfaceInfo), c_uint32,
POINTER(c_uint32), c_uint32) vmb_interface_open = _vimba_lib.VmbInterfaceOpen vmb_interface_open.restype = c_int32 vmb_interface_open.argtypes = (c_char_p, c_void_p) vmb_interface_close = _vimba_lib.VmbInterfaceClose vmb_interface_close.restype = c_int32 vmb_interface_close.argtypes = (c_void_p,) vmb_ancillary_data_open = _vimba_lib.VmbAncillaryDataOpen vmb_interface_close.restype = c_int32 vmb_interface_close.argtypes = (POINTER(VmbFrame), POINTER(c_void_p)) vmb_ancillary_data_close = _vimba_lib.VmbAncillaryDataClose vmb_interface_close.restype = c_int32 vmb_interface_close.argtypes = (c_void_p, ) # todo VmbMemoryRead # todo VmbMemoryWrite vmb_registers_read = _vimba_lib.VmbRegistersRead vmb_registers_read.restype = c_int32 vmb_registers_read.argtypes = (c_void_p, c_uint32, POINTER(c_uint64), POINTER(c_uint64), POINTER(c_uint32)) vmb_registers_write = _vimba_lib.VmbRegistersWrite vmb_registers_write.restype = c_int32 vmb_regist
.contrib.auth.models import User # app imports from oweb.tests import OWebViewTests from oweb.models.account import Account from oweb.models.research import Research from oweb.models.ship import Ship from oweb.models.planet import Planet, Moon from oweb.models.building import Building from oweb.models.defense import Defense @override_settings(AUTH_USER_MODEL='auth.User') class OWebViewsItemUpdateTests(OWebViewTests): def test_login_required(self): """Unauthenticated users should be redirected to oweb:app_login""" r = self.client.get(reverse('oweb:item_update')) self.assertRedirects(r, reverse('oweb:app_login'), status_code=302, target_status_code=200) def test_account_owner(self): """Can somebody update an item he doesn't posess?""" u = User.objects.get(username='test01') acc = Account.objects.get(owner=u) res_pre = Research.objects.filter(account=acc).first() self.client.login(username='test02', password='foo') r = self.client.post(reverse('oweb:item_update'), data={ 'item_type': 'research', 'item_id': res_pre.id, 'item_level': res_pre.level + 1 }, HTTP_REFERER=reverse('oweb:account_research', args=[acc.id])) self.assertEqual(r.status_code, 403) self.assertTemplateUsed(r, 'oweb/403.html') def test_no_post(self): """What if no POST data is supplied?""" self.client.login(username='test01', password='foo') r = self.client.post(reverse('oweb:item_update')) self.assertEqual(r.status_code, 500) self.assertTemplateUsed(r, 'oweb/500.html') def test_research_update(self): """Does ``item_update()`` correctly update researches? 
Basically the Django ORM can be trusted, but since there is some logic involved in determine the correct field to update, this test is included """ u = User.objects.get(username='test01') acc = Account.objects.get(owner=u) res_pre = Research.objects.filter(account=acc).first() self.client.login(username='test01', password='foo') r = self.client.post(reverse('oweb:item_update'), data={ 'item_type': 'research', 'item_id': res_pre.id, 'item_level': res_pre.level + 1 }, HTTP_REFERER=reverse('oweb:account_research', args=[acc.id])) self.assertRedirects(r, reverse('oweb:account_research', args=[acc.id]), status_code=302, target_status_code=200) res_post = Research.objects.get(pk=res_pre.pk) self.assertEqual(res_pre.level + 1, res_post.level) def test_ship_update(self): """Does ``item_update()`` correctly update ships? Basically the Django ORM can be trusted, but since there is some logic involved in determine the correct field to update, this test is included """ u = User.objects.get(username='test01') acc = Account.objects.get(owner=u) ship_pre = Ship.objects.filter(account=acc).first() self.client.login(username='test01', password='foo') r = self.client.post(reverse('oweb:item_update'), data={ 'item_type': 'ship', 'item_id': ship_pre.id, 'item_level': ship_pre.count + 1338 }, HTTP_REFERER=reverse('oweb:account_ships', args=[acc.id])) self.assertRedirects(r, reverse('oweb:account_ships', args=[acc.id]), status_code=302, target_status_code=200) ship_post = Ship.objects.get(pk=ship_pre.pk) self.assertEqual(ship_pre.count + 1338, ship_post.count) def test_building_update(self): """Does ``item_update()`` correctly update buildings? 
Basically the Django ORM can be trusted, but since there is some logic involved in determine the correct field to update, this test is included """ u = User.objects.get(username='test01') acc = Account.objects.get(owner=u) p = Planet.objects.filter(account=acc).first() b_pre = Building.objects.filter(astro_object=p).first() self.client.login(username='test01', password='foo') r = self.client.post(reverse('oweb:item_update'), data={ 'item_type': 'building', 'item_id': b_pre.id, 'item_level': b_pre.level - 1 }, HTTP_REFERER=reverse('oweb:planet_buildings', args=[p.id])) self.assertRedirects(r, reverse('oweb:planet_buildings', args=[p.id]), status_code=302, target_status_code=200) b_post = Building.objects.get(pk=b_pre.pk) self.assertEqual(b_pre.level - 1, b_post.level) def test_moon_building_update(self): """Does ``item_update()`` correctly update moon buildings? Basically the Django ORM can be trusted, but since there is some logic involved in determine the correct field to update, this test is included """ u = User.objects.get(username='test01') acc = Account.objects.get(owner=u) p = Planet.objects.filter(account=acc).values_list('id', flat=True) m = Moon.objects.filter(planet__in=p).first() b_pre = Building.objects.filter(astro_object=m).first() self.client.login(username='test01', password='foo') r = self.client.post(reverse('oweb:item_update'), data={ 'item_type': 'moon_building', 'item_id': b_pre.id, 'item_level': b_pre.level + 2 }, HTTP_REFERER=reverse('oweb:moon_buildings', args=[m.id])) self.assertRedirects(r, reverse('oweb:moon_buildings', args=[m.id]), status_code=302, target_status_code=200) b_post = Building.objects.get(pk=b_pre.pk) self.assertEqual(b_pre.level + 2, b_post.level) def test_defense_update(self): """Does ``item_update()`` correctly update defense devices? 
Basically the Django ORM can be trusted, but since there is some logic involved in determine the correct field to update, this test is included """ u = User.objects.get(username='test01') acc = Account.objects.get(owner=u) p = Planet.objects.filter(account=acc).first() d_pre = Defense.objects.filter(astro_object=p).first() self.client.login(username='test01', password='foo') r = self.client.post(reverse('oweb:item_update'), data={
'item_type': 'defense', 'item_id': d_pre.id, 'item_level': d_pre.count - 1 }, HTTP_REFERER=reverse('oweb:planet_defense', args=[p.id])) self.assertRedirects(r, reverse('oweb:planet_defense', args=[p.
id]), status_code=302, target_status_code=200) d_post = Defense.objects.get(pk=d_pre.pk) self.assertEqual(d_pre.count - 1, d_post.count) def test_moon_defense_update(
"""Emoji config functions""" import json import os import re from logging import getLogger from card_py_bot import BASEDIR __log__ = getLogger(__name__) # Path where the emoji_config.json will be stored EMOJI_CONFIG_PATH = os.path.join(BASEDIR, "emoji_config.json") # Dictionary that is keyed by the Discord short emoji id and the web id # of each emoji MANA_ID_DICT = { ":15m:": "15", ":13m:": "13", ":wbm:": "White or Black", ":Energy:": "Energy", ":10m:": "10", ":7m:": "7", ":Untap:": "Untap", ":brm:": "Black or Red", ":bpm:": "Phyrexian Black", ":rgm:": "Red or Green", ":9m:": "9", ":8m:": "8", ":1m:": "1", ":gum:": "Green or Blue", ":2wm:": "Two or White", ":wpm:": "Phyrexian White", ":4m:": "4", ":12m:": "12", ":rm:": "Red", ":bm:": "Black", ":wum:": "White or Blue", ":rwm:": "Red or White", ":2bm:": "Two or Blue", ":gpm:": "Phyrexian Green", ":gm:": "Green", ":14m:": "14", ":bgm:": "Black or Green", ":3m:": "3", ":5m:": "5", ":Tap:": "Tap", ":1000000m:": "1000000", ":upm:": "Phyrexian Blue", ":2gm:": "Two or Green", ":rpm:": "Phyrexian Red", ":2m:": "2
", ":6m:": "6", ":2rm:": "Two or Red", ":gwm:": "Green or White", ":wm:": "White", ":um:": "Blue", ":16m:": "16", ":urm:": "Blue or Red", ":ubm:": "Blue or Black", ":11m:": "11" } def get_emoji_config_string() -> str: """Return a string of all the mana ids (in order) for config setup in discord""" config_string = "?save_setup\n" for short_emoji_id in MANA_ID_DICT: config_string += ("\\\\{}\n".format(short_emoji_id)) return config_strin
g EMOJI_CONFIG_STRING = get_emoji_config_string() def create_config_json() -> dict: """Create and save a blank default config json also return the dict that created the json""" emoji_config = dict() for short_emoji_id in MANA_ID_DICT: emoji_config[short_emoji_id] = { "web_id": MANA_ID_DICT[short_emoji_id], "discord_raw_id": None } with open(EMOJI_CONFIG_PATH, "w") as file: json.dump(emoji_config, file, indent=4) return emoji_config def load_mana_dict() -> dict: """Load the emoji config into a mana dict""" try: with open(EMOJI_CONFIG_PATH, "r") as file: emoji_config = json.load(file) except FileNotFoundError: emoji_config = create_config_json() mana_dict = dict() for short_emoji_id in emoji_config: emoji = emoji_config[short_emoji_id] if not emoji["discord_raw_id"]: mana_dict[emoji["web_id"]] = "ERROR: NO ID Configured " \ "for {}".format(emoji["web_id"]) else: mana_dict[emoji["web_id"]] = emoji["discord_raw_id"] __log__.debug("WOTC Magic mana to Discord emoji " "dictionary constructed: {}".format(mana_dict)) return mana_dict MANA_DICT = load_mana_dict() def parse_raw_emoji_id(raw_emoji_id: str) -> str: """Parse a raw emoji id to short emoji id""" m = re.search(":[A-Za-z0-9]*:", raw_emoji_id) return m.group(0) def save_emoji_config(raw_emoji_ids): """Save the emoji mana config""" try: with open(EMOJI_CONFIG_PATH, "r") as file: emoji_config = json.load(file) except FileNotFoundError: # if no mana config file is found initialize a new one emoji_config = create_config_json() for raw_emoji_id in raw_emoji_ids: short_emoji_id = parse_raw_emoji_id(raw_emoji_id) if short_emoji_id in MANA_ID_DICT: emoji_config[short_emoji_id] = { "web_id": MANA_ID_DICT[short_emoji_id], "discord_raw_id": raw_emoji_id } else: raise KeyError("Short Discord emoji id is unknown: " "{}".format(short_emoji_id)) with open(EMOJI_CONFIG_PATH, "w") as file: json.dump(emoji_config, file, indent=4) # update MANA_DICT global global MANA_DICT MANA_DICT = load_mana_dict()
exceeded is: " + limit_reason]} raise XeroRateLimitExceeded(response, payload) elif response.status_code == 500: raise XeroInternalError(response) elif response.status_code == 501: raise XeroNotImplemented(response) elif response.status_code == 503: # Two 503 responses are possible. Rate limit errors # return encoded content; offline errors don't. # If you parse the response text and there's nothing # encoded, it must be a not-available error. payload = parse_qs(response.text) if payload: raise XeroRateLimitExceeded(response, payload) else: raise XeroNotAvailable(response) else: raise XeroExceptionUnknown(response) return wrapper def _get(self, id, headers=None, params=None): uri = "/".join([self.base_url, self.name, id]) uri_params = self.extra_params.copy() uri_params.update(params if params else {}) return uri, uri_params, "get", None, headers, True def _get_history(self, id): uri = "/".join([self.base_url, self.name, id, "history"]) + "/" return uri, {}, "get", None, None, False def _get_attachments(self, id): """Retrieve a list of attachments associated with this Xero object.""" uri = "/".join([self.base_url, self.name, id, "Attachments"]) + "/" return uri, {}, "get", None, None, False def _get_attachment_data(self, id, filename): """ Retrieve the contents of a specific attachment (identified by filename). """ uri = "/".join([self.base_url, self.name, id, "Attachments", filename]) return uri, {}, "get", None, None, False def get_attachment(self, id, filename, file): """ Retrieve the contents of a specific attachment (identified by filename). Writes data to file object, returns length of data written. """ data = self.get_attachment_data(id, filename) file.write(data) return len(data) def _email(self, id): uri = "/".join([self.base_url, self.name, id, "Email"]) return uri, {}, "post", None, None, True def _online_invoice(self, id): uri = "/".join([self.base_url, self.name, id, "OnlineInvoice"]) return uri, {}, "get", None, None, True
def save_or_put(self, data, method="post", headers=None, summarize_errors=True): uri = "/".join([self
.base_url, self.name]) body = self._prepare_data_for_save(data) params = self.extra_params.copy() if not summarize_errors: params["summarizeErrors"] = "false" return uri, params, method, body, headers, False def _save(self, data): return self.save_or_put(data, method="post") def _put(self, data, summarize_errors=True): return self.save_or_put(data, method="put", summarize_errors=summarize_errors) def _delete(self, id): uri = "/".join([self.base_url, self.name, id]) return uri, {}, "delete", None, None, False def _put_history_data(self, id, details): """Add a history note to the Xero object.""" uri = "/".join([self.base_url, self.name, id, "history"]) details_data = {"Details": details} root_elm = Element("HistoryRecord") self.dict_to_xml(root_elm, details_data) data = six.u(tostring(root_elm)) return uri, {}, "put", data, None, False def _put_history(self, id, details): """Upload a history note to the Xero object.""" return self._put_history_data(id, details) def _put_attachment_data( self, id, filename, data, content_type, include_online=False ): """Upload an attachment to the Xero object.""" uri = "/".join([self.base_url, self.name, id, "Attachments", filename]) params = {"IncludeOnline": "true"} if include_online else {} headers = {"Content-Type": content_type, "Content-Length": str(len(data))} return uri, params, "put", data, headers, False def put_attachment(self, id, filename, file, content_type, include_online=False): """Upload an attachment to the Xero object (from file object).""" return self.put_attachment_data( id, filename, file.read(), content_type, include_online=include_online ) def prepare_filtering_date(self, val): if isinstance(val, datetime): val = val.strftime("%a, %d %b %Y %H:%M:%S GMT") else: val = '"%s"' % val return {"If-Modified-Since": val} def _filter(self, **kwargs): params = self.extra_params.copy() headers = None uri = "/".join([self.base_url, self.name]) if kwargs: if "since" in kwargs: val = kwargs["since"] headers = 
self.prepare_filtering_date(val) del kwargs["since"] # Accept IDs parameter for Invoices and Contacts endpoints if "IDs" in kwargs: params["IDs"] = ",".join(kwargs["IDs"]) del kwargs["IDs"] def get_filter_params(key, value): last_key = key.split("_")[-1] if last_key.endswith("ID"): return 'Guid("%s")' % six.text_type(value) if key in self.BOOLEAN_FIELDS: return "true" if value else "false" elif key in self.DATE_FIELDS: return "DateTime(%s,%s,%s)" % (value.year, value.month, value.day) elif key in self.DATETIME_FIELDS: return value.isoformat() else: return '"%s"' % six.text_type(value) def generate_param(key, value): parts = key.split("__") field = key.replace("_", ".") fmt = "%s==%s" if len(parts) == 2: # support filters: # Name__Contains=John becomes Name.Contains("John") if parts[1] in ["contains", "startswith", "endswith"]: field = parts[0] fmt = "".join(["%s.", parts[1], "(%s)"]) elif parts[1] in ["tolower", "toupper"]: field = parts[0] fmt = "".join(["%s.", parts[1], "()==%s"]) elif parts[1] in self.OPERATOR_MAPPINGS: field = parts[0] key = field fmt = "%s" + self.OPERATOR_MAPPINGS[parts[1]] + "%s" elif parts[1] in ["isnull"]: sign = "=" if value else "!" 
return "%s%s=null" % (parts[0], sign) field = field.replace("_", ".") return fmt % (field, get_filter_params(key, value)) # Move any known parameter names to the query string KNOWN_PARAMETERS = ["order", "offset", "page", "includeArchived"] for param in KNOWN_PARAMETERS: if param in kwargs: params[param] = kwargs.pop(param) filter_params = [] if "raw" in kwargs: raw = kwargs.pop("raw") filter_params.append(raw) # Treat any remaining arguments as filter predicates # Xero will break if you search without a check for null in the first position: # http://developer.xero.com/documentation/getting-started/http-requests-and-responses/#title3 sortedkwargs = sorted( six.iteritems(kwargs), key=lambda item: -1 if "isnull" in item[0] else 0 ) for key, value in sortedkwargs: filter_params.append(generate_param(key, value)) if filter_params: params["where"] = "&&".join(filter_params) return uri, params, "get", None, headers, False def _all(self): uri = "/".join([self.base_url, self.name])
def pbj_while(slices):
    """Narrate making peanut-butter-and-jelly sandwiches, two slices each.

    Args:
        slices: number of bread slices available (an int).

    Returns:
        A string with one line per sandwich made. Each non-final sandwich
        reports how many more sandwiches the remaining bread allows; the
        final sandwich gets a closing message. Returns '' when slices <= 0.
    """
    output = ''
    while slices > 0:
        # Each sandwich consumes two slices.
        slices = slices - 2
        if slices >= 2:
            # BUGFIX for Python 3: use floor division so the count renders
            # as an int ('1'), not a float ('1.0') as plain `/` would give.
            output += 'I am making a sandwich! I have bread for {0} more sandwiches.\n'.format(slices // 2)
        elif slices < 2:
            output += 'I am making a sandwich! But, this is my last sandwich.'
    return output


if __name__ == '__main__':
    # Guarded so importing this module does not block on stdin.
    # (Original was Python 2: `print` statement and `raw_input`.)
    print(pbj_while(int(input('How many slices of bread do you have? '))))
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright 2007 Troy Melhase
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <troy@gci.net>

# Qt item-model and widget classes for displaying a trading Session as a
# tree (PyQt4 / Python 2 code).

import sys
from PyQt4.QtCore import QVariant
from PyQt4.QtGui import (QApplication, QFrame, QIcon, QStandardItem,
                         QStandardItemModel)
# NOTE(review): BasicHandler and Signals are imported but not referenced in
# this module — presumably used by code elsewhere or leftover; confirm.
from profit.lib import BasicHandler
from profit.lib import Signals, tickerIdRole
from profit.lib.widgets.ui_breadfan_train import Ui_BreadFanTrainTree


class SessionTreeItem(QStandardItem):
    """ Session tree item.

    A non-editable QStandardItem whose icon is looked up from its display
    text and whose row height is pinned to 20 pixels.
    """
    # Maps a session section name to the basename of its icon resource.
    iconNameMap = {
        'account':'identity',
        'connection':'server',
        'messages':'view_text',
        'orders':'klipper_dock',
        'portfolio':'bookcase',
        'strategy':'services',
        'tickers':'view_detailed',
    }

    def __init__(self, text):
        """ Constructor.

        @param text value for item display
        """
        QStandardItem.__init__(self, text)
        self.setEditable(False)
        self.setIcon(self.lookupIcon(text))
        # Force a fixed 20px row height regardless of the default hint.
        hint = self.sizeHint()
        hint.setHeight(20)
        self.setSizeHint(hint)

    def lookupIcon(self, key):
        """ Locates icon for given key.

        Falls back to the platform's generic directory icon when the key is
        not present in iconNameMap.

        @param key item text
        @return QIcon instance
        """
        try:
            name = self.iconNameMap[key]
            icon = QIcon(':images/icons/%s.png' % name)
        except (KeyError, ):
            style = QApplication.style()
            icon = style.standardIcon(style.SP_DirIcon)
        return icon


class SessionTreeTickerItem(SessionTreeItem):
    """ Specalized session tree item for ticker symbols. """

    def lookupIcon(self, key):
        """ Locates icon for given key.

        Unlike the base class, this maps the lower-cased ticker symbol
        directly to a resource path with no fallback icon.

        @param key ticker symbol
        @return QIcon instance
        """
        return QIcon(':images/tickers/%s.png' % key.lower())

    def setTickerId(self, tickerId):
        """ Sets item data for ticker id.

        Stores the id under the application-wide tickerIdRole so views can
        recover it later.

        @param tickerId id for ticker as integer
        @return None
        """
        self.setData(QVariant(tickerId), tickerIdRole)


class SessionTreeModel(QStandardItemModel):
    def __init__(self, session, parent=None):
        """ Constructor.

        Builds a two-level tree: one row per session section, with one child
        row per value in that section. Ticker entries additionally carry
        their ticker id.

        @param session Session instance
        @param parent ancestor object
        """
        # NOTE(review): `parent` is accepted but not forwarded to the base
        # constructor — confirm whether that is intended.
        QStandardItemModel.__init__(self)
        self.session = session
        root = self.invisibleRootItem()
        for key, values in session.items():
            item = SessionTreeItem(key)
            root.appendRow(item)
            for value in values:
                if key == 'tickers':
                    subitem = SessionTreeTickerItem(value)
                    # assumes `values` maps symbol -> ticker id here — TODO confirm
                    subitem.setTickerId(values[value])
                else:
                    subitem = SessionTreeItem(value)
                item.appendRow(subitem)


class BreadFanTrainTree(QFrame, Ui_BreadFanTrainTree):
    """ Tree view of a Session object. """

    def __init__(self, parent=None):
        """ Constructor.

        @param parent ancestor of this widget
        """
        QFrame.__init__(self, parent)
        self.setupUi(self)
        # NOTE(review): `connect` is bound but never used in this method —
        # presumably leftover from removed signal wiring; confirm.
        connect = self.connect

    def setupBasic(self, sender, newSignal):
        # Route `newSignal` emissions from `sender` to on_newBreadNet.
        self.connect(sender, newSignal, self.on_newBreadNet)

    def on_newBreadNet(self, net):
        # Slot invoked with a freshly created/loaded network object;
        # refreshes the training-algorithm combo box from it.
        print '### signaled network', net
        ## fill and trainAlgorithm combo
        algorNames = [name for name in net.train_meta]
        algorCombo = self.trainAlgorithm
        algorCombo.clear()
        algorCombo.addItems(algorNames)
        # Pre-select the algorithm the network was last trained with, if any.
        if net.trained in algorNames:
            algorCombo.setCurrentIndex(algorNames.index(net.trained))
        ## fill params
        ## don't touch fill training data sources
        ## set training progress bar value
"""Unit tests for the IgorPro file reader (neo.io.igorproio)."""

import unittest

# Probe for the optional `igor` dependency; the test case below is skipped
# when it is not installed.
try:
    import igor
except ImportError:
    HAVE_IGOR = False
else:
    HAVE_IGOR = True

from neo.io.igorproio import IgorIO
from neo.test.iotest.common_io_test import BaseTestIO


@unittest.skipUnless(HAVE_IGOR, "requires igor")
class TestIgorIO(BaseTestIO, unittest.TestCase):
    """Runs the common IO test battery against IgorIO."""

    ioclass = IgorIO
    entities_to_download = ['igor']
    entities_to_test = [
        'igor/mac-version2.ibw',
        'igor/win-version2.ibw',
    ]


if __name__ == "__main__":
    unittest.main()
# "Copyright (c) 2000-2003 The Regents of the University of California. # All rights reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose, without fee, and without written agreement # is hereby granted, provided that the above copyright notice, the following # two paragraphs and the author appear in all copies of this software. # # IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR # DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT # OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY # OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS # ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO # PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS." 
#
# @author Kamin Whitehouse
#

import sys, math, Queue
import pytos.tools.Drain as Drain
import pytos.Comm as Comm
from struct import *


class Straw( object ) :
    """Client for the TinyOS StrawM bulk-transfer module.

    Pulls large byte ranges out of a remote mote over the Drain routing
    layer, re-requesting missing chunks until every expected message has
    arrived. (Python 2 code: `print` statements, the `Queue` module.)
    """

    def __init__( self , app ) :
        """Bind to a pytos application image.

        @param app pytos application object; must have StrawM compiled in
        @raise Exception if StrawM is absent from the application
        """
        self.app=app
        # Estimated one-hop link latency in seconds; scales both the mote's
        # send period and our receive timeout below.
        self.linkLatency = .1
        if "StrawM" not in app._moduleNames:
            raise Exception("The StrawM module is not compiled into the application.")

    def read(self, nodeID, strawID, start, size):
        """Read `size` bytes from straw `strawID` on mote `nodeID`.

        Blocks until all data has been received, printing progress to
        stdout. Returns the data as a packed byte string.

        NOTE(review): the `start` parameter is never used — requests always
        begin at offset 0; confirm whether an offset was intended.

        @param nodeID address of the mote to read from
        @param strawID identifier of the straw (data channel) on the mote
        @param start requested starting offset (currently ignored)
        @param size number of bytes to read
        @return string of `size` packed bytes
        """
        data=[] #store the data in here
        # Ping until the mote answers; also yields hop count for timeouts.
        response=None
        while response==None:
            print "pinging node %d" % nodeID
            response = self.app.StrawM.msgDataSize.peek(address=nodeID, timeout=3)
        #find num bytes/msg
        dataSize = response[0].value['value'].value
        # Hop distance = max TTL minus remaining TTL on the reply.
        numHops = self.app.enums.DRAIN_MAX_TTL - response[0].getParentMsg(self.app.enums.AM_DRAINMSG).ttl
        # Ask the mote to pace its sends to the round-trip path latency (ms).
        self.app.StrawM.sendPeriod.poke(self.linkLatency * numHops * 1000, address=nodeID, responseDesired=False)
        # One flag per expected message: 0 = missing, 1 = received.
        msgs = [0 for i in range(int(math.ceil(size/float(dataSize))))] #keep track of straw msgs in here
        msgQueue = Comm.MessageQueue(10)
        Drain.getDrainObject(self.app)[0].register(self.app.msgs.StrawMsg, msgQueue)
        print "Sucking %d bytes from node %d through Straw %d:" % (size, nodeID, strawID)
        # Outer loop: keep requesting the first contiguous run of missing
        # messages until every flag in `msgs` is set.
        while msgs.count(1) < len(msgs):
            subStart = msgs.index(0) * dataSize
            try:
                # Request up to the next already-received message.
                # NOTE(review): `msgs.index(1, subStart)` searches from a
                # BYTE offset, not a message index — looks like it should be
                # msgs.index(1, msgs.index(0)); confirm against StrawM usage.
                subSize = min(size, (msgs.index(1, subStart)*dataSize - subStart) )
            except:
                # No later message received yet: request through end of data.
                subSize = size - subStart
            response = []
            #while response == []:
            self.app.StrawM.read(strawID, subStart, subSize, address=nodeID)
            sys.stdout.write("%d-%d: " % (subStart, subStart+subSize))
            numPrintedChars=0
            # Inner loop: drain replies until the queue stays empty for
            # roughly four path round-trips, then re-assess what is missing.
            while True :
                try:
                    (addr, msg) = msgQueue.get(block=True, timeout=self.linkLatency * numHops * 4)
                    if msg.parentMsg.source == nodeID :#and msgs[msg.startIndex//dataSize] == 0:
                        msgs[msg.startIndex//dataSize] = 1
                        # Splice the payload into place. NOTE(review): list
                        # slice assignment inserts the full payload whatever
                        # the slice bounds, so the `-1` end index is suspect
                        # but may be harmless — confirm.
                        data[msg.startIndex:msg.startIndex+dataSize-1] = msg.data[:]
                        # Rewrite the "received/total" progress counter in
                        # place using backspaces.
                        strg = ""
                        for i in range(numPrintedChars) :
                            strg += "\b"
                        strg += "%s/%s" % (msgs.count(1),len(msgs))
                        sys.stdout.write(strg)
                        sys.stdout.flush()
                        numPrintedChars = len(strg)-numPrintedChars
                except Queue.Empty:
                    print ""
                    break
        #now, pack the data so that it can be easily unpacked
        for i in range(len(data)):
            data[i] = pack('B',data[i])
        return ''.join(data[0:size])
"""Small template helper functions."""

from webhelpers import *
from datetime import datetime


def time_ago(x):
    """Return a human-readable phrase for how long ago *x* was.

    Delegates to ``date.distance_of_time_in_words`` (``date`` is expected
    to come from the ``webhelpers`` star import), comparing against the
    current UTC time.
    """
    now = datetime.utcnow()
    return date.distance_of_time_in_words(x, now)


def iff(a, b, c):
    """Ternary helper: return *b* when *a* is truthy, otherwise *c*."""
    return b if a else c
iano'), (70, 'BA', 'BIH', 387, 'Bosnia y Herzegovina', 'Europa', '', 'BAM', 'Marco convertible de Bosnia-Herzegovina'), (72, 'BW', 'BWA', 267, 'Botsuana', 'África', '', 'BWP', 'Pula de Botsuana'), (74, 'BV', 'BVT', 0, 'Isla Bouvet', '', '', '', ''), (76, 'BR', 'BRA', 55, 'Brasil', 'América', 'América del Sur', 'BRL', 'Real brasileño'), (84, 'BZ', 'BLZ', 501, 'Belice', 'América', 'América Central', 'BZD', 'Dólar de Belice'), (86, 'IO', 'IOT', 0, 'Territorio Británico del Océano Índico', '', '', '', ''), (90, 'SB', 'SLB', 677, 'Islas Salomón', 'Oceanía', '', 'SBD', 'Dólar de las Islas Salomón'), (92, 'VG', 'VGB', 1284, 'Islas Vírgenes Británicas', 'América', 'El Caribe', '', ''), (96, 'BN', 'BRN', 673, 'Brunéi', 'Asia', '', 'BND', 'Dólar de Brunéi'), (100, 'BG', 'BGR', 359, 'Bulgaria', 'Europa', '', 'BGN', 'Lev búlgaro'), (104, 'MM', 'MMR', 95, 'Myanmar', 'Asia', '', 'MMK', 'Kyat birmano'), (108, 'BI', 'BDI', 257, 'Burundi', 'África', '', 'BIF', 'Franco burundés'), (112, 'BY', 'BLR', 375, 'Bielorrusia', 'Europa', '', 'BYR', 'Rublo bielorruso'), (116, 'KH', 'KHM', 855, 'Camboya', 'Asia', '', 'KHR', 'Riel camboyano'), (120, 'CM', 'CMR', 237, 'Camerún', 'África', '', '', ''), (124, 'CA', 'CAN', 1, 'Canadá', 'América', 'América del Norte', 'CAD', 'Dólar canadiense'), (132, 'CV', 'CPV', 238, 'Cabo Verde', 'África', '', 'CVE', 'Escudo caboverdiano'), (136, 'KY', 'CYM', 1345, 'Islas Caimán', 'América', 'El Caribe', 'KYD', 'Dólar caimano de Islas Caimán'), (140, 'CF', 'CAF', 236, 'República Centroafricana', 'África', '', '', ''), (144, 'LK', 'LKA', 94, 'Sri Lanka', 'Asia', '', 'LKR', 'Rupia de Sri Lanka'), (148, 'TD', 'TCD', 235, 'Chad', 'África', '', '', ''), (152, 'CL', 'CHL', 56, 'Chile', 'América', 'América del Sur', 'CLP', 'Peso chileno'), (156, 'CN', 'CHN', 86, 'China', 'Asia', '', 'CNY', 'Yuan Renminbi de China'), (158, 'TW', 'TWN', 886, 'Taiwán', 'Asia', '', 'TWD', 'Dólar taiwanés'), (162, 'CX', 'CXR', 61, 'Isla de Navidad', 'Oceanía', '', '', ''), (166, 'CC', 'CCK', 
61, 'Islas Cocos', 'Óceanía', '', '', ''), (170, 'CO', 'COL', 57, 'Colombia', 'América', 'América del Sur', 'COP', 'Peso colombiano'), (174, 'KM', 'COM', 269, 'Comoras', 'África', '', 'KMF', 'Franco comoriano de Comoras'), (175, 'YT', 'MYT', 262, 'Mayotte', 'África', '', '', ''), (178, 'CG', 'COG', 242, 'Congo', 'África', '', '', ''), (180, 'CD', 'COD', 243, 'República Democrática del Congo', 'África', '', 'CDF', 'Franco congoleño'), (184, 'CK', 'COK', 682, 'Islas Cook', 'Oceanía', '', '', ''), (188, 'CR', 'CRI', 506, 'Costa Rica', 'América', 'América Central', 'CRC', 'Colón costarricense'), (191, 'HR', 'HRV', 385, 'Croacia', 'Europa', '', 'HRK', 'Kuna croata'), (192, 'CU', 'CUB', 53, 'Cuba', 'América', 'El Caribe', 'CUP', 'Peso cubano'), (196, 'CY', 'CYP', 357, 'Chipre', 'Europa', '', 'CYP', 'Libra chipriota'), (203, 'CZ', 'CZE', 420, 'República Checa', 'Europa', '', 'CZK', 'Koruna checa'), (204, 'BJ', 'BEN', 229, 'Benín', 'África', '', '', ''), (208, 'DK', 'DNK', 45, 'Dinamarca', 'Europa', '', 'DKK', 'Corona danesa'), (212, 'DM', 'DMA', 1767, 'Dominica', 'América', 'El Caribe', '', ''), (214, 'DO', 'DOM', 1809, 'República Dominicana', 'América', 'El Caribe', 'DOP', 'Peso dominicano'), (218, 'EC', 'ECU',
593, 'Ecuador', 'América', 'América del Sur', '',
''), (222, 'SV', 'SLV', 503, 'El Salvador', 'América', 'América Central', 'SVC', 'Colón salvadoreño'), (226, 'GQ', 'GNQ', 240, 'Guinea Ecuatorial', 'África', '', '', ''), (231, 'ET', 'ETH', 251, 'Etiopía', 'África', '', 'ETB', 'Birr etíope'), (232, 'ER', 'ERI', 291, 'Eritrea', 'África', '', 'ERN', 'Nakfa eritreo'), (233, 'EE', 'EST', 372, 'Estonia', 'Europa', '', 'EEK', 'Corona estonia'), (234, 'FO', 'FRO', 298, 'Islas Feroe', 'Europa', '', '', ''), (238, 'FK', 'FLK', 500, 'Islas Malvinas', 'América', 'América del Sur', 'FKP', 'Libra malvinense'), (239, 'GS', 'SGS', 0, 'Islas Georgias del Sur y Sandwich del Sur', 'América', 'América del Sur', '', ''), (242, 'FJ', 'FJI', 679, 'Fiyi', 'Oceanía', '', 'FJD', 'Dólar fijiano'), (246, 'FI', 'FIN', 358, 'Finlandia', 'Europa', '', 'EUR', 'Euro'), (248, 'AX', 'ALA', 0, 'Islas Gland', 'Europa', '', '', ''), (250, 'FR', 'FRA', 33, 'Francia', 'Europa', '', 'EUR', 'Euro'), (254, 'GF', 'GUF', 0, 'Guayana Francesa', 'América', 'América del Sur', '', ''), (258, 'PF', 'PYF', 689, 'Polinesia Francesa', 'Oceanía', '', '', ''), (260, 'TF', 'ATF', 0, 'Territorios Australes Franceses', '', '', '', ''), (262, 'DJ', 'DJI', 253, 'Yibuti', 'África', '', 'DJF', 'Franco yibutiano'), (266, 'GA', 'GAB', 241, 'Gabón', 'África', '', '', ''), (268, 'GE', 'GEO', 995, 'Georgia', 'Europa', '', 'GEL', 'Lari georgiano'), (270, 'GM', 'GMB', 220, 'Gambia', 'África', '', 'GMD', 'Dalasi gambiano'), (275, 'PS', 'PSE', 0, 'Palestina', 'Asia', '', '', ''), (276, 'DE', 'DEU', 49, 'Alemania', 'Europa', '', 'EUR', 'Euro'), (288, 'GH', 'GHA', 233, 'Ghana', 'África', '', 'GHC', 'Cedi ghanés'), (292, 'GI', 'GIB', 350, 'Gibraltar', 'Europa', '', 'GIP', 'Libra de Gibraltar'), (296, 'KI', 'KIR', 686, 'Kiribati', 'Oceanía', '', '', ''), (300, 'GR', 'GRC', 30, 'Grecia', 'Europa', '', 'EUR', 'Euro'), (304, 'GL', 'GRL', 299, 'Groenlandia', 'América', 'América del Norte', '', ''), (308, 'GD', 'GRD', 1473, 'Granada', 'América', 'El Caribe', '', ''), (312, 'GP', 'GLP', 0, 
'Guadalupe', 'América', 'El Caribe', '', ''), (316, 'GU', 'GUM', 1671, 'Guam', 'Oceanía', '', '', ''), (320, 'GT', 'GTM', 502, 'Guatemala', 'América', 'América Central', 'GTQ', 'Quetzal guatemalteco'), (324, 'GN', 'GIN', 224, 'Guinea', 'África', '', 'GNF', 'Franco guineano'), (328, 'GY', 'GUY', 592, 'Guyana', 'América', 'América del Sur', 'GYD', 'Dólar guyanés'), (332, 'HT', 'HTI', 509, 'Haití', 'América', 'El Caribe', 'HTG', 'Gourde haitiano'), (334, 'HM', 'HMD', 0, 'Islas Heard y McDonald', 'Oceanía', '', '', ''), (336, 'VA', 'VAT', 39, 'Ciudad del Vaticano', 'Europa', '', '', ''), (340, 'HN', 'HND', 504, 'Honduras', 'América', 'América Central', 'HNL', 'Lempira hondureño'), (344, 'HK', 'HKG', 852, 'Hong Kong', 'Asia', '', 'HKD', 'Dólar de Hong Kong'), (348, 'HU', 'HUN', 36, 'Hungría', 'Europa', '', 'HUF', 'Forint húngaro'), (352, 'IS', 'ISL', 354, 'Islandia', 'Europa', '', 'ISK', 'Króna islandesa'), (356, 'IN', 'IND', 91, 'India', 'Asia', '', 'INR', 'Rupia india'), (360, 'ID', 'IDN', 62, 'Indonesia', 'Asia', '', 'IDR', 'Rupiah indonesia'), (364, 'IR', 'IRN', 98, 'Irán', 'Asia', '', 'IRR', 'Rial iraní'), (368, 'IQ', 'IRQ', 964, 'Iraq', 'Asia', '', 'IQD', 'Dinar iraquí'), (372, 'IE', 'IRL', 353, 'Irlanda', 'Europa', '', 'EUR', 'Euro'), (376, 'IL', 'ISR', 972, 'Israel', 'Asia', '', 'ILS', 'Nuevo shéquel israelí'), (380, 'IT', 'ITA', 39, 'Italia', 'Europa', '', 'EUR', 'Euro'), (384, 'CI', 'CIV', 225, 'Costa de Marfil', 'África', '', '', ''), (388, 'JM', 'JAM', 1876, 'Jamaica', 'América', 'El Caribe', 'JMD', 'Dólar jamaicano'), (392, 'JP', 'JPN', 81, 'Japón', 'Asia', '', 'JPY', 'Yen japonés'), (398, 'KZ', 'KAZ', 7, 'Kazajstán', 'Asia', '', 'KZT', 'Tenge kazajo'), (400, 'JO', 'JOR', 962, 'Jordania', 'Asia', '', 'JOD', 'Dinar jordano'), (404, 'KE', 'KEN', 254, 'Kenia', 'África', '', 'KES', 'Chelín keniata'), (408, 'KP', 'PRK', 850, 'Corea del Norte', 'Asia', '', 'KPW', 'Won norcoreano'), (410, 'KR', 'KOR', 82, 'Corea del Sur', 'Asia', '', 'KRW', 'Won surcoreano'), 
(414, 'KW', 'KWT', 965, 'Kuwait', 'Asia', '', 'KWD', 'Dinar kuwaití'), (417, 'KG', 'KGZ', 996, 'Kirguistán', 'Asia', '', 'KGS', 'Som kirguís de Kirguistán'), (418, 'LA', 'LAO', 856, 'Laos', 'Asia', '', 'LAK', 'Kip lao'), (422, 'LB', 'LBN', 961, 'Líbano', 'Asia', '', 'LBP', 'Libra libanesa'), (426, 'LS', 'LSO', 266, 'Lesotho', 'África', '', 'LSL', 'Loti lesotense'), (428, 'LV', 'LVA', 371, 'Letonia', 'Europa', '', 'LVL', 'Lat letón'), (430, 'LR', 'LBR', 231, 'Liberia', 'África', '', 'LRD', 'Dólar liberiano'), (434, 'LY', 'LBY', 218, 'Libia', 'África', '', 'LYD', 'Dinar libio'), (438, 'LI', 'LIE', 423, 'Liechtenstein', 'Europa', '', '', ''), (440, 'LT', 'LTU', 370, 'Lituania', 'Europa', '', 'LTL', 'Litas lituano'), (442, 'LU', 'LUX', 352, 'Luxemburgo', 'Europa', '', 'EUR', 'Euro'), (446, 'MO', 'MAC', 853, 'Macao', 'Asia', '', 'MOP', 'Pataca de Macao'), (450, 'MG', 'MDG', 261, 'Madagascar', 'África', '',
"""Resolwe collection model."""
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchVectorField
from django.db import models, transaction

from resolwe.permissions.models import PermissionObject, PermissionQuerySet

from .base import BaseModel, BaseQuerySet
from .utils import DirtyError, bulk_duplicate, validate_schema


class BaseCollection(BaseModel):
    """Template for Postgres model for storing a collection."""

    class Meta(BaseModel.Meta):
        """BaseCollection Meta options."""

        abstract = True

    #: detailed description
    description = models.TextField(blank=True)

    #: free-form collection settings
    settings = models.JSONField(default=dict)

    #: collection descriptor schema
    descriptor_schema = models.ForeignKey(
        "flow.DescriptorSchema", blank=True, null=True, on_delete=models.PROTECT
    )

    #: collection descriptor
    descriptor = models.JSONField(default=dict)

    #: indicate whether `descriptor` doesn't match `descriptor_schema` (is dirty)
    descriptor_dirty = models.BooleanField(default=False)

    #: tags for categorizing objects
    tags = ArrayField(models.CharField(max_length=255), default=list)

    #: field used for full-text search
    search = SearchVectorField(null=True)

    def save(self, *args, **kwargs):
        """Perform descriptor validation and save object.

        Sets ``descriptor_dirty`` according to whether ``descriptor``
        validates against ``descriptor_schema``.

        :raises ValueError: if a non-empty ``descriptor`` is given without
            a ``descriptor_schema``.
        """
        if self.descriptor_schema:
            try:
                validate_schema(self.descriptor, self.descriptor_schema.schema)
                self.descriptor_dirty = False
            except DirtyError:
                self.descriptor_dirty = True
        elif self.descriptor:
            # A descriptor without a schema cannot be validated. (The old
            # `and self.descriptor != {}` clause was redundant: an empty
            # dict is already falsy.)
            raise ValueError(
                "`descriptor_schema` must be defined if `descriptor` is given"
            )
        # BUGFIX: forward *args/**kwargs (e.g. `using`, `update_fields`,
        # `force_insert`) to Django's save instead of silently dropping them.
        super().save(*args, **kwargs)


class CollectionQuerySet(BaseQuerySet, PermissionQuerySet):
    """Query set for ``Collection`` objects."""

    @transaction.atomic
    def duplicate(self, contributor):
        """Duplicate (make a copy) ``Collection`` objects."""
        return bulk_duplicate(collections=self, contributor=contributor)


class Collection(BaseCollection, PermissionObject):
    """Postgres model for storing a collection."""

    class Meta(BaseCollection.Meta):
        """Collection Meta options."""

        permissions = (
            ("view", "Can view collection"),
            ("edit", "Can edit collection"),
            ("share", "Can share collection"),
            ("owner", "Is owner of the collection"),
        )

        indexes = [
            models.Index(name="idx_collection_name", fields=["name"]),
            GinIndex(
                name="idx_collection_name_trgm",
                fields=["name"],
                opclasses=["gin_trgm_ops"],
            ),
            models.Index(name="idx_collection_slug", fields=["slug"]),
            GinIndex(name="idx_collection_tags", fields=["tags"]),
            GinIndex(name="idx_collection_search", fields=["search"]),
        ]

    #: manager
    objects = CollectionQuerySet.as_manager()

    #: duplication date and time
    duplicated = models.DateTimeField(blank=True, null=True)

    def is_duplicate(self):
        """Return True if collection is a duplicate."""
        return bool(self.duplicated)

    def duplicate(self, contributor):
        """Duplicate (make a copy)."""
        return bulk_duplicate(
            collections=self._meta.model.objects.filter(pk=self.pk),
            contributor=contributor,
        )[0]
"""

Python Interchangeable Virtual Instrument Library

Copyright (c) 2012-2016 Alex Forencich

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

"""

from .lecroyWRXIA import *


class lecroyWR204MXIA(lecroyWRXIA):
    "Lecroy WaveRunner 204MXi-A IVI oscilloscope driver"

    def __init__(self, *args, **kwargs):
        # Set the instrument id only if a subclass has not already done so.
        self.__dict__.setdefault('_instrument_id', 'WaveRunner 204MXi-A')

        # BUGFIX: the super() call previously named `lecroy104MXiA`, which
        # does not exist in this module, so instantiating this driver raised
        # NameError. It must reference this class.
        super(lecroyWR204MXIA, self).__init__(*args, **kwargs)

        # WaveRunner 204MXi-A: 4 analog channels, no digital channels,
        # 1 GHz bandwidth.
        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 1e9

        self._init_channels()
rt * from mmls import * from mount import * from mount_ewf import * from get_ntuser_paths import * from get_usrclass_paths import * from get_system_paths import * from done import * from unix2dos import * from check_for_folder import * from mount_encase_v6_l01 import * from calculate_md5 import * import os import codecs from os.path import join import re import io import sys import string import subprocess import pickle import datetime import base64 ### process_folder ####################################################################################### def process_folder(folder_to_process, export_file, outfile): for root,dirs,files in os.walk(folder_to_process): for file_name in files: abs_file_path = os.path.join(root,file_name) quoted_abs_file_path = '"'+abs_file_path+'"' file_name_print = file_name.encode('utf-8') abs_file_path_print = abs_file_path.encode('utf-8') #clean up printable variables file_name_print = re.sub('b\'','',str(file_name_print)) file_name_print = re.sub("'",'',file_name_print) abs_file_path_print = re.sub('b\'','',str(abs_file_path_print)) abs_file_path_print = re.sub("'",'', abs_file_path_print) #don't process link files if not (os.path.islink(abs_file_path)): #get file size try: file_size = os.path.getsize(abs_file_path) except: print("Could not get filesize for file: " + abs_file_path) outfile.write("Could not get filesize for file: " + abs_file_path + "\n") if(file_size): try: ent = calc_entropy(abs_file_path) print("Filename: " + file_name + "\t" + "Entropy: " + ent) export_file.write(ent + "," + str(file_name_print) + "," + str(file_size) + "," + str(abs_file_path_print) + "\n") except: print("Could not get entropy for file: " + abs_file_path) outfile.write("Could not get entropy for file: " + str(abs_file_path_print) + "\n") else: print("File: " + file_name + " has 0 file size....skipping") outfile.write("File: " + file_name + "has 0 file size....skipping\n") else: print("File: " + file_name + " is link file....skipping") 
outfile.write("File: " + file_name + "is link file....skipping\n") ########################################################################################################## ### calc_entropy ######################################################################################### def calc_entropy(file_to_process): if(re.search("'", file_to_process)): entropy = subprocess.check_output(['ent ' + '"' + file_to_process + '"' + " | grep Entropy | awk '{print $3}'"], shell=True) else: entropy = subprocess.check_output(['ent ' + "'" + file_to_process + "'" + " | grep Entropy | awk '{print $3}'"], shell=True) entropy = entropy.strip() entropy_string = entropy.decode(encoding='UTF-8') return entropy_string ########################################################################################################## def entropy_mr(item_to_process, case_number, root_folder_path, evidence): print("The item to process is: " + item_to_process) print("The case_name is: " + case_number) print("The output folder is: " + root_folder_path) print("The evidence to process is: " + evidence) evidence_no_quotes = evidence evidence = '"' + evidence + '"' #get datetime now = datetime.datetime.now() #set Mount Point mount_point = "/mnt/" + "MantaRay_" + now.strftime("%Y-%m-%d_%H_%M_%S_%f") #create output folder path folder_path = root_folder_path + "/" + "Entropy" check_for_folder(folder_path, "NONE") #open a log file for output log_file = folder_path + "/Entropy_logfile.txt" outfile = open(log_file, 'wt+') #open file to write output exp_file = folder_path + "/" + case_number +"_entropy.csv" export_file = open(exp_file, 'a+', encoding='latin-1', errors="ignore") #export_file = open(exp_file, 'a') if(item_to_process == "Single File"): ent = calc_entropy(evidence) print(ent) elif(item_to_process == "Directory"): folder_to_process = evidence_no_quotes process_folder(folder_to_process, export_file, outfile) elif(item_to_process =="EnCase Logical Evidence File"): file_to_process = evidence 
mount_point = mount_encase_v6_l01(case_number, file_to_process, outfile) process_folder(mount_point, export_file, outfile) #umount if(os.path.exists(mount_point)): subprocess.call(['sudo umount -f ' + mount_point], shell=True) os.rmdir(mount_point) elif(item_to_process == "Bit-Stream Image"): Image_Path = evidence #process every file on every partition #get datetime now = datetime.datetime.now() #set Mount Point mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S_%f") #check if Image file is in Encase format if re.search(".E01", Image_Path): #str
ip out single quotes from the quoted path #no_quotes_path = Image_Path.replace("'","") #print("THe no quotes path is: " + no_quotes_path) #call mount_ewf function Image_Path = mount_ewf(Image_Path, outfile,mount_point) #call mmls function partition_info_dict, temp_time = mmls(outfile, Image_Path) partition_info_dict_temp = partition_info_dict #get filesize of mmls_output.txt file_size = os.path.getsize("/tmp/mmls_output_" + temp_time + ".tx
t") #if filesize of mmls output is 0 then run parted if(file_size == 0): print("mmls output was empty, running parted") outfile.write("mmls output was empty, running parted") #call parted function partition_info_dict, temp_time = parted(outfile, Image_Path) else: #read through the mmls output and look for GUID Partition Tables (used on MACS) mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r') for line in mmls_output_file: if re.search("GUID Partition Table", line): print("We found a GUID partition table, need to use parted") outfile.write("We found a GUID partition table, need to use parted\n") #call parted function partition_info_dict, temp_time = parted(outfile, Image_Path) mmls_output_file.close() #loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY) for key,value in sorted(partition_info_dict.items()): #disable auto-mount in nautilis - this stops a nautilis window from popping up everytime the mount command is executed cmd_false = "sudo gsettings set org.gnome.desktop.media-handling automount false && sudo gsettings set org.gnome.desktop.media-handling automount-open false" try: subprocess.call([cmd_false], shell=True) except: print("Autmount false failed") #call mount sub-routine success_code, loopback_device_mount = mount(value,str(key),Image_Path, outfile, mount_point) if(success_code): print("Could not mount partition with filesystem: " + value + " at offset:" + str(key)) outfile.write("Could not mount partition with filesystem: " + value + " at offset:" + str(key)) else: print("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n") outfile.write("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n") #call entropy function for each mount_point process_folder(mount_point, export_file, outfile) print("We just finished calculating the entropy for every file...sorting output") #unmount and remove mount points if(os.path.exists(mount_point)): subprocess.call(['sudo 
umount -f ' + mount_point], shell=True) os.rmdir(mount_point) #unmount loopback device if this image was HFS+ - need to run losetup -d <loop_device> before unmounting if not (loopback_device_mount == "NONE"): losetup_d_command = "losetup -d " + loopback_device_mount subprocess.call([losetup_d_command], shell=True) #delete /tmp files created for each partition if (os.path.exists("/tmp/mmls_output_" + temp_time + ".txt")): os.remove("/tmp/mmls_output_" + temp_time + ".txt") #close output file export_file.close() #sort output file sort_command = "strings -a " + "'" + exp_file + "'" + " |sort -t\| -r -k 2n > " + "'" + folder_path + "'" + "/" + case_number +"_entropy_sorted.csv" subprocess.call([sort_command], shel
1df367ecd4f68aab894e57b31", "repos@localhost:gentoo-kde-shard.git", pull=True), "gnome" : GitTree("gentoo-gnome-shard", "ffabb752f8f4e23a865ffe9caf72f950695e2f26", "repos@localhost:ports/gentoo-gnome-shard.git", pull=True), "x11" : GitTree("gentoo-x11-shard", "12c1bdf9a9bfd28f48d66bccb107c17b5f5af577", "repos@localhost:ports/gentoo-x11-shard.git", pull=True), "office" : GitTree("gentoo-office-shard", "9a702057d23e7fa277e9626344671a82ce59442f", "repos@localhost:ports/gentoo-office-shard.git", pull=True), "core" : GitTree("gentoo-core-shard", "56e5b9edff7dc27e828b71010d019dcbd8e176fd", "repos@localhost:gentoo-core-shard.git", pull=True) } # perl: 1fc10379b04cb4aaa29e824288f3ec22badc6b33 (Updated 6 Dec 2016) # kde: cd4e1129ddddaa21df367ecd4f68aab894e57b31 (Updated 25 Dec 2016) # gnome: ffabb752f8f4e23a865ffe9caf72f950695e2f26 (Updated 20 Sep 2016) # x11: 12c1bdf9a9bfd28f48d66bccb107c17b5f5af577 (Updated 24 Dec 2016) # office: 9a702057d23e7fa277e9626344671a82ce59442f (Updated 29 Nov 2016) # core: 56e5b9edff7dc27e828b71010d019dcbd8e176fd (Updated 17 Dec 2016) # funtoo-toolchain: b97787318b7ffc
feaacde82cd21ddd5e207ad1f4 (Updated 25 Dec 2016) funtoo_overlays = { "funtoo_
media" : GitTree("funtoo-media", "master", "repos@localhost:funtoo-media.git", pull=True), "plex_overlay" : GitTree("funtoo-plex", "master", "https://github.com/Ghent/funtoo-plex.git", pull=True), #"gnome_fixups" : GitTree("gnome-3.16-fixups", "master", "repos@localhost:ports/gnome-3.16-fixups.git", pull=True), "gnome_fixups" : GitTree("gnome-3.20-fixups", "master", "repos@localhost:ports/gnome-3.20-fixups.git", pull=True), "funtoo_toolchain" : GitTree("funtoo-toolchain", "b97787318b7ffcfeaacde82cd21ddd5e207ad1f4", "repos@localhost:funtoo-toolchain-overlay.git", pull=True), "ldap_overlay" : GitTree("funtoo-ldap", "master", "repos@localhost:funtoo-ldap-overlay.git", pull=True), "deadbeef_overlay" : GitTree("deadbeef-overlay", "master", "https://github.com/damex/deadbeef-overlay.git", pull=True), "gambas_overlay" : GitTree("gambas-overlay", "master", "https://github.com/damex/gambas-overlay.git", pull=True), "wmfs_overlay" : GitTree("wmfs-overlay", "master", "https://github.com/damex/wmfs-overlay.git", pull=True), "flora" : GitTree("flora", "master", "repos@localhost:flora.git", pull=True), } # These are other overlays that we merge into the Funtoo tree. However, we just pull in the most recent versions # of these when we regenerate our tree. 
other_overlays = { "foo_overlay" : GitTree("foo-overlay", "master", "https://github.com/slashbeast/foo-overlay.git", pull=True), "bar_overlay" : GitTree("bar-overlay", "master", "git://github.com/adessemond/bar-overlay.git", pull=True), "squeezebox_overlay" : GitTree("squeezebox", "master", "git://anongit.gentoo.org/user/squeezebox.git", pull=True), "pantheon_overlay" : GitTree("pantheon", "master", "https://github.com/pimvullers/elementary.git", pull=True), "pinsard_overlay" : GitTree("pinsard", "master", "https://github.com/apinsard/sapher-overlay.git", pull=True), "sabayon_for_gentoo" : GitTree("sabayon-for-gentoo", "master", "git://github.com/Sabayon/for-gentoo.git", pull=True), "tripsix_overlay" : GitTree("tripsix", "master", "https://github.com/666threesixes666/tripsix.git", pull=True), "faustoo_overlay" : GitTree("faustoo", "master", "https://github.com/fmoro/faustoo.git", pull=True), "wltjr_overlay" : GitTree("wltjr", "master", "https://github.com/Obsidian-StudiosInc/os-xtoo", pull=True), "vmware_overlay" : GitTree("vmware", "master", "git://anongit.gentoo.org/proj/vmware.git", pull=True) } funtoo_changes = False if funtoo_overlay.changes: funtoo_changes = True elif gentoo_staging_r.changes: funtoo_changes = True else: for fo in funtoo_overlays: if funtoo_overlays[fo].changes: funtoo_changes = True break # This next code regenerates the contents of the funtoo-staging tree. Funtoo's tree is itself composed of # many different overlays which are merged in an automated fashion. This code does it all. pull = True if nopush: push = False else: push = "master" # base_steps define the initial steps that prepare our destination tree for writing. Checking out the correct # branch, copying almost the full entirety of Gentoo's portage tree to our destination tree, and copying over # funtoo overlay licenses, metadata, and also copying over GLSA's. 
base_steps = [ GitCheckout("master"), SyncFromTree(gentoo_staging_r, exclude=[ "/metadata/cache/**", "ChangeLog", "dev-util/metro", "skel.ChangeLog", ]), ] # Steps related to generating system profiles. These can be quite order-dependent and should be handled carefully. # Generally, the funtoo_overlay sync should be first, then the gentoo_staging_r SyncFiles, which overwrites some stub # files in the funtoo overlay. profile_steps = [ SyncDir(funtoo_overlay.root, "profiles", "profiles", exclude=["categories", "updates"]), CopyAndRename("profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/subarch", "profiles/funtoo/1.0/linux-gnu/arch/pure64/subarch", lambda x: os.path.basename(x) + "-pure64"), SyncFiles(gentoo_staging_r.root, { "profiles/package.mask":"profiles/package.mask/00-gentoo", "profiles/arch/amd64/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/package.use.mask/01-gentoo", "profiles/features/multilib/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/package.use.mask/02-gentoo", "profiles/arch/amd64/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/use.mask/01-gentoo", "profiles/arch/x86/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-32bit/package.use.mask/01-gentoo", "profiles/arch/x86/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-32bit/use.mask/01-gentoo", "profiles/default/linux/package.use.mask":"profiles/funtoo/1.0/linux-gnu/package.use.mask/01-gentoo", "profiles/default/linux/use.mask":"profiles/funtoo/1.0/linux-gnu/use.mask/01-gentoo", "profiles/arch/amd64/no-multilib/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/package.use.mask/01-gentoo", "profiles/arch/amd64/no-multilib/package.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/package.mask/01-gentoo", "profiles/arch/amd64/no-multilib/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/use.mask/01-gentoo" }), SyncFiles(funtoo_overlays["deadbeef_overlay"].root, { "profiles/package.mask":"profiles/package.mask/deadbeef-mask" }), 
SyncFiles(funtoo_overlays["wmfs_overlay"].root, { "profiles/package.mask":"profiles/package.mask/wmfs-mask" }) ] profile_steps += [ SyncFiles(funtoo_overlays["funtoo_toolchain"].root, { "profiles/package.mask/funtoo-toolchain":"profiles/funtoo/1.0/linux-gnu/build/current/package.mask/funtoo-toolchain", }), SyncFiles(funtoo_overlays["funtoo_toolchain"].root, { "profiles/package.mask/funtoo-toolchain":"profiles/funtoo/1.0/linux-gnu/build/stable/package.mask/funtoo-toolchain", "profiles/package.mask/funtoo-toolchain-experimental":"profiles/funtoo/1.0/linux-gnu/build/experimental/package.mask/funtoo-toolchain", }), RunSed(["profiles/base/make.defaults"], ["/^PYTHON_TARGETS=/d", "/^PYTHON_SINGLE_TARGET=/d"]), ] # Steps related to copying ebuilds. Note that order can make a difference here when multiple overlays are # providing identical catpkgs. # Ebuild additions -- these are less-risky changes because ebuilds are only added, and not replaced. ebuild_additions = [ InsertEbuilds(other_overlays["bar_overlay"], select="all", skip=["app-emulation/qemu"], replace=False), InsertEbuilds(other_overlays["squeezebox_overlay"], select="all", skip=None, replace=False), InsertEbuilds(funtoo_overlays["deadbeef_overlay"], select="all", skip=None, replace=False), InsertEbuilds(funtoo_overlays["gambas_overlay"], select="all", skip=None, replace=False), InsertEbuilds(funtoo_overlays["wmfs_overlay"], select="all", skip=None, replace=False), InsertEbuilds(funtoo_overlays["flora"], select="all", skip=None, replace=True, merge=True), ] # Ebuild modifications -- these changes need to be treated more carefully as ordering can be important # for wholesale replacing as well as merging. ebuild_modifications = [ InsertEbuilds(oth
#exponent
#find 2^n


def power_of_two(n):
    """Return 2 raised to the integer power *n*."""
    return 2 ** n


def main():
    """Prompt for n and print 2**n.

    Ported to Python 3: ``input()`` returns a string (and the old Python 2
    ``input()`` dangerously eval'd it), so the value is converted explicitly,
    and ``print`` is now a function.
    """
    n = int(input("Enter n: "))
    print(power_of_two(n))


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015 Percy Li
# See LICENSE for details.

import struct
import threading
import copy


class FrameBuffer(object):
    """Accumulates raw bytes and splits them into frames via a pluggable decoder."""

    def __init__(self, decoder=None):
        # decoder(buffer) -> (consumed_byte_count, decoded_frame_or_None)
        self.data_buffer = b''
        self.decoder = decoder

    def set_decoder(self, decoder=None):
        """Install (or clear, with None) the frame decoder callable."""
        self.decoder = decoder

    def pop_frame(self):
        """Decode and remove one frame; without a decoder, pop a single byte."""
        if self.decoder is None:
            # No framing configured: degrade to byte-at-a-time consumption.
            return self.pop_buffer()
        consumed, frame = self.decoder(self.data_buffer)
        self.data_buffer = self.data_buffer[consumed:]
        return frame  # None when the buffer holds no complete frame yet

    def push_frame(self, frame):
        """Append a frame: objects exposing marshal() are serialized, bytes are taken as-is."""
        if hasattr(frame, 'marshal'):
            self.data_buffer += frame.marshal()
        elif isinstance(frame, bytes):
            self.data_buffer += frame

    def append_buffer(self, buf):
        """Append raw bytes to the internal buffer."""
        self.data_buffer += buf

    def pop_buffer(self):
        """Remove and return the first buffered byte (as an int), or None when empty."""
        if not self.data_buffer:
            return None
        first, self.data_buffer = self.data_buffer[0], self.data_buffer[1:]
        return first

    def get_buffer(self):
        """Return the raw internal buffer."""
        return self.data_buffer

    def clear_buffer(self):
        """Drop all buffered bytes."""
        self.data_buffer = b''

    def get_buffer_length(self):
        """Return the number of buffered bytes."""
        return len(self.data_buffer)


# thread safe version
class TSFrameBuffer(FrameBuffer):
    """FrameBuffer whose every operation is serialized by one reentrant lock."""

    def __init__(self, decoder=None):
        FrameBuffer.__init__(self, decoder)
        self.internal_lock = threading.RLock()

    def set_decoder(self, decoder=None):
        with self.internal_lock:
            return FrameBuffer.set_decoder(self, decoder)

    def pop_frame(self):
        with self.internal_lock:
            return FrameBuffer.pop_frame(self)

    def push_frame(self, frame):
        with self.internal_lock:
            return FrameBuffer.push_frame(self, frame)

    def append_buffer(self, buf):
        with self.internal_lock:
            return FrameBuffer.append_buffer(self, buf)

    def pop_buffer(self):
        with self.internal_lock:
            return FrameBuffer.pop_buffer(self)

    def get_buffer(self):
        with self.internal_lock:
            # Hand back a deep copy so callers never mutate shared state unlocked.
            return copy.deepcopy(FrameBuffer.get_buffer(self))

    def clear_buffer(self):
        with self.internal_lock:
            return FrameBuffer.clear_buffer(self)

    def get_buffer_length(self):
        with self.internal_lock:
            return FrameBuffer.get_buffer_length(self)


if __name__ == '__main__':
    fb = FrameBuffer()
import re


def pythonize_camelcase_name(name):
    """Convert a CamelCase identifier to snake_case.

    Example: ``GetProperty`` -> ``get_property``.
    """
    snake = re.sub(r'([A-Z])', lambda m: '_' + m.group(0).lower(), name)
    # A leading capital produces one spurious underscore; drop exactly one.
    return snake[1:] if snake[:1] == '_' else snake
'''
Given: A protein string PP of length at most 1000 aa.

Return: The total weight of PP. Consult the monoisotopic mass table.
'''


def weight(protein, mass_table_path="mass_table.txt"):
    """Return the total monoisotopic weight of *protein*.

    :param protein: string of one-letter amino-acid codes.
    :param mass_table_path: path of a whitespace-separated table with one
        ``<code> <mass>`` pair per line (default ``mass_table.txt``, keeping
        the original behavior).
    :raises KeyError: if *protein* contains a code missing from the table.

    Fix: the original split on a single space (``line.split(" ")``), which
    breaks on the multi-space column alignment common in mass tables;
    ``split()`` tolerates any whitespace run.  Blank/malformed lines are
    now skipped instead of raising IndexError.
    """
    # Build the amino-acid -> mass lookup table.
    mass = {}
    with open(mass_table_path, "r") as table:
        for line in table:
            fields = line.split()
            if len(fields) >= 2:  # skip blank or malformed lines
                mass[fields[0]] = float(fields[1])
    # Total mass is the sum of the residue masses.
    return sum(mass[aa] for aa in protein)
# coding: utf-8
# license: GPLv3

from enemies import *
from hero import *


def annoying_input_int(message=''):
    """Keep prompting until the user types a valid integer, then return it."""
    value = None
    while value is None:
        try:
            value = int(input(message))
        except ValueError:
            print('Вы ввели недопустимые символы')
    return value


def game_tournament(hero, dragon_list):
    """Run the duel loop: the hero faces each dragon until one side falls."""
    for dragon in dragon_list:
        print('Вышел', dragon._color, 'дракон!')
        while dragon.is_alive() and hero.is_alive():
            print('Вопрос:', dragon.question())
            answer = annoying_input_int('Ответ:')
            if dragon.check_answer(answer):
                hero.attack(dragon)
                print('Верно! \n** дракон кричит от боли **')
            else:
                dragon.attack(hero)
                print('Ошибка! \n** вам нанесён удар... **')
        # Duel over with the dragon still alive => the hero fell; stop touring.
        if dragon.is_alive():
            break
        print('Дракон', dragon._color, 'повержен!\n')
    if hero.is_alive():
        print('Поздравляем! Вы победили!')
        print('Ваш накопленный опыт:', hero._experience)
    else:
        print('К сожалению, Вы проиграли...')


def start_game():
    """Greet the player, spawn the dragons and kick off the tournament."""
    try:
        print('Добро пожаловать в арифметико-ролевую игру с драконами!')
        print('Представьтесь, пожалуйста: ', end='')
        hero = Hero(input())
        dragon_number = 3
        dragon_list = generate_dragon_list(dragon_number)
        assert(len(dragon_list) == 3)
        print('У Вас на пути', dragon_number, 'драконов!')
        game_tournament(hero, dragon_list)
    except EOFError:
        print('Поток ввода закончился. Извините, принимать ответы более невозможно.')
from django.core.urlresolvers import reverse
import django.http
import django.utils.simplejson as json
import functools


def make_url(request, reversible):
    """Return the absolute URL for a reversible view name/pattern."""
    relative = reverse(reversible)
    return request.build_absolute_uri(relative)


def json_output(func):
    """Decorator: serialize the wrapped view's return value as a JSON HttpResponse."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        payload = json.dumps(func(*args, **kwargs))
        return django.http.HttpResponse(payload, content_type="application/json")
    return wrapper
import time from netCDF4 import Dataset from oceansar.ocs_io import NETCDFHandler class ProcFile(NETCDFHandler): """ Processed raw data file generated by the OASIS Simulator :param file_name: File name :param mode: Access mode (w = write, r = read, r+ = read + append) :param proc_dim: Processed raw data dimensions :param format: netCDF format .. note:: Refer to netCDF4 Python library for details on access mode and available formats """ def __init__(self, file_name, mode, proc_dim=None, format='NETCDF4'): self.__file__ = Dataset(file_name, mode, format) # If writing, define file if mode == 'w': # Set file attributes self.__file__.description = 'OCEANSAR Processed SLC Data File' self.__file__.history = 'Created ' + time.ctime(time.time()) self.__file__.source = 'OCEANSAR Simulator' # Dimensions if not proc_dim: raise ValueError('Processed raw data dimensions are needed when creating a new file!') self.__file__.createDimension('ch_dim', proc_dim[0]) self.__file__.createDimension('pol_dim', proc_dim[1]) self.__file__.createDimension('az_dim', proc_dim[2]) self.__file__.createDimension('rg_dim', proc_dim[3]) num_ch = self.__file__.createVariable('num_ch', 'i4') # Variables slc_r = self.__file__.createVariable('slc_r', 'f8', ('ch_dim', 'pol_dim', 'az_dim', 'rg_dim')) slc_i = self.__file__.createVariable('slc_i', 'f8', ('ch_dim', 'pol_dim', 'az_dim',
'rg_dim')) slc_r.units = '[]' slc_i.units = '[]' inc_angle = self.__file__.createVariable('inc_angle', 'f8') inc_angle.units
= '[deg]' f0 = self.__file__.createVariable('f0', 'f8') f0.units = '[Hz]' ant_L = self.__file__.createVariable('ant_L', 'f8') ant_L.units = '[m]' prf = self.__file__.createVariable('prf', 'f8') prf.units = '[Hz]' v_ground = self.__file__.createVariable('v_ground', 'f8') v_ground.units = '[m/s]' orbit_alt = self.__file__.createVariable('orbit_alt', 'f8') orbit_alt.units = '[m]' sr0 = self.__file__.createVariable('sr0', 'f8') sr0.units = '[m]' rg_sampling = self.__file__.createVariable('rg_sampling', 'f8') rg_sampling.units = '[Hz]' rg_bw = self.__file__.createVariable('rg_bw', 'f8') rg_bw.units = '[Hz]' b_ati = self.__file__.createVariable('b_ati', 'f8', 'ch_dim') b_ati.units = '[m]' b_xti = self.__file__.createVariable('b_xti', 'f8', 'ch_dim') b_xti.units = '[m]'
"""Regularizations. Each regularization method is implemented as a subclass of :class:`Regularizer`, where the constructor takes the hyperparameters, an
d the `__call__` method constructs the symbolic loss expression given a parameter. These are made for use with :meth:`Model.regularize`, but can also be used directly in the :meth:`loss` method of :class:`.Model` subclasses. """ import theano import theano.tensor as T from theano.ifelse import ifelse class Regularizer: pass class L2(Regularizer): """L2 loss.""" def __init__(self, penalty=0.01): self.penalty = penalty def __call__(self, p): return
T.sqrt(T.sqr(p).sum()) * T.as_tensor_variable(self.penalty) class StateNorm(Regularizer): """Squared norm difference between recurrent states. Note that this method seems to be unstable if the initial hidden state is initialized to zero. David Krueger & Roland Memisevic (2016). `Regularizing RNNs by stabilizing activations. <http://arxiv.org/pdf/1511.08400v7.pdf>`_ """ def __init__(self, penalty=50.0): self.penalty = penalty def __call__(self, p, p_mask): """Compute the squared norm difference of a sequence. Example ------- >>> def loss(self, outputs, outputs_mask): ... # loss() definition from a custom Model subclass ... loss = super().loss() ... pred_states, pred_symbols = self(outputs, outputs_mask) ... # Include transition from initial state ... pred_states = T.concatenate([initial_state, pred_states], ... axis=0) ... return loss + StateNorm(50.0)(pred_states, outputs_mask) """ mask = p_mask[:-1] l2 = T.sqrt(T.sqr(p).sum(axis=2)) diff = (l2[1:] - l2[:-1]) * mask return (self.penalty * T.sqr(diff).sum() / mask.sum().astype(theano.config.floatX))
# Print the version split in three components
import sys


def split_version(version):
    """Return [major, minor, revision] parsed from a dotted version string.

    Each dot-separated component contributes its leading digits; components
    not starting with a digit are ignored.  A missing revision is reported
    as '0'.

    Fixes over the original: it kept only the FIRST character of each
    component (so "1.10.3" printed "1 1 3") and raised IndexError on an
    empty component (trailing dot).
    """
    parts = []
    for component in version.strip().split('.'):
        digits = ''
        for ch in component:
            if ch in '0123456789':
                digits += ch
            else:
                break
        if digits:
            parts.append(digits)
    # If no revision, '0' is added
    if len(parts) == 2:
        parts.append('0')
    return parts


def main():
    """Read the version file named on the command line and print its parts."""
    verfile = sys.argv[1]
    with open(verfile) as f:  # 'with' guarantees the file is closed
        version = f.read()
    print(' '.join(split_version(version)))


if __name__ == '__main__':
    main()
ed with this software. #----------------------------------------------------------------------------- ''' ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports import json from warnings import warn from uuid import uuid4 # External imports # Bokeh imports from .state import curstate from ..util.serialization import make_id #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- HTML_MIME_TYPE = 'text/html' JS_MIME_TYPE = 'application/javascript' LOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json' EXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json' #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- class CommsHandle(object): ''' ''' _json = {} def __init__(self, comms, cell_doc): self._cellno = None try: from IPython import get_ipython ip = get_ipython() hm = ip.history_manager p_prompt = list(hm.get_tail(1, include_latest=True))[0][1] self._cellno = p_prompt except Exception as e: log.debug("Could not get Notebook cell number, reason: %s", e) self._comms = comms self._doc = cell_doc # Our internal copy of the doc is in perpetual "hold". Events from the # originating doc will be triggered and collected it it. 
Events are # processed/cleared when push_notebook is called for this comms handle self._doc.hold() def _repr_html_(self): if self._cellno is not None: return "<p><code>&lt;Bokeh Notebook handle for <strong>In[%s]</strong>&gt;</code></p>" % str(self._cellno) else: return "<p><code>&lt;Bokeh Notebook handle&gt;</code></p>" @property def comms(self): return self._comms @property def doc(self): return self._doc # Adding this method makes curdoc dispatch to this Comms to handle # and Document model changed events. If we find that the event is # for a model in our internal copy of the docs, then trigger the # internal doc with the event so that it is collected (until a # call to push_notebook processes and clear colleted events) def _document_model_changed(self, event): if event.model._id in self.doc._all_models: self.doc._trigger_on_change(event) def install_notebook_hook(notebook_type, load, show_doc, show_app, overwrite=False): ''' Install a new notebook display hook. Bokeh comes with support for Jupyter notebooks built-in. However, there are other kinds of notebooks in use by different communities. This function provides a mechanism for
other projects to instruct Bokeh how to display content in other notebooks. This function is primarily of use to developers wishing to integrate Bokeh
with new notebook types. Args: notebook_type (str) : A name for the notebook type, e.e. ``'Jupyter'`` or ``'Zeppelin'`` If the name has previously been installed, a ``RuntimeError`` will be raised, unless ``overwrite=True`` load (callable) : A function for loading BokehJS in a notebook type. The function will be called with the following arguments: .. code-block:: python load( resources, # A Resources object for how to load BokehJS verbose, # Whether to display verbose loading banner hide_banner, # Whether to hide the output banner entirely load_timeout # Time after which to report a load fail error ) show_doc (callable) : A function for displaying Bokeh standalone documents in the notebook type. This function will be called with the following arguments: .. code-block:: python show_doc( obj, # the Bokeh object to display state, # current bokeh.io "state" notebook_handle # whether a notebook handle was requested ) If the notebook platform is capable of supporting in-place updates to plots then this function may return an opaque notebook handle that can be used for that purpose. The handle will be returned by ``show()``, and can be used by as appropriate to update plots, etc. by additional functions in the library that installed the hooks. show_app (callable) : A function for displaying Bokeh applications in the notebook type. This function will be called with the following arguments: .. 
code-block:: python show_app( app, # the Bokeh Application to display state, # current bokeh.io "state" notebook_url # URL to the current active notebook page ) overwrite (bool, optional) : Whether to allow an existing hook to be overwritten by a new definition (default: False) Returns: None Raises: RuntimeError If ``notebook_type`` is already installed and ``overwrite=False`` ''' if notebook_type in _HOOKS and not overwrite: raise RuntimeError("hook for notebook type %r already exists" % notebook_type) _HOOKS[notebook_type] = dict(load=load, doc=show_doc, app=show_app) def push_notebook(document=None, state=None, handle=None): ''' Update Bokeh plots in a Jupyter notebook output cells with new data or property values. When working the the notebook, the ``show`` function can be passed the argument ``notebook_handle=True``, which will cause it to return a handle object that can be used to update the Bokeh output later. When ``push_notebook`` is called, any property updates (e.g. plot titles or data source values, etc.) since the last call to ``push_notebook`` or the original ``show`` call are applied to the Bokeh output in the previously rendered Jupyter output cell. Several example notebooks can be found in the GitHub repository in the :bokeh-tree:`examples/howto/notebook_comms` directory. Args: document (Document, optional) : A :class:`~bokeh.document.Document` to push from. If None, uses ``curdoc()``. (default: None) state (State, optional) : A :class:`State` object. If None, then the current default state (set by ``output_file``, etc.) is used. (default: None) Returns: None Examples: Typical usage is typically similar to this: .. 
code-block:: python from bokeh.plotting import figure from bokeh.io import output_notebook, push_notebook, show output_notebook() plot = figure() plot.circle([1,2,3], [4,6,5]) handle = show(plot, notebook_handle=True) # Update the plot title in the earlier cell plot.title.text = "New Title" push_notebook(handle=handle) ''' from ..protocol import Protocol if state is None: state = curstate() if not document: document = state.document if not document: warn("No document to push") return if handle is None: handle = state.last_comms_handle if not handle: warn("Cannot find a last shown plot to updat
#!/usr/bin/env python
# -*- coding: utf-8 -*-

''' Lyndor runs from here - contains the main functions '''

import sys, time, os
import module.message as message
import module.save as save
import module.cookies as cookies
import module.read as read
import install
import module.move as move
import module.draw as draw
import module.rename as rename
import module.exercise_file as exercise_file
from colorama import Fore, init


def main():
    ''' Main function.

    Prompts for a single course url; an empty input switches to bulk mode,
    reading urls from the bulk-download file via read.bulk_download().
    '''
    init()
    message.animate_characters(Fore.LIGHTYELLOW_EX, draw.ROCKET, 0.02)
    message.spinning_cursor()
    message.print_line('\r1. Paste course url or\n' + '2. Press enter for Bulk Download')
    url = input()
    print('')
    start_time = time.time()  #start time counter begins
    if url == "":
        # If user press Enter (i.e. url empty), get urls from Bulkdownload.txt
        urls = read.bulk_download()
        if not urls:
            sys.exit(message.colored_message(Fore.LIGHTRED_EX, 'Please paste urls in Bulk Download.txt\n'))
        for url in urls:
            schedule_download(url)
    else:
        # begin regular download
        schedule_download(url)
    try:
        # Report total wall-clock time for the whole run.
        end_time = time.time()
        message.animate_characters(Fore.LIGHTGREEN_EX, draw.COW, 0.02)
        message.colored_message(Fore.LIGHTGREEN_EX, "\nThe whole process took {}\n".format(move.hms_string(end_time - start_time)))
    except KeyboardInterrupt:
        sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))


def schedule_download(url):
    ''' Look for the scheduled time in settings.json.

    If read.download_time is set, poll once a minute until the wall clock
    matches it, then start; otherwise start immediately.
    '''
    if not read.aria2_installed:
        tip = '☝🏻 Tip: Install aria2c for faster downloads, read README.md to learn more.'
        message.carriage_return_animate(tip)
    if read.download_time == '':
        # If download time not set, begin download
        download_course(url)
        return
    else:
        counter = True
        message.colored_message(Fore.LIGHTGREEN_EX, 'Download time set to: ' + read.download_time + '\
in settings.json, you can change or remove this time in settings.json\n')
        try:
            while counter:
                # Compare current HH:MM against the scheduled time each minute.
                if time.strftime("%H:%M") == read.download_time:
                    download_course(url)
                    return
                print(f'Download will start at: {read.download_time} leave this window open.')
                time.sleep(60)
        except KeyboardInterrupt:
            sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))


def download_course(url):
    ''' download course

    Validates the url, chooses cookie- vs credential-based download,
    then runs the save/rename/move pipeline and (optionally) fetches
    the exercise files.
    '''
    # Check for a valid url
    if url.find('.html') == -1:
        sys.exit(message.animate_characters(Fore.LIGHTRED_EX, draw.ANONYMOUS, 0.02))
    url = url[:url.find(".html")+5]  #strip any extra text after .html in the url

    # Folder/File paths
    lynda_folder_path = read.location + '/'
    course_folder_path = save.course_path(url, lynda_folder_path)
    desktop_folder_path = install.get_path("Desktop")
    download_folder_path = install.get_path("Downloads")

    # Read preferences
    use_cookie_for_download = read.course_download_pref
    if use_cookie_for_download in ['cookies', 'cookie'] or read.exfile_download_method == 'aria2':
        # Cookie-based download: locate cookies.txt on Desktop or Downloads.
        cookie_path = cookies.find_cookie(desktop_folder_path, download_folder_path)
        downloading_from_cookie = message.return_colored_message(Fore.LIGHTBLUE_EX, '🍪 Downloading videos using cookies.txt')
        message.carriage_return_animate(downloading_from_cookie)
    else:
        # Credential-based download: no cookie file needed.
        cookie_path = ''
        usr_pass_message = message.return_colored_message(Fore.LIGHTGREEN_EX, '⛺ Using username and password combination for download\n')
        message.carriage_return_animate(usr_pass_message)

    try:
        # main operations ->
        save.course(url, lynda_folder_path)                  # Create course folder
        save.info_file(url, course_folder_path)              # Gather information
        save.chapters(url, course_folder_path)               # Create chapter folders
        save.contentmd(url)                                  # Create content.md
        save.videos(url, cookie_path, course_folder_path)    # Download videos
        rename.videos(course_folder_path)                    # rename videos
        rename.subtitles(course_folder_path)                 # rename subtitles
        move.vid_srt_to_chapter(url, course_folder_path)     # Move videos and subtitles to chapter folders

        # Download exercise files
        if save.check_exercise_file(url):
            print('\nExercise file is available to download')
            if not read.download_exercise_file:  # if user do not want to download ex-file
                print("settings.json says you do not want to download ex-file -> 'download_exercise_file': false")
            else:  # if user wants to download ex-file
                if read.course_download_pref == 'regular-login':
                    exercise_file.download(url, course_folder_path, cookie_path)
                elif read.exfile_download_pref == 'library-login':
                    if read.card_number == '':
                        print('\nTo download ex-file via library login -> Please save library card details in settings.json')
                    else:
                        exercise_file.download(url, course_folder_path, cookie_path)
                else:
                    print('\nThe exercise file can only be downloaded through one of the below combinations:')
                    print('~ Regular login: username + password or')
                    print('~ Library login: card number, pin and org. url\n')
        else:  # if exercise file not present
            print('This course does not include Exercise files.')
    except KeyboardInterrupt:
        sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
# Addon manifest dictionary (Odoo-style, judging by the 'website_sale'
# dependency and the data/installable keys — TODO confirm framework version).
{
    'name': "Sale only available products on Website",
    'summary': """Sale only available products on Website""",
    'version': '1.0.0',
    'author': 'IT-Projects LLC, Ivan Yelizariev',
    'license': 'GPL-3',
    'category': 'Custom',
    'website': 'https://yelizariev.github.io',
    'images': ['images/available.png'],
    # Paid module: listing price and its currency.
    'price': 9.00,
    'currency': 'EUR',
    # Requires the website e-commerce module.
    'depends': ['website_sale'],
    # Data files loaded on install/update.
    'data': [
        'website_sale_available_views.xml',
    ],
    'installable': True,
}
""" JobRunningWaitingRatioPolicy Policy that calculates the efficiency following the formula: ( running ) / ( running + waiting + staging ) if the denominator is smaller than 10, it does not take any decision. """ from DIRAC import S_OK from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase __RCSID__ = '$Id: JobRunningWaitingRatioPolicy.py 60769 2013-01-18 11:50:36Z ubeda $' class JobRunningWaitingRatioPolicy( PolicyBase ): """ The JobRunningWaitingRatioPolicy class is a policy that checks the efficiency of the jobs according to what is on JobDB. Evaluates the JobRunningWaitingRatioPolicy results given by the JobCommand.JobCommand """ @staticmethod def _evaluate( commandResult ): """ _evaluate efficiency < 0.5 :: Banned efficiency < 0.9 :: Degraded """ result = { 'Status' : None, 'Reason' : None } if not commandResult[ 'OK' ]
: result[ 'Status' ] = 'Error' result[ 'Reason' ] = commandResult[ 'Message' ] return S_OK( result ) commandResult = commandResult[ 'Value' ] if not commandResult: result[ 'Status' ]
= 'Unknown' result[ 'Reason' ] = 'No values to take a decision' return S_OK( result ) commandResult = commandResult[ 0 ] if not commandResult: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'No values to take a decision' return S_OK( result ) running = float( commandResult[ 'Running' ] ) waiting = float( commandResult[ 'Waiting' ] ) staging = float( commandResult[ 'Staging' ] ) total = running + waiting + staging #we want a minimum amount of jobs to take a decision ( at least 10 pilots ) if total < 10: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'Not enough jobs to take a decision' return S_OK( result ) efficiency = running / total if efficiency < 0.4: result[ 'Status' ] = 'Banned' elif efficiency < 0.65: result[ 'Status' ] = 'Degraded' else: result[ 'Status' ] = 'Active' result[ 'Reason' ] = 'Job Running / Waiting ratio of %.2f' % efficiency return S_OK( result ) #............................................................................... #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
# Build the "complexe" expanse and line it with cliff ("falaise") obstacles.
# NOTE(review): importeur.salle is provided by the surrounding project (French
# MUD-engine naming); the API semantics below are assumed from the call names.

# Create the expanse and anchor its origin cell.
complexe = importeur.salle.creer_etendue("complexe")
complexe.origine = (20, 20)

# Obstacle prototype applied to every coordinate below.
obstacle = importeur.salle.obstacles["falaise"]

# Hand-placed (x, y) cells that receive the obstacle.
coords = [
    (20, 20), (21, 20), (22, 20), (23, 20), (24, 20), (25, 20),
    (20, 21), (20, 22), (20, 23), (20, 24), (20, 25),
    (19, 25), (19, 26), (18, 26), (17, 26), (19, 27), (17, 27),
    (17, 28), (18, 28), (19, 28), (20, 28), (21, 28), (22, 28),
    (23, 28), (24, 28), (24, 27), (24, 26), (23, 26), (23, 25),
    (23, 24), (24, 24), (25, 24), (25, 23), (23, 23), (25, 22),
    (22, 22), (23, 22), (25, 21),
]

for coord in coords:
    complexe.ajouter_obstacle(coord, obstacle)

# Recompute the expanse outline once all obstacles are placed.
complexe.trouver_contour()
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.

from framework import api_select


def create_jobs(api_type):
    """Create the test flow plus one job carrying password-like parameters.

    The parameter names 's1', 'passwd' and 'PASS' suggest this fixture
    exercises masking of password parameters — TODO confirm against the
    test that consumes it.
    """
    api = api_select.api(__file__, api_type)
    api.flow_job()
    api.job('passwd_args', exec_time=0.5, max_fails=0, expect_invocations=1, expect_order=1,
            params=(('s1', 'no-secret', 'desc'), ('passwd', 'p2', 'desc'), ('PASS', 'p3', 'desc')))
    return api


if __name__ == '__main__':
    # Allow running the fixture directly against a real Jenkins.
    create_jobs(api_select.ApiType.JENKINS)
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
	"""Build the shared_block_sm turret Installation template object."""
	result = Installation()

	result.template = "object/installation/faction_perk/turret/shared_block_sm.iff"
	result.attribute_template_id = -1
	result.stfName("turret_n","block_small")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result
# -*- coding: utf-8 -*-

# Converts VK-style wiki markup read from input.txt into Markdown written to
# output.txt (level-2 headings, bold/italic, internal and external links).

try:
    f1 = open("input.txt","r",encoding="utf-8")
except IOError:
    print("Не удалось найти входной файл input.txt")
    # NOTE(review): execution continues after this message, so the later
    # f1.readlines() would raise NameError if this open failed — confirm intent.

try:
    f2 = open("output.txt","w",encoding="utf-8")
except IOError:
    print("Не удалось открыть выходной файл output.txt")

import re  # regular-expression module

# --- regular expression for level-2 headings of the form: == xxx ==
zagolovok_level2 = re.compile("==.*==")  # greedy quantifier .*

# --- regular expressions for internal links of the form [[id**|**]], [[club**|**]], [[public**|**]]
#ssylka_inner_tpl = re.compile("\[\[.*?\|.*?\]\]") # [[ | ]] non-greedy quantifier .*?
ssylka_inner_id = re.compile("\[\[id.*?\|.*?\]\]")          # id
ssylka_inner_club = re.compile("\[\[club.*?\|.*?\]\]")      # club
ssylka_inner_public = re.compile("\[\[public.*?\|.*?\]\]")  # public

# --- regular expression for external links of the form [http**|**]
ssylka_outer = re.compile("\[http.*?\|.*?\]")

# --- regular expression that triggers an extra newline before the line
#     (line ends with ":" plus optional trailing whitespace)
perenos = re.compile(":\s*$")

# --------
for stroka in f1.readlines():  # process the input file line by line
    # ---- Convert headings
    if re.match(zagolovok_level2, stroka):
        stroka = stroka.replace("==","##",1)
        stroka = stroka.replace("==", "")
    # ---- Convert bold and italic markup ----
    stroka = stroka.replace("'''",'**')  # bold   (TODO: convert to regular expressions)
    stroka = stroka.replace("''",'*')    # italic (TODO: convert to regular expressions)
    # ---- Convert internal links (id, club, public) ----
    iskomoe = (re.findall(ssylka_inner_id, stroka)
               + re.findall(ssylka_inner_club, stroka)
               + re.findall(ssylka_inner_public, stroka))  # every id/club/public link
    if iskomoe:
        for ssylka in iskomoe:  # iterate over the links found in this line
            ssylka_id = ssylka.split("|")[0].replace('[[','')    # link target (id part)
            ssylka_name = ssylka.split("|")[1].replace(']]','')  # link display name
            ssylka_new = ('['+ssylka_name+']('+'http://vk.com/'+ssylka_id+')')
            stroka = stroka.replace(ssylka, ssylka_new)  # substitute the Markdown link
    # ---- Convert external links [http**|**] ----
    iskomoe2 = re.findall(ssylka_outer, stroka)
    if iskomoe2:
        for ssylka2 in iskomoe2:
            ssylka2_id = ssylka2.split("|")[0].replace('[http','http')
            ssylka2_name = ssylka2.split("|")[1].replace(']','')
            ssylka2_new = '['+ssylka2_name+']('+ssylka2_id+')'
            stroka = stroka.replace(ssylka2, ssylka2_new)
    # ---- Write the converted line to the output file ----
    if re.search(perenos, stroka):
        f2.write('\n' + stroka)
    else:
        f2.write(stroka)

# --------
f1.close()
f2.close()
yld = 0 num = 1 ship = Ship.objects.get(id=1) harvester = Harvester.objects.get(id=1) cycle_bonus = skill * Decimal(0.05) yld = harvester.yld c = harvester.cycle * (1 - cycle_bonus) y = yld * (1 + ship.yld_bonus) * num #parse Dscan sites = [] proc_sites = [] if show_data == True: #print(scan) scan_re = re.compile(r'Gas Site *(\S* \S* \S*) *') scan_re_b = re.compile(r'(Instrumental Core Reservoir|Ordinary Perimeter Reservoir|Minor Perimeter Reservoir|Bountiful Frontier Reservoir|Barren Perimeter Reservoir|Token Perimeter Reservoir|Sizable Perimeter Reservoir|Vast Frontier Reservoir|Vital Core Reservoir)') scan_results = scan_re.findall(scan) if scan_results == []: scan_results = scan_re_b.findall(scan) print(scan_results) for res in scan_results: sites.append(res) for s in sites: site = Site.objects.get(name=s) site_name = site.name site_isk = (site.p_gas.last_price * site.p_qty) + (site.s_gas.last_price * site.s_qty) #ninja scanning #determine best gas p_isk_min = ((Decimal(y) / Decimal(site.p_gas.volume)) * 2) * (60 / Decimal(c)) * Decimal(site.p_gas.last_price) s_isk_min = ((Decimal(y) / Decimal(site.s_gas.volume)) * 2) * (60 / Decimal(c)) * Decimal(site.s_gas.last_price) if p_isk_min >= s_isk_min: first_cloud = site.p_gas first_qty = site.p_qty sec_cloud = site.s_gas sec_qty = site.s_qty if p_isk_min <= s_isk_min: first_cloud = site.s_gas first_qty = site.s_qty sec_cloud = site.p_gas sec_qty = site.p_qty #calculate how much you can get in 15 minutes units_15 = ((Decimal(y) / Decimal(first_cloud.volume)) * 2) * (60 / Decimal(c)) * 15 if units_15 <= first_qty: ninja_isk = units_15 * first_cloud.last_price if ninja_isk > site_isk: ninja_isk = site_isk m_per_s = (units_15 / num) * first_cloud.volume #if it is more than the qty in the best cloud, calculate the remaining time if units_15 > first_qty: min_left = 15 - (first_qty / (units_15 / 15)) sec_units_min = ((Decimal(y) / Decimal(sec_cloud.volume)) * 2) * (60 / Decimal(c)) rem_units = sec_units_min * min_left 
ninja_isk = (rem_units * sec_cloud.last_price) + (first_qty * first_cloud.last_price) if ninja_isk > site_isk: ninja_isk = site_isk m_per_s = ((units_15 / num) * first_cloud.volume) + ((rem_units / num) * sec_cloud.volume) if m_per_s * num > (site.p_qty * site.p_gas.volume) + (site.s_qty * site.s_gas.volume): m_per_s = ((site.p_qty * site.p_gas.volume) + (site.s_qty * site.s_gas.volume)) / num sipm = ninja_isk / 15 / num nips = ninja_isk / num if site_name == 'Ordinary Perimeter Reservoir': sipm = 0 m_per_s = 0 nips = 0 ninja_isk = 0 ninja_si = (site_name, site_isk, sipm, first_cloud.name, m_per_s, nips, ninja_isk) #print(ninja_si) proc_sites.append(ninja_si) t_site_isk = 0 t_sipm = 0 t_sipm_c = 0 t_m_per_s = 0 t_nips = 0 t_ninja_isk = 0 for s in proc_sites: t_site_isk = t_site_isk + s[1] t_sipm = t_sipm + s[2] if s[0] != "Ordinary Perimeter Reservoir": t_sipm_c = t_sipm_c + 1 t_m_per_s = t_m_per_s + s[4] t_nips = t_nips + s[5] t_ninja_isk = t_ninja_isk + s[6] ships = t_m_per_s / ship.cargo if t_sipm_c == 0: t_sipm_c = 1 if t_site_isk == 0: t_site_isk = 1 percent = (t_ninja_isk / t_site_isk) * 100 totals = (t_site_isk, t_sipm / t_sipm_c, t_m_per_s, t_nips, t_ninja_isk, ships, percent) t_min = t_sipm_c * 15 u = APICheck.objects.get(id=1) #site clearing #take sites #isk present, blue loot isk present, time to fully clear site, rat dps, rat ehp context = {'show_data': show_data, 'form': form, 'sites': sites, 'proc_sites': proc_sites, 'totals': totals, 't_min': t_min, 'updated': str(u.updated)} return render(request, "home/site_an.html", context) def pull_prices(request): tag_re = re.compile(r'<.*>(.*)</.*>') gs = Gas.objects.all() id_str = '' for g in gs: gid = g.item_id id_str = id_str+'&typeid='+gid #r = Region.objects.get(id=1) #r = r.region_id r = '10000002' url = 'http://api.eve-central.com/api/marketstat?'+id_str+'&regionlimit='+r xml_raw = requests.get(url) if xml_raw.status_code == requests.codes.ok: path = 'data/prices.xml' xml = open(path, 'w') 
xml.write(xml_raw.text) xml.close() status = 'OK' else: status = 'Error' xml_file = open(path, 'r') xml = xml_file.read() soup = bs4.BeautifulSoup(xml, 'xml') types = soup.find_all('type') for t in types: t_dict = dict(t.attrs) type_id = t_dict['id'] buy = t.buy avg = buy.find_all('max') avg_in = tag_re.search(str(avg)) avg_in = avg_in.group(1) avg_price = Decimal(avg_in) avg_price = round(avg_price, 2)
g = Gas.objects.get(item_id=type_id) g.last_price = avg_price g.save() gas
es = Gas.objects.all() a, c = APICheck.objects.get_or_create(id=1) a.save() context = {'status': status, 'gases': gases} return render(request, "home/pull_prices.html", context) @staff_member_required def wipe_db(request): s = Site.objects.all() s.delete() g = Gas.objects.all() g.delete() r = Region.objects.all() r.delete() s = Station.objects.all() s.delete() s = Ship.objects.all() s.delete() h = Harvester.objects.all() h.delete() s = Setup.objects.all() s.delete() return HttpResponseRedirect(reverse('home:home')) @staff_member_required def setup_site(request): try: s = Setup.objects.get(id=1) if s==1: return HttpResponseRedirect(reverse('home:home')) except: g = Gas(name='Fullerite-C28',item_id='30375', volume='2') g.save() g = Gas(name='Fullerite-C32',item_id='30376', volume='5') g.save() g = Gas(name='Fullerite-C320',item_id='30377', volume='5') g.save() g = Gas(name='Fullerite-C50',item_id='30370', volume='1') g.save() g = Gas(name='Fullerite-C540',item_id='30378', volume='10') g.save() g = Gas(name='Fullerite-C60',item_id='30371', volume='1') g.save() g = Gas(name='Fullerite-C70',item_id='30372', volume='1') g.save() g = Gas(name='Fullerite-C72',item_id='30373', volume='2') g.save() g = Gas(name='Fullerite-C84',item_id='30374', volume='2') g.save() r = Region(name='The Forge', region_id='10000002') r.save() s = Station(name='Jita IV - Moon 4 - Caldari Navy Assembly Plant ( Caldari Administrative Station )',station_id='60003760') s.save() s = Ship(name='Venture',cargo=5000,yld_bonus=1.00) s.save() s = Ship(name='Prospect',cargo=10000,yld_bonus=1.00) s.save() h = Harvester(name='Gas Cloud Harvester I',harv_id='25266',cycle=30,yld=10) h.save() h = Harvester(name='\'Crop\' Gas Cloud Harvester',harv_id='25540',cycle=30,yld=10) h.save() h = Harvester(name='\'Plow\' Gas Cloud Harvester',harv_id='25542',cycle=30,yld=10) h.save() h = Harvester(name='Gas Cloud Harvester II',harv_id='25812',cycle=40,yld=20) h.save() h = Harvester(name='Syndicate Gas Cloud 
Harvester',harv_id='28788',cycle=30,yld=10) h.save() c50 = Gas.objects.get(name='Fullerite-C50') c60 = Gas.objects.get(name='
from quanthistling.tests import *


class TestBookController(TestController):
    """Functional tests for the 'book' controller."""

    def test_index(self):
        # Smoke test: request the controller's index action through the
        # test WSGI app. No explicit assertions — presumably app.get()
        # fails the test on an error status (TODO confirm against
        # TestController / webtest behaviour).
        response = self.app.get(url(controller='book', action='index'))
        # Test response...
lass implements the mixed case where the bra does not equal the ket. @author: R. Bourquin @copyright: Copyright (C) 2014, 2016 R. Bourquin @license: Modified BSD License """ from functools import partial from numpy import squeeze, sum from WaveBlocksND.Observables import Observables __all__ = ["ObservablesMixedHAWP"] class ObservablesMixedHAWP(Observables): r"""This class implements the mixed case observable computation :math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle` for Hagedorn wavepackets :math:`\Psi` where the bra :math:`\Psi` does not equal the ket :math:`\Psi^{\prime}`. """ def __init__(self, *, innerproduct=None, gradient=None): r"""Initialize a new :py:class:`ObservablesMixedHAWP` instance for observable computation of Hagedorn wavepackets. """ self._innerproduct = None self._gradient = None def set_innerproduct(self, innerproduct): r"""Set the innerproduct. :param innerproduct: An inner product for computing the integrals. The inner product is used for the computation of all brakets :math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle`. :type innerproduct: A :py:class:`InnerProduct` subclass instance. .. note:: Make sure to use an inhomogeneous inner product here. """ self._innerproduct = innerproduct def set_gradient(self, gradient): r"""Set the gradient. :param gradient: A gradient operator. The gradient is only used for the computation of the kinetic energy :math:`\langle \Psi | T | \Psi^{\prime} \rangle`. :type gradient: A :py:class:`Gradient` subclass instance. """ self._gradient = gradient def overlap(self, pacbra, packet, *, component=None, summed=False): r"""Calculate the overlap :math:`\langle \Psi | \Psi^{\prime} \rangle` of the wavepackets :math:`\Psi` and :math:`\Psi^{\prime}`. :param pacbra: The wavepacket :math:`\Psi` which takes part in the overlap integral. :type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance. :param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the overlap integral. 
:type packet: A :py:class:`HagedornWavepacketBase` subclass instance. :param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi` and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` whose overlap is computed. The default value is ``None`` which means to compute the overlaps with all :math:`N` components involved. :type component: Integer or ``None``. :param summed: Whether to sum up the overlaps :math:`\langle \Phi_i | \Phi_i^{\prime} \rangle` of the individual components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`. :type summed: Boolean, default is ``False``. :return: The overlap of :math:`\Psi` with :math:`\Psi^{\prime}` or the overlap of :math:`\Phi_i` with :math:`\Phi_i^{\prime}` or a list with the :math:`N` overlaps of all components. (Depending on the optional arguments.)
""" return self._innerproduct.quadrature(pacbra, packet, diag_component=component, diagonal=True, summed=summed) def norm(self, wavepacket, *, component=None, summed=False): r"""Calculate the :math:`L^2` norm :math:`\langle \Psi | \Psi
\rangle` of the wavepacket :math:`\Psi`. :param wavepacket: The wavepacket :math:`\Psi` of which we compute the norm. :type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance. :param component: The index :math:`i` of the component :math:`\Phi_i` whose norm is computed. The default value is ``None`` which means to compute the norms of all :math:`N` components. :type component: int or ``None``. :param summed: Whether to sum up the norms :math:`\langle \Phi_i | \Phi_i \rangle` of the individual components :math:`\Phi_i`. :type summed: Boolean, default is ``False``. :return: The norm of :math:`\Psi` or the norm of :math:`\Phi_i` or a list with the :math:`N` norms of all components. (Depending on the optional arguments.) .. note:: This method just redirects to a call to :py:meth:`HagedornWavepacketBase.norm`. """ return wavepacket.norm(component=component, summed=summed) def kinetic_overlap_energy(self, pacbra, packet, *, component=None, summed=False): r"""Compute the kinetic energy overlap :math:`\langle \Psi | T | \Psi^{\prime} \rangle` of the different components :math:`\Phi_i` and :math:`\Phi_i^{\prime}` of the wavepackets :math:`\Psi` and :math:`\Psi^{\prime}`. :param pacbra: The wavepacket :math:`\Psi` which takes part in the kinetic energy integral. :type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance. :param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the kinetic energy integral. :type packet: A :py:class:`HagedornWavepacketBase` subclass instance. :param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi` and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` which take part in the kinetic energy integral. If set to ``None`` the computation is performed for all :math:`N` components of :math:`\Psi` and :math:`\Psi^{\prime}`. :type component: Integer or ``None``. 
:param summed: Whether to sum up the kinetic energies :math:`E_i` of the individual components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`. :type summed: Boolean, default is ``False``. :return: A list of the kinetic energy overlap integrals of the individual components or the overall kinetic energy overlap of the wavepackets. (Depending on the optional arguments.) """ Nbra = pacbra.get_number_components() Nket = packet.get_number_components() if not Nbra == Nket: # TODO: Drop this requirement, should be easy when zip(...) exhausts raise ValueError("Number of components in bra (%d) and ket (%d) differs!" % (Nbra, Nket)) if component is None: components = range(Nbra) else: components = [component] ekin = [] for n in components: gradpacbra = self._gradient.apply_gradient(pacbra, component=n) gradpacket = self._gradient.apply_gradient(packet, component=n) Q = [self._innerproduct.quadrature(gpb, gpk, diag_component=n) for gpb, gpk in zip(gradpacbra, gradpacket)] ekin.append(0.5 * sum(Q)) if summed is True: ekin = sum(ekin) elif component is not None: # Do not return a list for specific single components ekin = ekin[0] return ekin def kinetic_energy(self, wavepacket, *, component=None, summed=False): r"""Compute the kinetic energy :math:`E_{\text{kin}} := \langle \Psi | T | \Psi \rangle` of the different components :math:`\Phi_i` of the wavepacket :math:`\Psi`. :param wavepacket: The wavepacket :math:`\Psi` of which we compute the kinetic energy. :type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance. :param component: The index :math:`i` of the component :math:`\Phi_i` whose kinetic energy we compute. If set to ``None`` the computation is performed for all :math:`N` components. :type component: Integer or ``None``. :param summed: Whether to sum up the kinetic energies :math:`E_i` of the individual components :math:`\Phi_i`. :type summed: Boolean, default is ``False``. :return: A list of the kinetic energies of the individual components or the overall k
he terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see # <http://www.gnu.org/licenses/>. # import sys import os import os.path separator = "/*" + ("*" * 50) + "*\n" wmi_classes_by_name = {} class WmiClass: """Represents WMI class and provides methods to generate C code.""" def __init__(self, name, properties, uri_info): self.name = name self.properties = properties self.uri_info = uri_info def generate_classes_header(self): """Generate C header code and return it as string Declares: <class_name>_Data - used as hypervObject->data <class_name>_TypeInfo - used as wsman XmlSerializerInfo <class_name> - "inherits" hypervObject struct """ name_upper = self.name.upper() header = separator header += " * %s\n" % self.name header += " */\n" header += "\n" header += "#define %s_WQL_SELECT \\\n" % name_upper header += " \"SELECT * FROM %s \"\n" % self.name header += "\n" header += "extern hypervWmiClassInfo *%s_WmiInfo;\n\n" % self.name header += self._declare_data_structs() header += self._declare_hypervObject_struct() return header def generate_classes_source(self): """Returns a C code string defining wsman data structs Defines: <class_name>_Data struct <class_name>_WmiInfo - list holding metadata (e.g. 
request URIs) for the WMI class """ source = separator source += " * %s\n" % self.name source += " */\n" source += "SER_START_ITEMS(%s_Data)\n" % self.name for property in self.properties: source += property.generate_classes_source(self.name) source += "SER_END_ITEMS(%s_Data);\n\n" % self.name # also generate typemap data while we're here source += "hypervCimType %s_Typemap[] = {\n" % self.name for
property in self.properties: source += property.generate_typemap() source += ' { "", "", 0 },\n' # null terminated
source += '};\n\n' source += self._define_WmiInfo_struct() source += "\n\n" return source def generate_classes_typedef(self): """Returns C string for typedefs""" typedef = "typedef struct _%s %s;\n" % (self.name, self.name) typedef += "typedef struct _%s_Data %s_Data;\n" % (self.name, self.name) typedef += "G_DEFINE_AUTOPTR_CLEANUP_FUNC(%s, hypervFreeObject);\n" % self.name typedef += "\n" return typedef def _declare_data_structs(self): """Returns string C code declaring data structs. The *_Data structs are used as hypervObject->data. Each one has corresponding *_TypeInfo that is used for wsman unserialization of response XML into the *_Data structs. """ header = "#define %s_RESOURCE_URI \\\n" % self.name.upper() header += " \"%s\"\n" % self.uri_info.resourceUri header += "\n" header += "struct _%s_Data {\n" % self.name for property in self.properties: header += property.generate_classes_header() header += "};\n\n" header += "SER_DECLARE_TYPE(%s_Data);\n" % self.name return header def _declare_hypervObject_struct(self): """Return string for C code declaring hypervObject instance""" header = "\n/* must match hypervObject */\n" header += "struct _%s {\n" % self.name header += " %s_Data *data;\n" % self.name header += " hypervWmiClassInfo *info;\n" header += " %s *next;\n" % self.name header += "};\n" header += "\n\n\n" return header def _define_WmiInfo_struct(self): """Return string for C code defining *_WmiInfo struct This struct holds info with meta-data needed to make wsman requests for the WMI class. 
""" source = "hypervWmiClassInfo *%s_WmiInfo = &(hypervWmiClassInfo) {\n" % self.name source += " .name = \"%s\",\n" % self.name source += " .rootUri = %s,\n" % self.uri_info.rootUri source += " .resourceUri = %s_RESOURCE_URI,\n" % self.name.upper() source += " .serializerInfo = %s_Data_TypeInfo,\n" % self.name source += " .propertyInfo = %s_Typemap\n" % self.name source += "};\n" return source class ClassUriInfo: """Prepares URI information needed for wsman requests.""" def __init__(self, wmi_name): if wmi_name.startswith("Msvm_"): self.rootUri = "ROOT_VIRTUALIZATION_V2" baseUri = "http://schemas.microsoft.com/wbem/wsman/1/wmi/root/virtualization/v2" else: self.rootUri = "ROOT_CIMV2" baseUri = "http://schemas.microsoft.com/wbem/wsman/1/wmi/root/cimv2" self.resourceUri = "%s/%s" % (baseUri, wmi_name) class Property: typemap = { "boolean": "BOOL", "string": "STR", "datetime": "STR", "int8": "INT8", "sint8": "INT8", "int16": "INT16", "sint16": "INT16", "int32": "INT32", "sint32": "INT32", "int64": "INT64", "sint64": "INT64", "uint8": "UINT8", "uint16": "UINT16", "uint32": "UINT32", "uint64": "UINT64" } def __init__(self, type, name, is_array): if type not in Property.typemap: report_error("unhandled property type %s" % type) self.type = type self.name = name self.is_array = is_array def generate_classes_header(self): if self.is_array: return " XML_TYPE_DYN_ARRAY %s;\n" % self.name else: return " XML_TYPE_%s %s;\n" \ % (Property.typemap[self.type], self.name) def generate_classes_source(self, class_name): if self.is_array: return " SER_NS_DYN_ARRAY(%s_RESOURCE_URI, \"%s\", 0, 0, %s),\n" \ % (class_name.upper(), self.name, self.type) else: return " SER_NS_%s(%s_RESOURCE_URI, \"%s\", 1),\n" \ % (Property.typemap[self.type], class_name.upper(), self.name) def generate_typemap(self): return ' { "%s", "%s", %s },\n' % (self.name, self.type.lower(), str(self.is_array).lower()) def open_file(filename): return open(filename, "wt") def report_error(message): print("error: " + 
message) sys.exit(1) def parse_class(block, number): # expected format: class <name> : <optional parent> header_items = block[0][1].split() if len(header_items) not in [2, 4]: report_error("line %d: invalid block header" % (number)) assert header_items[0] == "class" name = header_items[1] if name in wmi_classes_by_name: report_error("class '%s' has already been defined" % name) if len(header_items) == 4: parent_class = header_items[3] if parent_class not in wmi_classes_by_name: report_error("nonexistent parent class specified: %s" % parent_class) properties = wmi_classes_by_name[parent_class].properties.copy() else: properties = [] for line in block[1:]: # expected format: <type> <name> items = line[1].split() if len(items) != 2: report_error("line %d: invalid property" % line[0]) if items[1].endswith("[]"): items[1] = items[1][:-2] is_array = True else: is_array = False properties.append(Property(type=items[0], name=items[1], is_array=is_array)) wmi_classes_by_name[name] = WmiC
#!/usr/bin/python
from PyQt4 import QtCore, QtGui


class Bubble(QtGui.QLabel):
    """A QLabel that paints a rounded-rectangle outline around its text."""

    def __init__(self, text):
        super(Bubble, self).__init__(text)
        # Padding between the rounded outline and the label's text.
        self.setContentsMargins(5, 5, 5, 5)

    def paintEvent(self, e):
        # Draw the anti-aliased rounded border first, then let QLabel's
        # default paintEvent render the text on top.
        p = QtGui.QPainter(self)
        p.setRenderHint(QtGui.QPainter.Antialiasing, True)
        p.drawRoundedRect(0, 0, self.width() - 1, self.height() - 1, 5, 5)
        super(Bubble, self).paintEvent(e)


class MyWidget(QtGui.QWidget):
    """A row holding one Bubble, pushed left or right by an expanding spacer.

    With ``left=True`` the spacer goes after the bubble (bubble hugs the
    left edge); with ``left=False`` the spacer goes before it.
    """

    def __init__(self, text, left=True):
        super(MyWidget, self).__init__()
        hbox = QtGui.QHBoxLayout()
        label = Bubble(text)
        # Spacer before the bubble pushes it to the right edge.
        if left is not True:
            hbox.addSpacerItem(QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred))
        hbox.addWidget(label)
        # Spacer after the bubble pushes it to the left edge.
        if left is True:
            hbox.addSpacerItem(QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred))
        hbox.setContentsMargins(0, 0, 0, 0)
        self.setLayout(hbox)
        self.setContentsMargins(0, 0, 0, 0)


if __name__ == '__main__':
    # Small manual demo: a column of left/right aligned bubbles.
    a = QtGui.QApplication([])
    w = QtGui.QWidget()
    vbox = QtGui.QVBoxLayout()
    vbox.addWidget(MyWidget("Left side.. and also check everything needed to fuck around\n\n\n"))
    vbox.addWidget(MyWidget("Right side", left=False))
    vbox.addWidget(MyWidget("Left side"))
    vbox.addWidget(MyWidget("Left side"))
    w.setLayout(vbox)
    w.show()
    a.exec_()
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RSva(RPackage):
    """Surrogate Variable Analysis."""

    homepage = "https://www.bioconductor.org/packages/sva/"
    # Bioconductor packages are fetched from their git mirror and pinned
    # to a commit rather than a tarball.
    git = "https://git.bioconductor.org/packages/sva.git"

    version('3.24.4', commit='ed2ebb6e33374dc9ec50e6ea97cc1d9aef836c73')

    # sva 3.24.4 is only declared compatible with the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@3.24.4')
    depends_on('r-mgcv', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-biocparallel', type=('build', 'run'))
    depends_on('r-matrixstats', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
import logging
import sys
import traceback

from django.conf import settings
from django.core.cache import cache

try:
    from django.utils.module_loading import import_string
except ImportError:
    # compatibility with django < 1.7
    from django.utils.module_loading import import_by_path
    import_string = import_by_path

from mohawk import Receiver
from mohawk.exc import BadHeaderValue, HawkFail, TokenExpired
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed

from hawkrest.util import get_auth_header, is_hawk_request

log = logging.getLogger(__name__)

# Number of seconds until a Hawk message expires.
default_message_expiration = 60


def default_credentials_lookup(cr_id):
    """Resolve a Hawk credentials id via the settings.HAWK_CREDENTIALS dict.

    Raises LookupError when the id is unknown.
    """
    if cr_id not in settings.HAWK_CREDENTIALS:
        raise LookupError('No Hawk ID of {id}'.format(id=cr_id))
    return settings.HAWK_CREDENTIALS[cr_id]


def default_user_lookup(request, credentials):
    """Return the (user, auth) pair DRF expects; a stand-in user by default."""
    return HawkAuthenticatedUser(), None


class HawkAuthentication(BaseAuthentication):
    """DRF authentication class validating Hawk-signed requests via mohawk."""

    def hawk_credentials_lookup(self, cr_id):
        # Use the project-configured lookup when HAWK_CREDENTIALS_LOOKUP
        # names a dotted path; otherwise fall back to the settings dict.
        lookup = default_credentials_lookup
        lookup_name = getattr(
            settings, 'HAWK_CREDENTIALS_LOOKUP', None)
        if lookup_name:
            log.debug('Using custom credentials lookup from: {}'
                      .format(lookup_name))
            lookup = import_string(lookup_name)
        return lookup(cr_id)

    def hawk_user_lookup(self, request, credentials):
        # Same override pattern as above, via HAWK_USER_LOOKUP.
        lookup = default_user_lookup
        lookup_name = getattr(
            settings, 'HAWK_USER_LOOKUP', None)
        if lookup_name:
            log.debug('Using custom user lookup from: {}'
                      .format(lookup_name))
            lookup = import_string(lookup_name)
        return lookup(request, credentials)

    def authenticate(self, request):
        """Authenticate the request with Hawk, or return None to decline.

        Returns the (user, auth) pair from hawk_user_lookup() on success
        and raises AuthenticationFailed on an invalid Hawk header.
        """
        # In case there is an exception, tell others that the view passed
        # through Hawk authorization. The META dict is used because
        # middleware may not get an identical request object.
        # A dot-separated key is to work around potential environ var
        # pollution of META.
        request.META['hawk.receiver'] = None

        http_authorization = get_auth_header(request)
        if not http_authorization:
            log.debug('no authorization header in request')
            return None
        elif not is_hawk_request(request):
            log.debug('ignoring non-Hawk authorization header: {} '
                      .format(http_authorization))
            return None

        try:
            # mohawk's Receiver performs the actual MAC/nonce/timestamp
            # validation; constructing it raises HawkFail on any problem.
            receiver = Receiver(
                lambda cr_id: self.hawk_credentials_lookup(cr_id),
                http_authorization,
                request.build_absolute_uri(),
                request.method,
                content=request.body,
                # Replay protection via the cache-backed nonce check can be
                # disabled with USE_CACHE_FOR_HAWK_NONCE=False.
                seen_nonce=(seen_nonce
                            if getattr(settings,
                                       'USE_CACHE_FOR_HAWK_NONCE',
                                       True)
                            else None),
                content_type=request.META.get('CONTENT_TYPE', ''),
                timestamp_skew_in_seconds=getattr(settings,
                                                  'HAWK_MESSAGE_EXPIRATION',
                                                  default_message_expiration))
        except HawkFail as e:
            etype, val, tb = sys.exc_info()
            log.debug(traceback.format_exc())
            log.warning('access denied: {etype.__name__}: {val}'
                        .format(etype=etype, val=val))
            # The exception message is sent to the client as part of the
            # 401 response, so we're intentionally vague about the original
            # exception type/value, to avoid assisting attackers.
            msg = 'Hawk authentication failed'
            if isinstance(e, BadHeaderValue):
                msg += ': The request header was malformed'
            elif isinstance(e, TokenExpired):
                msg += ': The token has expired. Is your system clock correct?'
            raise AuthenticationFailed(msg)

        # Pass our receiver object to the middleware so the request header
        # doesn't need to be parsed again.
        request.META['hawk.receiver'] = receiver
        return self.hawk_user_lookup(request, receiver.resource.credentials)

    def authenticate_header(self, request):
        # Value for the WWW-Authenticate header on a 401 response.
        return 'Hawk'

    # Added for Django compatibility, allowing use of this class as a
    # normal Django authentication backend as well (for views outside
    # Django Rest Framework)
    def get_user(self, user_id):
        return HawkAuthenticatedUser()


class HawkAuthenticatedUser(object):
    """
    A real-ish user like AbstractBaseUser but not a real Django model.

    This passes the DRF is_authenticated permission check but it may cause
    other problems.
    If you need to work with a real Django model user you might need to
    subclass HawkAuthentication.
    """
    is_active = True

    def get_full_name(self):
        return str(self.__class__.__name__)

    def get_short_name(self):
        return str(self.__class__.__name__)

    def get_username(self):
        return str(self.__class__.__name__)

    def clean(self):
        # There's nothing to clean, since the name is `self.__class__.__name__`.
        pass

    def save(self, *args, **kwargs):
        # This pseudo-user is never persisted.
        raise NotImplementedError()

    def natural_key(self):
        return str(self.__class__.__name__)

    # NOTE(review): defined as methods, matching older Django's callable
    # is_anonymous/is_authenticated; newer Django treats these as
    # properties -- confirm against the supported Django versions.
    def is_anonymous(self):
        return False

    def is_authenticated(self):
        return True

    def set_password(self, password):
        raise NotImplementedError()

    def check_password(self, password):
        raise NotImplementedError()

    def set_unusable_password(self):
        pass

    def has_usable_password(self):
        return False

    def get_session_auth_hash(self):
        raise NotImplementedError()

    # -----------------------------------------------
    # These methods are in older Django versions only:
    # -----------------------------------------------

    def get_previous_by_last_login(self, *args, **kw):
        raise NotImplementedError()

    def get_next_by_last_login(self, *args, **kw):
        raise NotImplementedError()


def seen_nonce(id, nonce, timestamp):
    """
    Returns True if the Hawk nonce has been seen already.
    """
    key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp)
    if cache.get(key):
        log.warning('replay attack? already processed nonce {k}'
                    .format(k=key))
        return True
    else:
        log.debug('caching nonce {k}'.format(k=key))
        cache.set(key, True,
                  # We only need the nonce until the message itself expires.
                  # This also adds a little bit of padding.
                  timeout=getattr(settings, 'HAWK_MESSAGE_EXPIRATION',
                                  default_message_expiration) + 5)
        return False
# -*- coding: UTF-8 -*- # translation.py # # Copyright (C) 2013 Cleany # # Author(s): Cédric Gaspoz <cga@cleany.ch> # # This file is part of cleany. # # Cleany is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Cleany is
distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License
for more details. # # You should have received a copy of the GNU General Public License # along with Cleany. If not, see <http://www.gnu.org/licenses/>. # Stdlib imports # Core Django imports # Third-party app imports from modeltranslation.translator import translator, TranslationOptions # Cleany imports #from .models import # class AppellationTranslationOptions(TranslationOptions): # fields = ('name', 'description',) # # translator.register(Appellation, AppellationTranslationOptions)
""" Misago-native rehash of Django's createsuperuser command that works with double authentication fields on user model """ import sys from getpass import getpass from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from django.core.management.base import BaseCommand from django.db import DEFAULT_DB_ALIAS, IntegrityError from django.utils.encoding import force_str from django.utils.six.moves import input from ...validators import validate_email, validate_password, validate_username class NotRunningInTTYException(Exception): pass class Command(BaseCommand): help = 'Used to create a superuser.' def add_arguments(self, parser): parser.add_argument('--username', dest='username', default=None, help='Specifies the username for the superuser.') parser.add_argument('--email', dest='email', default=None, help='Specifies the username for the superuser.') parser.add_argument('--password', dest='password', default=None, help='Specifies the username for the superuser.') parser.add_argument('--noinput', action='store_false', dest='interactive', default=True, help=('Tells Misago to NOT prompt the user for input ' 'of any kind. You must use --username with ' '--noinput, along with an option for any other ' 'required field. Superusers created with ' '--noinput will not be able to log in until ' 'they\'re given a valid password.')) parser.add_argument('--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, help=('Specifies the database to use. 
' 'Default is "default".')) def execute(self, *args, **options): self.stdin = options.get('stdin', sys.stdin) # Used for testing return super(Command, self).execute(*args, **options) def handle(self, *args, **options): username = options.get('username') email = options.get('email') password = options.get('password') interactive = options.get('interactive') verbosity = int(options.get('verbosity', 1)) # Validate initial inputs if username is not None: try: username = username.strip() validate_username(username) except ValidationError as e: self.stderr.write(e.messages[0]) username = None if email is not None: try: email = email.strip() validate_email(email) except ValidationError as e
: self.stderr.write(e.messages[0]) email = None if password is not None: try: password = password.strip() validate_password(password) except ValidationError as e: self.stderr.write(e.messages[0]) password = None if not interactive: if username and email and password: # Call User man
ager's create_superuser using our wrapper self.create_superuser(username, email, password, verbosity) else: try: if hasattr(self.stdin, 'isatty') and not self.stdin.isatty(): raise NotRunningInTTYException("Not running in a TTY") # Prompt for username/password, and any other required fields. # Enclose this whole thing in a try/except to trap for a # keyboard interrupt and exit gracefully. while not username: try: message = force_str("Enter displayed username: ") raw_value = input(message).strip() validate_username(raw_value) username = raw_value except ValidationError as e: self.stderr.write(e.messages[0]) while not email: try: raw_value = input("Enter E-mail address: ").strip() validate_email(raw_value) email = raw_value except ValidationError as e: self.stderr.write(e.messages[0]) while not password: try: raw_value = getpass("Enter password: ").strip() validate_password(raw_value) repeat_raw_value = getpass("Repeat password: ").strip() if raw_value != repeat_raw_value: raise ValidationError( "Entered passwords are different.") password = raw_value except ValidationError as e: self.stderr.write(e.messages[0]) # Call User manager's create_superuser using our wrapper self.create_superuser(username, email, password, verbosity) except KeyboardInterrupt: self.stderr.write("\nOperation cancelled.") sys.exit(1) except NotRunningInTTYException: self.stdout.write( "Superuser creation skipped due to not running in a TTY. " "You can run `manage.py createsuperuser` in your project " "to create one manually." ) def create_superuser(self, username, email, password, verbosity): try: User = get_user_model() user = User.objects.create_superuser(username, email, password, set_default_avatar=True) if verbosity >= 1: message = "Superuser #%(pk)s has been created successfully." self.stdout.write(message % {'pk': user.pk}) except ValidationError as e: self.stderr.write(e.messages[0]) except IntegrityError as e: self.stderr.write(e.messages[0])
, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is furnished #to do so, subject to the following conditions: #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A #PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT #HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION #OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE #SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import sys, string import os.path import unique import time import export def filepath(filename): fn = unique.filepath(filename) return fn def read_directory(sub_dir): dir_list = unique.read_directory(sub_dir) #add in code to prevent folder names from being included dir_list2 = [] for file in dir_list: if '.txt' in file: dir_list2.append(file) return dir_list2 ################# Begin Analysis def cleanUpLine(line): line = string.replace(line,'\n','') line = string.replace(line,'\c','') data = string.replace(line,'\r','') data = string.replace(data,'"','') return data def importAnnotations(filename): firstLine = True fn = filepath(filename) rows = 0 for line in open(fn,'rU').xreadlines(): data = cleanUpLine(line); tab_delimited_data = string.split(data,'\t') if rows > 10: sys.exit() print tab_delimited_data#;sys.exit() rows+=1 def correlateMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1): ### Takes a filtered pre-processed beta-value file as input firstLine = True rows=0; filtered=0 for line in open(filename,'rU').xreadlines(): data = cleanUpLine(line); t = string.split(data,'\t') if firstLine: header = t if len(t)>5 and 'Illumina_name' in header: delimiter = -50 annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n') else: delimiter = len(header) headers = t[1:delimiter] firstLine = False 
export_object.write(string.join([t[0]]+headers,'\t')+'\n') else: probeID = t[0] #try: beta_values = map(float,t[1:50]) beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter]) if '' in beta_values: print beta_values;sys.exit() high = sum(betaHighCount(x,betaHigh) for x in beta_values) low = sum(betaLowCount(x,betaLow) for x in beta_values) def importMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1, filter=None): annot_file = filepath('AltDatabase/ucsc/Hs/Illumina_methylation_genes.txt') export_object = open(filename[:-4]+'-filtered.txt','w') print filename[:-4]+'-filtered.txt', counts firstLine = True rows=0; filtered=0 for line in open(filename,'rU').xreadlines(): data = cleanUpLine(line); t = string.split(data,'\t') #export_object.write(string.join(t,'\t')+'\n') #""" if firstLine: header = t if len(t)>5 and 'Illumina_name' in header: delimiter = -50 annot_export_object = open(annot_file,'w') annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n') else: delimiter = len(header) headers = t[1:delimiter] firstLine = False export_object.write(string.join([t[0]]+headers,'\t')+'\n') else: probeID = t[0] #try: beta_values = map(float,t[1:50]) beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter]) if '' in beta_values: print beta_values;sys.exit() high = sum(betaHighCount(x,betaHigh) for x in beta_values) low = sum(betaLowCount(x,betaLow) for x in beta_values) #if rows<50: print high, low, max(beta_values), min(beta_values) #else:sys.exit() #export_object.write(string.join(t[:delimiter])+'\n') if high>=counts and low>=counts: #if (high-low) > 0.2: #if rows<50: print 1 if filter!=None: if probeID in filter: proceed=True; probeID = str(filter[probeID])+':'+probeID else: proceed = False else: proceed = True if proceed: filtered+=1 export_object.write(string.join([probeID]+map(str,beta_values),'\t')+'\n') if 'Illumina_name' in header: annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n') rows+=1 #""" 
export_object.close() if delimiter == '-50': annot_export_object.close() print filtered, rows def conFloat(x,betaValues): try: x = float(x) except Excepti
on: x=None if x== None or x == 0: floats=[] for i in betaValues: if i=='': pass elif float(i)==0: pass else: floats.append(float(i)) try: return min(floats) except Exception: print betaValues;sys.exit
() else: return x def betaHighCount(x,betaHigh): if x>betaHigh: return 1 else: return 0 def betaLowCount(x,betaLow): if x<betaLow: return 1 else: return 0 def getIDsFromFile(filename): filterIDs = {} fn = filepath(filename) for line in open(fn,'rU').xreadlines(): data = cleanUpLine(line); t = string.split(data,'\t') filterIDs[string.lower(t[0])]=[] return filterIDs def getRegionType(filename,featureType=None,chromosome=None,filterIDs=None): if filterIDs !=None: filterIDs = getIDsFromFile(filterIDs) firstLine = True fn = filepath(filename) count=0; filter_db={} for line in open(fn,'rU').xreadlines(): data = cleanUpLine(line); t = string.split(data,',') if firstLine: if len(t[2]) >0: header = t firstLine=False chr_ind = header.index('CHR') pos_ind = header.index('Coordinate_36') tss_ind = header.index('UCSC_RefGene_Group') gene_name = header.index('UCSC_RefGene_Name') else: probeID = t[0] count+=1 try: gene_names = string.split(t[gene_name],';') except Exception: gene_names = [] try: if chromosome != None: if t[chr_ind] == chromosome: if filterIDs !=None: for gene in gene_names: if string.lower(gene) in filterIDs: filter_db[probeID]=t[pos_ind] else: filter_db[probeID]=t[pos_ind] if 'promoter' in string.lower(featureType): if 'TSS' in t[tss_ind]: if filterIDs !=None: for gene in gene_names: if string.lower(gene) in filterIDs: filter_db[probeID]=t[pos_ind] else: filter_db[probeID]=t[pos_ind] if 'mir' in string.lower(featureType) or 'micro' in string.lower(featureType): if 'mir' in string.lower(t[gene_name]) or 'let' in string.lower(t[gene_name]): if filterIDs !=None: for gene in gene_names: if string.lower(gene) in filterIDs:
#!/usr/bin/env python2.7
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Reference
# https://docs.python.org/2/library/unittest.html
# http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases
# public domain license reference: http://eli.thegreenplace.net/pages/code
# Run
# python tika/tests/tests_params.py

import csv
import unittest

import tika.parser


class CreateTest(unittest.TestCase):
    """Parameterized test-case base: carries one parameter per instance."""

    def __init__(self, methodName='runTest', param1=None, param2=None):
        super(CreateTest, self).__init__(methodName)
        self.param1 = param1

    @staticmethod
    def parameterize(test_case, param1=None, param2=None):
        """Build a suite running every test method of *test_case* with the given params."""
        loader = unittest.TestLoader()
        names = loader.getTestCaseNames(test_case)
        suite = unittest.TestSuite()
        for name in names:
            suite.addTest(test_case(name, param1=param1, param2=param2))
        return suite


class RemoteTest(CreateTest):
    """Parses the document at param1 and checks the parse result is non-empty."""

    def setUp(self):
        # Replace the path/URL parameter with the parsed result dict.
        self.param1 = tika.parser.from_file(self.param1)

    def test_true(self):
        self.assertTrue(self.param1)

    def test_meta(self):
        self.assertTrue(self.param1['metadata'])

    def test_content(self):
        self.assertTrue(self.param1['content'])


def test_url():
    """Yield the second CSV column (the URL) of each row of the fixture file."""
    with open('tika/tests/arguments/test_remote_content.csv', 'r') as csvfile:
        for row in csv.reader(csvfile):
            yield row[1]


if __name__ == '__main__':
    all_tests = unittest.TestSuite()
    urls = list(test_url())
    urls.pop(0)  # remove header
    for url in urls:
        try:
            all_tests.addTest(CreateTest.parameterize(RemoteTest, param1=url))
        except IOError as err:
            print(err.strerror)
    unittest.TextTestRunner(verbosity=2).run(all_tests)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Tests for the in-place `exponential` op / Tensor.exponential_ API:
# distribution-level checks against numpy, static/dygraph smoke tests,
# and byte-exact reproducibility checks of the curand-based GPU sampler.

import unittest
import paddle
import numpy as np
from op_test import OpTest
import os

paddle.enable_static()
paddle.seed(100)


class TestExponentialOp1(OpTest):
    """Distribution-level check of the `exponential` op (float64, lambda=0.5)."""

    def setUp(self):
        self.op_type = "exponential"
        self.config()

        self.attrs = {"lambda": self.lam}
        # Input contents are irrelevant: the op overwrites X with samples,
        # so np.empty is enough here.
        self.inputs = {'X': np.empty([1024, 1024], dtype=self.dtype)}
        self.outputs = {'Out': np.ones([1024, 1024], dtype=self.dtype)}

    def config(self):
        # Overridden by subclasses to vary the rate parameter and dtype.
        self.lam = 0.5
        self.dtype = "float64"

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        # Compare the normalized histogram of the op's samples against
        # numpy's exponential sampler with the same rate (scale = 1/lambda).
        hist1, _ = np.histogram(outs[0], range=(0, 5))
        hist1 = hist1.astype("float32")
        hist1 = hist1 / float(outs[0].size)

        data_np = np.random.exponential(1. / self.lam, [1024, 1024])
        hist2, _ = np.histogram(data_np, range=(0, 5))
        hist2 = hist2.astype("float32")
        hist2 = hist2 / float(data_np.size)

        self.assertTrue(
            np.allclose(
                hist1, hist2, rtol=0.02),
            "actual: {}, expected: {}".format(hist1, hist2))

    def test_check_grad_normal(self):
        # The gradient of a sampling op w.r.t. its input is defined as zero.
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=[np.zeros(
                [1024, 1024], dtype=self.dtype)],
            user_defined_grad_outputs=[
                np.random.rand(1024, 1024).astype(self.dtype)
            ])


class TestExponentialOp2(TestExponentialOp1):
    """Same distribution check with float32 samples and lambda=0.25."""

    def config(self):
        self.lam = 0.25
        self.dtype = "float32"


class TestExponentialAPI(unittest.TestCase):
    """API-level tests for Tensor.exponential_ in static and dygraph modes."""

    def test_static(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            x_np = np.full([10, 10], -1.)
            x = paddle.static.data(name="X", shape=[10, 10], dtype='float64')
            x.exponential_(1.0)

            exe = paddle.static.Executor()
            out = exe.run(paddle.static.default_main_program(),
                          feed={"X": x_np},
                          fetch_list=[x])
            # Exponential samples are non-negative, so the -1 fill must be
            # fully overwritten.
            self.assertTrue(np.min(out) >= 0)

    def test_dygraph(self):
        paddle.disable_static()
        x = paddle.full([10, 10], -1., dtype='float32')
        x.exponential_(0.5)
        self.assertTrue(np.min(x.numpy()) >= 0)
        paddle.enable_static()

    def test_fixed_random_number(self):
        """Pin exact sample values for the seeded curand GPU generator.

        The expectation arrays below are golden values recorded on a V100;
        they must not be edited, or the reproducibility check is void.
        """
        # Test GPU Fixed random number, which is generated by
        # 'curandStatePhilox4_32_10_t'.
        if not paddle.is_compiled_with_cuda():
            return

        # Different GPU generatte different random value. Only test V100 here.
        if not "V100" in paddle.device.cuda.get_device_name():
            return

        # Only meaningful when the curand-based path is enabled via env flag.
        if os.getenv("FLAGS_use_curand", None) in ('0', 'False', None):
            return

        print("Test Fixed Random number on V100 GPU------>")
        paddle.disable_static()
        paddle.set_device('gpu')
        paddle.seed(2021)

        # float32, lambda=1.0, large 4-D tensor: spot-check several slices.
        x = paddle.empty([64, 3, 1024, 1024], dtype="float32")
        x.exponential_(1.0)
        x_np = x.numpy()
        expect = [
            0.80073667, 0.2249291, 0.07734892, 1.25392, 0.14013891, 0.45736602,
            1.9735607, 0.30490234, 0.57100505, 0.8115938
        ]
        self.assertTrue(np.allclose(x_np[0, 0, 0, 0:10], expect))
        expect = [
            1.4296371e+00, 9.5411777e-01, 5.2575850e-01, 2.4805880e-01,
            1.2322118e-04, 8.4604341e-01, 2.1111444e-01, 1.4143821e+00,
            2.8194717e-01, 1.1360573e+00
        ]
        self.assertTrue(np.allclose(x_np[16, 1, 300, 200:210], expect))
        expect = [
            1.3448033, 0.35146526, 1.7380928, 0.32012638, 0.10396296,
            0.51344526, 0.15308502, 0.18712929, 0.03888268, 0.20771872
        ]
        self.assertTrue(np.allclose(x_np[32, 1, 600, 500:510], expect))
        expect = [
            0.5107464, 0.20970327, 2.1986802, 1.580056, 0.31036147, 0.43966478,
            0.9056133, 0.30119267, 1.4797124, 1.4319834
        ]
        self.assertTrue(np.allclose(x_np[48, 2, 900, 800:810], expect))
        expect = [
            3.4640615, 1.1019983, 0.41195083, 0.22681557, 0.291846, 0.53617656,
            1.5791925, 2.4645927, 0.04094889, 0.9057725
        ]
        self.assertTrue(np.allclose(x_np[63, 2, 1023, 1000:1010], expect))

        # float32, lambda=3.0, small tensor.
        x = paddle.empty([10, 10], dtype="float32")
        x.exponential_(3.0)
        x_np = x.numpy()
        expect = [
            0.02831675, 0.1691551, 0.6798956, 0.69347525, 0.0243443,
            0.22180498, 0.30574575, 0.9839696, 0.2834912, 0.59420055
        ]
        self.assertTrue(np.allclose(x_np[5, 0:10], expect))

        # float64, lambda=0.25, large 4-D tensor.
        x = paddle.empty([16, 2, 1024, 768], dtype="float64")
        x.exponential_(0.25)
        x_np = x.numpy()
        expect = [
            10.0541229, 12.67860643, 1.09850734, 7.35289643, 2.65471225,
            3.86217432, 2.97902086, 2.92744479, 2.67927152, 0.19667352
        ]
        self.assertTrue(np.allclose(x_np[0, 0, 0, 100:110], expect))
        expect = [
            0.68328125, 3.1454553, 0.92158376, 1.95842188, 1.05296941,
            12.93242051, 5.20255978, 3.3588624, 1.57377174, 5.73194183
        ]
        self.assertTrue(np.allclose(x_np[4, 0, 300, 190:200], expect))
        expect = [
            1.37973974, 3.45036798, 7.94625406, 1.62610973, 0.31032122,
            4.13596493, 1.98494535, 1.13207041, 8.30592769, 2.81460147
        ]
        self.assertTrue(np.allclose(x_np[8, 1, 600, 300:310], expect))
        expect = [
            2.27710811, 12.25003028, 2.96409124, 4.72405788, 0.67917249,
            4.35856718, 0.46870976, 2.31120149, 9.61595826, 4.64446271
        ]
        self.assertTrue(np.allclose(x_np[12, 1, 900, 500:510], expect))
        expect = [
            0.95883744, 1.57316361, 15.22524512, 20.49559882, 13.70008548,
            3.29430143, 3.90390424, 0.9146657, 0.80972249, 0.33376219
        ]
        self.assertTrue(np.allclose(x_np[15, 1, 1023, 750:760], expect))

        # float64, lambda=0.3, 2-D tensor.
        x = paddle.empty([512, 768], dtype="float64")
        x.exponential_(0.3)
        x_np = x.numpy()
        expect = [
            8.79266704, 4.79596009, 2.75480243, 6.04670011, 0.35379556,
            0.76864868, 3.17428251, 0.26556859, 12.22485885, 10.51690383
        ]
        self.assertTrue(np.allclose(x_np[0, 200:210], expect))
        expect = [
            5.6341126, 0.52243418, 5.36410796, 6.83672002, 11.9243311,
            5.85985566, 5.75169548, 0.13877972, 6.1348385, 3.82436519
        ]
        self.assertTrue(np.allclose(x_np[300, 400:410], expect))
        expect = [
            4.94883581, 0.56345306, 0.85841585, 1.92287801, 6.10036656,
            1.19524847, 3.64735434, 5.19618716, 2.57467974, 3.49152791
        ]
        self.assertTrue(np.allclose(x_np[500, 700:710], expect))

        # float64, lambda=4.0, small tensor.
        x = paddle.empty([10, 10], dtype="float64")
        x.exponential_(4.0)
        x_np = x.numpy()
        expect = [
            0.15713826, 0.56395964, 0.0680941, 0.00316643, 0.27046853,
            0.19852724, 0.12776634, 0.09642974, 0.51977551, 1.33739699
        ]
        self.assertTrue(np.allclose(x_np[5, 0:10], expect))

        paddle.enable_static()


if __name__ == "__main__":
    unittest.main()
from OpenGL import GL
import numpy as np
import math


def drawLine(start, end, color, width=1):
    """Draw a straight line segment from *start* to *end*.

    start, end: 3-component sequences (x, y, z).
    color:      3-component RGB sequence.
    width:      line width in pixels.
    """
    GL.glLineWidth(width)
    GL.glColor3f(*color)
    GL.glBegin(GL.GL_LINES)
    GL.glVertex3f(*start)
    GL.glVertex3f(*end)
    GL.glEnd()


def drawCircle(center, radius, color, rotation=np.array([0, 0, 0]),
               axis=np.array([1, 1, 1]), width=1, sections=16):
    """Draw a filled circle (GL_POLYGON) of *radius* centred at *center*.

    rotation: either a plain int (quarter-turns applied about the full
              *axis* vector) or a 3-component sequence of quarter-turns
              applied per axis.
    axis:     axis selector components used by the rotations above.
    sections: number of vertices approximating the circle.

    NOTE: the default numpy arrays are shared between calls, but they are
    only read here, so the mutable-default pitfall does not bite.
    """
    GL.glLineWidth(width)
    GL.glColor3f(*color)
    GL.glPushMatrix()
    GL.glTranslatef(*center)
    if not isinstance(rotation, int):
        # Sequence form: one quarter-turn rotation per coordinate axis.
        GL.glRotatef(rotation[0] * 90, axis[0], 0, 0)
        GL.glRotatef(rotation[1] * 90, 0, axis[1], 0)
        GL.glRotatef(rotation[2] * 90, 0, 0, axis[2])
    else:
        # Scalar form: a single rotation about the given axis vector.
        GL.glRotatef(rotation * 90, axis[0], axis[1], axis[2])
    GL.glBegin(GL.GL_POLYGON)
    steps = [i * ((math.pi * 2) / sections) for i in range(sections)]
    for angle in steps:
        # BUG FIX: the original called glVertex3f with FOUR arguments
        # ("..., 0,0"); glVertex3f takes exactly three coordinates, so the
        # extra argument raised a TypeError as soon as a circle was drawn.
        GL.glVertex3f(math.cos(angle) * radius, math.sin(angle) * radius, 0)
    GL.glEnd()
    GL.glPopMatrix()


def makeDrawFunction(drawFunction, *args):
    """Bind *args* to *drawFunction* and return a zero-argument callable."""
    def closure():
        drawFunction(*args)
    return closure
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Command-line tool: extract the 'image_embedding' CNN feature for one
# (cropped) input image with a trained Caffe model and print the embedding
# vector to stdout as a Python list.

import os
import sys
import numpy as np
import argparse
from google.protobuf import text_format

# Force a non-interactive matplotlib backend so the script runs headless.
# https://github.com/BVLC/caffe/issues/861#issuecomment-70124809
import matplotlib
matplotlib.use('Agg')

# Make the project root importable so the shared modules below resolve.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
from utilities_caffe import *

parser = argparse.ArgumentParser(description="Extract image embedding features for IMAGE input.")
parser.add_argument('--image', help='Path to input image (cropped)', required=True)
parser.add_argument('--iter_num', '-n', help='Use caffemodel trained after iter_num iterations', type=int, default=20000)
parser.add_argument('--caffemodel', '-c', help='Path to caffemodel (will ignore -n option if provided)', required=False)
parser.add_argument('--prototxt', '-p', help='Path to prototxt (if not at the default place)', required=False)
# NOTE(review): --gpu_index is parsed but not referenced anywhere in this
# script; presumably consumed inside extract_cnn_features via globals —
# confirm, otherwise the flag is dead.
parser.add_argument('--gpu_index', help='GPU index (default=0).', type=int, default=0)
args = parser.parse_args()

# Default snapshot path is derived from the training folder and --iter_num;
# explicit --caffemodel / --prototxt arguments override these defaults.
image_embedding_caffemodel = os.path.join(g_image_embedding_testing_folder, 'snapshots%s_iter_%d.caffemodel'%(g_shapenet_synset_set_handle, args.iter_num))
image_embedding_prototxt = g_image_embedding_testing_prototxt
if args.caffemodel:
    image_embedding_caffemodel = args.caffemodel
if args.prototxt:
    image_embedding_prototxt = args.prototxt

print 'Image embedding for %s is:'%(args.image)
# extract_cnn_features is indexed with [0]: presumably it returns one
# feature vector per input file and a single image is passed — confirm
# against utilities_caffe.
image_embedding_array = extract_cnn_features(img_filelist=args.image,
                                             img_root='/',
                                             prototxt=image_embedding_prototxt,
                                             caffemodel=image_embedding_caffemodel,
                                             feat_name='image_embedding',
                                             caffe_path=g_caffe_install_path,
                                             mean_file=g_mean_file)[0]
print image_embedding_array.tolist()
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2018-10-26 01:35
#
# Initial migration for the referral app. Creates FollowupRequest,
# PatientContact and Referral, then wires the FKs to Referral via AddField
# (Referral is created after the models that reference it, so those FKs
# must be added in a second step).
#
# Auto-generated: do not hand-edit field definitions after it has been
# applied anywhere.
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('pttrack', '0006_referral_additional_fields_20180826'),
        ('followup', '0002_simplehistory_add_change_reason'),
    ]

    operations = [
        # A request to follow up with a patient about a referral.
        migrations.CreateModel(
            name='FollowupRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('written_datetime', models.DateTimeField(auto_now_add=True)),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('completion_date', models.DateTimeField(blank=True, null=True)),
                ('due_date', models.DateField(help_text=b'MM/DD/YYYY or YYYY-MM-DD')),
                ('contact_instructions', models.TextField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Provider')),
                ('author_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.ProviderType')),
                ('completion_author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='referral_followuprequest_completed', to='pttrack.Provider')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Patient')),
            ],
            options={
                'abstract': False,
            },
        ),
        # The outcome of one attempt to contact a patient about a referral.
        migrations.CreateModel(
            name='PatientContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('written_datetime', models.DateTimeField(auto_now_add=True)),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('has_appointment', models.CharField(blank=True, choices=[(b'Y', b'Yes'), (b'N', b'No')], help_text=b'Did the patient make an appointment?', max_length=1, verbose_name=b'Appointment scheduled?')),
                ('pt_showed', models.CharField(blank=True, choices=[(b'Y', b'Yes'), (b'N', b'No')], help_text=b'Did the patient show up to the appointment?', max_length=1, null=True, verbose_name=b'Appointment attended?')),
                ('appointment_location', models.ManyToManyField(blank=True, help_text=b'Where did the patient make an appointment?', to='pttrack.ReferralLocation')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Provider')),
                ('author_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.ProviderType')),
                ('contact_method', models.ForeignKey(help_text=b'What was the method of contact?', on_delete=django.db.models.deletion.CASCADE, to='pttrack.ContactMethod')),
                ('contact_status', models.ForeignKey(help_text=b'Did you make contact with the patient about this referral?', on_delete=django.db.models.deletion.CASCADE, to='followup.ContactResult')),
                ('followup_request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='referral.FollowupRequest')),
                ('no_apt_reason', models.ForeignKey(blank=True, help_text=b"If the patient didn't make an appointment, why not?", null=True, on_delete=django.db.models.deletion.CASCADE, to='followup.NoAptReason', verbose_name=b'No appointment reason')),
                ('no_show_reason', models.ForeignKey(blank=True, help_text=b"If the patient didn't go to the appointment, why not?", null=True, on_delete=django.db.models.deletion.CASCADE, to='followup.NoShowReason')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Patient')),
            ],
            options={
                'abstract': False,
            },
        ),
        # The referral itself: where a patient is sent and for what care.
        migrations.CreateModel(
            name='Referral',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('written_datetime', models.DateTimeField(auto_now_add=True)),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('comments', models.TextField(blank=True)),
                ('status', models.CharField(choices=[(b'S', b'Successful'), (b'P', b'Pending'), (b'U', b'Unsuccessful')], default=b'P', max_length=50)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Provider')),
                ('author_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.ProviderType')),
                ('kind', models.ForeignKey(help_text=b'The kind of care the patient should recieve at the referral location.', on_delete=django.db.models.deletion.CASCADE, to='pttrack.ReferralType')),
                ('location', models.ManyToManyField(to='pttrack.ReferralLocation')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Patient')),
            ],
            options={
                'abstract': False,
            },
        ),
        # FKs to Referral added after Referral exists (see module comment).
        migrations.AddField(
            model_name='patientcontact',
            name='referral',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='referral.Referral'),
        ),
        migrations.AddField(
            model_name='followuprequest',
            name='referral',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='referral.Referral'),
        ),
    ]
# Production settings overlay: inherits everything from base and overrides
# database (DATABASE_URL), logging, proxy/SSL handling, and Mailgun SMTP
# email configuration from environment variables.
import os
from .base import *  # NOQA
import dj_database_url

DEBUG = False
TEMPLATE_DEBUG = DEBUG

ADMINS = (
)

# Entire DB connection comes from the DATABASE_URL environment variable.
DATABASES = {'default': dj_database_url.config()}

LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'filters': {
        # Gate the email handler so admins are only mailed when DEBUG=False.
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # NOTE(review): 'django.utils.log.NullHandler' was removed in newer
        # Django (use 'logging.NullHandler' from 1.9 on) — confirm the Django
        # version this deploys against.
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.db.backends': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
        # Catch All Logger -- Captures any other logging
        '': {
            'handlers': ['console', 'mail_admins'],
            'level': 'DEBUG',
            'propagate': True,
        }
    }
}

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
ALLOWED_HOSTS = ['*']

########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host
EMAIL_HOST = os.environ.get('MAILGUN_SMTP_SERVER', None)

# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = os.environ.get('MAILGUN_SMTP_PASSWORD', None)

# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host-user
EMAIL_HOST_USER = os.environ.get('MAILGUN_SMTP_LOGIN', None)

# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-port
EMAIL_PORT = os.environ.get('MAILGUN_SMTP_PORT', None)

# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[Scorinator] '

# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-use-tls
EMAIL_USE_TLS = True

# See: https://docs.djangoproject.com/en/1.3/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
89dbbc) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25bf348b538) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1b605d019) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4af194f9b) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5da6d8118) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98a3030242) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b0145706fbe) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be4ee4b28c) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3d5ffb4e2) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74f27b896f) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe3b1696b1) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a725c71235) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174cf692694) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c19ef14ad2) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786384f25e3) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc68b8cd5b5) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc77ac9c65) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f592b0275) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa6ea6e483) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dcbd41fbd4) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da831153b5) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152ee66dfab) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d2db43210) ss[1], ss[5] = 
RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c898fb213f) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7beef0ee4) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf33da88fc2) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147930aa725) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351e003826f) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x142929670a0e6e70) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a8546d22ffc) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b21385c26c926) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc5ac42aed) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d139d95b3df) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a73548baf63de) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb3c77b2a8) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e47edaee6) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c851482353b) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a14cf10364) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664bbc423001) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70d0f89791) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a30654be30) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819d6ef5218) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd69906245565a910) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e35855771202a) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa07032bbd1b8) ss[3], ss[7] = 
RND(ss[0],ss[1],ss[2],ss[3],ss[
4],ss[5],ss[6],ss[7],48,0x19a4c116b8d2d0c8) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c085141ab53) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774cdf8eeb99) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5e19b48a8) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3c5c95a63) ss[6], ss[2] = RND(ss[3]
,ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4ae3418acb) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f7763e373) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3d6b2b8a3) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee5defb2fc) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f43172f60) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814a1f0ab72) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc702081a6439ec) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa23631e28) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506cebde82bde9) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7b2c67915) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2e372532b) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],64,0xca273eceea26619c) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],65,0xd186b8c721c0c207) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],66,0xeada7dd6cde0eb1e) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],67,0xf57d4f7fee6ed178) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],68,0x06f067aa72176fba) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],69,0x0a637dc5a2c898a6) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],70,0x113f9804bef90dae) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],71,0x1b710b35131c471b) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],72,0x28db77f523047d84) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],73,0x32caab7b40c72493) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],74,0x3c9ebe0a15c9bebc) ss[0], ss[4] = 
RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],75,0x431d67c49c100d4c) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],76,0x4cc5d4becb3e42b6) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],77,0x597f299cfc657e2a) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],78,0x5fcb6fab3ad6faec) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],79,0x6c44198c4a475817) dig = [] for i, x in enumerate(sha_info['digest']): dig.append( (x + ss[i]) & 0xffffffffffffffff ) sha_info['digest'] = dig def sha_init(): sha_info = new_shaobject() sha_info['digest'] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179] sha_info['count_lo'] = 0 sha_info['count_hi'] = 0 sha_info['local'] = 0 sha_info['digestsize'] = 64 return sha_info def sha384_init(): sha_info = new_shaobject() sha_info['digest'] = [ 0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939, 0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4] sha_info['count_lo'] = 0 sha_info['count_hi'] = 0 sha_info['local'] = 0 sha_info['digestsize'] = 48 return sha_info def getbuf(s): if isinstance(s, str): return s elif isinstance(s, unicode): return str(s) else: return buff
d in delimiters) if allow_no_value: self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d), re.VERBOSE) else: self._optcre = re.compile(self._OPT_TMPL.format(delim=d), re.VERBOSE) self._comment_prefixes = tuple(comment_prefixes or ()) self._inline_comment_prefixes = tuple(inline_comment_prefixes or ()) self._strict = strict self._allow_no_value = allow_no_value self._empty_lines_in_values = empty_lines_in_values self.default_section=default_section self._interpolation = interpolation if self._interpolation is _UNSET: self._interpolation = self._DEFAULT_INTERPOLATION if self._interpolation is None: self._interpolation = Interpolation() def defaults(self): return self._defaults def sections(self): """Return a list of section names, excluding [DEFAULT]""" # self._sections will never have [DEFAULT] in it return list(self._sections.keys()) def add_section(self, section): """Create a new section in the configuration. Raise DuplicateSectionError if a section by the specified name already exists. Raise ValueError if name is DEFAULT. """ if section == self.default_section: raise ValueError('Invalid section name: %r' % section) if section in self._sections: raise DuplicateSectionError(section) self._sections[section] = self._dict() self._proxies[section] = SectionProxy(self, section) def has_section(self, section): """Indicate whether the named section is present in the configuration. The DEFAULT section is not acknowledged. """ return section in self._sections def options(self, section): """Return a list of option names for the given section name.""" try: opts = self._sections[section].copy() except KeyError: raise NoSectionError(section) opts.update(self._defaults) return list(opts.keys()) def read(self, filenames, encoding=None): """Read and parse a filename or a list of filenames. Files that cannot be opened are silently ignored; this is designed so that you can specify a list of potential configuration file locations (e.g. 
current directory, user's home directory, systemwide directory), and all existing configuration files in the list will be read. A single filename may also be given. Return list of successfully read files. """ if isinstance(filenames, str): filenames = [filenames] read_ok = [] for filename in filenames: try: with open(filename, encoding=encoding) as fp: self._read(fp, filename) except OSError: continue read_ok.append(filename) return read_ok def read_file(self, f, source=None): """Like read() but the argument must be a file-like object. The `f' argument must be iterable, returning one line at a time. Optional second argument is the `source' specifying the name of the file being read. If not given, it is taken from f.name. If `f' has no `name' attribute, `<???>' is used. """ if source is None: try: source = f.name except AttributeError: source = '<???>' self._read(f, source) def read_string(self, string, source='<string>'): """Read configuration from a given string.""" sfile = io.StringIO(string) self.read_file(sfile, source) def read_dict(self, dictionary, source='<dict>'): """Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. All types held in the dictionary are converted to strings during reading, including section names, option names and keys. Optional second argument is the `source' specifying the name of the dictionary being read. 
""" elements_added = set() for section, keys in dictionary.items(): section = str(section) try: self.add_section(section) except (DuplicateSectionError, ValueError): if self._strict and section in elements_added: raise elements_added.add(section) for key, value in keys.items(): key = self.optionxform(str(key)) if value is not None: value = str(value) if self._strict and (section, key) in elements_added: raise DuplicateOptionError(section, key, source) elements_added.add((section, key)) self.set(section, key, value) def readfp(self, fp, filename=None): """Deprecated, use read_file instead.""" warnings.warn( "This method will be removed in future versions. " "Use 'parser.read_file()' instead.", DeprecationWarning, stacklevel=2 ) self.read_file(fp, source=filename) def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET): """Get an option value for a given section. If `vars' is provided, it must be a dictionary. The option is looked up in `vars' (if provided), `section', and in `DEFAULTSECT' in that order. If the key is not found and `fallback' is provided, it is used as a fallback value. `None' can be provided as a `fallback' value. If interpolation is enabled and the optional argument `raw' is False, all interpolations are expanded in the return values. Arguments `raw', `vars', and `fallback' are keyword only. The section DEFAULT is special. 
""" try: d = self._unify_values(section, vars) except NoSectionError: if fallback is _UNSET: raise else: return fallback option = self.optionxform(option) try: value = d[option] except KeyError: if fallback is _UNSET: raise NoOptionError(option, section) else: return fallback if raw or value is None: return value else: return self._interpolation.before_get(self, section, option, value, d) def _get(self, section, conv, option, **kwargs): return conv(self.get(section, option, **kwargs)) def getint(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, int, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else:
return fallback def getfloat(self, section, option, *, raw=False, vars=None, fallback=_U
NSET): try: return self._get(section, float, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def getboolean(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, self._convert_to_boolean, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def items(self, section=_UNSET, raw=False, vars=None): """Return a list of (name, value) tuples for each o
"""
pystrix.ami.dahdi
=================

Provides classes meant to be fed to a `Manager` instance's `send_action()` function.

Specifically, this module provides implementations for features specific to the DAHDI technology.

Legal
-----

This file is part of pystrix.
pystrix is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU General Public License and
GNU Lesser General Public License along with this program. If not, see
<http://www.gnu.org/licenses/>.

(C) Ivrnet, inc., 2011

Authors:

- Neil Tallim <n.tallim@ivrnet.com>

The requests implemented by this module follow the definitions provided by
https://wiki.asterisk.org/
"""
from ami import (_Request, ManagerError)
import dahdi_events
import generic_transforms


class DAHDIDNDoff(_Request):
    """
    Sets a DAHDI channel's DND status to off.
    """
    def __init__(self, dahdi_channel):
        """
        `dahdi_channel` is the channel to modify.
        """
        _Request.__init__(self, 'DAHDIDNDoff')
        self['DAHDIChannel'] = dahdi_channel


class DAHDIDNDon(_Request):
    """
    Sets a DAHDI channel's DND status to on.
    """
    def __init__(self, dahdi_channel):
        """
        `dahdi_channel` is the channel to modify.
        """
        _Request.__init__(self, 'DAHDIDNDon')
        self['DAHDIChannel'] = dahdi_channel


class DAHDIDialOffhook(_Request):
    """
    Dials a number on an off-hook DAHDI channel.
    """
    def __init__(self, dahdi_channel, number):
        """
        `dahdi_channel` is the channel to use and `number` is the number to dial.
        """
        _Request.__init__(self, 'DAHDIDialOffhook')
        self['DAHDIChannel'] = dahdi_channel
        self['Number'] = number


class DAHDIHangup(_Request):
    """
    Hangs up a DAHDI channel.
    """
    def __init__(self, dahdi_channel):
        """
        `dahdi_channel` is the channel to hang up.
        """
        _Request.__init__(self, 'DAHDIHangup')
        self['DAHDIChannel'] = dahdi_channel


class DAHDIRestart(_Request):
    """
    Fully restarts all DAHDI channels.
    """
    def __init__(self):
        _Request.__init__(self, 'DAHDIRestart')


class DAHDIShowChannels(_Request):
    """
    Provides the current status of all (or one) DAHDI channels through a series of
    'DAHDIShowChannels' events, ending with a 'DAHDIShowChannelsComplete' event.
    """
    _aggregates = (dahdi_events.DAHDIShowChannels_Aggregate,)
    _synchronous_events_list = (dahdi_events.DAHDIShowChannels,)
    _synchronous_events_finalising = (dahdi_events.DAHDIShowChannelsComplete,)

    def __init__(self, dahdi_channel=None):
        """
        `dahdi_channel`, if given, restricts the status report to that one channel.
        """
        _Request.__init__(self, 'DAHDIShowChannels')
        # PEP 8 (E714): use `is not None` rather than `not ... is None`.
        if dahdi_channel is not None:
            self['DAHDIChannel'] = dahdi_channel
import RPi.GPIO as GPIO
import time
from array import *  # NOTE(review): nothing from `array` is used below; kept to avoid changing the import surface

# Pin configuration for the bipolar stepper (BOARD numbering).
out1 = 11
out2 = 13
out3 = 15
out4 = 16

# Delay between steps, in seconds.
timeValue = 0.005

# All stepper pins, in coil order.
outs = [out1, out2, out3, out4]

# Full-step excitation sequence: each row holds the HIGH(1)/LOW(0) state
# of the four coils for one step of the cycle.
matriz = [
    [1, 0, 0, 1],
    [1, 1, 0, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 1],
]

# Pin setup.
GPIO.setmode(GPIO.BOARD)
for o in outs:
    GPIO.setup(o, GPIO.OUT)


def wakeupMotor():
    """Energize all coils (holds the rotor in place)."""
    for o in outs:
        GPIO.output(o, GPIO.HIGH)


def sleepMotor():
    """De-energize all coils so the motor can spin freely."""
    for o in outs:
        GPIO.output(o, GPIO.LOW)


def setMatrizPins(pin, valor):
    """Drive the pin at index `pin` of `outs`: HIGH when `valor` is 1, LOW when 0."""
    # if/else instead of two independent ifs: `valor` is always 0 or 1
    # (it comes straight from `matriz`), so exactly one branch applies.
    if valor == 1:
        GPIO.output(outs[pin], GPIO.HIGH)
    else:
        GPIO.output(outs[pin], GPIO.LOW)


def runForward():
    """Step the motor through one full cycle (4 steps) forward."""
    for row in matriz:
        for pin, valor in enumerate(row):
            setMatrizPins(pin, valor)
        time.sleep(timeValue)


def runBackwards():
    """Step the motor through one full cycle (4 steps) in reverse."""
    for row in reversed(matriz):
        for pin, valor in enumerate(row):
            setMatrizPins(pin, valor)
        time.sleep(timeValue)


# void main()
print('starting stepper')
try:
    for x in range(500):
        runBackwards()
        print(x)
    sleepMotor()
finally:
    # Always release the pins — previously Ctrl-C mid-run skipped cleanup()
    # and left the GPIO lines claimed until the next reboot/cleanup.
    GPIO.cleanup()
self.O filename = os.path.join('/tmp', e.name) e.F1.dump(filename) e.update_xml_attribute('Lectura') e.reload(update=True) return e register(SmallDiffError) class UnionFenosa0measError(ImpError): description = 'Union Fenosa NULL measurement' priority = 2 exit = True invoicing = False @classmethod def check(cls, O, e): uf_id = O.ResPartner.search([('name', '=', 'UNIÓN FENOSA DISTRIBUCIÓN S.A.')])[0] return e.polissa.distribuidora[0] == uf_id and e.error.valor_xml == 0 def fix(self): e = self.e O = self.O exception_tag = "Union Fenosa fix" if e.polissa.tarifa not in ['2.0A', '2.1']: raise Exception('{exception_tag}: DHA and >15kW not handled'.format(**locals())) if len(e.F1.root.Facturas.FacturaATR) > 1: raise Exception('{exception_tag}: Factura with multiple FacturaATR'.format(**locals())) TerminoEnergiaActiva = e.F1.root.Facturas.FacturaATR.EnergiaActiva.TerminoEnergiaActiva consumption = None # TODO: Check whethet there's any later invoice for TerminoEnergiaActiva_ in TerminoEnergiaActiva: if TerminoEnergiaActiva.FechaDesde == e.error.data or TerminoEnergiaActiva.FechaHasta == e.error.data: consumption = TerminoEnergiaActiva.Periodo[0].ValorEnergiaActiva break if not consumption: raise Exception('{exception_tag}: Consumption not found'.format(**locals())) if len(e.F1.root.Facturas.FacturaATR) > 1 or len(e.F1.root.Facturas.FacturaATR.Medidas) > 1 : raise Exception('{exception_tag}: Factura with multiple FacturaATR or Medidas'.format(**locals())) # Backup filename = os.path.join('/tmp', e.name) e.F1.dump(filename) for idx_ap, Aparato in enumerate(e.F1.root.Facturas.FacturaATR.Medidas.Aparato): if (Aparato.Tipo in ['CC', 'CA', 'P']) and (Aparato.CodigoDH == 1): for idx_int, Integrador in enumerate(Aparato.Integrador): if not (Integrador.Magnitud == 'AE' or Integrador.CodigoPeriodo == '10'): continue if not Integrador.ConsumoCalculado == consumption: raise Exception('{exception_tag}: Integrador and factura doesn\'t match'.format(**locals())) DesdeFechaHora = 
dateutil.parser.parse( str(Integrador.LecturaDesde.FechaHora)).date().strftime('%Y-%m-%d') HastaFechaHora = dateutil.parser.parse( str(Integrador.LecturaHasta.FechaHora)).date().strftime('%Y-%m-%d') if (DesdeFechaHora == e.error.data and ((Integrador.LecturaDesde.Lectura == 0) and (Integrador.LecturaHasta.Lectura == 0))): Integrador.LecturaDesde.Lectura = e.error.valor_db Integrador.LecturaHasta.Lectura = e.error.valor_db + consumption e.reload(update=True) fields_to_search = [('comptador.polissa', '=', e.polissa.id[0]), ('name', 'in', [DesdeFechaHora, HastaFechaHora]), ('lectura', '=', e.error.valor_db + consumption )] lect_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search) if not len(lect_pool_ids) > 0: raise Exception('{exception_tag}: Failed updating lectura'.format(**locals())) elif (HastaFechaHora == e.error.data and ((Integrador.LecturaDesde.Lectura == 0) and (Integrador.LecturaHasta.Lectura == 0))): fields_to_search = [('comptador.polissa', '=', e.polissa.id[0]), ('name', '=',DesdeFechaHora)] lect_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search) if len(lect_pool_ids) != 1: raise Exception('{exception_tag}: Failed updating lectura'.format(**locals())) Integrador.LecturaDesde.Lectura = e.error.valor_db Integrador.LecturaHasta.Lectura = e.error.valor_db + consumption e.reload(update=True) fields_to_search = [('comptador.polissa', '=', e.polissa.id[0]),
('name', 'in', [DesdeFechaHora, HastaFechaHora]), ('lectura', '=', e.error.valor_db + consumption )] lect_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search) if not len(lect_pool_ids) > 0: raise Exception('{exception_tag}: Failed updating lectura'.format(**locals())) lect_pool = LectPool(O, lect_pool_ids[0])
lect_pool.update_observacions('R. 0 Estimada a partir de consum F1 (ABr)') return raise Exception('{exception_tag}: Scenario not found'.format(**locals())) register(UnionFenosa0measError) class StartOfContractError(ImpError): description = 'WARNING: ** Contract- First Measure **' priority = 3 exit = True invoicing = False @classmethod def check(cls, O, e): return e.error.data == e.polissa.data_alta register(StartOfContractError) class EndOfContractError(ImpError): description = 'WARNING: ** Contract- Last measure **' priority = 4 exit = False invoicing = True @classmethod def check(cls, O, e): return e.error.data == e.polissa.data_baixa register(EndOfContractError) class StartOfMeterError(ImpError): description = 'WARNING: ** Meter- First measure **' priority = 5 exit = True invoicing = False @classmethod def check(cls, O, e): fields_to_search = [('polissa', '=', e.polissa.id), ('name', '=', e.error.comptador)] comptador_ids = O.GiscedataLecturesComptador.search(fields_to_search, 0, 0, False, {'active_test': False}) if len(comptador_ids) == 0: raise Exception('Comptador missing') comptador_id = comptador_ids[0] fields_to_search = [('comptador', '=', comptador_id)] lect_pool_id = sorted(O.GiscedataLecturesLecturaPool.search(fields_to_search))[0] fields_to_read = ['name'] fields_to_read = ['name'] return e.error.data == O.GiscedataLecturesLecturaPool.read(lect_pool_id, fields_to_read)['name'] register(StartOfMeterError) class EndOfMeterError(ImpError): description = 'WARNING: ** Meter - Last measure **' priority = 6 exit = True invoicing = False @classmethod def check(cls, O, e): fields_to_search = [('polissa', '=', e.polissa.id), ('name', '=', e.error.comptador)] comptador_ids = O.GiscedataLecturesComptador.search(fields_to_search, 0, 0, False, {'active_test': False}) if len(comptador_ids) == 0: raise Exception('Comptador missing') comptador_id = comptador_ids[0] fields_to_search = [('comptador', '=', comptador_id)] lect_pool_id = 
sorted(O.GiscedataLecturesLecturaPool.search(fields_to_search), reverse=True)[0] fields_to_read = ['name'] fields_to_read = ['name'] return e.error.data == O.GiscedataLecturesLecturaPool.read(lect_pool_id, fields_to_read)['name'] register(EndOfMeterError) class OldError(ImpError): description = 'ERROR: XML entry timestamp < BDD entry timestamp' priority = 7 exit = True invoicing = False @classmethod def check(cls, O, e): # Check F1_write_date <= DDBB_write_date F1_write_date = dateutil.parser.parse(str(e.F1.root.Cabecera.FechaSolicitud)).replace(tzinfo=None) DB_write_date = dateutil.parser.parse(e.error.lects_pool[e.error.periode].write_date) return F1_write_date <= DB_write_date def fix(self): e = self.e O = self.O old_valu
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Andreas Büsching <crunchy@bitkipper.net>
#
# a generic dispatcher implementation
#
# Copyright (C) 2006, 2007, 2009, 2010
#        Andreas Büsching <crunchy@bitkipper.net>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version
# 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA

"""generic implementation of external dispatchers, integratable into
several notifiers."""

# Minimal poll timeout (milliseconds) enforced while at least one
# dispatcher registered with min_timeout=True is active.
MIN_TIMER = 100

# Registered dispatcher callbacks, bucketed by their min_timeout flag.
__dispatchers = {}
__dispatchers[True] = []
__dispatchers[False] = []


def dispatcher_add(method, min_timeout=True):
    """The notifier supports external dispatcher functions that will be called
    within each scheduler step. This functionality may be useful for
    applications having an own event mechanism that needs to be triggered as
    often as possible. This method registers a new dispatcher function. To
    ensure that the notifier loop does not suspend to long in the sleep state
    during the poll a minimal timer MIN_TIMER is set to guarantee that the
    dispatcher functions are called at least every MIN_TIMER milliseconds.

    Returns MIN_TIMER when at least one min_timeout dispatcher is
    registered, else None."""
    global __dispatchers, MIN_TIMER
    __dispatchers[min_timeout].append(method)
    if __dispatchers[True]:
        return MIN_TIMER
    else:
        return None


def dispatcher_remove(method):
    """Removes an external dispatcher function from the list.

    Returns MIN_TIMER when min_timeout dispatchers remain, else None."""
    global __dispatchers, MIN_TIMER
    for val in (True, False):
        if method in __dispatchers[val]:
            __dispatchers[val].remove(method)
            break
    if __dispatchers[True]:
        return MIN_TIMER
    else:
        return None


def dispatcher_run():
    """Invokes all registered dispatcher functions; a dispatcher returning a
    false value is unregistered.

    Returns MIN_TIMER when min_timeout dispatchers remain, else None."""
    global __dispatchers
    for val in (True, False):
        # there is no need to copy an empty dict
        if not __dispatchers[val]:
            continue
        # iterate over a copy: dispatcher_remove mutates the live list
        for disp in __dispatchers[val][:]:
            if not disp():
                dispatcher_remove(disp)
    if __dispatchers[True]:
        return MIN_TIMER
    else:
        return None


def dispatcher_count():
    """Returns the number of registered dispatcher functions."""
    global __dispatchers
    # Bugfix: len(__dispatchers) is always 2 (the dict only ever has the
    # keys True and False); count the callbacks in both buckets instead.
    return sum(len(bucket) for bucket in __dispatchers.values())
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-20 19:10
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Make the subject's `tags` relation optional (blank allowed)."""

    dependencies = [
        ('subjects', '0012_auto_20170112_1408'),
    ]

    operations = [
        migrations.AlterField(
            model_name='subject',
            name='tags',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django check fields.W340); presumably it mirrors the model
            # definition — confirm against subjects/models.py before removing.
            field=models.ManyToManyField(blank=True, null=True, to='subjects.Tag', verbose_name='tags'),
        ),
    ]
# -*- encoding: utf-8 -*-
"""Test class for Template CLI

:Requirement: Template

:CaseAutomation: Automated

:CaseLevel: Acceptance

:CaseComponent: CLI

:TestType: Functional

:CaseImportance: High

:Upstream: No
"""
from fauxfactory import gen_string

from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import (
    make_location,
    make_org,
    make_os,
    make_template,
    make_user,
)
from robottelo.cli.template import Template
from robottelo.cli.user import User
from robottelo.decorators import run_only_on, skip_if_bug_open, tier1, tier2
from robottelo.test import CLITestCase


class TemplateTestCase(CLITestCase):
    """Test class for Config Template CLI."""
    # NOTE: docstrings below carry tool-parsed metadata (:id:,
    # :expectedresults:, ...) and must not be reworded casually.

    @run_only_on('sat')
    @tier1
    def test_positive_create_with_name(self):
        """Check if Template can be created

        :id: 77deaae8-447b-47cc-8af3-8b17476c905f

        :expectedresults: Template is created

        :CaseImportance: Critical
        """
        name = gen_string('alpha')
        template = make_template({'name': name})
        self.assertEqual(template['name'], name)

    @run_only_on('sat')
    @tier1
    def test_positive_update_name(self):
        """Check if Template can be updated

        :id: 99bdab7b-1279-4349-a655-4294395ecbe1

        :expectedresults: Template is updated

        :CaseImportance: Critical
        """
        template = make_template()
        updated_name = gen_string('alpha')
        Template.update({
            'id': template['id'],
            'name': updated_name,
        })
        # Re-read so the assertion checks the persisted state, not the input.
        template = Template.info({'id': template['id']})
        self.assertEqual(updated_name, template['name'])

    @tier1
    def test_positive_update_with_manager_role(self):
        """Create template providing the initial name, then update its name
        with manager user role.

        :id: 28c4357a-93cb-4b01-a445-5db50435bcc0

        :expectedresults: Provisioning Template is created, and its name can
            be updated.

        :CaseImportance: Critical

        :BZ: 1277308
        """
        new_name = gen_string('alpha')
        username = gen_string('alpha')
        password = gen_string('alpha')
        org = make_org()
        loc = make_location()
        template = make_template({
            'organization-ids': org['id'], 'location-ids': loc['id']})
        # Create user with Manager role
        user = make_user({
            'login': username,
            'password': password,
            'admin': False,
            'organization-ids': org['id'],
            'location-ids': loc['id'],
        })
        User.add_role({'id': user['id'], 'role': "Manager"})
        # Update template name with that user
        Template.with_user(username=username, password=password).update({
            'id': template['id'], 'name': new_name})
        template = Template.info({'id': template['id']})
        self.assertEqual(new_name, template['name'])

    @run_only_on('sat')
    @tier1
    def test_positive_create_with_loc(self):
        """Check if Template with Location can be created

        :id: 263aba0e-4f54-4227-af97-f4bc8f5c0788

        :expectedresults: Template is created and new Location has been
            assigned

        :CaseImportance: Critical
        """
        new_loc = make_location()
        new_template = make_template({'location-ids': new_loc['id']})
        self.assertIn(new_loc['name'], new_template['locations'])

    @run_only_on('sat')
    @tier1
    def test_positive_create_locked(self):
        """Check that locked Template can be created

        :id: ff10e369-85c6-45f3-9cda-7e1c17a6632d

        :expectedresults: The locked template is created successfully

        :CaseImportance: Critical
        """
        new_template = make_template({
            'locked': 'true',
            'name': gen_string('alpha'),
        })
        # CLI echoes the boolean back as 'yes'/'no', not 'true'/'false'.
        self.assertEqual(new_template['locked'], 'yes')

    @run_only_on('sat')
    @tier1
    def test_positive_create_with_org(self):
        """Check if Template with Organization can be created

        :id: 5de5ca76-1a39-46ac-8dd4-5d41b4b49076

        :expectedresults: Template is created and new Organization has been
            assigned

        :CaseImportance: Critical
        """
        new_org = make_org()
        new_template = make_template({
            'name': gen_string('alpha'),
            'organization-ids': new_org['id'],
        })
        self.assertIn(new_org['name'], new_template['organizations'])

    @run_only_on('sat')
    @tier2
    def test_positive_add_os_by_id(self):
        """Check if operating system can be added to a template

        :id: d9f481b3-9757-4208-b451-baf4792d4d70

        :expectedresults: Operating system is added to the template

        :CaseLevel: Integration
        """
        new_template = make_template()
        new_os = make_os()
        Template.add_operatingsystem({
            'id': new_template['id'],
            'operatingsystem-id': new_os['id'],
        })
        new_template = Template.info({'id': new_template['id']})
        # OS is listed as "<name> <major>.<minor>" in template info output.
        os_string = '{0} {1}.{2}'.format(
            new_os['name'], new_os['major-version'], new_os['minor-version'])
        self.assertIn(os_string, new_template['operating-systems'])

    @run_only_on('sat')
    @skip_if_bug_open('bugzilla', 1395229)
    @tier2
    def test_positive_remove_os_by_id(self):
        """Check if operating system can be removed from a template

        :id: b5362565-6dce-4770-81e1-4fe3ec6f6cee

        :expectedresults: Operating system is removed from template

        :CaseLevel: Integration
        """
        template = make_template()
        new_os = make_os()
        Template.add_operatingsystem({
            'id': template['id'],
            'operatingsystem-id': new_os['id'],
        })
        template = Template.info({'id': template['id']})
        os_string = '{0} {1}.{2}'.format(
            new_os['name'], new_os['major-version'], new_os['minor-version']
        )
        # First verify the OS really was attached, then verify removal.
        self.assertIn(os_string, template['operating-systems'])
        Template.remove_operatingsystem({
            'id': template['id'],
            'operatingsystem-id': new_os['id']
        })
        template = Template.info({'id': template['id']})
        self.assertNotIn(os_string, template['operating-systems'])

    @run_only_on('sat')
    @tier1
    def test_positive_create_with_content(self):
        """Check if Template can be created with specific content

        :id: 0fcfc46d-5e97-4451-936a-e8684acac275

        :expectedresults: Template is created with specific content

        :CaseImportance: Critical
        """
        content = gen_string('alpha')
        name = gen_string('alpha')
        template = make_template({
            'content': content,
            'name': name,
        })
        self.assertEqual(template['name'], name)
        template_content = Template.dump({'id': template['id']})
        self.assertIn(content, template_content[0])

    @run_only_on('sat')
    @tier1
    def test_positive_delete_by_id(self):
        """Check if Template can be deleted

        :id: 8e5245ee-13dd-44d4-8111-d4382cacf005

        :expectedresults: Template is deleted

        :CaseImportance: Critical
        """
        template = make_template()
        Template.delete({'id': template['id']})
        # info on a deleted template must fail with a CLI error.
        with self.assertRaises(CLIReturnCodeError):
            Template.info({'id': template['id']})

    @run_only_on('sat')
    @tier2
    def test_positive_clone(self):
        """Assure ability to clone a provisioning template

        :id: 27d69c1e-0d83-4b99-8a3c-4f1bdec3d261

        :expectedresults: The template is cloned successfully

        :CaseLevel: Integration
        """
        cloned_template_name = gen_string('alpha')
        template = make_template()
        result = Template.clone({
            'id': template['id'],
            'new-name': cloned_template_name,
        })
        new_template = Template.info({'id': result[0]['id']})
        self.assertEqual(new_template['name'], cloned_template_name)
# -*- coding: utf-8 -*-

#+---------------------------------------------------------------------------+
#|          01001110 01100101 01110100 01111010 01101111 01100010            |
#|                                                                           |
#|               Netzob : Inferring communication protocols                  |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry                   |
#| This program is free software: you can redistribute it and/or modify      |
#| it under the terms of the GNU General Public License as published by      |
#| the Free Software Foundation, either version 3 of the License, or         |
#| (at your option) any later version.                                       |
#|                                                                           |
#| This program is distributed in the hope that it will be useful,           |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of            |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              |
#| GNU General Public License for more details.                              |
#|                                                                           |
#| You should have received a copy of the GNU General Public License         |
#| along with this program. If not, see <http://www.gnu.org/licenses/>.      |
#+---------------------------------------------------------------------------+
#| @url      : http://www.netzob.org                                         |
#| @contact  : contact@netzob.org                                            |
#| @sponsors : Amossys, http://www.amossys.fr                                |
#|             Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/           |
#+---------------------------------------------------------------------------+

#+---------------------------------------------------------------------------+
#| Standard library imports                                                  |
#+---------------------------------------------------------------------------+
import logging

from netzob.Common.Type.TypeConvertor import TypeConvertor

#+---------------------------------------------------------------------------+
#| Related third party imports                                               |
#+---------------------------------------------------------------------------+

#+---------------------------------------------------------------------------+
#| Local application imports                                                 |
#+---------------------------------------------------------------------------+


class Memory():
    """Memory:
            Definition of a memory, used to store variable values (in
            a persisting and independent way) across a session.

            Variables are keyed by their ID. Two maps are kept: `memory`
            (the persisted state) and `temporaryMemory` (the working copy
            that createMemory()/persistMemory() sync with).
    """

    def __init__(self):
        """Constructor of Memory:"""
        # create logger with the given configuration
        self.log = logging.getLogger('netzob.Common.MMSTD.Dictionary.Memory.py')
        self.memory = dict()            # persisted variable values, keyed by variable ID
        self.temporaryMemory = dict()   # working copy used by all accessors below
        # Optional observer invoked on each access as cb(op, variable, value)
        # with op in {"R", "W", "D"}.
        self.memory_acces_cb = None

    def setMemoryAccess_cb(self, cb):
        """Set the callback to execute after a memory access."""
        self.memory_acces_cb = cb

    def duplicate(self):
        """Duplicates in a new memory.

        @return: a new Memory whose persisted map is a shallow copy of this
                 one (values themselves are shared, not copied)."""
        duplicatedMemory = Memory()
        for k in self.memory.keys():
            duplicatedMemory.memory[k] = self.memory[k]
        duplicatedMemory.createMemory()
        return duplicatedMemory

    #+-----------------------------------------------------------------------+
    #| Functions on memories                                                 |
    #+-----------------------------------------------------------------------+
    def createMemory(self):
        """createMemory:
                Reinit the temporary memory and copy all values from the real
                memory in it.
        """
        self.temporaryMemory = dict()
        for key in self.memory.keys():
            self.temporaryMemory[key] = self.memory[key]

    def persistMemory(self):
        """persistMemory:
                Copy all values from the temporary memory into the real
                memory.
        """
        self.memory = dict()
        for key in self.temporaryMemory.keys():
            self.memory[key] = self.temporaryMemory[key]

    def cleanMemory(self):
        """cleanMemory:
                Remove all variables and values from real and temporary
                memories.
        """
        # self.memory = dict()  # TODO: implement this change in all calling functions.
        self.temporaryMemory = dict()

    def recallMemory(self):
        """recallMemory:
                Return all values store in the temporary memory.

                @return: the value of all variables in the temporary memory.
        """
        return self.temporaryMemory

    def printMemory(self):
        """printMemory:
                Debug functions which print all values in temporary memory.
        """
        self.log.debug("Memory map:")
        for _id in self.temporaryMemory.keys():
            self.log.debug("> " + str(_id) + " = " + str(self.temporaryMemory.get(_id)))

    #+-----------------------------------------------------------------------+
    #| Functions on temporary memory elements                                |
    #+-----------------------------------------------------------------------+
    def hasMemorized(self, variable):
        """hasMemorized:
                Check if a variable is in the temporary memory.

                @param variable: the given variable we search in memory.
                @return: True if the variable has been found in the memory.
        """
        return variable.getID() in self.temporaryMemory.keys()

    def restore(self, variable):
        """restore:
                Copy back the value of a variable from the real memory in the
                temporary memory.

                @param variable: the given variable, the value of which we
                                 want to restore.
        """
        if variable.getID() in self.memory.keys():
            self.temporaryMemory[variable.getID()] = self.memory[variable.getID()]

        if self.memory_acces_cb is not None:
            # NOTE(review): the callback reports variable.getCurrentValue(),
            # not the value just restored — confirm this is intentional.
            value = variable.getCurrentValue()
            if value is not None:
                value = TypeConvertor.bin2strhex(value)
            self.memory_acces_cb("W", variable, value)

    def memorize(self, variable):
        """memorize:
                Save the current value of a variable in memory.

                @param variable: the given variable, the value of which we
                                 want to save.
        """
        if variable.getCurrentValue() is not None:
            self.temporaryMemory[variable.getID()] = variable.getCurrentValue()
            if self.memory_acces_cb is not None:
                value = variable.getCurrentValue()
                if value is not None:
                    value = TypeConvertor.bin2strhex(value)
                self.memory_acces_cb("W", variable, value)

    def forget(self, variable):
        """forget:
                Remove a variable and its value from the temporary memory.
        """
        if self.hasMemorized(variable):
            # Bugfix: dict has no remove() method — the old
            # `self.temporaryMemory.remove(...)` raised AttributeError
            # whenever this guarded branch executed. Delete the entry by key.
            del self.temporaryMemory[variable.getID()]
            if self.memory_acces_cb is not None:
                self.memory_acces_cb("D", variable, None)

    def recall(self, variable):
        """recall:
                Return the value of one variable store in the temporary
                memory.

                @param variable: the variable, the value of which we are
                                 searching.
                @return: the value of the given variable in the temporary
                         memory, or None if it is not memorized.
        """
        if self.hasMemorized(variable):
            if self.memory_acces_cb is not None:
                value = self.temporaryMemory[variable.getID()]
                if value is not None:
                    value = TypeConvertor.bin2strhex(value)
                self.memory_acces_cb("R", variable, value)
            return self.temporaryMemory[variable.getID()]
        else:
            return None
eckFolder(): """Used to create the data folder at first startup""" if not os.path.exists(SAVE_FOLDER): print("Creating " + SAVE_FOLDER + " folder...") os.makedirs(SAVE_FOLDER) def checkFiles(): """Used to initialize an empty database at first startup""" theFile = SAVE_FOLDER + SAVE_FILE if not dataIO.is_valid_json(theFile): print("Creating default welcome settings.json...") dataIO.save_json(theFile, {}) class Welcome: # pylint: disable=too-many-instance-attributes """Send a welcome DM on server join.""" def loadSettings(self): """Loads settings from the JSON file""" self.settings = dataIO.load_json(SAVE_FOLDER+SAVE_FILE) def saveSettings(self): """Loads settings from the JSON file""" dataIO.save_json(SAVE_FOLDER+SAVE_FILE, self.settings) #Class constructor def __init__(self, bot): self.bot = bot #The JSON keys for the settings: self.keyWelcomeDMEnabled = "welcomeDMEnabled" self.keyWelcomeLogEnabled = "welcomeLogEnabled" self.keyWelcomeLogChannel = "welcomeLogChannel" self.keyWelcomeTitle = "welcomeTitle" self.keyWelcomeMessage = "welcomeMessage" self.keyWelcomeImage = "welcomeImage" self.keyLeaveLogEnabled = "leaveLogEnabled" self.keyLeaveLogChannel = "leaveLogChannel" checkFolder() checkFiles() self.loadSettings() #The async function that is triggered on new member join. async def sendWelcomeMessage(self, newUser, test=False): """Sends the welcome message in DM.""" serverId = newUser.server.id #Do not send DM if it is disabled! 
if not self.settings[serverId][self.keyWelcomeDMEnabled]: return try: welcomeEmbed = discord.Embed(title=self.settings[serverId][self.keyWelcomeTitle]) welcomeEmbed.description = self.settings[serverId][self.keyWelcomeMessage] welcomeEmbed.colour = discord.Colour.red() if self.keyWelcomeImage in self.settings[serverId].keys() and \ self.settings[serverId][self.keyWelcomeImage]: imageUrl = self.settings[serverId][self.keyWelcomeImage] welcomeEmbed.set_image(url=imageUrl.replace(" ", "%20")) await self.bot.send_message(newUser, embed=welcomeEmbed) except (discord.Forbidden, discord.HTTPException) as errorMsg: LOGGER.error("Could not send message, the user may have" "turned off DM's from this server." " Also, make sure the server has a title " "and message set!", exc_info=True) LOGGER.error(errorMsg) if self.settings[serverId][self.keyWelcomeLogEnabled] and not test: channel = self.bot.get_channel(self.settings[serverId][self.keyWelcomeLogChannel]) await self.bot.send_message(channel, ":bangbang: ``Server Welcome:`` User " "{0.name}#{0.discriminator} ({0.id}) has" " joined. Could not send DM!".format( newUser)) await self.bot.send_message(channel, errorMsg) else: if self.settings[serverId][self.keyWelcomeLogEnabled] and not test: channel = self.bot.get_channel(self.settings[serverId][self.keyWelcomeLogChannel]) await self.bot.send_message(channel, ":o: ``Server Welcome:`` User {0.name}#" "{0.discriminator} ({0.id}) has joined. " "DM sent.".format(newUser)) LOGGER.info("User %s#%s (%s) has joined. DM sent.", newUser.name, newUser.discriminator, newUser.id) async def logServerLeave(self, leaveUser): """Logs the server leave to a channel, if enabled.""" serverId = leaveUser.server.id if self.set
tings[serverId][self.keyLeaveLogEnabled]: channel = self.bot.get_channel(self.settings[serverId][self.keyLeaveLogChannel]) await self.bot.
send_message(channel, ":x: ``Server Leave :`` User {0.name}#" "{0.discriminator} ({0.id}) has left the " "server.".format(leaveUser)) LOGGER.info("User %s#%s (%s) has left the server.", leaveUser.name, leaveUser.discriminator, leaveUser.id) #################### # MESSAGE COMMANDS # #################### #[p]welcome @commands.group(name="welcome", pass_context=True, no_pm=False) @checks.serverowner() #Only allow server owner to execute the following command. async def _welcome(self, ctx): """Server welcome message settings.""" if ctx.invoked_subcommand is None: await send_cmd_help(ctx) #[p]welcome setmessage @_welcome.command(pass_context=True, no_pm=False) @checks.serverowner() #Only allow server owner to execute the following command. async def setmessage(self, ctx): """Interactively configure the contents of the welcome DM.""" await self.bot.say("What would you like the welcome DM message to be?") message = await self.bot.wait_for_message(timeout=60, author=ctx.message.author, channel=ctx.message.channel) if message is None: await self.bot.say("No response received, not setting anything!") return if len(message.content) > 2048: await self.bot.say("Your message is too long!") return try: self.loadSettings() if ctx.message.author.server.id in self.settings: self.settings[ctx.message.author.server.id] \ [self.keyWelcomeMessage] = message.content else: self.settings[ctx.message.author.server.id] = {} self.settings[ctx.message.author.server.id] \ [self.keyWelcomeMessage] = message.content self.saveSettings() except Exception as errorMsg: # pylint: disable=broad-except await self.bot.say("Could not save settings! 
Check the console for " "details.") print(errorMsg) else: await self.bot.say("Message set to:") await self.bot.say("```" + message.content + "```") LOGGER.info("Message changed by %s#%s (%s)", ctx.message.author.name, ctx.message.author.discriminator, ctx.message.author.id) LOGGER.info(message.content) #[p]welcome toggledm @_welcome.command(pass_context=True, no_pm=False) @checks.serverowner() #Only allow server owner to execute the following command. async def toggledm(self, ctx): """Toggle sending a welcome DM.""" self.loadSettings() try: if self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled]: self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled] = False isSet = False else: self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled] = True isSet = True except KeyError: self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled] = True isSet = True self.saveSettings() if isSet: await self.bot.say(":white_check_mark:
ous view_only_link.anonymous = True view_only_link.save() res = app.get(view_only_link_url) assert 'contributors' not in res.json['data'][0]['relationships'] assert 'implicit_contributors' not in res.json['data'][0]['relationships'] assert 'bibliographic_contributors' not in res.json['data'][0]['relationships'] # delete vol view_only_link.is_deleted = True view_only_link.save() res = app.get(view_only_link_url, expect_errors=True) assert res.status_code == 401 @pytest.mark.django_db class TestNodeChildrenListFiltering: def test_node_child_filtering(self, app, user): project = ProjectFactory(creator=user) title_one, title_two = fake.bs(), fake.bs() component = NodeFactory(title=title_one, parent=project) component_two = NodeFactory(title=title_two, parent=project) url = '/{}nodes/{}/children/?filter[title]={}'.format( API_BASE, project._id, title_one ) res = app.get(url, auth=user.auth) ids = [node['id'] for node in res.json['data']] assert component._id in ids assert component_two._id not in ids @pytest.mark.django_db class TestNodeChildCreate: @pytest.fixture() def project(self, user): return ProjectFactory(creator=user, is_public=True) @pytest.fixture() def url(self, project): return '/{}nodes/{}/children/'.format(API_BASE, project._id) @pytest.fixture() def child(self): return { 'data': { 'type': 'nodes', 'attributes': { 'title': 'child', 'description': 'this is a child project', 'category': 'project' } } } def test_creates_child(self, app, user, project, child, url): # test_creates_child_logged_out_user res = app.post_json_api(url, child, expect_errors=True) assert res.status_code == 401 project.reload() assert len(project.nodes) == 0 # test_creates_child_logged_in_read_contributor read_contrib = AuthUserFactory() project.add_contributor( read_contrib, permissions=permissions.READ, auth=Auth(user), save=True ) res = app.post_json_api( url, child, auth=read_contrib.auth, expect_errors=True ) assert res.status_code == 403 project.reload() assert 
len(project.nodes) == 0 # test_creates_child_logged_in_non_contributor non_contrib = AuthUserFactory() res = app.post_json_api( url, child, auth=non_contrib.auth, expect_errors=True ) assert res.status_code == 403 project.reload() assert len(project.nodes) == 0 # test_creates_child_group_member_read group_mem = AuthUserFactory() group = OSFGroupFactory(creator=group_mem) project.add_osf_group(group, permissions.READ) res = app.post_json_api( url, child, auth=group_mem.auth, expect_errors=True ) assert res.status_code == 403 project.update_osf_group(group, permissions.WRITE) res = app.post_json_api( url, child, auth=group_mem.auth, expect_errors=True ) assert res.status_code == 201 # test_creates_child_no_type child = { 'data': { 'attributes': { 'title': 'child', 'description': 'this is a child project', 'category': 'project', } } } res = app.post_json_api(url, child, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'This field may not be null.' assert res.json['errors'][0]['source']['pointer'] == '/data/type' # test_creates_child_incorrect_type child = { 'data': { 'type': 'Wrong type.', 'attributes': { 'title': 'child', 'description': 'this is a child project', 'category': 'project', } } } res = app.post_json_api(url, child, auth=user.auth, expect_errors=True) assert res.status_code == 409 assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.' # test_creates_child_properties_not_nested child = { 'data': { 'attributes': { 'title': 'child', 'description': 'this is a child project' }, 'category': 'project' } } res = app.post_json_api(url, child, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'This field may not be null.' 
assert res.json['errors'][0]['source']['pointer'] == '/data/type' assert res.json['errors'][1]['detail'] == 'This field is required.' assert res.json['errors'][1]['source']['pointer'] == '/data/attributes/category' def test_creates_child_logged_in_write_contributor( self, app, user, project, child, url): write_contrib = AuthUserFactory() project.add_contributor( write_contrib, permissions=permissions.WRITE, auth=Auth(user), save=True) res = app.post_json_api(url, child, auth=write_contrib.auth) assert res.status_code == 201 assert res.json['data']['attributes']['title'] == child['data']['attributes']['title'] assert res.json['data']['attributes']['description'] == child['data']['attributes']['description'] assert res.json['data']['attributes']['category'] == child['data']['attributes']['category'] project.reload() child_id = res.json['data']['id'] assert child_id == project.nodes[0]._id assert AbstractNode.load(child_id).logs.latest( ).action == NodeLog.PROJECT_CREATED def test_creates_child_logged_in_owner( self, app, user, project, child, url): res = app.post_json_api(url, child, auth=user.auth) assert res.status_code == 201 assert res.json['data']['attributes']['title'] == child['data']['attributes']['title'] assert res.json['data']['attributes']['description'] == child['data']['attributes']['description'
] assert res.json['data']['attributes']['category'] == child['data']['attributes']['category'] project.reload() assert res.json['data']['id'] == project.nodes[0]._id assert project.nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner( self, app, user, project, url): title = '<em>Reasonable</em>
<strong>Project</strong>' description = 'An <script>alert("even reasonabler")</script> child' res = app.post_json_api(url, { 'data': { 'type': 'nodes', 'attributes': { 'title': title, 'description': description, 'category': 'project', 'public': True } } }, auth=user.auth) child_id = res.json['data']['id'] assert res.status_code == 201 url = '/{}nodes/{}/'.format(API_BASE, child_id) res = app.get(url, auth=user.auth) assert res.json['data']['attributes']['title'] == strip_html(title) assert res.json['data']['attributes']['description'] == strip_html( description) assert res.json['data']['attributes']['category'] == 'project' project.reload() child_id = res.json['data']['id'] assert chi
WrtParentInterval.wrtParentIntervalNum) WrtParentInterval.wrtParentIntervalNum += 1 # Create function interval FunctionInterval.__init__(self, wrtReparentFunc, name = name) ### Function Interval subclasses for instantaneous pose changes ### class PosInterval(FunctionInterval): # PosInterval counter posIntervalNum = 1 # Initialization def __init__(self, nodePath, pos, duration = 0.0, name = None, other = None): """__init__(nodePath, pos, duration, name) """ # Create function def posFunc(np = nodePath, pos = pos, other = other): if other: np.setPos(other, pos) else: np.setPos(pos) # Determine name if (name == None): name = 'PosInterval-%d' % PosInterval.posIntervalNum PosInterval.posIntervalNum += 1 # Create function interval FunctionInterval.__init__(self, posFunc, name = name) class HprInterval(FunctionInterval): # HprInterval counter hprIntervalNum = 1 # Initialization def __init__(self, nodePath, hpr, duration = 0.0, name = None, other = None): """__init__(nodePath, hpr, duration, name) """ # Create function def hprFunc(np = nodePath, hpr = hpr, other = other): if other: np.setHpr(other, hpr) else: np.setHpr(hpr) # Determine name if (name == None): name = 'HprInterval-%d' % HprInterval.hprIntervalNum HprInterval.hprIntervalNum += 1 # Create function interval FunctionInterval.__init__(self, hprFunc, name = name) class ScaleInterval(FunctionInterval): # ScaleInterval counter scaleIntervalNum = 1 # Initialization def __init__(self, nodePath, scale, duration = 0.0, name = None, other = None): """__init__(nodePath, scale, duration, name) """ # Create function def scaleFunc(np = nodePath, scale = scale, other = other): if other: np.setScale(other, scale) else: np.setScale(scale) # Determine name if (name == None): name = 'ScaleInterval-%d' % ScaleInterval.scaleIntervalNum ScaleInterval.scaleIntervalNum += 1 # Create function interval FunctionInterval.__init__(self, scaleFunc, name = name) class PosHprInterval(FunctionInterval): # PosHprInterval counter 
posHprIntervalNum = 1 # Initialization def __init__(self, nodePath, pos, hpr, duration = 0.0, name = None, other = None): """__init__(nodePath, pos, hpr, duration, name) """ # Create function def posHprFunc(np = nodePath, pos = pos, hpr = hpr, other = other): if other: np.setPosHpr(other, pos, hpr) else: np.setPosHpr(pos, hpr) # Determine name if (name == None): name = 'PosHprInterval-%d' % PosHprInterval.posHprIntervalNum PosHprInterval.posHprIntervalNum += 1 # Create function interval FunctionInterval.__init__(self, posHprFunc, name = name) class HprScaleInterval(FunctionInterval): # HprScaleInterval counter hprScaleIntervalNum = 1 # Initialization def __init__(self, nodePath, hpr, scale, duration = 0.0, name = None, other = None): """__init__(nodePath, hpr, scale, duration, other, name) """ # Create function def hprScaleFunc(np=nodeP
ath, hpr=hpr, scale=scale,
other = other): if other: np.setHprScale(other, hpr, scale) else: np.setHprScale(hpr, scale) # Determine name if (name == None): name = ('HprScale-%d' % HprScaleInterval.hprScaleIntervalNum) HprScaleInterval.hprScaleIntervalNum += 1 # Create function interval FunctionInterval.__init__(self, hprScaleFunc, name = name) class PosHprScaleInterval(FunctionInterval): # PosHprScaleInterval counter posHprScaleIntervalNum = 1 # Initialization def __init__(self, nodePath, pos, hpr, scale, duration = 0.0, name = None, other = None): """__init__(nodePath, pos, hpr, scale, duration, other, name) """ # Create function def posHprScaleFunc(np=nodePath, pos=pos, hpr=hpr, scale=scale, other = other): if other: np.setPosHprScale(other, pos, hpr, scale) else: np.setPosHprScale(pos, hpr, scale) # Determine name if (name == None): name = ('PosHprScale-%d' % PosHprScaleInterval.posHprScaleIntervalNum) PosHprScaleInterval.posHprScaleIntervalNum += 1 # Create function interval FunctionInterval.__init__(self, posHprScaleFunc, name = name) class Func(FunctionInterval): def __init__(self, *args, **kw): function = args[0] assert hasattr(function, '__call__') extraArgs = args[1:] kw['extraArgs'] = extraArgs FunctionInterval.__init__(self, function, **kw) class Wait(WaitInterval): def __init__(self, duration): WaitInterval.__init__(self, duration) """ SAMPLE CODE from IntervalGlobal import * i1 = Func(base.transitions.fadeOut) i2 = Func(base.transitions.fadeIn) def caughtIt(): print 'Caught here-is-an-event' class DummyAcceptor(DirectObject): pass da = DummyAcceptor() i3 = Func(da.accept, 'here-is-an-event', caughtIt) i4 = Func(messenger.send, 'here-is-an-event') i5 = Func(da.ignore, 'here-is-an-event') # Using a function def printDone(): print 'done' i6 = Func(printDone) # Create track t1 = Sequence([ # Fade out (0.0, i1), # Fade in (2.0, i2), # Accept event (4.0, i3), # Throw it, (5.0, i4), # Ignore event (6.0, i5), # Throw event again and see if ignore worked (7.0, i4), # Print done (8.0, 
i6)], name = 'demo') # Play track t1.play() ### Specifying interval start times during track construction ### # Interval start time can be specified relative to three different points: # PREVIOUS_END # PREVIOUS_START # TRACK_START startTime = 0.0 def printStart(): global startTime startTime = globalClock.getFrameTime() print 'Start' def printPreviousStart(): global startTime currTime = globalClock.getFrameTime() print 'PREVIOUS_END %0.2f' % (currTime - startTime) def printPreviousEnd(): global startTime currTime = globalClock.getFrameTime() print 'PREVIOUS_END %0.2f' % (currTime - startTime) def printTrackStart(): global startTime currTime = globalClock.getFrameTime() print 'TRACK_START %0.2f' % (currTime - startTime) i1 = Func(printStart) # Just to take time i2 = LerpPosInterval(camera, 2.0, Point3(0, 10, 5)) # This will be relative to end of camera move i3 = FunctionInterval(printPreviousEnd) # Just to take time i4 = LerpPosInterval(camera, 2.0, Point3(0, 0, 5)) # This will be relative to the start of the camera move i5 = FunctionInterval(printPreviousStart) # This will be relative to track start i6 = FunctionInterval(printTrackStart) # Create the track, if you don't specify offset type in tuple it defaults to # relative to TRACK_START (first entry below) t2 = Track([(0.0, i1), # i1 start at t = 0, duration = 0.0 (1.0, i2, TRACK_START), # i2 start at t = 1, duration = 2.0 (2.0, i3, PREVIOUS_END), # i3 start at t = 5, duration = 0.0 (1.0, i4, PREVIOUS_END), # i4 start at t = 6, duration = 2.0 (3.0, i5, PREVIOUS_START), # i5 start at t = 9, duration = 0.0 (10.0, i6, TRACK_START)], # i6 start at t = 10, duration = 0.0 name = 'startTimeDemo') t2.play() smiley = loader.loadModel('models/misc/smiley') from direct.actor import Actor donald = Actor.Actor() donald.loadModel("phase_6/models/char/donald-wh
from kivy.config import Config
from kivy.config import ConfigParser
import pentai.base.logger as log

import os

# Module-level singleton holding the application's ConfigParser.
# Initialized to None so config_instance() fails soft (returns None)
# instead of raising NameError when called before create_config_instance().
_config = None


def config_instance():
    """Return the module-level ConfigParser singleton.

    Returns None if create_config_instance() has not been called yet.
    """
    return _config


def create_config_instance(ini_file, user_path):
    """Create and memoize the application ConfigParser from an ini file.

    If *ini_file* is not yet present in *user_path*, the bundled copy is
    first copied there so the user gets a writable configuration file.

    :param ini_file: file name of the ini file (looked up in *user_path*)
    :param user_path: directory holding the user's configuration
    :return: the loaded ConfigParser instance (also stored in ``_config``)
    """
    global _config
    ini_path = os.path.join(user_path, ini_file)
    if ini_file not in os.listdir(user_path):
        # First run: seed the user directory with the bundled defaults.
        log.info("Writing initial ini file %s" % ini_path)
        import shutil
        shutil.copy(ini_file, ini_path)
    else:
        log.info("Loading ini file from %s" % ini_path)
    _config = ConfigParser()
    _config.read(ini_path)
    # Merge in any new keys shipped with a newer version of the app.
    log.info("Updating ini file from %s" % ini_file)
    _config.update_config(ini_file)
    # Don't need to write it back until something is changed.
    return _config
#!/usr/bin/env python3
#
# Copyright (C) 2013 - Tony Chyi <tonychee1989@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.

from PyQt5 import QtWidgets, QtCore, QtGui
import json
import urllib.request
import sys


class Timer(QtCore.QThread):
    """Poll the BtcChina ticker in a background thread.

    Emits ``trigger(thread_no, ticker_dict)`` every ``interval`` ms.
    """

    trigger = QtCore.pyqtSignal(int, dict)

    def __init__(self, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.thread_no = 1      # identifies this poller in the trigger signal
        self.interval = 0       # poll period in milliseconds
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.tc)

    def setup(self, thread_no=1, interval=0):
        """Configure the poller id and the poll interval (milliseconds)."""
        self.thread_no = thread_no
        self.interval = interval

    def run(self):
        self.timer.start(self.interval)

    @QtCore.pyqtSlot()
    def tc(self):
        """Timer callback: fetch a quote and forward it via the signal."""
        try:
            val = self.getValue()
            self.trigger.emit(self.thread_no, val)
        except TypeError as err:
            # getValue() returned None (fetch failed); emitting None through
            # a (int, dict) signal raises TypeError, which we only report.
            print('\033[31;1mERR:\033[0m', err)

    def getValue(self):
        """Fetch the ticker JSON; return the "ticker" dict, or None on error."""
        url = "https://data.btcchina.com/data/ticker"
        try:
            p_conn = urllib.request.urlopen(url)
            try:
                b = p_conn.read()
            finally:
                # Close the connection even if read() fails.
                p_conn.close()
            jso = json.loads(b.decode("utf8"))
            return jso["ticker"]
        except (OSError, ValueError, KeyError):
            # OSError covers urllib.error.URLError (network failure),
            # ValueError covers json.JSONDecodeError / bad utf-8,
            # KeyError covers a response without a "ticker" entry.
            return None


class Window(QtWidgets.QWidget):
    """Main window: big last-price label, stats label and a price chart."""

    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        self.TITLE = "BtcChina实时报价"
        self.valPrev = 0  # previous "last" price, used to colour up/down moves
        self.setWindowTitle(self.TITLE)
        self.move(100, 200)
        self.setMinimumSize(500, 500)
        self.setMaximumSize(500, 500)

        # Get ready for widgets
        self.label1 = QtWidgets.QLabel("Loading...")
        self.label1.setStyleSheet("font-size:50px")
        self.label2 = QtWidgets.QLabel("Loading...")
        self.label2.setStyleSheet("font-size:12px")
        self.label2.setMaximumHeight(60)
        self.label2.setMinimumHeight(60)
        self.graph = Graphs()

        # Set layout
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.label1)
        hbox.addStretch(1)
        hbox.addWidget(self.label2)
        vbox = QtWidgets.QVBoxLayout()
        vbox.addLayout(hbox)
        vbox.addWidget(self.graph)
        self.setLayout(vbox)

        # Start polling; parenting to self keeps the thread object alive.
        timer = Timer(self)
        timer.trigger.connect(self.setLabel)
        timer.setup(interval=10000)
        timer.start()

    @QtCore.pyqtSlot(int, dict)
    def setLabel(self, thread_no, val):
        """Update labels, chart and window title from a ticker dict."""
        try:
            self.label1.setText("¥{0}".format(val["last"]))
            self.label2.setText("High:\t¥{0}\nLow:\t¥{1}\nBuy:\t¥{2}\nSell:\t¥{3}".format(val["high"],
                                                                                          val["low"],
                                                                                          val["buy"],
                                                                                          val["sell"]))
            self.graph.setPeak(val["high"], val["low"])
            self.graph.addPoint(val["last"])
            if float(val["last"]) > self.valPrev:
                self.label1.setStyleSheet("font-size:50px;color:red")    # WOW! Bull market!
            elif float(val["last"]) < self.valPrev:
                self.label1.setStyleSheet("font-size:50px;color:green")  # Damn bear market!
            self.setWindowTitle("¥{0}|{1}".format(val["last"], self.TITLE))
            self.valPrev = float(val["last"])
        except (KeyError, TypeError, ValueError):
            # Best effort: a malformed/partial ticker update is dropped
            # silently and the previous display is kept.
            pass


class Graphs(QtWidgets.QWidget):
    """A customized widget that draws the recent-price line chart."""

    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.flagFirst = True
        self.setMinimumSize(300, 300)
        self.recentData = []  # y-coordinates of recent points; drawn as a polyline
        self.max_ = 10000     # chart top value
        self.min_ = 0         # chart bottom value
        self.valuePrev = self.height()
        self.mousePosit = QtCore.QPoint(0, 0)
        # NOTE(review): mouseMoveEvent only fires while a button is pressed
        # unless setMouseTracking(True) is enabled — kept as original.
        self.label1 = QtWidgets.QLabel("10k", self)
        self.label1.move(0, self.height() * 0.03)
        self.label2 = QtWidgets.QLabel("0", self)
        self.label2.move(0, self.height() * 0.83)
        self.setStep(10)

    def paintEvent(self, event):
        painter = QtGui.QPainter()
        painter.begin(self)
        self.drawGird(event, painter)
        self.drawFrame(event, painter)
        self.drawMouse(event, painter)
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        self.draw(event, painter)
        painter.end()

    def draw(self, event, painter):
        """Draw the data polyline on the widget."""
        pen = QtGui.QPen(QtGui.QColor(0, 0, 0), 1, QtCore.Qt.SolidLine)
        painter.setPen(pen)
        xPrev = self.width() * 0.10
        xCur = self.width() * 0.10
        for value in self.recentData:
            xCur += self.step
            painter.drawLine(xPrev, self.valuePrev, xCur, value)
            self.valuePrev = value
            xPrev = xCur

    def drawFrame(self, event, painter):
        """Draw the border of the chart."""
        painter.setPen(QtGui.QColor(0, 0, 0))
        painter.drawRect(self.width() * 0.10, self.height() * 0.05,
                         self.width() * 0.90, self.height() * 0.95)

    def drawGird(self, event, painter):
        """Draw the grid on the chart."""
        painter.setPen(QtGui.QColor(192, 192, 192))
        for v in range(2, 100):
            painter.drawLine(self.width() * 0.05 * v, self.height() * 0.05,
                             self.width() * 0.05 * v, self.height())
        for h in range(1, 100):
            painter.drawLine(self.width() * 0.10, self.height() * 0.05 * h,
                             self.width(), self.height() * 0.05 * h)

    def drawMouse(self, event, painter):
        """Draw crosshair lines and the price at the mouse position."""
        if self.mousePosit in QtCore.QRect(self.width() * 0.1, self.height() * 0.05,
                                           self.width() * 0.9, self.height() * 0.95):
            painter.setPen(QtGui.QColor(255, 0, 255))
            painter.drawLine(self.mousePosit.x(), self.height() * 0.05,
                             self.mousePosit.x(), self.height())
            painter.drawLine(self.width() * 0.10, self.mousePosit.y(),
                             self.width(), self.mousePosit.y())
            # Map the y pixel back to a price between min_ and max_.
            price = float((1 - (self.mousePosit.y() - self.height() * 0.05) /
                           (self.height() * 0.95)) * (self.max_ - self.min_) + self.min_)
            painter.setPen(QtGui.QColor(0, 0, 255))
            painter.drawText(QtCore.QPoint(self.width() * 0.1, self.mousePosit.y()),
                             format(price, '.2f'))

    def addPoint(self, value):
        """Append a price to the data list (converted to a y pixel)."""
        value = float(value)
        valueCur = int((1.0 - (value - self.min_) / (self.max_ - self.min_)) *
                       self.height() * 0.95 + self.height() * 0.05)
        self.recentData.append(valueCur)
        if len(self.recentData) >= self.posit:
            del self.recentData[0]  # Drop the oldest point so the chart scrolls.
        self.update()

    def setPeak(self, max_, min_):
        """Set the max/min value of the chart axis and its labels."""
        self.max_ = float(max_)
        self.min_ = float(min_)
        self.label1.setText(max_)
        self.label1.adjustSize()
        self.label2.setText(min_)
        self.label2.adjustSize()
        self.update()

    def setStep(self, step):
        """Set the x distance (pixels) between consecutive points."""
        step = int(step)
        self.step = step
        self.posit = len(range(int(self.width() * 0.10),
                               int(self.width() * 0.75), step))

    def mouseMoveEvent(self, event):
        self.mousePosit = event.pos()
        self.update()


def main():
    app = QtWidgets.QApplication(sys.argv)
    win = Window()
    win.show()
    app.exec_()


if __name__ == "__main__":
    main()
from json import dumps  # pragma: no cover
from sqlalchemy.orm import class_mapper  # pragma: no cover
from app.models import User, Group  # pragma: no cover


def serialize(obj, columns):
    """Return a dict mapping each name in *columns* to its value on *obj*."""
    return {c: getattr(obj, c) for c in columns}


def queryAllToJson(model, conditions):
    """Return a JSON array of all *model* rows matching *conditions*.

    :param model: a mapped SQLAlchemy model class with a ``query`` attribute
    :param conditions: dict of column=value filters passed to ``filter_by``
    """
    columns = [c.key for c in class_mapper(model).columns]
    serialized_objs = [
        serialize(obj, columns)
        for obj in model.query.filter_by(**conditions)
    ]
    return dumps(serialized_objs)


def objectToJson(obj):
    """Serialize a single mapped object to a JSON object string."""
    columns = [c.key for c in class_mapper(obj.__class__).columns]
    return dumps(serialize(obj, columns))


def getUserId(username):
    """Return the id of the user named *username*.

    Raises Exception if no user with that username exists.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        raise Exception('username %s not found in database' % username)
    return user.id


def getGroupId(groupname):
    """Return the id of the group named *groupname*.

    Raises Exception if no group with that name exists.
    """
    group = Group.query.filter_by(groupname=groupname).first()
    if group is None:
        raise Exception('groupname %s not found in database' % groupname)
    return group.id
class ocho: def __init__(self):
self.cadena='' def getString(self): self.cadena = raw_input("Your desires are orders to
me: ") def printString(self): print "Here's your sentence: {cadena}".format(cadena=self.cadena) oct = ocho() oct.getString() oct.printString()
from django.urls import reverse

from oppia.test import OppiaTestCase


class CompletionRatesViewTest(OppiaTestCase):
    """Permission checks for the completion-rates report view.

    Admin and staff users must see the report; teachers and normal users
    must be redirected to the admin login.
    """

    fixtures = ['tests/test_user.json',
                'tests/test_oppia.json',
                'tests/test_quiz.json',
                'tests/test_permissions.json',
                'tests/test_cohort.json',
                'tests/test_course_permissions.json',
                'tests/test_usercoursesummary.json']

    def setUp(self):
        # File uses django.urls (Django >= 2.0, Python 3), so the
        # zero-argument super() form is safe here.
        super().setUp()
        self.allowed_users = [self.admin_user, self.staff_user]
        self.disallowed_users = [self.teacher_user, self.normal_user]

    def test_view_completion_rates(self):
        template = 'reports/completion_rates.html'
        url = reverse('reports:completion_rates')

        for allowed_user in self.allowed_users:
            self.client.force_login(user=allowed_user)
            response = self.client.get(url)
            self.assertTemplateUsed(response, template)
            self.assertEqual(response.status_code, 200)

        for disallowed_user in self.disallowed_users:
            self.client.force_login(user=disallowed_user)
            response = self.client.get(url)
            self.assertRedirects(response,
                                 '/admin/login/?next=' + url,
                                 302, 200)
import logging

log = logging.getLogger(__name__)

try:
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.normalization import BatchNormalization
    from keras.layers.advanced_activations import PReLU, LeakyReLU
    from keras.optimizers import Adagrad, Adadelta, RMSprop, Adam
    from keras.utils import to_categorical
    import_keras = True
except ImportError:
    # Only the import failure is expected here; anything else should surface.
    import_keras = False
    log.info('could not import keras. Neural networks will not be used')


def keras_create_model(params, problem_type):
    """Build a Sequential neural net from a hyper-parameter dict.

    :param params: dict with keys 'number_layers', 'units', 'input_dim',
        'activation', 'batch_normalization', 'dropout', 'output_dim',
        'optimizer', 'learning_rate'
    :param problem_type: 'classification' or 'regression'; classification
        appends a sigmoid output activation
    :return: the compiled keras model
    """
    log.info('creating NN structure')
    model = Sequential()
    for layer_index in range(int(params['number_layers'])):
        if layer_index == 0:
            # Only the first layer needs the input dimension.
            model.add(Dense(units=params['units'],
                            input_dim=params['input_dim']))
        else:
            model.add(Dense(units=params['units']))
        model.add(Activation(params['activation']))
        if params['batch_normalization']:
            model.add(BatchNormalization())
        model.add(Dropout(params['dropout']))
    model.add(Dense(params['output_dim']))
    if problem_type == 'classification':
        model.add(Activation('sigmoid'))
    keras_compile_model(model, params, problem_type)
    return model


def keras_compile_model(model, params, problem_type):
    """Compile *model* in place (also useful to reset its weights).

    Chooses the optimizer from params['optimizer'] (default RMSprop) and
    the loss from the problem type and output dimension.
    """
    log.info('compiling NN model')
    if params['optimizer'] == 'Adagrad':
        optimizer = Adagrad(lr=params['learning_rate'])
    elif params['optimizer'] == 'Adadelta':
        optimizer = Adadelta(lr=params['learning_rate'])
    elif params['optimizer'] == 'Adam':
        optimizer = Adam(lr=params['learning_rate'])
    else:
        optimizer = RMSprop(lr=params['learning_rate'])

    if problem_type == 'regression':
        loss = 'mse'
    elif params['output_dim'] == 2:
        loss = 'binary_crossentropy'
    else:
        loss = 'categorical_crossentropy'

    model.compile(loss=loss, optimizer=optimizer)
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: route53
    :platform: Unix
    :synopsis: Module contains a useful Route53 class.

.. version:: @VERSION@
.. author:: Kevin Glisson (kglisson@netflix.com),
           Patrick Kelley (patrick@netflix.com) @monkeysecurity

"""
import os
import re

import boto
import boto.route53.record

from security_monkey import app


class ZoneIDNotFound(Exception):
    """Raised when no hosted zone matches any suffix of the given domain.

    Previously this name was raised without ever being defined, which
    turned the intended error into a NameError.
    """
    pass


class Route53Service(object):
    """
    Class provides useful functions of manipulating Route53 records
    """

    def __init__(self, **kwargs):
        super(Route53Service, self).__init__(**kwargs)
        self.conn = boto.connect_route53()

        try:
            self.hostname = os.environ['EC2_PUBLIC_HOSTNAME']
        except KeyError:
            # NOTE(review): self.hostname stays unset here; register() and
            # unregister() will raise AttributeError off-EC2 — confirm this
            # is the intended failure mode before changing it.
            app.logger.warn("We cannot register a domain on non ec2 instances")

    def register(self, fqdn, exclusive=False, ttl=60, type='CNAME', regions=None):
        """Create a record pointing *fqdn* at this instance's hostname.

        :param fqdn: domain name to register; underscores become hyphens
            and any character outside [word, '-', '.'] is stripped
        :param exclusive: when True, delete any existing records for *fqdn*
            of the same type first
        :param ttl: record TTL in seconds
        :param type: record type (default CNAME)
        :param regions: unused here — presumably reserved for callers;
            TODO confirm
        """
        fqdn = fqdn.replace('_', '-')
        fqdn = re.sub(r'[^\w\-\.]', '', fqdn)
        app.logger.debug('route53: register fqdn: {}, hostname: {}'.format(fqdn, self.hostname))
        zone_id = self._get_zone_id(fqdn)

        if exclusive:
            app.logger.debug('route53: making fqdn: {} exclusive'.format(fqdn))
            rrsets = self.conn.get_all_rrsets(zone_id, type, name=fqdn)
            for rrset in rrsets:
                if rrset.name == fqdn + '.':
                    app.logger.debug('found fqdn to delete: {}'.format(rrset))
                    # Delete each resource record individually before re-creating.
                    for rr in rrset.resource_records:
                        changes = boto.route53.record.ResourceRecordSets(self.conn, zone_id)
                        changes.add_change("DELETE", fqdn, type, ttl).add_value(rr)
                        changes.commit()

        changes = boto.route53.record.ResourceRecordSets(self.conn, zone_id)
        changes.add_change("CREATE", fqdn, type, ttl).add_value(self.hostname)
        changes.commit()

    def unregister(self, fqdn, ttl=60, type='CNAME'):
        """Delete the record pointing *fqdn* at this instance's hostname."""
        # Unregister this fqdn
        fqdn = fqdn.replace('_', '-')
        fqdn = re.sub(r'[^\w\-\.]', '', fqdn)
        app.logger.debug('route53: unregister fqdn: {}, hostname: {}'.format(fqdn, self.hostname))
        zone_id = self._get_zone_id(fqdn)
        changes = boto.route53.record.ResourceRecordSets(self.conn, zone_id)
        changes.add_change("DELETE", fqdn, type, ttl).add_value(self.hostname)
        changes.commit()

    def _get_zone_id(self, domain):
        """Return the hosted-zone id for the longest matching zone of *domain*.

        Strips leading labels one at a time until a hosted zone matches.
        Raises ZoneIDNotFound when no suffix of *domain* matches any zone.
        """
        if domain[-1] != '.':
            domain += '.'
        result = self.conn.get_all_hosted_zones()
        hosted_zones = result['ListHostedZonesResponse']['HostedZones']
        while domain != '.':
            for zone in hosted_zones:
                app.logger.debug("{} {}".format(zone['Name'], domain))
                if zone['Name'] == domain:
                    return zone['Id'].replace('/hostedzone/', '')
            else:
                # for/else: no zone matched this suffix, drop the left-most
                # label and try again with the parent domain.
                domain = domain[domain.find('.') + 1:]
        raise ZoneIDNotFound(domain)
from __future__ import absolute_import, unicode_literals import json from django.template.loader import render_to_string from django.utils.translation import ugettext_lazy as _ from wagtail.wagtailadmin.widgets import AdminChooser class AdminSnippetChooser(AdminChooser): target_content_type = None def __init__(self, content_type=None, **kwargs): if 'snippet_type_name' in kwargs: snippet_type_name = kwargs.pop('snippet_type_name') self.choose_one_text = _('Choose %s') % snippet_type_name self.choose_another_text = _('Choose another %s') % snippet_type_name super(AdminSnippetChooser, self).__init__(**kwargs) if content_type is not None: self.target_content_type = content_type def render_html(self, name, value, attrs): original_field_html = super(AdminSnippetChooser, self).render_html(name, value, attrs) model_class = self.target_content_type.model_class() instance = self.get_instance(model_class, value) return render_to_string("wagtailsnippets/widgets/snippet_chooser.html", { 'widget': self, 'original_field_html': original_field_html, 'attrs': attrs, 'value': value, 'item': instance, }) def render_js_init(self, id_, name, value): content_type = self.target_content_type re
turn "createSnippetChooser({id}, {content_type
});".format( id=json.dumps(id_), content_type=json.dumps('{app}/{model}'.format( app=content_type.app_label, model=content_type.model)))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# Program Name:           holy_orders
# Program Description:    Update program for the Abbot Cantus API server.
#
# Filename:               holy_orders/current.py
# Purpose:                Functions to determine which resources to update.
#
# Copyright (C) 2015, 2016 Christopher Antila
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
'''
Functions to determine which resources to update.
'''

import datetime
import logging

import tornado.log

import iso8601


# settings
LOG_LEVEL = logging.DEBUG

# script-level "globals"
_log = tornado.log.app_log


def _now_wrapper():
    '''
    A wrapper function for datetime.datetime.utcnow() that can be mocked for automated tests.

    Returns a timezone-aware UTC datetime.
    '''
    return datetime.datetime.now(datetime.timezone.utc)


def get_last_updated(updates_db, rtype):
    '''
    Get a :class:`datetime` of the most recent update for a resource type.

    :param updates_db: A :class:`Connection` to the database that holds
    :type updates_db: :class:`sqlite3.Connection`
    :param str rtype: The resource type to check.
    :returns: The time of the most recent update for the resource type.
    :rtype: :class:`datetime.datetime`

    If the database's most recent update is recorded as ``'never'``, meaning the resource type was
    never updated, the :class:`datetime` returned corresponds to Unix time ``0``.
    '''
    last_update = updates_db.cursor().execute('SELECT updated FROM rtypes WHERE name=?', (rtype,))
    last_update = last_update.fetchone()[0]

    if last_update == 'never':
        # BUG FIX: this must be a timezone-*aware* datetime. Everything else
        # in this module uses aware UTC datetimes (see _now_wrapper()), and
        # subtracting a naive datetime from an aware one raises TypeError,
        # which previously crashed calculate_chant_updates() for a
        # never-updated resource type.
        return datetime.datetime.fromtimestamp(0.0, datetime.timezone.utc)
    else:
        # iso8601 timestamps stored by update_db() parse to aware datetimes
        return iso8601.parse_date(last_update)


def should_update(rtype, config, updates_db):
    '''
    Check whether HolyOrders "should update" resources of a particular type.

    :param str rtype: The resource type to check.
    :param config: Dictionary of the configuration file that has our data.
    :type config: :class:`configparser.ConfigParser`
    :param updates_db: A :class:`Connection` to the database that holds
    :type updates_db: :class:`sqlite3.Connection`
    :returns: Whether the resource type should be updated.
    :rtype: bool
    '''
    last_update = get_last_updated(updates_db, rtype)

    # a "never updated" resource type is recorded as the Unix epoch (1970)
    if last_update.year < 1990:
        _log.info('should_update({0}) -> True (first update)'.format(rtype))
        return True

    late_update_delta = _now_wrapper() - last_update

    # config values look like e.g. '4d' (days) or '12h' (hours)
    update_freq_delta = config['update_frequency'][rtype]
    if update_freq_delta.endswith('d'):
        update_freq_delta = datetime.timedelta(days=int(update_freq_delta[:-1]))
    else:
        update_freq_delta = datetime.timedelta(hours=int(update_freq_delta[:-1]))

    if late_update_delta >= update_freq_delta:
        _log.info('should_update({0}) -> True'.format(rtype))
        return True
    else:
        _log.info('should_update({0}) -> False'.format(rtype))
        return False


def calculate_chant_updates(updates_db):
    '''
    Determine which dates should be requested for updates of "chant" resources.

    :param updates_db: A :class:`Connection` to the database that holds
    :type updates_db: :class:`sqlite3.Connection`
    :returns: The dates that require an update. These are formatted as YYYYMMDD, so they may be used
        directly in Drupal URLs.
    :rtype: list of str

    If no updates are required, the function returns an empty list.

    To ensure no updates are missed, this function always includes one additional day than
    required. For example, if the most recent update was earlier today, then this function requests
    updates for both today and yesterday.

    However, also note that "days ago" is determined in 24-hour periods, rather than the "yesterday"
    style of thinking that humans use. The actual dates requested aren't especially important---it's
    enough to know that this function errs on the side of requesting more days than required.
    '''
    post = []

    last_update = get_last_updated(updates_db, 'chant')
    delta = _now_wrapper() - last_update

    if delta.total_seconds() >= 0:
        # +2 rather than +1: deliberately over-request by one day (see docstring)
        days_to_request = delta.days + 2
        one_day = datetime.timedelta(days=1)
        cursor = _now_wrapper()
        for _ in range(days_to_request):
            post.append(cursor.strftime('%Y%m%d'))
            cursor -= one_day

    _log.info('Requesting chant updates for {}'.format(post))
    return post


def update_db(updates_db, rtype, time):
    '''
    Revise the updates database to show a new "last updated" time for a resource type.

    :param updates_db: A :class:`Connection` to the database that holds
    :type updates_db: :class:`sqlite3.Connection`
    :param str rtype: The resource type that was updated.
    :param time: The time at which the resource type is current.
    :type time: :class:`datetime.datetime`

    While it's tempting to think that the ``time`` argument should correspond to the moment this
    function is called, that's not true---especially for resource types that take considerable time
    to update (chants). Therefore the :class:`datetime` given to this function should correspond to
    the moment just before data are requested from Drupal.
    '''
    time = time.isoformat()
    updates_db.cursor().execute('UPDATE rtypes SET updated=? WHERE name=?;', (time, rtype))
    updates_db.commit()
    extended by any subscription manager test case to make sure nothing
    on the actual system is read/touched, and appropriate mocks/stubs
    are in place.
    """
    def setUp(self):
        # Purpose: replace every system touchpoint (config, D-Bus, identity
        # certs, yum repo files, entitlement/product dirs) with stubs/mocks
        # so test cases never read or write the real host.

        # No matter what, stop all patching (even if we have a failure in setUp itself)
        self.addCleanup(patch.stopall)

        # Never attempt to use the actual managercli.cfg which points to a
        # real file in etc.
        self.mock_cfg_parser = stubs.StubConfig()

        original_conf = subscription_manager.managercli.conf

        def unstub_conf():
            subscription_manager.managercli.conf = original_conf

        # Mock makes it damn near impossible to mock a module attribute (which we shouldn't be using
        # in the first place because it's terrible) so we monkey-patch it ourselves.
        # TODO Fix this idiocy by not reading the damn config on module import
        subscription_manager.managercli.conf = config.Config(self.mock_cfg_parser)
        self.addCleanup(unstub_conf)

        # NOTE(review): self.set_facts() is expected to be provided by the
        # class or a subclass -- not visible in this chunk.
        facts_host_patcher = patch('rhsmlib.dbus.facts.FactsClient', auto_spec=True)
        self.mock_facts_host = facts_host_patcher.start()
        self.mock_facts_host.return_value.GetFacts.return_value = self.set_facts()

        # By default mock that we are registered. Individual test cases
        # can override if they are testing disconnected scenario.
        id_mock = NonCallableMock(name='FixtureIdentityMock')
        id_mock.exists_and_valid = Mock(return_value=True)
        id_mock.uuid = 'fixture_identity_mock_uuid'
        id_mock.name = 'fixture_identity_mock_name'
        id_mock.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
        id_mock.keypath.return_value = "/not/a/real/key/path"
        id_mock.certpath.return_value = "/not/a/real/cert/path"

        # Don't really care about date ranges here:
        self.mock_calc = NonCallableMock()
        self.mock_calc.calculate.return_value = None

        # Avoid trying to read real /etc/yum.repos.d/redhat.repo
        self.mock_repofile_path_exists_patcher = patch('subscription_manager.repolib.YumRepoFile.path_exists')
        mock_repofile_path_exists = self.mock_repofile_path_exists_patcher.start()
        mock_repofile_path_exists.return_value = True

        inj.provide(inj.IDENTITY, id_mock)
        inj.provide(inj.PRODUCT_DATE_RANGE_CALCULATOR, self.mock_calc)

        # Stub every cache the injector can hand out so no status files are
        # read from or written to disk.
        inj.provide(inj.ENTITLEMENT_STATUS_CACHE, stubs.StubEntitlementStatusCache())
        inj.provide(inj.POOL_STATUS_CACHE, stubs.StubPoolStatusCache())
        inj.provide(inj.PROD_STATUS_CACHE, stubs.StubProductStatusCache())
        inj.provide(inj.CONTENT_ACCESS_MODE_CACHE, stubs.StubContentAccessModeCache())
        inj.provide(inj.SUPPORTED_RESOURCES_CACHE, stubs.StubSupportedResourcesCache())
        inj.provide(inj.SYSPURPOSE_VALID_FIELDS_CACHE, stubs.StubSyspurposeValidFieldsCache())
        inj.provide(inj.CURRENT_OWNER_CACHE, stubs.StubCurrentOwnerCache)
        inj.provide(inj.OVERRIDE_STATUS_CACHE, stubs.StubOverrideStatusCache())
        inj.provide(inj.RELEASE_STATUS_CACHE, stubs.StubReleaseStatusCache())
        inj.provide(inj.AVAILABLE_ENTITLEMENT_CACHE, stubs.StubAvailableEntitlementsCache())
        inj.provide(inj.PROFILE_MANAGER, stubs.StubProfileManager())
        # By default set up an empty stub entitlement and product dir.
        # Tests need to modify or create their own but nothing should hit
        # the system.
        self.ent_dir = stubs.StubEntitlementDirectory()
        inj.provide(inj.ENT_DIR, self.ent_dir)
        self.prod_dir = stubs.StubProductDirectory()
        inj.provide(inj.PROD_DIR, self.prod_dir)

        # Installed products manager needs PROD_DIR injected first
        inj.provide(inj.INSTALLED_PRODUCTS_MANAGER, stubs.StubInstalledProductsManager())

        self.stub_cp_provider = stubs.StubCPProvider()
        self._release_versions = []
        # Route release-version lookups through this fixture (see
        # _get_release_versions below) instead of the CDN.
        self.stub_cp_provider.content_connection.get_versions = self._get_release_versions

        inj.provide(inj.CP_PROVIDER, self.stub_cp_provider)
        inj.provide(inj.CERT_SORTER, stubs.StubCertSorter())

        # setup and mock the plugin_manager
        plugin_manager_mock = MagicMock(name='FixturePluginManagerMock')
        plugin_manager_mock.runiter.return_value = iter([])
        inj.provide(inj.PLUGIN_MANAGER, plugin_manager_mock)
        inj.provide(inj.DBUS_IFACE, Mock(name='FixtureDbusIfaceMock'))

        pooltype_cache = Mock()
        inj.provide(inj.POOLTYPE_CACHE, pooltype_cache)
        # don't use file based locks for tests
        inj.provide(inj.ACTION_LOCK, RLock)

        self.stub_facts = stubs.StubFacts()
        inj.provide(inj.FACTS, self.stub_facts)

        content_access_cache_mock = MagicMock(name='ContentAccessCacheMock')
        inj.provide(inj.CONTENT_ACCESS_CACHE, content_access_cache_mock)

        self.dbus_patcher = patch('subscription_manager.managercli.CliCommand._request_validity_check')
        self.dbus_patcher.start()

        # No tests should be trying to connect to any configure or test server
        # so really, everything needs this mock. May need to be in __init__, or
        # better, all test classes need to use SubManFixture
        self.is_valid_server_patcher = patch("subscription_manager.managercli.is_valid_server_info")
        is_valid_server_mock = self.is_valid_server_patcher.start()
        is_valid_server_mock.return_value = True

        # No tests should be trying to test the proxy connection
        # so really, everything needs this mock. May need to be in __init__, or
        # better, all test classes need to use SubManFixture
        self.test_proxy_connection_patcher = patch("subscription_manager.managercli.CliCommand.test_proxy_connection")
        test_proxy_connection_mock = self.test_proxy_connection_patcher.start()
        test_proxy_connection_mock.return_value = True

        self.syncedstore_patcher = patch('subscription_manager.syspurposelib.SyncedStore')
        syncedstore_mock = self.syncedstore_patcher.start()

        set_up_mock_sp_store(syncedstore_mock)

        # Populated by write_tempfile(); closed (and thus deleted) in tearDown.
        self.files_to_cleanup = []

    def tearDown(self):
        # Guard against subclasses that override setUp() without calling super.
        if not hasattr(self, 'files_to_cleanup'):
            return
        for f in self.files_to_cleanup:
            # Assuming these are tempfile.NamedTemporaryFile, created with
            # the write_tempfile() method in this class.
            f.close()

    def write_tempfile(self, contents):
        """
        Write out a tempfile and append it to the list of those to be
        cleaned up in tearDown.
        """
        fid = tempfile.NamedTemporaryFile(mode='w+', suffix='.tmp')
        fid.write(contents)
        fid.seek(0)
        self.files_to_cleanup.append(fid)
        return fid

    def set_consumer_auth_cp(self, consumer_auth_cp):
        """Swap in a specific consumer-auth connection on the injected provider."""
        cp_provider = inj.require(inj.CP_PROVIDER)
        cp_provider.consumer_auth_cp = consumer_auth_cp

    def get_consumer_cp(self):
        """Return the consumer-auth connection from the injected CP provider."""
        cp_provider = inj.require(inj.CP_PROVIDER)
        consumer_cp = cp_provider.get_consumer_auth_cp()
        return consumer_cp

    # The ContentConnection used for reading release versions from
    # the cdn. The injected one uses this.
    def _get_release_versions(self, listing_path):
        return self._release_versions

    # For changing the injected consumer identity to one that passes "is_valid"
    def _inject_mock_valid_consumer(self, uuid=None):
        """For changing injected consumer identity to one that passes is_valid()

        Returns the injected identity if it need to be examined.
        """
        identity = NonCallableMock(name='ValidIdentityMock')
        identity.uuid = uuid or "VALIDCONSUMERUUID"
        identity.is_valid = Mock(return_value=True)
        identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
        inj.provide(inj.IDENTITY, identity)
        return identity

    def _inject_mock_invalid_consumer(self, uuid=None):
        """For chaining injected consumer identity to one that fails is_valid()

        Returns the injected identity if it need to be examined.
        """
        invalid_identity = NonCallableMock(name='InvalidIdentityMock')
        invalid_identity.is_valid
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import tensorflow as tf

from tensorforce.core.parameters import Parameter


class Random(Parameter):
    """
    Random hyperparameter (specification key: `random`).

    Args:
        distribution ("normal" | "uniform"): Distribution type for random hyperparameter value
            (<span style="color:#C00000"><b>required</b></span>).
        kwargs: Additional arguments dependent on distribution type.<br>
            Normal distribution:
            <ul>
            <li><b>mean</b> (<i>float</i>) &ndash; Mean
            (<span style="color:#00C000"><b>default</b></span>: 0.0).</li>
            <li><b>stddev</b> (<i>float > 0.0</i>) &ndash; Standard deviation
            (<span style="color:#00C000"><b>default</b></span>: 1.0).</li>
            </ul>
            Uniform distribution:
            <ul>
            <li><b>minval</b> (<i>int / float</i>) &ndash; Lower bound
            (<span style="color:#00C000"><b>default</b></span>: 0 / 0.0).</li>
            <li><b>maxval</b> (<i>float > minval</i>) &ndash; Upper bound
            (<span style="color:#00C000"><b>default</b></span>: 1.0 for float,
            <span style="color:#C00000"><b>required</b></span> for int).</li>
            </ul>
        name (string): <span style="color:#0000C0"><b>internal use</b></span>.
        dtype (type): <span style="color:#0000C0"><b>internal use</b></span>.
        shape (iter[int > 0]): <span style="color:#0000C0"><b>internal use</b></span>.
        min_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
        max_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
    """

    def __init__(
        self, *, distribution, name=None, dtype=None, shape=(), min_value=None, max_value=None,
        **kwargs
    ):
        # Only numeric hyperparameters and the two supported distributions.
        assert dtype in ('int', 'float')
        assert distribution in ('normal', 'uniform')

        self.distribution = distribution
        # Remaining kwargs (mean/stddev or minval/maxval) are looked up lazily.
        self.kwargs = kwargs

        super().__init__(
            name=name, dtype=dtype, shape=shape, min_value=min_value, max_value=max_value
        )

    def min_value(self):
        # Only a uniform distribution has a hard lower bound.
        if self.distribution != 'uniform':
            return super().min_value()
        return self.spec.py_type()(self.kwargs.get('minval', 0))

    def max_value(self):
        # Only a uniform distribution has a hard upper bound.
        if self.distribution != 'uniform':
            return super().max_value()
        return self.spec.py_type()(self.kwargs.get('maxval', 1.0))

    def final_value(self):
        # Deterministic stand-in value: the distribution's mean / midpoint.
        py_type = self.spec.py_type()
        if self.distribution == 'normal':
            return py_type(self.kwargs.get('mean', 0.0))
        elif self.distribution == 'uniform':
            low = self.kwargs.get('minval', 0.0)
            high = self.kwargs.get('maxval', 1.0)
            return py_type((high + low) / 2.0)
        else:
            return super().final_value()

    def parameter_value(self, *, step):
        # Sample a fresh value from the configured distribution each call.
        if self.distribution == 'normal':
            return tf.random.normal(
                shape=self.spec.shape, dtype=self.spec.tf_type(),
                mean=self.kwargs.get('mean', 0.0), stddev=self.kwargs.get('stddev', 1.0)
            )
        else:  # 'uniform' (guaranteed by the constructor assertion)
            return tf.random.uniform(
                shape=self.spec.shape, dtype=self.spec.tf_type(),
                minval=self.kwargs.get('minval', 0), maxval=self.kwargs.get('maxval')
            )
from distutils.core import setup

# Single source of truth for the release number; keeps `version` and
# `download_url` in sync (previously download_url was hard-coded to the
# stale 0.1 tarball while version was 0.18).
VERSION = '0.18'

setup(
    name='ml_easy_peer_grade',
    packages=['ml_easy_peer_grade'],
    version=VERSION,
    scripts=['bin/ml_easy_peer_grade'],
    description='Ez peer grade your project members, exclusive to privileged Bilkent students',
    author='Cuklahan Dorum',
    author_email='badass@alumni.bilkent.edu.tr',
    url='https://github.com/cagdass/ml-easy-peer-grade',
    download_url='https://github.com/cagdass/ml-easy-peer-grade/tarball/' + VERSION,
    keywords=['testing'],
    classifiers=[],
)
evisions")


class Revisions(Collection):
    """
    A collection of revisions indexes by title, page_id and user_text.

    Note that revisions of deleted pages are queriable via
    :class:`mw.api.DeletedRevs`.
    """

    # Revision properties accepted by the API's rvprop parameter.
    PROPERTIES = {'ids', 'flags', 'timestamp', 'user', 'userid', 'size',
                  'sha1', 'contentmodel', 'comment', 'parsedcomment',
                  'content', 'tags', 'flagged'}

    # Symbolic targets accepted by rvdiffto in addition to a revision id.
    DIFF_TO = {'prev', 'next', 'cur'}

    # This is *not* the right way to do this, but it should work for all queries.
    MAX_REVISIONS = 50

    def get(self, rev_id, **kwargs):
        """
        Get a single revision based on it's ID.  Throws a :py:class:`KeyError`
        if the rev_id cannot be found.

        :Parameters:
            rev_id : int
                Revision ID
            ``**kwargs``
                Passed to :py:meth:`query`

        :Returns:
            A single rev dict
        """
        rev_id = int(rev_id)

        revs = list(self.query(revids={rev_id}, **kwargs))

        if len(revs) < 1:
            raise KeyError(rev_id)
        else:
            return revs[0]

    def query(self, *args, limit=None, **kwargs):
        """
        Get revision information.
        See `<https://www.mediawiki.org/wiki/API:Properties#revisions_.2F_rv>`_

        :Parameters:
            properties : set(str)
                Which properties to get for each revision:

                * ids            - The ID of the revision
                * flags          - Revision flags (minor)
                * timestamp      - The timestamp of the revision
                * user           - User that made the revision
                * userid         - User id of revision creator
                * size           - Length (bytes) of the revision
                * sha1           - SHA-1 (base 16) of the revision
                * contentmodel   - Content model id
                * comment        - Comment by the user for revision
                * parsedcomment  - Parsed comment by the user for the revision
                * content        - Text of the revision
                * tags           - Tags for the revision
            limit : int
                Limit how many revisions will be returned
                No more than 500 (5000 for bots) allowed
            start_id : int
                From which revision id to start enumeration (enum)
            end_id : int
                Stop revision enumeration on this revid
            start : :class:`mw.Timestamp`
                From which revision timestamp to start enumeration (enum)
            end : :class:`mw.Timestamp`
                Enumerate up to this timestamp
            direction : str
                "newer" or "older"
            user : str
                Only include revisions made by user_text
            excludeuser : bool
                Exclude revisions made by user
            tag : str
                Only list revisions tagged with this tag
            expandtemplates : bool
                Expand templates in revision content (requires "content" property)
            generatexml : bool
                Generate XML parse tree for revision content (requires "content" property)
            parse : bool
                Parse revision content (requires "content" property)
            section : int
                Only retrieve the content of this section number
            token : set(str)
                Which tokens to obtain for each revision

                * rollback - See `<https://www.mediawiki.org/wiki/API:Edit_-_Rollback#Token>`_
            rvcontinue : str
                When more results are available, use this to continue
            diffto : int
                Revision ID to diff each revision to. Use "prev", "next" and
                "cur" for the previous, next and current revision respectively
            difftotext : str
                Text to diff each revision to. Only diffs a limited number of
                revisions. Overrides diffto. If section is set, only that
                section will be diffed against this text
            contentformat : str
                Serialization format used for difftotext and expected for output of content

                * text/x-wiki
                * text/javascript
                * text/css
                * text/plain
                * application/json

        :Returns:
            An iterator of rev dicts returned from the API.
        """
        # Page through the API MAX_REVISIONS at a time until either the
        # caller's limit is reached or the API stops returning a continue.
        revisions_yielded = 0
        done = False
        while not done:
            if limit == None:
                kwargs['limit'] = self.MAX_REVISIONS
            else:
                kwargs['limit'] = min(limit - revisions_yielded, self.MAX_REVISIONS)

            rev_docs, rvcontinue = self._query(*args, **kwargs)

            for doc in rev_docs:
                yield doc
                revisions_yielded += 1
                if limit != None and revisions_yielded >= limit:
                    done = True
                    break

            if rvcontinue != None and len(rev_docs) > 0:
                kwargs['rvcontinue'] = rvcontinue
            else:
                done = True

    def _query(self, revids=None, titles=None, pageids=None, properties=None,
               limit=None, start_id=None, end_id=None, start=None,
               end=None, direction=None, user=None, excludeuser=None,
               tag=None, expandtemplates=None, generatexml=None,
               parse=None, section=None, token=None, rvcontinue=None,
               diffto=None, difftotext=None, contentformat=None):
        # Perform a single API request and return (rev_docs, rvcontinue).
        # Each rev doc gets its (revision-stripped) page doc attached under
        # the 'page' key.

        params = {
            'action': "query",
            'prop': "revisions"
        }

        params['revids'] = self._items(revids, type=int)
        params['titles'] = self._items(titles)
        params['pageids'] = self._items(pageids, type=int)
        params['rvprop'] = self._items(properties, levels=self.PROPERTIES)

        if revids == None:  # Can't have a limit unless revids is none
            params['rvlimit'] = none_or(limit, int)

        params['rvstartid'] = none_or(start_id, int)
        params['rvendid'] = none_or(end_id, int)
        params['rvstart'] = self._check_timestamp(start)
        params['rvend'] = self._check_timestamp(end)

        params['rvdir'] = self._check_direction(direction)
        params['rvuser'] = none_or(user, str)
        # NOTE(review): the docstring describes excludeuser as a bool, but it
        # is coerced with int here -- confirm against the API's expectation.
        params['rvexcludeuser'] = none_or(excludeuser, int)
        params['rvtag'] = none_or(tag, str)
        params['rvexpandtemplates'] = none_or(expandtemplates, bool)
        params['rvgeneratexml'] = none_or(generatexml, bool)
        params['rvparse'] = none_or(parse, bool)
        params['rvsection'] = none_or(section, int)
        params['rvtoken'] = none_or(token, str)
        # NOTE(review): the docstring says rvcontinue is a str; coercing with
        # int would fail for non-numeric continue tokens -- verify.
        params['rvcontinue'] = none_or(rvcontinue, int)
        params['rvdiffto'] = self._check_diffto(diffto)
        params['rvdifftotext'] = none_or(difftotext, str)
        params['rvcontentformat'] = none_or(contentformat, str)

        doc = self.session.get(params)

        try:
            if 'query-continue' in doc:
                rvcontinue = doc['query-continue']['revisions']['rvcontinue']
            else:
                rvcontinue = None

            pages = doc['query'].get('pages', {}).values()
            rev_docs = []

            for page_doc in pages:
                # Skip pages with no revision payload (missing/deleted pages).
                if 'missing' in page_doc or 'revisions' not in page_doc:
                    continue

                page_rev_docs = page_doc['revisions']
                del page_doc['revisions']

                for rev_doc in page_rev_docs:
                    rev_doc['page'] = page_doc

                rev_docs.extend(page_rev_docs)

            return rev_docs, rvcontinue

        except KeyError as e:
            raise MalformedResponse(str(e), doc)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
=========================================================================

  Program:   Visualization Toolkit
  Module:    TestNamedColorsIntegration.py

  Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
  All rights reserved.
  See Copyright.txt or http://www.kitware.com/Copyright.htm for details.

     This software is distributed WITHOUT ANY WARRANTY; without even
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
     PURPOSE.  See the above copyright notice for more information.

=========================================================================
'''

import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot

VTK_DATA_ROOT = vtkGetDataRoot()


class TestAllMaskBits(vtk.test.Testing.vtkTest):

    def testAllMaskBits(self):
        """Render beach.tif through each vtkImageMaskBits operation in a 3x2 grid
        and compare against the stored baseline image."""

        # This script calculates the luminance of an image

        renWin = vtk.vtkRenderWindow()

        # Image pipeline

        image1 = vtk.vtkTIFFReader()
        image1.SetFileName(VTK_DATA_ROOT + "/Data/beach.tif")

        # "beach.tif" image contains ORIENTATION tag which is
        # ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
        # reader parses this tag and sets the internal TIFF image
        # orientation accordingly.  To overwrite this orientation with a vtk
        # convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
        # SetOrientationType method with parameter value of 4.
        image1.SetOrientationType(4)

        shrink = vtk.vtkImageShrink3D()
        shrink.SetInputConnection(image1.GetOutputPort())
        shrink.SetShrinkFactors(2, 2, 1)

        operators = ["ByPass", "And", "Nand", "Xor", "Or", "Nor"]

        operator = dict()
        mapper = dict()
        actor = dict()
        imager = dict()

        for idx, op in enumerate(operators):
            if op != "ByPass":
                operator[idx] = vtk.vtkImageMaskBits()
                operator[idx].SetInputConnection(shrink.GetOutputPort())
                # Dispatch to SetOperationToAnd/Nand/Xor/Or/Nor by name.
                # (Replaces the original eval() call: getattr resolves the
                # method safely without executing a constructed string.)
                getattr(operator[idx], 'SetOperationTo' + op)()
                operator[idx].SetMasks(255, 255, 0)

            mapper[idx] = vtk.vtkImageMapper()
            if op != "ByPass":
                mapper[idx].SetInputConnection(operator[idx].GetOutputPort())
            else:
                # The ByPass cell shows the unmasked source image.
                mapper[idx].SetInputConnection(shrink.GetOutputPort())
            mapper[idx].SetColorWindow(255)
            mapper[idx].SetColorLevel(127.5)

            actor[idx] = vtk.vtkActor2D()
            actor[idx].SetMapper(mapper[idx])

            imager[idx] = vtk.vtkRenderer()
            imager[idx].AddActor2D(actor[idx])

            renWin.AddRenderer(imager[idx])

        # Lay the six renderers out in a 3-column x 2-row grid of viewports.
        column = 0
        row = 0
        deltaX = 1.0 / 3.0
        deltaY = 1.0 / 2.0

        for idx in range(len(operators)):
            imager[idx].SetViewport(column * deltaX, row * deltaY,
                                    (column + 1) * deltaX, (row + 1) * deltaY)
            column += 1
            if column > 2:
                column = 0
                row += 1

        renWin.SetSize(384, 256)

        # render and interact with data

        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)

        renWin.Render()

        img_file = "TestAllMaskBits.png"
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(),
                                      vtk.test.Testing.getAbsImagePath(img_file),
                                      threshold=25)
        vtk.test.Testing.interact()


if __name__ == "__main__":
    vtk.test.Testing.main([(TestAllMaskBits, 'test')])
,'period':2},
                 {'name':'fitness','period':3},
                 {'name':'reading','period':4},
                 {'name':'rc-cars','period':14}]},
    {'forename':'Andy','surname':'Andy','age':51,
     'tags':('family',),
     'job':{'name':'Killer','category':'Business'},
     'hobbies':[{'name':'counterstrike','period':1}]}
    ]


class AutoSet(object):
    # Base class: assigns every keyword argument as an instance attribute.
    def __init__(self, **kwargs):
        for name in kwargs:
            self.__setattr__(name, kwargs[name])


class Person(AutoSet):
    # Class-level defaults for attributes not supplied to the constructor.
    # NOTE(review): `hobbies = []` is a shared mutable class attribute --
    # instances constructed without a `hobbies` kwarg all see the same list.
    forename = ''
    surname = ''
    tags = ()
    age = 0
    job = None
    hobbies = []

    def __str__(self):
        return '<Person surname={0} forename={1} age={2} tags={3}>'.format(self.forename, self.surname, self.age, self.tags)


class Job(AutoSet):
    # Defaults for job records.
    name = ''
    category = ''


class Hobby(AutoSet):
    # Defaults for hobby records; `period` is in days (per the demo data).
    name = ''
    period = 0


# Object-based mirror of the dict-based demo data above.
objectData = [
    Person(forename='Marc',surname='Mine',age=35,
           tags=('family','work'),
           job=Job(name='Baker',category='Business'),
           hobbies=[Hobby(name='swimming',period=7),
                    Hobby(name='reading',period=1)]
           ),
    # NOTE(review): ('work') below is a plain string, not a 1-tuple --
    # probably intended to be ('work',). Left as-is; confirm against the
    # corresponding dictData entry before changing.
    Person(forename='Mike',surname='Meyer',age=14,
           tags=('work'),
           job=Job(name='Banker',category='Business'),
           hobbies=[Hobby(name='swimming',period=14)]
           ),
    Person(forename='Marc',surname='Andrew',age=78,
           tags=('hobby','work'),
job=Job(name='Police',category='Government'), hobbies=[Hobby(name='swimming',period=7), Hobby(name='music',period=1), Hobby(name='reading',period=2),] ), Person(forename='Marc',surname='Muscels',age=35, tags=('family','hobby'), job=Job(name='Teacher',category='Social'), hobbies=[Hobby(name='math',period=30), Hobby(name='ski',period=365)] ), Person(forename='Andy',surname='Young',age=11, tags=('family','work'), job=Job(name='President',category='Government'), hobbies=[Hobby(name='swimming',period=2), Hobby(name='fitness',period=3), Hobby(name='reading',period=4), Hobby(name='rc-cars',period=14)] ), Person(forename='Andy',surname='Andy',age=51, tags=('family',), job=Job(name='Killer',category='Business'), hobbies=[Hobby(name='counterstrike',period=1)] ) ] if __name__ == '__main__': from sqliter.query import select, and_, or_, c, test dictData = objectData matchTest = {'name':'Olaf','weight':67.3,'age':33,'married':True} def dev_null(self, *args): pass printOut = True if printOut: print_func = print else: print_func = dev_null print_func('-------- SELECT FROM dictData') for name in select().from_(dictData): print_func(name) print_func('-------- SELECT FROM dictData WHERE forename = Marc') for name in select().from_(dictData).where(forename='Marc'): print_func(name) print_func('-------- SELECT FROM dictData WHERE surname IN ("Meyer","Muscels")') for name in select().from_(dictData).where(c('surname').in_('Meyer','Muscels')): print_func(name) print_func('-------- SELECT FROM dictData WHERE age > 32') for name in select().from_(dictData).where(c('age') > 32): print_func(name) print_func('-------- SELECT FROM dictData WHERE forname=surname') for name in select().from_(dictData).where(c('forename') == c('surname')): print_func(name) print_func('-------- SELECT FROM dictData WHERE forname=surname OR age < 20') for name in select().from_(dictData).where(or_(c('forename') == c('surname'),c('age') < 20)): print_func(name) print_func('-------- SELECT FROM dictData WHERE 
forname="Marc" AND surname == "Andrew"') for name in select().from_(dictData).where(and_(c('forename') == 'Marc',c('surname') == 'Andrew')): print_func(name) print_func('-------- SELECT FROM dictData WHERE "family" IN tags') for name in select().from_(dictData).where(c('"family"').in_(c('tags'))): print_func(name) print_func('-------- SELECT FROM dictData WHERE job.name == "President"') for name in select().from_(dictData).where(c('job.name') == 'President'): print_func(name) print_func('-------- SELECT FROM dictData WHERE job.category == "Business"') for name in select().from_(dictData).where(c('job.category') == 'Business'): print_func(name) print_func('-------- SELECT FROM dictData WHERE job.category IN ("Business", "Social")') for name in select().from_(dictData).where(c('job.category').in_('Business','Social')): print_func(name) print_func('-------- SELECT FROM dictData WHERE hobbies[*].name == "reading"') for name in select().from_(dictData).where(c('hobbies[*].name') == "reading"): print_func(name) print_func('-------- SELECT FROM dictData WHERE hobbies[*].period < 7') for name in select().from_(dictData).where(c('hobbies[*].period') < 7): print_func(name) print_func('-------- SELECT FROM dictData WHERE asdkuh < 7') for name in select().from_(dictData).where(c('asdkuh') < 7): print_func(name) print_func('-------- SELECT FROM dictData WHERE hobbies[*].period < 7.first()') print_func(select().from_(dictData).where(c('hobbies[*].period') < 7).first()) print_func('-------- SELECT FROM dictData WHERE hobbies[*].period < 7.last()') print_func(select().from_(dictData).where(c('hobbies[*].period') < 7).last()) print_func('-------- TEST IF age > 22') print_func(test(c('age') > 22).match(matchTest)) print_func('-------- TEST IF age < 22') print_func(test(c('age') < 22).match(matchTest)) print_func('-------- TEST IF name == "Olaf"') print_func(test(c('name') == 'Olaf').match(matchTest)) print_func('-------- TEST IF name != "Olaf"') print_func(test(c('name') != 
'Olaf').match(matchTest)) print_func('-------- TEST IF name == "Olafs"') print_func(test(c('name') == 'Olafs').match(matchTest)) print_func('-------- TEST IF name LIKE "Ol%"') print_func(test(c('name').like('Ol%')).match(matchTest)) print_func('-------- TEST IF name LIKE "%laf"') print_func(test(c('name').like('%laf')).match(matchTest)) print_func('-------- TEST IF name LIKE "O%f"') print_func(test(c('name').like('O%f')).match(matchTest)) print_func('-------- TEST IF name LIKE "olaf"') print_func(test(c('name').like('olaf')).match(matchTest)) print_func('-------- SELECT * FROM dictData WHERE job.category IN ("Business", "Social")') for name in select('*').from_(dictData).where(c('job.category').in_('Business','Social')): print_func(name) print_func('-------- SELECT forname FROM dictData WHERE job.category IN ("Business", "Social")') for name in select('forename').from_(dictData).where(c('job.category').in_('Business','Social')): print_func(name) print_func('-------- SELECT forname,tags FROM dictData') for name in select('forename','tags').from_(dictData): print_func(name) print_func('-------- SELECT age FROM dictData WHERE forname="Marc" AND (surname == "Andrew" OR age < size).collect_fieldnames') print_func(select('age').from_(dictData).where(and_(c('forename') == 'Marc',or_(c('surname') == 'Andrew'),c('age') < c('size'))).collect_fieldnames()) print_func('-------- SELECT age FROM dictData WHERE forname="Marc" AND (surname == "Andrew" OR age < size).where_fieldnames') print_func(select('age').from_(dictData).where(and_(c('forename') == 'Marc',or_(c('surname') == 'Andrew'),c('age') < c('size'))).where_fieldnames()) print_func('-------- SELECT age FROM dictData WHERE forname="Marc" AND (surname == "Andrew" OR age < siz
# Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php

from concurrence.timer import Timeout
from concurrence.database.mysql import ProxyProtocol, PacketReader, PACKET_READ_RESULT, CLIENT_STATES, SERVER_STATES


class Proxy(object):
    """Forwards MySQL packets between a client stream and a server stream.

    The proxy switches direction based on the protocol state tracked by
    ProxyProtocol: while the state is a server state it reads from the
    server and forwards to the client, and vice versa.  The shared buffer
    uses NIO-style semantics (position/limit/clear/flip/compact, inferred
    from the calls below).  Subclasses may override next() to intercept
    protocol state transitions.
    """

    #errors
    EOF_READ = -1   # read side reached end-of-stream
    EOF_WRITE = -2  # write side refused data

    #direction
    CLIENT_TO_SERVER = 1
    SERVER_TO_CLIENT = 2

    def __init__(self, clientStream, serverStream, buffer, initState):
        """Set up the proxy; forwarding starts in client->server direction."""
        self.clientStream = clientStream
        self.serverStream = serverStream
        self.readStream = self.clientStream
        self.writeStream = self.serverStream
        self.direction = self.CLIENT_TO_SERVER
        self.protocol = ProxyProtocol(initState)
        self.reader = PacketReader()
        self.buffer = buffer
        # byte count of a partially read packet carried over to the next read
        self.remaining = 0

    def close(self):
        """Release all references; the proxy must not be used afterwards.

        NOTE(review): this only drops references -- it does not close the
        underlying streams; presumably the caller owns and closes them.
        """
        self.clientStream = None
        self.serverStream = None
        self.readStream = None
        self.writeStream = None
        self.protocol = None
        self.reader = None
        self.buffer = None

    def reset(self, state):
        """Reset the protocol state tracker to the given state."""
        self.protocol.reset(state)

    def readFromStream(self):
        """Read bytes from the current read stream into the buffer.

        Returns whatever stream.read() returns (falsy on end-of-stream).
        """
        #read some data from stream into buffer
        if self.remaining:
            #some leftover partially read packet from previous read, put it in front of buffer
            self.buffer.limit = self.buffer.position + self.remaining
            self.buffer.compact()
        else:
            #normal clear, position = 0, limit = capacity
            self.buffer.clear()
        #read data from socket
        return self.readStream.read(self.buffer, Timeout.current())

    def writeToStream(self):
        """Flush the buffer to the current write stream.

        :return: False if the stream rejected a write, True when fully flushed.
        """
        #forward data to receiving socket
        self.buffer.flip()
        while self.buffer.remaining:
            if not self.writeStream.write(self.buffer, Timeout.current()):
                return False
        return True

    def next(self, readResult, newState, prevState):
        """Hook invoked on every protocol packet-read / state transition.

        Return 0 to continue proxying normally; any non-zero value stops
        the current cycle and is propagated out of cycle().  The base
        implementation is a no-op.
        """
        return 0

    def cycle(self, readProtocol):
        """Perform one read / inspect / forward cycle in the current direction.

        :param readProtocol: the protocol-reading callable for this direction
            (protocol.readServer or protocol.readClient).
        :return: 0 (or a non-zero value from next()) on success,
            EOF_READ/EOF_WRITE when a stream ended.
        """
        if not self.readFromStream():
            return self.EOF_READ
        #inspect data read according to protocol
        n = 0
        self.buffer.flip()
        while True:
            readResult, newState, prevState = readProtocol(self.reader, self.buffer)
            #make note of any remaining data (half read packets),
            # we use buffer.compact to put remainder in front next time around
            self.remaining = self.buffer.remaining
            #take action depending on state transition
            n = self.next(readResult, newState, prevState)
            if n != 0:
                break
            if not (readResult & PACKET_READ_RESULT.MORE):
                break
        if n == 0:
            #write data trough to write stream
            if not self.writeToStream():
                return self.EOF_WRITE
        return n

    def run(self):
        """Proxy packets until a stream ends or next() aborts with a negative value.

        :return: EOF_READ/EOF_WRITE, or any negative value produced by a
            next() override; positive next() values keep the loop running.
        """
        while True:
            state = self.protocol.state
            if state in SERVER_STATES:
                self.direction = self.SERVER_TO_CLIENT
                self.readStream = self.serverStream
                self.writeStream = self.clientStream
                n = self.cycle(self.protocol.readServer)
            elif state in CLIENT_STATES:
                self.direction = self.CLIENT_TO_SERVER
                self.readStream = self.clientStream
                self.writeStream = self.serverStream
                n = self.cycle(self.protocol.readClient)
            else:
                assert False, "Unknown state %s" % state
            if n < 0:
                return n
""" Setup/build script for MasterChess For usage info, see readme.md """ import os, sys, subprocess from distutils.dir_util import copy_tree from setuptools import setup from MasterChessGUI import __description__, __copyright__, __version__ def get_folder(path): if isinstance(path, list): return [get_folder(i) for i in path] else: return (path, [os.path.join(path, i) for i in os.listdir(path) if i[:1] != "." and os.path.isfile(os.path.join(path, i))]) DATA_FILES = [get_folder("resources")] DATA_FILES_MAC = ["QuickLook.py"] DATA_MODULE_PACKAGES = ["MasterChess"] PY2EXE_BUNDLE = False options = { "name": "MasterChess", "version": __version__, "description": __description__, "author": "Jake Hartz", "author_email": "jhartz@outlook.com", "license": "GPL", "url": "http://jhartz.github.io/masterchess/" } if sys.platform == "darwin" and "py2app" in sys.argv: options.update({ "setup_requires": ["py2app"], "app": ["MasterChessGUI.py"], "data_files": DATA_FILES + DATA_FILES_MAC, "options": { "py2app": { "argv_emulation": True, "iconfile": "resources/Chess.icns", "plist": { "CFBundleIdentifier": "com.github.jhartz.masterchess", "CFBundleGetInfoString": __description__, "NSHumanReadableCopyright": __copyright__, "UTExportedTypeDeclarations": [ { "UTTypeIdentifier": "com.github.jhartz.masterchess.mcdb", "UTTypeDescription": "MasterChess database", #"UTTypeIconFile": "Chess.icns", "UTTypeConformsTo": [ "public.data" ], "UTTypeTagSpecification": { "public.filename-extension": "mcdb" } } ], "CFBundleDocumentTypes": [ { #"CFBundleTypeExtensions": [ # "mcdb" #], "CFBundleTypeIconFile": "Chess.icns", #"CFBundleTypeName": "MasterChess database", "CFBundleTypeName": "MasterChess database", "LSItemContentTypes": [ "com.github.jhartz.masterchess.mcdb" ], "CFBundleTypeRole": "Editor", "LSHandlerRank": "Owner" } ] } } } }) elif sys.platform == "win32" and "py2exe" in sys.argv: import py2exe options.update({ "setup_requires": ["py2exe"], "data_files": DATA_FILES, "windows": [ { "script": 
"MasterChessGUI.py", "icon_resources": [(1, "resources/Chess.ico")], "other_resources": [(u"VERSIONTAG", 1, "MasterChess "
+ __version__)] # TODO: Test this!! } ] }) if PY2EXE_BUNDLE: options.update({ "options": { "py2exe": { "bundle_files": 1 } }, "zipfile": None }) else: options.update({ "scripts": ["MasterChessGUI.py"], "packages": DATA_MODULE_PACKAGES,
"data_files": DATA_FILES, "install_requires": ["wx"] }) setup(**options) if sys.platform == "darwin" and "py2app" in sys.argv: # If we have a compiled MC-QuickLook or MC-Spotlight, include that if os.path.isdir(os.path.join("dist", "MasterChess.app", "Contents")): # QuickLook loc = os.path.join("Mac components", "MC-QuickLook", "Build", "Release", "MC-QuickLook.qlgenerator") if not os.path.exists(loc): # Try debug version loc = os.path.join("Mac components", "MC-QuickLook", "Build", "Debug", "MC-QuickLook.qlgenerator") if os.path.exists(loc): print "" print "Copying MC-QuickLook to app bundle" copy_tree(loc, os.path.join("dist", "MasterChess.app", "Contents", "Library", "QuickLook", os.path.basename(loc))) print "Reloading quicklookd" try: subprocess.call(["qlmanage", "-r"]) subprocess.call(["qlmanage", "-r", "cache"]) except OSError: print "Error calling qlmanage (manually call `qlmanage -r` and `qlmanage -r cache` to reload quicklookd)" # Spotlight loc = os.path.join("Mac components", "MC-Spotlight", "Build", "Release", "MC-Spotlight.mdimporter") if not os.path.exists(loc): # Try debug version loc = os.path.join("Mac components", "MC-Spotlight", "Build", "Debug", "MC-Spotlight.mdimporter") if os.path.exists(loc): print "" print "Copying MC-Spotlight to app bundle" copy_tree(loc, os.path.join("dist", "MasterChess.app", "Contents", "Library", "Spotlight", os.path.basename(loc)))
# -*- coding: utf-8 -*-
# © 2011 Raphaël Valyi, Renato Lima, Guewen Baconnier, Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

from odoo import api, models, fields


class ExceptionRule(models.Model):
    """Extend exception.rule so rules can target sale orders and lines."""
    _inherit = 'exception.rule'

    rule_group = fields.Selection(
        selection_add=[('sale', 'Sale')],
    )
    model = fields.Selection(
        selection_add=[
            ('sale.order', 'Sale order'),
            ('sale.order.line', 'Sale order line'),
        ])


class SaleOrder(models.Model):
    """Mix base.exception into sale.order to block confirmation on rule hits."""
    _inherit = ['sale.order', 'base.exception']
    _name = 'sale.order'
    # Orders with exceptions sort first.
    _order = 'main_exception_id asc, date_order desc, name desc'

    rule_group = fields.Selection(
        selection_add=[('sale', 'Sale')],
        default='sale',
    )

    @api.model
    def test_all_draft_orders(self):
        """Run exception checks on every draft order (e.g. from a cron/server action)."""
        order_set = self.search([('state', '=', 'draft')])
        order_set.test_exceptions()
        return True

    @api.constrains('ignore_exception', 'order_line', 'state')
    def sale_check_exception(self):
        """Re-validate exceptions when confirmed orders are modified."""
        orders = self.filtered(lambda s: s.state == 'sale')
        if orders:
            orders._check_exception()

    @api.onchange('order_line')
    def onchange_ignore_exception(self):
        """Editing lines of a confirmed order resets the ignore flag."""
        if self.state == 'sale':
            self.ignore_exception = False

    @api.multi
    def action_confirm(self):
        """Confirm the order, or pop up the exception wizard if rules match."""
        if self.detect_exceptions():
            return self._popup_exceptions()
        else:
            return super(SaleOrder, self).action_confirm()

    @api.multi
    def action_draft(self):
        """Reset to draft and clear any previously ignored exceptions."""
        res = super(SaleOrder, self).action_draft()
        orders = self.filtered(lambda s: s.ignore_exception)
        orders.write({
            'ignore_exception': False,
        })
        return res

    def _sale_get_lines(self):
        # Hook used by base.exception to fetch the records to check per order.
        self.ensure_one()
        return self.order_line

    @api.model
    def _get_popup_action(self):
        """Return the action that opens the exception confirmation wizard."""
        action = self.env.ref('sale_exception.action_sale_exception_confirm')
        return action
# -*- coding: utf-8 -*-
# Copyright 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

"""Everything related to Ubuntu Unity integration (quicklist..)

See the MPRIS plugin for sound menu integration.
"""

import gi

from quodlibet import _
from quodlibet.util import gi_require_versions

# Flipped to False if either GI binding below is unavailable; init() then
# becomes a no-op so the rest of the app works without Unity.
is_unity = True

try:
    gi.require_version("Dbusmenu", "0.4")
    from gi.repository import Dbusmenu
except (ValueError, ImportError):
    is_unity = False

try:
    gi_require_versions("Unity", ["7.0", "6.0", "5.0"])
    from gi.repository import Unity
except (ValueError, ImportError):
    is_unity = False


def init(desktop_id, player):
    """Set up unity integration.

    * desktop_id: e.g. 'quodlibet.desktop'
    * player: BasePlayer()

    Builds a Dbusmenu quicklist with Play/Pause, Next and Previous items
    wired to the player, and attaches it to the app's launcher entry.
    No-op when the Unity/Dbusmenu GI bindings are missing.

    http://developer.ubuntu.com/api/devel/ubuntu-12.04/c/Unity-5.0.html
    http://developer.ubuntu.com/api/devel/ubuntu-13.10/c/Unity-7.0.html
    """
    if not is_unity:
        return

    launcher = Unity.LauncherEntry.get_for_desktop_id(desktop_id)

    # Root menu item; the entries below are appended as its children.
    main = Dbusmenu.Menuitem()

    play_pause = Dbusmenu.Menuitem()
    play_pause.property_set(Dbusmenu.MENUITEM_PROP_LABEL, _("Play/Pause"))
    play_pause.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    main.child_append(play_pause)

    def play_pause_cb(item, timestamp):
        player.playpause()

    play_pause.connect("item-activated", play_pause_cb)

    next_ = Dbusmenu.Menuitem()
    next_.property_set(Dbusmenu.MENUITEM_PROP_LABEL, _("Next"))
    next_.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    main.child_append(next_)

    def next_cb(item, timestamp):
        player.next()

    next_.connect("item-activated", next_cb)

    prev = Dbusmenu.Menuitem()
    prev.property_set(Dbusmenu.MENUITEM_PROP_LABEL, _("Previous"))
    prev.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    main.child_append(prev)

    def prev_cb(item, timestamp):
        player.previous()

    prev.connect("item-activated", prev_cb)

    # Hand the finished quicklist to the launcher entry.
    launcher.set_property("quicklist", main)
#Problem J4: Wait Time inputarray = [] for i in range(input()): inputarray.append(raw_input().split(" ")) #Number, total, lastwait, response frie
ndarray = [] ctime = 0 for i in range(len(inputarray)): if inputarray[i][0].lower() == "c": ctime += inputarray[i][1] if inputarray[i][0].lower() == "r": friendlist = [friendarray[j][0] for j in range(len(friendarray))] if (inputarray[i][1] not in frien
dlist): friendarray.append([inputarray[i][0].lower(), 0, ]) else: location = friendlist.index(inputarray[i][1]) friendarray[location] = [inputarray[i][1], friendarray[location] + ]
# pytgasu - Automating creation of Telegram sticker packs
# Copyright (C) 2017 Lemon Lam <almk@rmntn.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import re

from ..constants import *

__all__ = ['parse']

# Compiled once at import time instead of on every parse() call.
_STICKER_LINE_PATTERN = re.compile(REGEX_MATCHING_EMOJI)


def parse(def_file):
    """
    Parse specified sticker set definition file.

    Line 1 is the set title, line 2 the set short name; every further line
    is "<image filename>/<emoji sequence>" (emojis optional).

    :param def_file: A Path-like object to .ssd file
    :return: A tuple of set_title, set_short_name, [(image_fullpath, emojis)]
             representing the set; None on error
    """
    try:
        with open(def_file, encoding='utf-8', errors='strict') as f:
            lines = [l.rstrip() for l in f]  # strip line breaks
    except ValueError:
        # file is not valid UTF-8
        print(ERROR_DEFFILE_ENCODING % def_file)
        return None
    if len(lines) < 2:
        # Fix: a definition file missing the two header lines used to
        # crash with IndexError; such a set has no stickers either way.
        print(ERROR_NO_STICKER_IN_SET)
        return None
    set_title = lines[0]
    set_short_name = lines[1]
    stickers = list()
    # there may be a 120 stickers per set hard limit, idk
    for sticker_line in lines[2:]:
        if not _STICKER_LINE_PATTERN.fullmatch(sticker_line):
            print(ERROR_INCORRECT_STICKER_LINE % sticker_line)
            continue
        image_filename, emoji_seq = sticker_line.split('/')
        # image is looked up next to the definition file
        image_path = def_file.with_name(image_filename)
        if not _validate_image(image_path):
            continue
        # fall back to the default emoji when none was given
        stickers.append((image_path, emoji_seq or DEFAULT_EMOJI))
    if not stickers:
        print(ERROR_NO_STICKER_IN_SET)
        return None
    return set_title, set_short_name, stickers


def _validate_image(image_path):
    """
    Check file existence, image is correct PNG, dimension is 512x? or ?x512
        and file size < 512KB

    :param image_path: The image for a sticker
    :return: Boolean if all limits on stickers met
    """
    # local import: keeps PIL optional until an image is actually validated
    from PIL.Image import open as pil_open
    try:
        with pil_open(image_path) as image:
            return (
                max(image.size) == 512 and
                image.format == 'PNG' and
                image_path.stat().st_size < 512 * 1000
            )
    except IOError:
        print(ERROR_INVAILD_STICKER_IMAGE % image_path.name)
        return False  # not a picture or just 404
# NOTE: Python 2 demo script (print statements) exploring SP500 OHLC data.
import pandas as pd
from pandas import DataFrame

# Expects an OHLC csv with a 'Date' column used as a datetime index.
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)

# Daily high-low range as a new column.
#notice what i did, since it is an object
df['H-L'] = df.High - df.Low
print df.head()

# 100-day simple moving average of the close.
# NOTE(review): pd.rolling_mean was removed in later pandas versions;
# modern equivalent is df['Close'].rolling(100).mean() -- confirm the
# pandas version pinned for this script before changing.
df['100MA'] = pd.rolling_mean(df['Close'], 100)

# must do a slice, since there will be no value for 100ma until 100 points
print df[200:210]

# Day-over-day change in close.
df['Difference'] = df['Close'].diff()
print df.head()
w.mouse.LEFT: "left", pyglet.window.mouse.MIDDLE: "middle", pyglet.window.mouse.RIGHT: "right", } @self.window.event def on_mouse_motion(x, y, dx, dy): y = self.h - y h = self.getHandlerMethod("onMouseMove") if h: h(x, y) @self.window.event def on_mouse_press(x, y, button, modifiers): y = self.h - y h = self.getHandlerMethod("onMouseButtonDown") if h: h(buttonLUT[button], x, y) @self.window.event def on_mouse_release(x, y, button, modifiers): y = self.h - y h = self.getHandlerMethod("onMouseButtonUp") if h: h(buttonLUT[button], x, y) @self.window.event def on_mouse_drag(x, y, dx, dy, buttons, modifiers): # we use the same handler as for mouse move y = self.h - y h = self.getHandlerMethod("onMouseMove") if h: h(x, y) #@self.window.event #def on_mouse_scroll(x, y, scroll_x, scroll_y): def getHandlerMethod(self, name): if self.event_handler != None: if hasattr(self.event_handler, name): return getattr(self.event_handler, name) return None def __addChild__(self, c): if not c in self.__children__: self.__children__.append(c) def __removeChild__(self, c): self.__children__.remove(c) def __iter__(self): return self.__children__.__iter__() def __len__(self): return len(self.__children__) def getW(self): return self.window.width def setW(self, w): if w != self.window.width: self.window.width = w w = property(getW, setW) def getH(self): return self.window.height def setH(self, h): if h != self.window.height: self.window.height = h h = property(getH, setH) class Image(ColoredVisible): def __init__(self, p, name, path, x=0, y=0, w=None, h=None, color="#ffffff", opacity=1.0): debug(path) if path: image = pyglet.image.load(path) self.sprite = pyglet.sprite.Sprite(image) if w == None: w = self.sprite.width if h == None: h = self.sprite.height ColoredVisible.__init__(self, p, name, x, y, w, h, color, opacity) def _colorComponentGetter(i): def getter(self): self.sprite.color[i]/255.0 retur
n getter def _colorComponentSetter(i): def setter(self, x): components = list(self.sprite.color) components[i] = int(x * 255) self.sprite.color = components return setter r = property(_colorComponentGetter(0), _colorCompo
nentSetter(0)) g = property(_colorComponentGetter(1), _colorComponentSetter(1)) b = property(_colorComponentGetter(2), _colorComponentSetter(2)) def _setOpacity(self, x): self.sprite.opacity = int(x*255.0) def _getOpacity(self): return self.sprite.opacity/255.0 opacity = property(_getOpacity, _setOpacity) def draw(self): glMatrixMode(gl.GL_MODELVIEW) glPushMatrix() glTranslatef(self.x, self.y+self.h, 0); glScalef(float(self.w) / float(self.sprite.width), -float(self.h) / float(self.sprite.height), 1); self.sprite.draw() glPopMatrix() class Text(ColoredVisible): def __init__(self, p, name, x=0, y=0, h=0, color="#00ff00", opacity=1.0, text=None, font=None): self.label = pyglet.text.Label( text if text != None else name, font_name=font if font != None else "Helvetica", font_size=h, anchor_y = 'center', x=0, y=0) ColoredVisible.__init__(self, p, name, x, y, self.label.content_width, h, color, opacity) def _colorComponentGetter(i): def getter(self): self.label.color[i]/255.0 return getter def _colorComponentSetter(i): def setter(self, x): components = list(self.label.color) components[i] = int(x * 255) self.label.color = components return setter r = property(_colorComponentGetter(0), _colorComponentSetter(0)) g = property(_colorComponentGetter(1), _colorComponentSetter(1)) b = property(_colorComponentGetter(2), _colorComponentSetter(2)) opacity = property(_colorComponentGetter(3), _colorComponentSetter(3)) def _setText(self, t): self.label.text = t def _getText(self): return self.label.text text = property(_getText, _setText) def _setFont(self, x): self.label.font_name = x def _getFont(self): return self.label.font_name font = property(_getFont, _setFont) def draw(self): glMatrixMode(gl.GL_MODELVIEW) glPushMatrix() glTranslatef(self.x, self.y + self.h, 0); glScalef(float(self.w) / float(self.label.content_width), -float(self.h) / float(self.label.font_size), 1); self.label.draw() glPopMatrix() class ClippingContainer(Visible): instanceCount = 0 def __init__(self, p, 
name, x=0, y=0, w=10, h=10, ox=0, oy=0, clip=True): Visible.__init__(self, p, name, x, y, w, h) self.ox = ox self.oy = oy self.clip = clip def _getOffset(self): return (self.ox, self.oy) def _setOffset(self, value): self.ox, self.oy = value offset = property(_getOffset, _setOffset) def draw(self): if self.clip: self.drawClipped() else: self.drawUnclipped() def drawUnclipped(self): pass def drawClipped(self): # get screen coordinates of lower left corner x = self.x y = self.y + self.h model_view_matrix = (GLdouble * 16)() projection_matrix = (GLdouble * 16)() viewport = (GLint * 4)() glGetDoublev(GL_MODELVIEW_MATRIX, model_view_matrix) glGetDoublev(GL_PROJECTION_MATRIX, projection_matrix) glGetIntegerv(GL_VIEWPORT, viewport) s_x, s_y, s_z = GLdouble(), GLdouble(), GLdouble() gluProject(x, y, 0.0, model_view_matrix, projection_matrix, viewport, s_x, s_y, s_z) scissor_was_enabled = glIsEnabled(GL_SCISSOR_TEST) old_scissor = (GLint*4)(); r = ((int(s_x.value), int(s_y.value)), self.extent) if scissor_was_enabled: glGetIntegerv(GL_SCISSOR_BOX, old_scissor); osr = (old_scissor[0:2], old_scissor[2:4]) r = clip_rect(r, osr) glScissor(*[int(x) for x in flatten_rect(r)]) glEnable(GL_SCISSOR_TEST) self.drawUnclipped() if not scissor_was_enabled: glDisable(GL_SCISSOR_TEST) else: glScissor(old_scissor[0], old_scissor[1], old_scissor[2], old_scissor[3]) class Group(ClippingContainer): instanceCount = 0 def __init__(self, p, name, x=0, y=0, w=10, h=10, ox=0, oy=0, clipChildren=True): self._W, self._H = w, h ClippingContainer.__init__(self, p, name, x, y, w, h*2 if hasattr(self,'fg') else h, ox, oy, clipChildren) self.__children__ = [] def __addChild__(self, c): if not c in self.__children__: self.__children__.append(c) def __removeChild__(self, c): self.__children__.remove(c) def __iter__(self): return self.__children__.__iter__() def __len__(self): return len(self.__children__) def drawUnclipped(self): glMatrixMode(gl.GL_MODELVIEW) glPushMatrix() glTranslatef(self.x - self.ox, 
self.y - self.oy, 0); for x in self: x.draw() glPopMatrix() ## W TODO: this isn't nice stuff ... def _getW(self): return self._W def _setW(self, value): if self._W == value: return self._W = value self.doLayout(self._W, self._H) w = property(_getW, _setW) ## H def _getH(self): return self._H def
(self, backend, rigor_config): self.rigor_config = rigor_config self.backend = backend try: self.thumbnail_size_max = int(rigor_config.get("webapp","thumbnail_size_max")) except rigor.config.NoValueError: self.thumbnail_size_max = 128 try: self.results_per_page = int(rigor_config.get("webapp","percept_search_page_results_per_page")) except rigor.config.NoValueError: self.results_per_page = 30 def add_routes(self, app, backend, plugin_instances): @app.route('/db/<db_name>/perceptsearch') @AuthClient.check_access_and_inject_user(self.rigor_config) def percept_search_page(db_name, username=None): if not db_name in backend.db_names(): abort(404) # clean up search params search_params = request.args.to_dict() search_params['page'] = max(1, int(search_params.get('page', 1))) # human-readable page number starts at 1, not 0 for int_param in ['random_nth', 'random_out_of']: if int_param in search_params: search_params[int_param] = int(search_params[int_param]) param_whitelist = """ page device_id collection_id hash annotation_domain annotation_model annotation_property percept_property locator random_nth random_out_of """.strip().split() for key in list(search_params.keys()): if not key in param_whitelist: del search_params[key] if search_params[key] == '': del search_params[key] search_results, total_count = backend.search_percepts(db_name=db_name, query=search_params, per_page=self.results_per_page, load_paths='tags') page_state = { 'current_view': kPluginName, 'username': username, kPluginName: dict( db_name=db_name, db_names=backend.db_names(), search_results=search_results, total_count=total_count, per_page=self.results_per_page, num_pages=int(total_count / self.results_per_page + 1), search_params=search_params, ), } template_slots = rigorwebapp.plugin.TemplateSlots() rigorwebapp.plugin.augment_request(plugin_instances, page_state, template_slots) return render_template('standard_template.html', page_state=page_state, template_slots=template_slots) def 
augment_page_state(self, page_state): pass def augment_template_slots(self, page_state, template_slots): # add to navbar on all pages # first, figure out db_name try: db_name = page_state[page_state['current_view']]['db_name'] except KeyError: try: db_name = self.rigor_config.get("webapp", "initial_db") except rigor.config.NoValueError: db_name='?' navbar_url = '/db/{}/perceptsearch'.format(db_name) template_slots.append('navbar_link', '<a href="{}">Percept Search</a>'.format(navbar_url)) # if this isn't our own page, stop here if page_state['current_view'] != kPluginName: return template_slots.append('js_tail_path', '/static/plugins/percept_search_page/js/index.js') template_slots.append('css_path', '/static/plugins/percept_search_page/css/index.css') # build next/prev links for pagination navigation prev_link = None next_link = None page = page_state[kPluginName]['search_params']['page'] prev_params = page_state[kPluginName]['search_params'].copy() prev_params['page'] -= 1 next_params = page_state[kPluginName]['search_params'].copy() next_params['page'] += 1 if prev_params['page'] >= 1: prev_link = 'perceptsearch?' + urllib.urlencode(prev_params) if next_params['page'] <= page_state[kPluginName]['num_pages']: next_link = 'perceptsearch?' + urllib.urlencode(next_params) template_slots.append('main_panel_pager_bar', dict( prev_link=prev_link, next_link=next_link, num_results=page_state[kPluginName]['total_count'], page_num=page, num_pages=page_state[kPluginName]['num_pages'] )) thumb_grid_template = """ {% for percept in
search_results %} <div class="searchResult"> <a href="/db/{{db_name}}/percept/{{'{}'.format(percept.id)}}">
{% if percept.x_size and percept.y_size: %} <img class="searchResultImg" src="{{percept.img_url+'?max_size='}}{{thumbnail_size_max}}" width="{{thumbsize(percept.x_size, percept.y_size)[0]}}" height="{{thumbsize(percept.x_size, percept.y_size)[1]}}" /> {% else %} <div class="missingImage" style="height:{{thumbnail_size_max}}px; width:{{thumbnail_size_max}}px; display: block;"></div> {% endif %} </a> <div class="searchResultCaption"> {% for tag in percept.tags %} <div class="tag" style="background: hsl({{tag_to_hue(tag)}}, 25%, 50%)" > {{tag}} </div> {% endfor %} </div> </div> {% endfor %} """ thumb_grid_template_context = dict( thumbsize = lambda width,height,maxsize=self.thumbnail_size_max: self.backend.percept_image_scaled_size((width,height),int(maxsize)), tag_to_hue = rigorwebapp.utils.hash_string_to_hue, thumbnail_size_max = self.thumbnail_size_max, **page_state[kPluginName] ) template_slots.append('main_panel', jinja2.Template(thumb_grid_template).render(thumb_grid_template_context)) search_form_template = """ <div class="sidebarTitle"> Search </div> <form id="perceptSearchForm"> <div class="searchFormRow"> <div class="searchFormRowLabel"> Database </div> <select class="searchFormSelect" id="perceptSearchFormDbSelect"> {% for this_db_name in db_names %} {% if this_db_name == db_name %} <option value={{this_db_name}} selected>{{this_db_name}}</option> {% else %} <option value={{this_db_name}}>{{this_db_name}}</option> {% endif %} {% endfor %} </select> </div> {% for facet in facets %} <div class="searchFormRow"> <div class="searchFormRowLabel">{{facet.caption}}</div> {% if facet.get('help_text') %} <div class="searchFormRowHelp">{{facet.help_text}}</div> {% endif %} <input style="width:100%" type="text" id="{{facet.dom_id}}" value="{{facet.value}}"/> </div> {% endfor %} <div class="searchFormRow"> <div class="searchFormRowLabel">Random subset</div> <span class="searchFormRowHelp">The </span> <input style="width:15%" type="text" id="perceptSearchFormRandomNth" 
value="{{random_nth_value}}"/> <span class="searchFormRowHelp">th percept out of each</span> <input style="width:15%" type="text" id="perceptSearchFormRandomOutOf" value="{{random_out_of_value}}"/> </div> <div class="searchFormButtonRow"> <input class="button" type="submit" value="Search"/> </div> </form> """ search_params = page_state[kPluginName]['search_params'] search_form_template_context = page_state[kPluginName].copy() search_form_template_context['random_nth_value'] = search_params.get('random_nth', '') search_form_template_context['random_out_of_value'] = search_params.get('random_out_of', '') search_form_template_context['facets'] = [ dict( dom_id='perceptSearchFormLocator', value=search_params.get('locator', ''), caption='Locator', help_text='Use "*" as a wildcard.', ), dict( dom_id='perceptSearchFormCollectionId', value=search_params.get('collection_id', ''), caption='Collection ID', ), dict( dom_id='perceptSearchFormDeviceId', value=search_params.get('device_id', ''), caption='Device ID', ), dict( dom_id='perceptSearchFormHash', value=search_params.get('hash', ''), caption='Percept hash', ), dict( dom_id='perceptSearchFormAnnotationDomain', value=search_params.get('annotation_domain', ''), caption='Annotation domain', ), dict( dom_id='perceptSearchFormAnnotationModel', value=search_params.get('annotation_model', ''), caption='Annotation model', help_text='Use "*" as a wildcard.', ), dict( dom_id='perceptSearchFormAnnotationProperty', value=search_params.get('annotation_property', ''), caption='Annotation property and/or value', help_text='Format like "property", "=value", or "property=value". Combine using "AND" or "OR", but not both: "a=aaa OR b=bbb".', ), dict( dom_id='perceptSearchFormPerceptProperty', v
#!/usr/bin/env python
# Python 2 script: aggregates OS-measurement benchmark result files from
# <repo root>/results and prints mean / standard deviation tables.
from os import path
from collections import defaultdict
import math

# repo root is three directories above this file
root = path.dirname(path.dirname(path.dirname(__file__)))
result_dir = path.join(root, 'results')


def get_file_name(test):
    """Return the full path of the result file for benchmark *test*."""
    test = '%s_result' % test
    return path.join(result_dir, test)


def mean(l):
    """Arithmetic mean of *l*; NaN for an empty list."""
    return float(sum(l))/len(l) if len(l) > 0 else float('nan')


def std_dev(l):
    """Population standard deviation of *l*.

    NOTE(review): unlike mean(), this raises ZeroDivisionError on an
    empty list -- callers below always pass non-empty data.
    """
    m = mean(l)
    return math.sqrt(sum((x - m) ** 2 for x in l) / len(l))


def run_timing_overhead_ana():
    """Report mean/std-dev of the first 10000 timing-overhead samples."""
    test_name = 'timing_overhead'
    file_name = get_file_name(test_name)
    datas = []
    with open(file_name) as f:
        for l in f:
            datas.append(int(l))
    datas = [i for i in datas[:10000]]
    print "%s mean: %f" % (test_name, mean(datas))
    print "%s std dev: %f" % (test_name, std_dev(datas))


def run_loop_overhead_ana():
    """Report mean/std-dev of loop-overhead samples (first field per line)."""
    test_name = 'loop_overhead'
    file_name = get_file_name(test_name)
    datas = []
    with open(file_name) as f:
        for l in f:
            datas.append(float(l.split(' ')[0]))
    datas = [i for i in datas[:10000]]
    print "%s mean: %f" % (test_name, mean(datas))
    print "%s std dev: %f" % (test_name, std_dev(datas))


def run_proc_call_overhead_ana():
    """Report per-group procedure-call overhead; groups are separated by '-' lines.

    Each sample is divided by 10 (samples cover 10 calls each).
    """
    test_name = 'proc_call_overhead'
    file_name = get_file_name(test_name)
    datas = []
    with open(file_name) as f:
        for l in f:
            if l.startswith('-'):
                # a dash line starts a new measurement group
                datas.append([])
                continue
            datas[-1].append(int(l.split(' ')[0]) * 1.0 / 10)
    print "%s result:" % test_name
    for i, data in enumerate(datas):
        m = mean(data)
        std = std_dev(data)
        print "%f\t%f" % (m, std)
        #print "%s %d mean: %f" % (test_name, i, mean(data))
        #print "%s %d std dev: %f" % (test_name, i, std_dev(data))


def run_process_context_switch_ana():
    """Report mean/std-dev of the first 100 process context-switch samples."""
    test_name = 'process_context_switch'
    file_name = get_file_name(test_name)
    datas = []
    with open(file_name) as f:
        for l in f:
            try:
                datas.append(int(l.split(' ')[1]))
            except:
                # skip malformed lines (best-effort parsing)
                pass
    datas = [i for i in datas[:100]]
    print "%s mean: %f" % (test_name, mean(datas))
    print "%s std dev: %f" % (test_name, std_dev(datas))


def run_thread_context_switch_ana():
    """Report mean/std-dev of the first 100 thread context-switch samples."""
    test_name = 'thread_context_switch'
    file_name = get_file_name(test_name)
    datas = []
    with open(file_name) as f:
        for l in f:
            datas.append(int(l.split(' ')[1]))
    datas = [i for i in datas[:100]]
    print "%s mean: %f" % (test_name, mean(datas))
    print "%s std dev: %f" % (test_name, std_dev(datas))


def run_mem_acc_ana():
    """Print a step x offset table of mean memory-access cycle counts.

    Result lines are fixed-position: field 1 = offset, 3 = cycles, 7 = step.
    """
    test_name = 'mem_acc'
    filename = get_file_name(test_name)
    datas = defaultdict(lambda: defaultdict(list))
    with open(filename) as f:
        for l in f:
            ll = l.split(' ')
            step = int(ll[7])
            offset = int(ll[1])
            cycle = float(ll[3])
            datas[step][offset].append(cycle)
    results = {}
    offsets = set()
    for step, v in sorted(datas.items()):
        result = []
        for offset, cycles in sorted(v.items()):
            offsets.add(offset)
            m = mean(cycles)
            result.append(m)
        results[step] = (result)
    print "mem access time result"
    fl = "step/offset\t%s" % "\t".join(str(i) for i in sorted(offsets))
    print fl
    for step, means in sorted(results.items()):
        line = "\t".join(str(i) for i in means)
        line = "%s\t%s" % (str(step), line)
        print line


if __name__ == '__main__':
    run_timing_overhead_ana()
    run_loop_overhead_ana()
    run_proc_call_overhead_ana()
    run_process_context_switch_ana()
    run_thread_context_switch_ana()
    run_mem_acc_ana()
"""Support for the Airly air_quality service.""" from homeassistant.components.air_quality import ( ATTR_AQI, ATTR_PM_2_5, ATTR_PM_10, AirQualityEntity, ) from homeassistant.const import CONF_NAME from .const import ( ATTR_API_ADVICE, ATTR_API_CAQI, ATTR_API_CAQI_DESCRIPTION, ATTR_API_CAQI_LEVEL, ATTR_API_PM10, ATTR_API_PM10_LIMIT, ATTR_API_PM10_PERCENT, ATTR_API_PM25, ATTR_API_PM25_LIMIT, ATTR_API_PM25_PERCENT, DOMAIN, ) ATTRIBUTION = "Data provided by Airly" LABEL_ADVICE = "advice" LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description" LABEL_AQI_LEVEL = f"{ATTR_AQI}_level" LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit" LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit" LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit" LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit" async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Airly air_quality entity based on a config entry.""" name = config_entry.data[CONF_NAME] coordinator = hass.data[DOMAIN][config_entry.entry_id] async_add_entities( [AirlyAirQuality(coordinator, name, config_entry.unique_id)], False ) def round_state(func): """Round state.""" def _decorator(self): res = func(self) if isinstance(res, float): return round(res) return res return _decorator class AirlyAirQuality(AirQualityEntity): """Define an Airly air quality.""" def __init__(self, coordinator, name, unique_id): """Initialize.""" self.coordinator = coordinator self._name = name self._unique_id = unique_id self._icon = "mdi:blur" @property def name(self): """Return the name.""" return self._name @property def should_poll(self): """Return the polling requirement of the entity.""" return False @property def icon(self): """Return the icon.""" return self._icon @property @round_state def air_quality_index(self): """Return the air quality index.""" return self.coordinator.data[ATTR_API_CAQI] @property @round_state def particulate_matter_2_5(self): """Return the particulate matter 2.5 level.""" return self.coordinator.data[ATTR_API_PM25] 
@property @round_state def particulate_matter_10(self): """Return the particulate matter 10 level.""" return self.coordinator.data[ATTR_API_PM10] @property def attribution(self): """Return the attribution.""" return ATTRIBUTION @property def unique_id(self): """Return a unique_id for this entity.""" return self._unique_id @property def available(self): """Return True if entity is available.""" return self.coordinator.last_update_success @property def device_state_attributes(self): """Return the state attributes.""" return { LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION], LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE], LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL], LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIM
IT], LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]), LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT], LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]), } async def async_a
dded_to_hass(self): """Connect to dispatcher listening for entity data notifications.""" self.async_on_remove( self.coordinator.async_add_listener(self.async_write_ha_state) ) async def async_update(self): """Update Airly entity.""" await self.coordinator.async_request_refresh()
# -*- coding: utf-8 -*-

from openerp import models, fields, api


class CalendarEvent(models.Model):
    """Extend calendar events with an optional meeting reason."""

    _inherit = 'calendar.event'

    # Classification of why the meeting takes place; records referenced
    # by an event cannot be deleted (ondelete="restrict").
    meeting_reason_id = fields.Many2one(
        comodel_name='calendar.event.meeting.reason',
        string="Meeting reason",
        ondelete="restrict",
    )


class CalendarEventMeetingReason(models.Model):
    """Configurable catalogue of meeting reasons."""

    _name = 'calendar.event.meeting.reason'
    _description = 'Calendar Event Meeting Reason'

    # Human-readable reason label, translatable per language.
    name = fields.Char(
        string='Reason',
        required=False,
        translate=True,
    )
"""SCons.Tool.mwcc Tool-specific initialization for the Metrowerks CodeWarrior compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/mwcc.py 2014/08/24 12:12:31 garyo" import os import os.path import SCons.Util def set_vars(env): """Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars MWCW_VERSIONS is set to a list of objects representing installed versions MWCW_VERSION is set to the version object that will be used for building. MWCW_VERSION can be set to a string during Environment construction to influence which version is chosen, otherwise the latest one from MWCW_VERSIONS is used. 
Returns true if at least one version is found, false otherwise """ desired = env.get('MWCW_VERSION', '') # return right away if the variables are already set if isinstance(desired, MWVersion): return 1 elif desired is None: return 0 versions = find_versions() version = None if desired: for v in versions: if str(v) == desired: version = v elif versions: version = versions[-1] env['MWCW_VERSIONS'] = versions env['MWCW_VERSION'] = version if version is None: return 0 env.PrependENVPath('PATH', version.clpath) env.PrependENVPath('PATH', version.dllpath) ENV = env['ENV'] ENV['CWFolder'] = version.path ENV['LM_LICENSE_FILE'] = version.license plus = lambda x: '+%s' % x ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes)) ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs)) return 1 def find_versions(): """Return a list of MWVersion objects representing installed versions""" versions = [] ### This function finds CodeWarrior by reading from the registry on ### Windows. Some other method needs to be implemented for other ### platforms, maybe something that calls env.WhereIs('mwcc') if SCons.Util.can_read_reg: try: HLM = SCons.Util.HKEY_LOCAL_MACHINE product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions' product_key = SCons.Util.RegOpenKeyEx(HLM, product) i = 0 while True: name = product + '\\' + SCons.Util.RegEnumKey(product_key, i) name_key = SCons.Util.RegOpenKeyEx(HLM, name) try: version = SCons.Util.RegQueryValueEx(name_key, 'VERSION') path = SCons.Util.RegQueryValueEx(name_key, 'PATH') mwv = MWVersion(version[0], path[0], 'Win32-X86') versions.append(mwv) except SCons.Util.RegError: pass i = i + 1 except SCons.Util.RegError: pass return versions class MWVersion(object): def __init__(self, version, path, platform): self.version = version self.path = path self.platform = platform self.clpath = os.path.join(path, 'Other Metrowerks Tools', 'Command Line Tools') self.dllpath = os.path.join(path, 'Bin') # The Metrowerks tools don't store any 
configuration data so they # are totally dumb when it comes to locating standard headers, # libraries, and other files, expecting all the information # to be handed to them in environment variables. The members set # below control what information scons injects into the environment ### The paths below give a normal build environment in CodeWarrior for ### Windows, other versions of CodeWarrior might need different paths. msl = os.path.join(path, 'MSL') support = os.path.join(path, '%s Support' % platform) self.lic
ense = os.path.join(path, 'license.dat') self.includes = [msl, support] self.libs = [msl, support] def __str__(self): return self.version CSuffixes = ['.c', '.C'] CXXSuffixes = ['.cc', '.cpp', '.cxx',
'.c++', '.C++'] def generate(env): """Add Builders and construction variables for the mwcc to an Environment.""" import SCons.Defaults import SCons.Tool set_vars(env) static_obj, shared_obj = SCons.Tool.createObjBuilders(env) for suffix in CSuffixes: static_obj.add_action(suffix, SCons.Defaults.CAction) shared_obj.add_action(suffix, SCons.Defaults.ShCAction) for suffix in CXXSuffixes: static_obj.add_action(suffix, SCons.Defaults.CXXAction) shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction) env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES' env['CC'] = 'mwcc' env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS' env['CXX'] = 'mwcc' env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS' env['SHCC'] = '$CC' env['SHCCFLAGS'] = '$CCFLAGS' env['SHCFLAGS'] = '$CFLAGS' env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS' env['SHCXX'] = '$CXX' env['SHCXXFLAGS'] = '$CXXFLAGS' env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS' env['CFILESUFFIX'] = '.c' env['CXXFILESUFFIX'] = '.cpp' env['CPPDEFPREFIX'] = '-D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '-I' env['INCSUFFIX'] = '' #env['PCH'] = ? #env['PCHSTOP'] = ? def exists(env): return set_vars(env) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import libtcodpy as libtcod
import tokenizer
import match
import textfield
import textview
import command

# Console/window dimensions, in character cells.
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 35

FONT_FLAGS = libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD
FONT_FILE = 'fonts/dejavu12x12_gs_tc.png'

LIMIT_FPS = 25

TITLE = 'Neko (lost kitty)'

# One-line input field on the bottom row; the rest of the screen is a
# scrolling output view.
text_field = textfield.TextField(0, SCREEN_HEIGHT - 1, SCREEN_WIDTH, 1)
text_view = textview.TextView(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT - 1)

# Names exposed to the in-game ';' eval REPL.
builtins = {'me': 123} # $tags: stub, example, todo


def eval_str(s, globals):
    # Evaluate a Python expression and return its result (or the raised
    # exception) as a string. SECURITY: this is a deliberate in-game REPL;
    # eval of user input must never be exposed to untrusted sources.
    # NOTE(review): the parameter name `globals` shadows the builtin.
    try:
        r = eval(s, globals)
        return str(r)
    except Exception as e:
        return str(e)


def on_command(sender, args):
    # Key handler for the input field: only act on ENTER.
    if args.key.vk != libtcod.KEY_ENTER:
        return
    s = text_field.get_text()
    if match.starts_with(';', s):
        # Lines starting with ';' are evaluated as Python expressions.
        r = eval_str(s[1:], builtins)
        text_view.lines.append(str(r))
    else:
        # Anything else goes through the game's tokenizer/command parser;
        # both intermediate results are echoed for debugging.
        tokens = tokenizer.tokenize(s)
        cmd = command.parse(tokens)
        text_view.lines.append(str(tokens))
        text_view.lines.append(str(cmd))


text_field.add_handler(on_command)

# Window/font setup; final False presumably means windowed (not
# fullscreen) — confirm against the libtcod console_init_root API.
libtcod.sys_set_fps(LIMIT_FPS)
libtcod.console_set_custom_font(FONT_FILE, FONT_FLAGS)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, TITLE, False)

# Main loop: clear, draw both widgets, present, then feed keyboard input
# to the text field (which dispatches to on_command).
while not libtcod.console_is_window_closed():
    libtcod.console_set_default_foreground(0, libtcod.white)
    libtcod.console_clear(0)

    text_field.render(0)
    text_view.render(0)

    libtcod.console_flush()

    key = libtcod.console_check_for_keypress(libtcod.KEY_PRESSED)
    text_field.update(key)
from django.contrib import admin
from treebeard.admin import TreeAdmin
from treebeard.forms import movenodeform_factory

from oscar.core.loading import get_model

# Resolve catalogue models through Oscar's dynamic class loader so that
# project-level overrides of these models are picked up automatically.
AttributeOption = get_model('catalogue', 'AttributeOption')
AttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')
Category = get_model('catalogue', 'Category')
Option = get_model('catalogue', 'Option')
Product = get_model('catalogue', 'Product')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')


class AttributeInline(admin.TabularInline):
    """Edit a product's attribute values inline on the product page."""
    model = ProductAttributeValue


class ProductRecommendationInline(admin.TabularInline):
    """Edit recommendations inline; raw-id widgets avoid huge selects."""
    model = ProductRecommendation
    fk_name = 'primary'
    raw_id_fields = ['primary', 'recommendation']


class CategoryInline(admin.TabularInline):
    """Assign categories inline on the product page."""
    model = ProductCategory
    extra = 1


class ProductAttributeInline(admin.TabularInline):
    """Define a product class's attributes inline."""
    model = ProductAttribute
    extra = 2


class ProductClassAdmin(admin.ModelAdmin):
    list_display = ('name', 'requires_shipping', 'track_stock')
    inlines = [ProductAttributeInline]


class ProductAdmin(admin.ModelAdmin):
    date_hierarchy = 'date_created'
    list_display = ('get_title', 'upc', 'get_product_class', 'structure',
                    'attribute_summary', 'date_created')
    list_filter = ['structure', 'is_discountable']
    raw_id_fields = ['parent']
    inlines = [AttributeInline, CategoryInline, ProductRecommendationInline]
    prepopulated_fields = {"slug": ("title",)}
    search_fields = ['upc', 'title']

    def get_queryset(self, request):
        # Prefetch relations used by the list_display columns
        # (product class, parent, attribute summary) to avoid N+1
        # queries when rendering the changelist.
        qs = super(ProductAdmin, self).get_queryset(request)
        return (
            qs
            .select_related('product_class', 'parent')
            .prefetch_related(
                'attribute_values',
                'attribute_values__attribute'))


class ProductAttributeAdmin(admin.ModelAdmin):
    list_display = ('name', 'code', 'product_class', 'type')
    prepopulated_fields = {"code": ("name", )}


class OptionAdmin(admin.ModelAdmin):
    pass


class ProductAttributeValueAdmin(admin.ModelAdmin):
    list_display = ('product', 'attribute', 'value')


class AttributeOptionInline(admin.TabularInline):
    model = AttributeOption


class AttributeOptionGroupAdmin(admin.ModelAdmin):
    list_display = ('name', 'option_summary')
    inlines = [AttributeOptionInline, ]


class CategoryAdmin(TreeAdmin):
    # Treebeard's move-node form lets admins reorganise the tree.
    form = movenodeform_factory(Category)


admin.site.register(ProductClass, ProductClassAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductAttribute, ProductAttributeAdmin)
admin.site.register(ProductAttributeValue, ProductAttributeValueAdmin)
admin.site.register(AttributeOptionGroup, AttributeOptionGroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(ProductImage)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductCategory)
# regression tree
# input is a dataframe of features
# the corresponding y value(called labels here) is the scores for each document

import pandas as pd
import numpy as np
from multiprocessing import Pool
from itertools import repeat
import scipy
import scipy.optimize

# Global counter giving each created leaf a unique index (reset by fit()).
node_id = 0


def get_splitting_points(args):
    """Return (possible_splits, col) for one feature column.

    args is a (values, col) pair.  Candidate thresholds are the midpoints
    between each pair of *distinct* consecutive sorted values.
    NOTE: sorts the passed-in list in place.
    """
    attribute, col = args
    attribute.sort()
    possible_split = []
    for i in range(len(attribute) - 1):
        # Equal neighbours cannot separate any rows, so skip them.
        if attribute[i] != attribute[i + 1]:
            possible_split.append(np.mean((attribute[i], attribute[i + 1])))
    return possible_split, col


def find_best_split_parallel(args):
    """Evaluate every candidate split of one column.

    args is ((col, possible_splits), data, label); returns the best
    (weighted least-square score, (col, split), children) for the column.
    """
    best_ls = 1000000
    best_split = None
    best_children = None
    split_point, data, label = args
    key, possible_split = split_point

    for split in possible_split:
        children = split_children(data, label, key, split)
        # Weighted average of the left and right least-square scores.
        ls = (len(children[1]) * least_square(children[1]) / len(label)
              + len(children[3]) * least_square(children[3]) / len(label))
        if ls < best_ls:
            best_ls = ls
            best_split = (key, split)
            best_children = children
    return best_ls, best_split, best_children


def find_best_split(data, label, split_points):
    """Search all columns (in parallel) for the overall best split.

    split_points is a dict mapping column -> candidate thresholds.
    Returns ((col, threshold), children).
    """
    best_ls = 1000000
    best_split = None
    best_children = None

    pool = Pool()
    for ls, split, children in pool.map(
            find_best_split_parallel,
            zip(split_points.items(), repeat(data), repeat(label))):
        if ls < best_ls:
            best_ls = ls
            best_split = split
            best_children = children
    pool.close()

    return best_split, best_children  # ((attribute, value), children)


def split_children(data, label, key, split):
    """Partition rows by column `key` at threshold `split`.

    Returns (left_data, left_label, right_data, right_label); rows with
    value < split go left, the rest go right.
    """
    left_index = [index for index in range(len(data.iloc[:, key]))
                  if data.iloc[index, key] < split]
    right_index = [index for index in range(len(data.iloc[:, key]))
                   if data.iloc[index, key] >= split]

    left_data = data.iloc[left_index, :]
    right_data = data.iloc[right_index, :]
    left_label = [label[i] for i in left_index]
    right_label = [label[i] for i in right_index]

    return left_data, left_label, right_data, right_label


def least_square(label):
    """Score a node's labels; 0 for an empty node.

    NOTE(review): divides by the number of *distinct* labels
    (len(set(label))), not len(label) — confirm this is the intended
    impurity measure before changing it; behavior kept as-is.
    """
    if not len(label):
        return 0
    return (np.sum(label) ** 2) / len(set(label))


def create_leaf(label):
    """Make a leaf node predicting the (rounded) mean of `label`."""
    global node_id
    node_id += 1
    leaf = {'splitting_feature': None,  # fixed typo: was 'splittng_feature' (key never read for leaves)
            'left': None,
            'right': None,
            'is_leaf': True,
            'index': node_id}
    leaf['value'] = round(np.mean(label), 3)
    return leaf


def find_splits_parallel(args):
    """Find the threshold minimising error_function for one column.

    Returns fminbound's full output: (xopt, fval, ierr, numfunc).
    """
    var_space, label, col = args
    return scipy.optimize.fminbound(error_function,
                                    min(var_space), max(var_space),
                                    args=(col, var_space, label),
                                    full_output=1)


def create_tree(data, all_pos_split, label, max_depth, ideal_ls, current_depth=0):
    """Recursively grow a regression tree.

    Stops on: no remaining candidate splits, depth > max_depth, an empty
    child, or a child whose least-square score beats ideal_ls.
    """
    remaining_features = all_pos_split

    # Stopping condition: no remaining features to consider.
    if sum([len(v) != 0 for v in remaining_features.values()]) == 0:
        return create_leaf(label)
    # Additional stopping condition (limit tree depth).
    elif current_depth > max_depth:
        return create_leaf(label)

    min_error = None
    split_var = None
    min_split = None

    var_spaces = [data.iloc[:, col].tolist() for col in range(data.shape[1])]
    cols = [col for col in range(data.shape[1])]

    pool = Pool()
    results = pool.map(find_splits_parallel,
                       zip(var_spaces, repeat(label), cols))
    pool.close()

    # BUG FIX: the original loop read the leaked comprehension variable
    # `col`, so the winning split was always attributed to the *last*
    # column; pair each fminbound result with its own column instead.
    # Also compare against None explicitly so a legitimate 0.0 error is
    # not mistaken for "unset".
    for col, (split, error, ierr, numf) in zip(cols, results):
        if min_error is None or error < min_error:
            min_error = error
            split_var = col
            min_split = split

    splitting_feature = (split_var, min_split)
    children = split_children(data, label, split_var, min_split)

    left_data, left_label, right_data, right_label = children
    if len(left_label) == 0 or len(right_label) == 0:
        return create_leaf(label)

    left_least_square = least_square(left_label)

    # Create a leaf node if the split is "perfect".
    if left_least_square < ideal_ls:
        return create_leaf(left_label)
    if least_square(right_label) < ideal_ls:
        return create_leaf(right_label)

    # Recurse on children.  NOTE: remaining_features is passed down
    # unchanged, so termination relies on max_depth / ideal_ls.
    left_tree = create_tree(left_data, remaining_features, left_label,
                            max_depth, ideal_ls, current_depth + 1)
    right_tree = create_tree(right_data, remaining_features, right_label,
                             max_depth, ideal_ls, current_depth + 1)

    return {'is_leaf': False,
            'value': None,
            'splitting_feature': splitting_feature,
            'left': left_tree,
            'right': right_tree,
            'index': None}


def error_function(split_point, split_var, data, label):
    """Objective for fminbound: sum of child scores for a threshold.

    `split_var` is unused but kept: fminbound passes it positionally via
    args=(col, var_space, label).
    """
    data1 = []
    data2 = []
    for i in range(len(data)):
        temp_dat = data[i]
        if temp_dat <= split_point:
            data1.append(label[i])
        else:
            data2.append(label[i])
    return least_square(data1) + least_square(data2)


def make_prediction(tree, x, annotate=False):
    """Walk the tree with feature vector `x` and return the leaf value."""
    if tree['is_leaf']:
        if annotate:
            print("At leaf, predicting %s" % tree['value'])
        return tree['value']
    else:
        # The splitting value of x.
        split_feature_value = x[tree['splitting_feature'][0]]
        if annotate:
            print("Split on %s = %s" % (tree['splitting_feature'],
                                        split_feature_value))
        if split_feature_value < tree['splitting_feature'][1]:
            return make_prediction(tree['left'], x, annotate)
        else:
            return make_prediction(tree['right'], x, annotate)


class RegressionTree:
    """Least-square regression tree over a pandas DataFrame of features."""

    def __init__(self, training_data, labels, max_depth=5, ideal_ls=100):
        self.training_data = training_data
        self.labels = labels
        self.max_depth = max_depth
        self.ideal_ls = ideal_ls
        self.tree = None

    def fit(self):
        """Build the tree; resets the global leaf-index counter."""
        global node_id
        node_id = 0
        all_pos_split = {}
        pool = Pool()
        splitting_data = [self.training_data.iloc[:, col].tolist()
                          for col in range(self.training_data.shape[1])]
        cols = [col for col in range(self.training_data.shape[1])]
        for dat, col in pool.map(get_splitting_points,
                                 zip(splitting_data, cols)):
            all_pos_split[col] = dat
        pool.close()

        self.tree = create_tree(self.training_data, all_pos_split,
                                self.labels, self.max_depth, self.ideal_ls)

    def predict(self, test):
        """Return an ndarray of predictions for the rows in `test`."""
        prediction = np.array([make_prediction(self.tree, x) for x in test])
        return prediction


if __name__ == '__main__':
    # read in data, label
    data = pd.read_excel("mlr06.xls")
    test = [[478, 184, 40, 74, 11, 31],
            [1000, 10000, 10000, 10000, 10000, 1000, 100000]]
    label = data['X7']
    del data['X7']
    model = RegressionTree(data, label)
    model.fit()
    print(model.predict(test))
ory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import monary # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.doctest', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Monary' copyright = u'2014, David J. C. Beach' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = monary.__version__ # The full version, including alpha/beta/rc tags. release = monary.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin
themes. html_theme = 'defau
lt' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. 
#html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Monarydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'Monary.tex', u'Monary Documentation', u'David J. C. Beach', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'monary', u'Monary Documentation', [u'David J. C. Beach'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Monary', u'Monary Documentation', u'David J. C. Beach', 'Monary', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_m
form = self.search_form.refresh() get_filter_args(self._filters) widgets = self._get_chart_widget( filters=self._filters, definition=self.definitions[group_by], order_column=self.definitions[group_by]["group"], order_direction="asc", ) widgets = self._get_search_widget(form=form, widgets=widgets) self.update_redirect() return self.render_template( self.chart_template, route_base=self.route_base, title=self.chart_title, label_columns=self.label_columns, definitions=self.definitions, group_by_label=self.group_by_label, height=self.height, widgets=widgets, appbuilder=self.appbuilder, ) class DirectByChartView(GroupByChartView): """ Use this class to display charts with multiple series, based on columns or methods defined on models. You can display multiple charts on the same view. Default routing point is '/chart' Setup definitions property to configure the chart :label: (optional) String label to display on chart selection. :group: String with the column name or method from model. :formatter: (optional) function that formats the output of 'group' key :series: A list of tuples with the aggregation function and the column name to apply the aggregation The **definitions** property respects the following grammar:: definitions = [ { 'label': 'label for chart definition', 'group': '<COLNAME>'|'<MODEL FUNCNAME>', 'formatter': <FUNC FORMATTER FOR GROUP COL>, 'series': ['<COLNAME>'|'<MODEL FUNCNAME>',...] }, ... 
] example:: class CountryDirectChartView(DirectByChartView): datamodel = SQLAInterface(CountryStats) chart_title = 'Direct Data Example' definitions = [ { 'label': 'Unemployment', 'group': 'stat_date', 'series': ['unemployed_perc', 'college_perc'] } ] """ ProcessClass = DirectProcessData # ------------------------------------------------------- # DEPRECATED SECTION # ------------------------------------------------------- class BaseSimpleGroupByChartView(BaseChartView): # pragma: no cover group_by_columns = [] """ A list of columns to be possibly grouped by, this list must be filled """ def __init__(self, **kwargs): if not self.group_by_columns: raise Exception( "Base Chart View property <group_by_columns> must not be empty" ) else: super(BaseSimpleGroupByChartView, self).__init__(**kwargs) def _get_chart_widget( sel
f, filters=None, order_column="", order_direction=
"", widgets=None, group_by=None, height=None, **args ): height = height or self.height widgets = widgets or dict() group_by = group_by or self.group_by_columns[0] joined_filters = filters.get_joined_filters(self._base_filters) value_columns = self.datamodel.query_simple_group( group_by, filters=joined_filters ) widgets["chart"] = self.chart_widget( route_base=self.route_base, chart_title=self.chart_title, chart_type=self.chart_type, chart_3d=self.chart_3d, height=height, value_columns=value_columns, modelview_name=self.__class__.__name__, **args ) return widgets class BaseSimpleDirectChartView(BaseChartView): # pragma: no cover direct_columns = [] """ Make chart using the column on the dict chart_columns = {'chart label 1':('X column','Y1 Column','Y2 Column, ...), 'chart label 2': ('X Column','Y1 Column',...),...} """ def __init__(self, **kwargs): if not self.direct_columns: raise Exception( "Base Chart View property <direct_columns> must not be empty" ) else: super(BaseSimpleDirectChartView, self).__init__(**kwargs) def get_group_by_columns(self): """ returns the keys from direct_columns Used in template, so that user can choose from options """ return list(self.direct_columns.keys()) def _get_chart_widget( self, filters=None, order_column="", order_direction="", widgets=None, direct=None, height=None, **args ): height = height or self.height widgets = widgets or dict() joined_filters = filters.get_joined_filters(self._base_filters) count, lst = self.datamodel.query( filters=joined_filters, order_column=order_column, order_direction=order_direction, ) value_columns = self.datamodel.get_values(lst, list(direct)) value_columns = dict_to_json( direct[0], direct[1:], self.label_columns, value_columns ) widgets["chart"] = self.chart_widget( route_base=self.route_base, chart_title=self.chart_title, chart_type=self.chart_type, chart_3d=self.chart_3d, height=height, value_columns=value_columns, modelview_name=self.__class__.__name__, **args ) return widgets class 
ChartView(BaseSimpleGroupByChartView): # pragma: no cover """ **DEPRECATED** Provides a simple (and hopefully nice) way to draw charts on your application. This will show Google Charts based on group by of your tables. """ @expose("/chart/<group_by>") @expose("/chart/") @has_access def chart(self, group_by=""): form = self.search_form.refresh() get_filter_args(self._filters) group_by = group_by or self.group_by_columns[0] widgets = self._get_chart_widget(filters=self._filters, group_by=group_by) widgets = self._get_search_widget(form=form, widgets=widgets) return self.render_template( self.chart_template, route_base=self.route_base, title=self.chart_title, label_columns=self.label_columns, group_by_columns=self.group_by_columns, group_by_label=self.group_by_label, height=self.height, widgets=widgets, appbuilder=self.appbuilder, ) class TimeChartView(BaseSimpleGroupByChartView): # pragma: no cover """ **DEPRECATED** Provides a simple way to draw some time charts on your application. This will show Google Charts based on count and group by month and year for your tables. """ chart_template = "appbuilder/general/charts/chart_time.html" chart_type = "ColumnChart" def _get_chart_widget( self, filters=None, order_column="", order_direction="", widgets=None, group_by=None, period=None, height=None, **args ): height = height or self.height widgets = widgets or dict() group_by = group_by or self.group_by_columns[0] joined_filters = filters.get_joined_filters(self._base_filters) if period == "month" or not period: value_columns = self.datamodel.query_month_group( group_by, filters=joined_filters ) elif period == "year": value_columns = self.datamodel.query_year_group( group_by, filters=joined_filters ) widgets["chart"] = self.chart_widget( route_base=self.route_base, chart_title=self.chart_title,
#!/usr/bin/env python ''' Pymodbus Asynchronous Client Examples -------------------------------------------------------------------------- The following is an example of how to use the asynchronous modbus client implementation from pymodbus. ''' #---------------------------------------------------------------------------# # import needed libraries #---------------------------------------------------------------------------# from twisted.internet import reactor, protocol from pymodbus.constants import Defaults #---------------------------------------------------------------------------# # choose the requested modbus protocol #---------------------------------------------------------------------------# from pymodbus.client.async import ModbusClientProtocol #from pymodbus.client.async import ModbusUdpClientProtocol #---------------------------------------------------------------------------# # configure the client logging #---------------------------------------------------------------------------# import logging logging.basicConfig() log = logging.getLogger() log.setLevel(logging.DEBUG) #---------------------------------------------------------------------------# # helper method to test deferred callbacks #---------------------------------------------------------------------------# def dassert(deferred, callback): def _assertor(value): assert(value) deferred.addCallback(lambda r: _assertor(callback(r))) deferred.addErrback(lambda _: _assertor(False)) #---------------------------------------------------------------------------# # specify slave to query #---------------------------------------------------------------------------# # The slave to query is specified in an optional parameter for each # individual request. 
This can be done by specifying the `unit` parameter # which defaults to `0x00` #---------------------------------------------------------------------------# def exampleRequests(client): rr = client.read_coils(1, 1, unit=0x02) #---------------------------------------------------------------------------# # example requests #---------------------------------------------------------------------------# # simply call the methods that you would like to use. An example session # is displayed below along with some assert checks. Note that unlike the # synchronous version of the client, the asynchronous version returns # deferreds which can be thought of as a handle to the callback to send # the result of the operation. We are handling the result using the # deferred assert helper(dassert). #---------------------------------------------------------------------------# def beginAsynchronousTest(client): rq = client.write_coil(1, True) rr = client.read_coils(1,1) dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error dassert(rr, lambda r: r.bits[0] == True) # test the expected value rq = client.write_coils(1, [True]*8) rr = client.read_coils(1,8) dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error dassert(rr, lambda r: r.bits == [True]*8) # test the expected value rq = client.write_coils(1, [False]*8) rr = client.read_discrete_inputs(1,8) dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error dassert(rr, lambda r: r.bits == [True]*8) # test the expected value rq = client.write_register(1, 10) rr = client.read_holding_registers(1,1) dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error dassert(rr, lambda r: r.registers[0] == 10) # test the expected value rq = client.write_registers(1, [10]*8) rr = client.read_input_registers(1,8) dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error dassert(rr, lambda r: r.registers == [17]*8) # test the expected value 
arguments = { 'read_address
': 1, 'read_count': 8, 'write_address': 1, 'write_registers': [20]*8, } rq = client.readwrite_registers(**arguments) rr = client.read_input_reg
isters(1,8) dassert(rq, lambda r: r.registers == [20]*8) # test the expected value dassert(rr, lambda r: r.registers == [17]*8) # test the expected value #-----------------------------------------------------------------------# # close the client at some time later #-----------------------------------------------------------------------# reactor.callLater(1, client.transport.loseConnection) reactor.callLater(2, reactor.stop) #---------------------------------------------------------------------------# # extra requests #---------------------------------------------------------------------------# # If you are performing a request that is not available in the client # mixin, you have to perform the request like this instead:: # # from pymodbus.diag_message import ClearCountersRequest # from pymodbus.diag_message import ClearCountersResponse # # request = ClearCountersRequest() # response = client.execute(request) # if isinstance(response, ClearCountersResponse): # ... do something with the response # #---------------------------------------------------------------------------# #---------------------------------------------------------------------------# # choose the client you want #---------------------------------------------------------------------------# # make sure to start an implementation to hit against. For this # you can use an existing device, the reference implementation in the tools # directory, or start a pymodbus server. #---------------------------------------------------------------------------# defer = protocol.ClientCreator(reactor, ModbusClientProtocol ).connectTCP("localhost", Defaults.Port) defer.addCallback(beginAsynchronousTest) reactor.run()
import pytest
from fastapi.testclient import TestClient

from ...utils import needs_py310

# Expected OpenAPI document for the tutorial011_py310 app; compared
# verbatim against the live GET /openapi.json response below.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/items/": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Read Items",
                "operationId": "read_items_items__get",
                "parameters": [
                    {
                        "required": False,
                        "schema": {
                            "title": "Q",
                            "type": "array",
                            "items": {"type": "string"},
                        },
                        "name": "q",
                        "in": "query",
                    }
                ],
            }
        }
    },
    "components": {
        "schemas": {
            "ValidationError": {
                "title": "ValidationError",
                "required": ["loc", "msg", "type"],
                "type": "object",
                "properties": {
                    "loc": {
                        "title": "Location",
                        "type": "array",
                        "items": {"type": "string"},
                    },
                    "msg": {"title": "Message", "type": "string"},
                    "type": {"title": "Error Type", "type": "string"},
                },
            },
            "HTTPValidationError": {
                "title": "HTTPValidationError",
                "type": "object",
                "properties": {
                    "detail": {
                        "title": "Detail",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/ValidationError"},
                    }
                },
            },
        }
    },
}


@pytest.fixture(name="client")
def get_client():
    # Imported lazily so the 3.10-only syntax in the tutorial module is
    # not parsed on older interpreters (the tests are gated anyway).
    from docs_src.query_params_str_validations.tutorial011_py310 import app

    return TestClient(app)


@needs_py310
def test_openapi_schema(client: TestClient):
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == openapi_schema


@needs_py310
def test_multi_query_values(client: TestClient):
    # Repeated ?q= parameters are collected into a list.
    response = client.get("/items/?q=foo&q=bar")
    assert response.status_code == 200, response.text
    assert response.json() == {"q": ["foo", "bar"]}


@needs_py310
def test_query_no_values(client: TestClient):
    # Omitting q entirely yields the declared default of None.
    response = client.get("/items/")
    assert response.status_code == 200, response.text
    assert response.json() == {"q": None}
        # NOTE(review): chunk begins mid-statement — the left-hand side of this
        # assignment (presumably ``self.power = Floa``) lies outside the chunk.
        tProp('Power', experiment, 'Power (dBm)', '0')
        self.pulsewidth = FloatProp('PulseWidth', experiment, 'Pulse Width (us)', '0')
        self.pulserep = FloatProp('PulseRep', experiment, 'Pulse Rep Time (us)', '0')
        self.startfreq = FloatProp('StartFreq', experiment, 'Start Frequency (MHz)', '0')
        self.endfreq = FloatProp('EndFreq', experiment, 'End Frequency (MHz)', '0')
        self.sweeptime = IntProp('SweepTime', experiment, 'Sweep Time (ms)', '0')
        self.properties += ['ID', 'model', 'serial', 'frequency', 'power',
                            'pulsewidth', 'pulserep', 'pulseenable', 'startfreq',
                            'endfreq', 'sweeptime', 'sweepmode', 'sweeptype',
                            'sweepdir', 'sweepenable', 'internalref',
                            'useexternalmod', 'rfonoff', 'maxPower']

    def initialize(self, va):
        """Open this device via the vendor DLL and cache its power/frequency limits.

        ``va`` is the loaded VNX_fmsynth DLL handle shared by all devices.
        Raises PauseError when the DLL reports a nonzero error code.
        """
        self.va = va
        errcode = self.va.fnLMS_InitDevice(self.ID)
        if (errcode != 0):
            # if device fails to initialize, it may be because it was not
            # closed previously. Try closing and reinitializing it.
            errcodereset = self.va.fnLMS_CloseDevice(self.ID)
            if (errcodereset != 0):
                logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID, errcode))
                raise PauseError
            errcode = self.va.fnLMS_InitDevice(self.ID)
            if (errcode != 0):
                logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID, errcode))
                raise PauseError
        # DLL reports power in 0.25 dBm steps — hence the /4.
        self.maxPower = int(self.va.fnLMS_GetMaxPwr(self.ID)/4)
        self.minPower = int(self.va.fnLMS_GetMinPwr(self.ID)/4)
        # Frequencies are in DLL units of 10 Hz — presumably; see freq_unit().
        self.minFreq = int(self.va.fnLMS_GetMinFreq(self.ID))
        self.maxFreq = int(self.va.fnLMS_GetMaxFreq(self.ID))
        return

    def freq_unit(self, val):
        # Convert MHz to the DLL's 10 Hz units (1 MHz = 100000 x 10 Hz).
        return int(val*100000)

    def power_unit(self, value):
        # Convert dBm to the DLL's attenuation code in 0.25 dB steps,
        # measured down from maxPower.
        return int((self.maxPower - value)*4)

    def power_sanity_check(self, value):
        """Raise PauseError when *value* (dBm) is outside the device limits."""
        if (value < self.minPower or value > self.maxPower):
            logger.error("Vaunix device {} power ({} dBm) outside min/max range: {} dBm, {} dBm.".format(self.ID, value, self.minPower, self.maxPower))
            raise PauseError
        return

    def freq_sanity_check(self, value):
        """Raise PauseError when *value* (x10 Hz) is outside the device limits."""
        if (value < self.minFreq or value > self.maxFreq):
            logger.error("Vaunix device {} frequency ({} x10 Hz) outside min/max range: {} x10 Hz, {} x10 Hz.".format(self.ID, value, self.minFreq, self.maxFreq))
            raise PauseError
        return

    def update(self):
        """Push all current property values to the hardware.

        Sanity-checks frequency/power first; sweep parameters are only sent
        when sweepenable is set.  RF on/off is always applied last.
        """
        if (self.rfonoff):
            self.freq_sanity_check(self.freq_unit(self.frequency.value))
            self.va.fnLMS_SetFrequency(self.ID, self.freq_unit(self.frequency.value))
            self.power_sanity_check(self.power.value)
            self.va.fnLMS_SetPowerLevel(self.ID, self.power_unit(self.power.value))
            if (self.sweepenable):
                self.freq_sanity_check(self.freq_unit(self.startfreq.value))
                self.va.fnLMS_SetStartFrequency(self.ID, self.freq_unit(self.startfreq.value))
                self.freq_sanity_check(self.freq_unit(self.endfreq.value))
                self.va.fnLMS_SetEndFrequency(self.ID, self.freq_unit(self.endfreq.value))
                self.va.fnLMS_SetSweepTime(self.ID, self.sweeptime.value)
                self.va.fnLMS_SetSweepDirection(self.ID, self.sweepdir)
                self.va.fnLMS_SetSweepMode(self.ID, self.sweepmode)  # True: Repeat Sweep, False: Sweep Once
                self.va.fnLMS_SetSweepType(self.ID, self.sweeptype)  # True: Bidirectional Sweep, False: Unidirectional Sweep
                self.va.fnLMS_StartSweep(self.ID, self.sweepenable)
            self.va.fnLMS_SetFastPulsedOutput(self.ID, c_float(self.pulsewidth.value*1e-6), c_float(self.pulserep.value*1e-6), self.pulseenable)
            self.va.fnLMS_SetUseExternalPulseMod(self.ID, self.useexternalmod)
            self.va.fnLMS_SetUseInternalRef(self.ID, self.internalref)  # True: internal ref, False: external ref
            self.va.fnLMS_SaveSettings(self.ID)
        self.va.fnLMS_SetRFOn(self.ID, self.rfonoff)
        self.getparams()
        return

    def getparams(self):
        """Read back and log the device's current frequency and power."""
        logger.info("Parameters for Vaunix # {}".format(self.ID))
        logger.info("Frequency: {} MHz".format(
            self.va.fnLMS_GetFrequency(self.ID)/100000))
        logger.info("Power Level: {} dBm".format(
            self.va.fnLMS_GetPowerLevel(self.ID)/4))


class Vaunixs(Instrument):
    """Container instrument managing a list of Vaunix signal generators."""

    version = '2015.11.19'
    motors = Member()            # ListProp of individual Vaunix devices
    isInitialized = Bool(False)  # True once the DLL has been loaded
    va = Member()                # handle to the loaded VNX_fmsynth DLL
    testMode = Bool(False)       # Test mode: Set to False for actual use.

    def __init__(self, name, experiment, description=''):
        super(Vaunixs, self).__init__(name, experiment, description)
        self.motors = ListProp('motors', experiment,
                               'A list of individual Vaunix signal generators',
                               listElementType=Vaunix,
                               listElementName='Vaunix')
        self.properties += ['version', 'motors']
        num = self.initialize()
        self.motors.length = num
        self.motors.refreshGUI()

    # Initialize: loads and initializes DLL
    def initialize(self):
        """Load the vendor DLL and return the number of detected devices."""
        num = 0
        if self.enable:
            CDLL_file = "./vaunix/VNX_fmsynth.dll"
            self.va = CDLL(CDLL_file)
            if (self.testMode):
                logger.warning("Warning: Vaunix in test mode. Set testMode=False in vaunix.py to turn off test mode.")
            # Test mode... this needs to be set False for actual run. Do not
            # remove this command (default setting is True).
            self.va.fnLMS_SetTestMode(self.testMode)
            self.isInitialized = True
            num = self.detect_generators()
        return num

    def preExperiment(self, hdf5):
        """Before the experiment: load the DLL if needed and open each device."""
        if self.enable:
            if (not self.isInitialized):
                self.initialize()
            for i in self.motors:
                # initialize serial connection to each power supply
                i.initialize(self.va)
            self.isInitialized = True

    def preIteration(self, iterationresults, hdf5):
        """
        Every iteration, send the motors updated positions.
        """
        if self.enable:
            msg = ''
            try:
                for i in self.motors:
                    i.update()
            except Exception as e:
                # Force re-initialization on the next run after any failure.
                logger.error('Problem updating Vaunix:\n{}\n{}\n'.format(msg, e))
                self.isInitialized = False
                raise PauseError

    def postMeasurement(self, measurementresults, iterationresults, hdf5):
        return

    def postIteration(self, iterationresults, hdf5):
        return

    def postExperiment(self, hdf5):
        return

    def finalize(self, hdf5):
        return

    # detect_generators: Calls DLL function to check for number of
    # generators and their IDs.
    def detect_generators(self):
        """Query the DLL for attached devices and sync the motors list."""
        if (not self.isInitialized):
            # test if DLL is already loaded. If not, load it.
            self.initialize()
        num = self.va.fnLMS_GetNumDevices()  # ask DLL for the number of connected devices
        logger.debug("Number of vaunix devices detected: {}".format(num))
        while (num > len(self.motors)):
            # if num connected devices > number in array, add elements.
            self.motors.add()
        while (num < len(self.motors)):
            # if <, subtract elements.
            self.motors.pop(self.motors.length-1)
            self.motors.length -= 1
        devinfotype = c_uint*num
        devinfo = devinfotype()
        self.va.fnLMS_GetDevInfo(addressof(devinfo))  # get device IDs
        for mn, i in enumerate(self.motors):
            # NOTE(review): chunk is truncated mid-word here; remainder of
            # this method lies outside the chunk.
            i.ID = int(devinfo[mn])  # copy device IDs to ID variable mo
# -*- coding: utf-8 -*- import os import re import select import socket import struct import time from module.plugins.internal.Hoster import Hoster from module.plugins.internal.misc import exists, fsjoin class XDCC(Hoster): __name__ = "XDCC" __type__ = "hoster" __version__ = "0.42" __status__ = "testing" __pattern__ = r'xdcc://(?P<SERVER>.*?)/#?(?P<CHAN>.*?)/(?P<BOT>.*?)/#?(?P<PACK>\d+)/?' __config__ = [("nick", "str", "Nickname", "pyload" ), ("ident", "str", "Ident", "pyloadident" ), ("realname", "str", "Realname", "pyloadreal" ), ("ctcp_version", "str","CTCP version string", "pyLoad! IRC Interface")] __description__ = """Download from IRC XDCC bot""" __license__ = "GPLv3" __authors__ = [("jeix", "jeix@hasnomail.com" ), ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")] def setup(self): self.timeout = 30 self.multiDL = False def process(self, pyfile): #: Change request type self.req = self.pyload.requestFactory.getRequest(self.classname, type="XDCC") for _i in xrange(0, 3): try: nmn = self.do_download(pyfile.url) self.log_info("Download of %s finished." % nmn) return except socket.error, e: if hasattr(e, "errno") and e.errno is not None: err_no = e.errno if err_no in (10054, 10061): self.log_warning("Server blocked our ip, retry in 5 min") self.wait(300) continue else: self.log_error(_("Failed due to socket errors. Code: %s") % err_no) self.fail(_("Failed due to socket errors. 
Code: %s") % err_no) else: err_msg = e.args[0] self.log_error(_("Failed due to socket errors: '%s'") % err_msg) self.fail(_("Failed due to socket errors: '%s'") % err_msg) self.log_error(_("Server blocked our ip, retry again later manually")) self.fail(_("Server blocked our ip, retry again later manually")) def do_download(self, url): self.pyfile.setStatus("waiting") server, chan, bot, pack = re.match(self.__pattern__, url).groups() nick = self.config.get('nick') ident = self.config.get('ident') realname = self.config.get('realname') ctcp_version = self.config.get('ctcp_version') temp = server.split(':') ln = len(temp) if ln == 2: host, port = temp elif ln == 1: host, port = temp[0], 6667 else: self.fail(_("Invalid hostname for IRC Server: %s") % server) ####################### #: CONNECT TO IRC AND IDLE FOR REAL LINK dl_time = time.time() sock = socket.socket() self.log_info(_("Connecting to: %s:%s") % (host, port)) sock.connect((host, int(port))) if nick == "pyload": nick = "pyload-%d" % (time.time() % 1000) #: last 3 digits sock.send("NICK %s\r\n" % nick) sock.send("USER %s %s bla :%s\r\n" % (ident, host, realname)) self.log_info(_("Connect success.")) self.wait(5) # Wait for logon to complete sock.send("JOIN #%s\r\n" % chan) sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack)) #: IRC recv loop readbuffer = "" retry = None m = None while m is None: if retry: if time.time() > retry: retry = None dl_time = time.time() sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack)) else: if (dl_time + self.timeout) < time.time(): #@TODO: add in config sock.send("QUIT :byebye\r\n") sock.close() self.log_error(_("XDCC Bot did not answer")) self.fail(_("XDCC Bot did not answer")) fdset = select.select([sock], [], [], 0) if sock not in fdset[0]: continue readbuffer += sock.recv(1024) lines = readbuffer.split("\n") readbuffer = lines.pop() for line in lines: # if self.pyload.debug: # self.log_debug("*> " + decode(line)) line = line.rstrip() first = line.split() if first[0] 
== "PING": sock.send("PONG %s\r\n" % first[1]) if first[0] == "ERROR": self.fail(_("IRC-Error: %s") % line) msg = line.split(None, 3) if len(msg) != 4: continue msg = {'origin': msg[0][1:], 'action': msg[1], 'target': msg[2], 'text' : msg[3][1:]} if msg['targe
t'][0:len(nick)] == nick and m
sg['action'] == "PRIVMSG": if msg['text'] == "\x01VERSION\x01": self.log_debug(_("Sending CTCP VERSION")) sock.send("NOTICE %s :%s\r\n" % (msg['origin'], ctcp_version)) elif msg['text'] == "\x01TIME\x01": self.log_debug(_("Sending CTCP TIME")) sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time())) elif msg['text'] == "\x01LAG\x01": pass #: don't know how to answer if msg['origin'][0:len(bot)] != bot\ or msg['target'][0:len(nick)] != nick\ or msg['action'] not in ("PRIVMSG", "NOTICE"): continue self.log_debug(_("PrivMsg: <%s> - %s" % (msg['origin'], msg['text']))) if "You already requested that pack" in msg['text']: retry = time.time() + 300 elif "you must be on a known channel to request a pack" in msg['text']: self.log_error(_("Invalid channel")) self.fail(_("Invalid channel")) m = re.match('\x01DCC SEND (?P<NAME>.*?) (?P<IP>\d+) (?P<PORT>\d+)(?: (?P<SIZE>\d+))?\x01', msg['text']) #: Get connection data ip = socket.inet_ntoa(struct.pack('!I', int(m.group('IP')))) port = int(m.group('PORT')) file_name = m.group('NAME') if m.group('SIZE'): self.req.filesize = long(m.group('SIZE')) self.pyfile.name = file_name dl_folder = fsjoin(self.pyload.config.get('general', 'download_folder'), self.pyfile.package().folder if self.pyload.config.get("general", "folder_per_package") else "") dl_file = fsjoin(dl_folder, file_name) if not exists(dl_folder): os.makedirs(dl_folder) self.set_permissions(dl_folder) self.log_info(_("Downloading %s from %s:%d") % (file_name, ip, port)) self.pyfile.setStatus("downloading") newname = self.req.download(ip, port, dl_file, sock, self.pyfile.setProgress) if newname and newname != dl_file: self.log_info(_("%(name)s saved as %(newname)s") % {'name': self.pyfile.name, 'newname': newname}) dl_file = newname #: kill IRC socket #: sock.send("QUIT :byebye\r\n") sock.close() self.last_download = dl_file return self.last_download
import os import argparse import tensorflow as tf import numpy as np import sys sys.path.append('../') from reader import flickr8k_raw_data def make_example(image_feature, caption_feature, id): # The object we return ex = tf.train.SequenceExample() # A non-sequential feature of our example sequence_length = len(caption_feature) for f in image_feature: ex.context.feature["image_feature"].float_list.value.append(float(f)) ex.context.feature["id"].bytes_list.value.append(id) fl_tokens = ex.feature_lists.feature_list["caption_feature"] for token in caption_feature: fl_tokens.feature.add().int64_list.value.append(token) return ex def arguments(): parser = argparse.ArgumentParser() parser.add_argument('cnn_feats_path', help='a numpy.mmap expected') parser.add_argument( 'caption_tokens_dir', help='Directory containing train, test and dev captions.') args = parser.parse_args() return args def _strip_name(paths): return [(os.path.basename(p), i) for i, p in enumerate(paths)] def main(): args = arguments() # read the mmap file containing CNN features feats_fname = os.path.splitext(os.path.basename(args.cnn_feats_path))[0] img_name_list_path = os.path.join( os.path.dirname(args.cnn_feats_path), '{}_list.txt'.format( '_'.join(feats_fname.split('_')[:-3]))) feats_shape = tuple([int(i) for i in feats_fname.split('_')[-1].split('X')]) feats_mmap = np.memmap(args.cnn_feats_path, mode='r', # read-only shape=feats_shape, dtype=np.float32) img_to_idx = {} with open(img_name_list_path, 'r') as fp: img_to_idx = dict(_strip_name(fp.read().split('\n'))) # load all the captions train_caps, test_caps, dev_caps,
vocab = flickr8k_raw_data( args.caption_tokens_dir) rand_idx = np.arange(0, len(train_caps['names'])) rng = np.random.RandomState(seed=1234) rng.shuffle(rand_idx) # dump the captions generated for debugging purpose with open(os.path.join(args.caption_tokens_dir, 'dump.txt'), 'w') as fp: from
pprint import pformat fp.write("\n###### vocab######\n") fp.write(pformat(vocab)) fp.write("\n###### train ######\n") rand_train_caps = { 'names': [train_caps['names'][i] for i in rand_idx], 'word_to_ids': [train_caps['word_to_ids'][i] for i in rand_idx], } fp.write(pformat([(n, w) for n, w in zip( rand_train_caps['names'], rand_train_caps['word_to_ids'])])) fp.write("\n###### test ######\n") fp.write(pformat([(n, w) for n, w in zip( test_caps['names'], test_caps['word_to_ids'])])) fp.write("\n###### dev ######\n") fp.write(pformat([(n, w) for n, w in zip( dev_caps['names'], dev_caps['word_to_ids'])])) # process train imgs and write to a record file train_tfrecord_name = os.path.join( args.caption_tokens_dir, '{}.train.tfrecord'.format( '_'.join(feats_fname.split('_')[:-3]))) train_writer = tf.python_io.TFRecordWriter(train_tfrecord_name) # for i, (img_name, cap_ids) in enumerate( # zip(train_caps['names'], train_caps['word_to_ids'])): for i, (idx) in enumerate(rand_idx): img_name = train_caps['names'][idx].split('#')[0] cap_ids = train_caps['word_to_ids'][idx] img_feat = feats_mmap[img_to_idx[img_name], :] train_writer.write( make_example(img_feat, cap_ids, img_name).SerializeToString()) if i % 100 == 0: print "train records written {}/{}".format( i, len(train_caps['names'])) train_writer.close() # process test imgs and write to a record file test_tfrecord_name = os.path.join( args.caption_tokens_dir, '{}.test.tfrecord'.format( '_'.join(feats_fname.split('_')[:-3]))) test_writer = tf.python_io.TFRecordWriter(test_tfrecord_name) for i, (img_name, cap_ids) in enumerate( zip(test_caps['names'], test_caps['word_to_ids'])): img_name = img_name.split('#')[0] img_feat = feats_mmap[img_to_idx[img_name], :] test_writer.write( make_example(img_feat, cap_ids, img_name).SerializeToString()) if i % 100 == 0: print "test records written {}/{}".format( i, len(test_caps['names'])) test_writer.close() # process dev imgs and write to a record file dev_tfrecord_name = os.path.join( 
args.caption_tokens_dir, '{}.dev.tfrecord'.format( '_'.join(feats_fname.split('_')[:-3]))) dev_writer = tf.python_io.TFRecordWriter(dev_tfrecord_name) for i, (img_name, cap_ids) in enumerate( zip(dev_caps['names'], dev_caps['word_to_ids'])): img_name = img_name.split('#')[0] img_feat = feats_mmap[img_to_idx[img_name], :] dev_writer.write( make_example(img_feat, cap_ids, img_name).SerializeToString()) if i % 100 == 0: print "dev records written {}/{}".format( i, len(dev_caps['names'])) dev_writer.close() print "Wrote to %s" % train_tfrecord_name print "Wrote to %s" % dev_tfrecord_name print "Wrote to %s" % test_tfrecord_name if __name__ == '__main__': main()