prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
from django.conf import settings

# These 3 should be provided by your app
FACEBOOK_APP_ID = getattr(settings, 'FACEBOOK_APP_ID', None)
FACEBOOK_APP_SECRET = getattr(settings, 'FACEBOOK_APP_SECRET', None)
FACEBOOK_DEFAULT_SCOPE = getattr(settings, 'FACEBOOK_DEFAULT_SCOPE', [
    'email', 'user_about_me', 'user_birthday', 'user_website'])

# Absolute canvas page url as per facebook standard
FACEBOOK_CANVAS_PAGE = getattr(settings, 'FACEBOOK_CANVAS_PAGE',
                               'http://apps.facebook.com/fashiolista_test/')

# Disable this setting if you don't want to store a local image
FACEBOOK_STORE_LOCAL_IMAGE = getattr(
    settings, 'FACEBOOK_STORE_LOCAL_IMAGE', True)

# These you don't need to change
FACEBOOK_HIDE_CONNECT_TEST = getattr(
    settings, 'FACEBOOK_HIDE_CONNECT_TEST', False)

# Track all raw data coming in from FB
FACEBOOK_TRACK_RAW_DATA = getattr(settings, 'FACEBOOK_TRACK_RAW_DATA', False)

# If we should store friends and likes
FACEBOOK_STORE_LIKES = getattr(settings, 'FACEBOOK_STORE_LIKES', False)
FACEBOOK_STORE_FRIENDS = getattr(settings, 'FACEBOOK_STORE_FRIENDS', False)

# If we should be using celery to do the above two,
# recommended if you want to store friends or likes
FACEBOOK_CELERY_STORE = getattr(settings, 'FACEBOOK_CELERY_STORE', False)

# Use celery for updating tokens, recommended since it's quite slow
FACEBOOK_CELERY_TOKEN_EXTEND = getattr(
    settings, 'FACEBOOK_CELERY_TOKEN_EXTEND', False)

FACEBOOK_DEBUG_REDIRECTS = getattr(settings, 'FACEBOOK_DEBUG_REDIRECTS', False)

# Read-only mode, convenient when doing load testing etc.
FACEBOOK_READ_ONLY = getattr(settings, 'FACEBOOK_READ_ONLY', False)

# Check for required settings
required_settings = ['FACEBOOK_APP_ID', 'FACEBOOK_APP_SECRET']
locals_dict = locals()
for setting_name in required_settings:
    setting_available = locals_dict.get(setting_name) is not None
    assert setting_available, 'Please provide setting %s' % setting_name

# Allow custom registration template
FACEBOOK_REGISTRATION_TEMPLATE = getattr(
    settings, 'FACEBOOK_REGISTRATION_TEMPLATE',
    ['django_facebook/registration.html',
     'registration/registration_form.html'])

# Allow custom signup form
FACEBOOK_REGISTRATION_FORM = getattr(
    settings, 'FACEBOOK_REGISTRATION_FORM', None)

default_registration_backend = \
    'django_facebook.registration_backends.FacebookRegistrationBackend'
FACEBOOK_REGISTRATION_BACKEND = getattr(
    settings, 'FACEBOOK_REGISTRATION_BACKEND', default_registration_backend)

# Fall back redirect location when no other location was found
FACEBOOK_LOGIN_DEFAULT_REDIRECT = getattr(
    settings, 'FACEBOOK_LOGIN_DEFAULT_REDIRECT', '/')

# Force profile update every login
FACEBOOK_FORCE_PROFILE_UPDATE_ON_LOGIN = getattr(
    settings, 'FACEBOOK_FORCE_PROFILE_UPDATE_ON_LOGIN', False)

# Retry an open graph share 6 times (once every 15 minutes)
FACEBOOK_OG_SHARE_RETRIES = getattr(settings, 'FACEBOOK_OG_SHARE_RETRIES', 6)

# Retry a failed open graph share (when we have an updated token)
# for this number of days
FACEBOOK_OG_SHARE_RETRY_DAYS = getattr(
    settings, 'FACEBOOK_OG_SHARE_RETRY_DAYS', 7)

FACEBOOK_OG_SHARE_DB_TABLE = getattr(
    settings, 'FACEBOOK_OG_SHARE_DB_TABLE', None)

# FIX: the original defined FACEBOOK_FORCE_PROFILE_UPDATE_ON_LOGIN a second
# time here with identical arguments; the redundant duplicate was removed.

# Profile image location
FACEBOOK_PROFILE_IMAGE_PATH = getattr(
    settings, 'FACEBOOK_PROFILE_IMAGE_PATH', None)

# Ability to easily overwrite classes used for certain tasks
FACEBOOK_CLASS_MAPPING = getattr(settings, 'FACEBOOK_CLASS_MAPPING', None)
import codecs
import json

from clint import resources

resources.init('thomasballinger', 'trello-card-updater')


# These storage classes are being used as though they have in-memory caches.
class LocalStorage(object):
    """Attribute/item-style persistent storage backed by clint resources.

    Values are JSON-serialized on write and deserialized on read; a missing
    key reads back as None.
    """

    def __init__(self, name):
        # Bypass our own __setattr__ so 'res' is stored as a real attribute
        # instead of being written to the resource backend.
        object.__setattr__(self, 'res', getattr(resources, name))

    def __getattr__(self, att):
        s = self.res.read(att)
        if s is None:
            return None
        return json.loads(s)

    def __setattr__(self, att, data):
        self.res.write(att, json.dumps(data))

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, key, value)


class LocalObfuscatedStorage(LocalStorage):
    """Of questionable use, but should avoid card names being indexed"""

    def __getattr__(self, att):
        s = self.res.read(att)
        if s is None:
            return None
        # FIX: codecs.encode(s, 'rot13') works on Python 2 and 3; the
        # original s.encode('rot13') fails on Python 3 because rot_13 is a
        # str<->str transform, not a text->bytes codec.
        return json.loads(codecs.encode(s, 'rot13'))

    def __setattr__(self, att, data):
        self.res.write(att, codecs.encode(json.dumps(data), 'rot13'))


user = LocalStorage('user')
cache = LocalObfuscatedStorage('cache')
[{"url": "https://raw.
githubusercontent.com/ikesuncat/listas/master/Addon.xml", "fanart": ".\\fanart.jpg", "title": "I
ke"}]
"""iching. See: https://packaging.python.org/en/latest/distributing.html https://github.com/chengjun/iching """ # Always prefer setuptools over distutils from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the relevant file with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='iching', # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html version='3.4.0', description='Predicting your life with the Book of Changes', long_description=long_description, # The project's main homepage. url='https://github.com/chengjun/iching', # Author details author='Cheng-Jun Wang', author_email='wangchj04@gmail.com', # Choose your license license='MIT', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 4 - Beta', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: MIT License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7' ], # What does your project relate to? keywords='I Ching', # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). packages=find_packages(exclude=['contrib', 'docs', 'tests*']), # List run-time dependencies here. 
These will be installed by pip when # your project is installed. For an analysis of "install_requires" vs pip's # requirements files see: # https://packaging.python.org/en/latest/requirements.html #install_requires=['matplotlib', 'collections'], # List additional groups of dependencies here (e.g. development # dependencies). You can install these using the following syntax, # for example: # $ pip install -e .[dev,test] extras_require={ 'dev': ['check-manifest'], 'test': ['coverage'], }, # If there are data files included in your packages that need to be # installed, specify them here. If using Python 2.6 or less, then these # have to be included in MANIFEST.in as well. package_data={ 'iching': ['package_data.dat'], }, # Although 'package_data' is the preferred approach, in some case you may # need to place data files outside of your packages. See: # http://docs.python.org/3.4/distutils/setupscript.html#installing-a
dditional-files # noqa # In this case, 'data_file' will be installed into '<sys
.prefix>/my_data' # data_files=[('my_data', ['data/data_file'])], # To provide executable scripts, use entry points in preference to the # "scripts" keyword. Entry points provide cross-platform support and allow # pip to create the appropriate form of executable for the target platform. entry_points={ 'console_scripts': [ 'sample=sample:main', ], }, )
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Add the optional ``affiliation`` text field to the ``Group`` model."""

    dependencies = [
        ('core', '0016_remove_user_is_census_admin'),
    ]

    operations = [
        migrations.AddField(
            model_name='group',
            name='affiliation',
            # Blank-allowed CharField with an empty-string default.
            field=models.CharField(default='', max_length=255, blank=True),
            preserve_default=True,
        ),
    ]
#!/usr/bin/env python
'''relay handling module'''

import time

from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module


class RelayModule(mp_module.MPModule):
    '''MAVProxy module exposing relay, servo and motortest commands.'''

    def __init__(self, mpstate):
        super(RelayModule, self).__init__(mpstate, "relay")
        self.add_command('relay', self.cmd_relay, "relay commands")
        self.add_command('servo', self.cmd_servo, "servo commands")
        self.add_command('motortest', self.cmd_motortest, "motortest commands")

    def _send_cmd(self, command, p1, p2, p3, p4):
        # Shared COMMAND_LONG plumbing: confirmation 0, params 5-7 unused.
        self.master.mav.command_long_send(
            self.target_system, self.target_component,
            command, 0,
            p1, p2, p3, p4, 0, 0, 0)

    def cmd_relay(self, args):
        '''set relays'''
        if not args or args[0] not in ('set', 'repeat'):
            print("Usage: relay <set|repeat>")
            return
        if args[0] == "set":
            if len(args) < 3:
                print("Usage: relay set <RELAY_NUM> <0|1>")
                return
            self._send_cmd(mavutil.mavlink.MAV_CMD_DO_SET_RELAY,
                           int(args[1]), int(args[2]), 0, 0)
        if args[0] == "repeat":
            if len(args) < 4:
                print("Usage: relay repeat <RELAY_NUM> <COUNT> <PERIOD>")
                return
            self._send_cmd(mavutil.mavlink.MAV_CMD_DO_REPEAT_RELAY,
                           int(args[1]), int(args[2]), float(args[3]), 0)

    def cmd_servo(self, args):
        '''set servos'''
        if not args or args[0] not in ('set', 'repeat'):
            print("Usage: servo <set|repeat>")
            return
        if args[0] == "set":
            if len(args) < 3:
                print("Usage: servo set <SERVO_NUM> <PWM>")
                return
            self._send_cmd(mavutil.mavlink.MAV_CMD_DO_SET_SERVO,
                           int(args[1]), int(args[2]), 0, 0)
        if args[0] == "repeat":
            if len(args) < 5:
                print("Usage: servo repeat <SERVO_NUM> <PWM> <COUNT> <PERIOD>")
                return
            self._send_cmd(mavutil.mavlink.MAV_CMD_DO_REPEAT_SERVO,
                           int(args[1]), int(args[2]),
                           int(args[3]), float(args[4]))

    def cmd_motortest(self, args):
        '''run motortests on copter'''
        if len(args) != 4:
            print("Usage: motortest motornum type value timeout")
            return
        # NOTE: the target component is hard-wired to 0 here, unlike the
        # relay/servo commands above -- kept as in the original.
        self.master.mav.command_long_send(
            self.target_system, 0,
            mavutil.mavlink.MAV_CMD_DO_MOTOR_TEST, 0,
            int(args[0]), int(args[1]), int(args[2]), int(args[3]),
            0, 0, 0)


def init(mpstate):
    '''initialise module'''
    return RelayModule(mpstate)
ort User from django.test import TestCase from django.urls.base import reverse class TestAccountRegistration(TestCase): def setUp(self): # create one user for convenience response = self.client.post( reverse('account:register'), { 'username': 'Alice', 'email': 'alice@localhost', 'password': 'supasecret', 'password2': 'supasecret', }, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:login')) self.assertEqual(response.status_code, 200) def test_registration(self): self.assertEqual(len(User.objects.all()), 1) user = User.objects.get(username='Alice') self.assertEqual(user.email, 'alice@localhost') response = self.client.post( reverse('account:register'), { 'username': 'Bob', 'email': 'bob@localhost', 'password': 'foo', 'password2': 'foo', }, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:login')) self.assertEqual(response.status_code, 200) self.assertEqual(len(User.objects.all()), 2) def test_duplicate_username(self): response = self.client.post( reverse('account:register'), { 'username': 'Alice', 'email': 'alice2@localhost', 'password': 'supasecret', 'password2': 'supasecret', }, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:register')) self.assertEqual(response.status_code, 200) self.assertEqual(len(User.objects.all()), 1) def test_duplicate_email(self): response = self.client.post( reverse('account:register'), { 'username': 'Alice2000', 'email': 'alice@localhost', 'password': 'supasecret', 'password2': 'supasecret', }, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:register')) self.assertEqual(response.status_code, 200) self.assertEqual(len(User.objects.all()), 1) def test_non_matching_passwords(self): response = self.client.post( 
reverse('account:register'), { 'username': 'Bob', 'email': 'bob@localhost', 'password': 'foo', 'password2': 'bar', }, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:register')) self.assertEqual(response.status_code, 200) self.assertEqual(len(User.objects.all()), 1) def test_form_view(self): response = self.client.get(reverse('account:register')) self.assertEqual(response.status_code, 200) class TestLogin(TestCase): def setUp(self): # create one user for convenience response = self.client.post( reverse('account:register'), { 'username': 'Alice', 'email': 'alice@localhost', 'password': 'supasecret', 'password2': 'supasecret', }, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:login')) self.assertEqual(response.status_code, 200) def test_login(self): response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'supasecret'}, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:home')) self.assertEqual(response.status_code, 200) def test_disabled_login(self): user = User.objects.all().update(is_active=False) response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'supasecret'}, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:login')) self.assertEqual(response.status_code, 200) def test_wrong_credentials(self): response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'wrong'}, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:login')) self.assertEqual(response.status_code, 200) def test_wrong_user(self): response = self.client.post(
reverse('account:login'), {'username': 'Bob', 'password': 'supasecret'}, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:login')) self.assertEqual(response.status_code, 200) def test_login_view(self): response = self.client.get(reverse('account:login')) self.assertEqual(response.status_code, 200) def test_login_view_being_logged_in(self): response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'supasecret'}, follow=True ) response = self.client.get( reverse('account:login'), follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:home')) self.assertEqual(response.status_code, 200) response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'supasecret'}, follow=True ) self.assertEqual(response.redirect_chain[0][1], 302) self.assertEqual(response.redirect_chain[0][0], reverse('account:home')) self.assertEqual(response.status_code, 200) def test_home_view_while_not_logged_in(self): response = self.client.get(reverse('account:home'), follow=True) self.assertEqual(response.redirect_chain[0][1], 302) self.assertTrue(response.redirect_chain[0][0].startswith(reverse('account:login'))) self.assertEqual(response.status_code, 200) def test_home_view_while_logged_in(self): response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'supasecret'}, follow=True ) response = self.client.get(reverse('account:home')) self.assertEqual(response.status_code, 200) def test_register_view_while_logged_in(self): response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'supasecret'}, follow=True ) response = self.client.get(reverse('account:register'), follow=True) self.assertEqual(response.redirect_chain[0][1], 302) self.assertTrue(response.redirect_chain[0][0].startswith(reverse('account:home'))) 
self.assertEqual(response.status_code, 200) def test_logout(self): response = self.client.post( reverse('account:login'), {'username': 'Alice', 'password': 'supasecret'}, follow=True ) user = auth.get_user(self.client) self.assertTrue(user.is_authenticated) response = self.client.get(reverse('account:logout'), follow=True) self.assertEqual(response.redirect_
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Line-by-line file diff helper.

Author: edony - edonyzpc@gmail.com
Twitter: @edonyzpc
Filename: filebuf.py

Writes each pair of positionally-differing lines of two files into a
buffer file named `tmp`.
"""


class PyColor(object):
    """ANSI escape helpers for colored print in the Python interpreter.

    "F3" call Addpy() function to add this class which is defined
    in the .vimrc for the Vim editor.
    """

    def __init__(self):
        # Reference table kept as instance data, as in the original.
        self.self_doc = r"""
        STYLE: \033['display model';'foreground';'background'm
        DETAILS:
        FOREGROUND        BACKGOUND       COLOR
        ---------------------------------------
        30                40              black
        31                41              red
        32                42              green
        33                43              yellow
        34                44              blue
        35                45              purple
        36                46              cyan
        37                47              white
        DISPLAY MODEL    DETAILS
        -------------------------
        0                default
        1                highlight
        4                underline
        5                flicker
        7                reverse
        8                non-visiable
        e.g:
        \033[1;31;40m   <!--1-highlight;31-foreground red;40-background black-->
        \033[0m         <!--set all into default-->
        """
        self.warningcolor = '\033[0;37;41m'
        self.tipcolor = '\033[0;31;42m'
        self.endcolor = '\033[0m'
        self._newcolor = ''

    @property
    def new(self):
        """Customized Python print color (raw escape string)."""
        return self._newcolor

    @new.setter
    def new(self, color_str):
        """Set a new custom color escape string."""
        self._newcolor = color_str

    def disable(self):
        """Disable color print.

        NOTE(review): only warningcolor/endcolor are cleared; tipcolor and
        the custom color are left untouched, as in the original.
        """
        self.warningcolor = ''
        self.endcolor = ''


class FileBuf(object):
    """Collect the positionally-differing lines of two files into a buffer.

    Each differing pair is recorded as '<lineno><marker><line>' with the
    line from file1 first: marker '-' when file1 has more lines, '+'
    otherwise (original convention preserved).
    """

    def __init__(self, file1, file2):
        """Remember both paths and pre-count their lines.

        FIX: files are now closed via context managers (the original
        leaked every handle it opened).
        """
        self.file1 = file1
        self.file2 = file2
        with open(self.file1) as f1:
            self.file1_line_num = len(f1.readlines())
        with open(self.file2) as f2:
            self.file2_line_num = len(f2.readlines())
        self.buffer = []

    def _collect(self, driver, follower, marker, driver_first):
        """Walk `driver` line by line against `follower`, buffering diffs.

        `driver` must be the (equal-or-)longer file so `follower.readline()`
        returning '' at EOF is the only short-read case.
        """
        lineno = 0
        for drv_line in driver.readlines():
            fol_line = follower.readline()
            lineno += 1
            if drv_line == fol_line:
                continue
            # Represent an exhausted file as a bare newline, as before.
            if drv_line == '':
                drv_line = drv_line + '\n'
            if fol_line == '':
                fol_line = fol_line + '\n'
            drv_line = str(lineno) + marker + drv_line
            fol_line = str(lineno) + marker + fol_line
            # Preserve original append order: file1's line always first.
            if driver_first:
                self.buffer.append(drv_line)
                self.buffer.append(fol_line)
            else:
                self.buffer.append(fol_line)
                self.buffer.append(drv_line)

    def mark_diff(self):
        """Mark up the differing lines into the buffer."""
        with open(self.file1) as f1, open(self.file2) as f2:
            if self.file1_line_num > self.file2_line_num:
                self._collect(f1, f2, '-', driver_first=True)
            else:
                self._collect(f2, f1, '+', driver_first=False)

    def write_file(self):
        """Write the buffer into buffer file `tmp` in the current directory.

        FIX: the original never closed (or flushed) the output handle.
        """
        with open('tmp', 'w') as file_write:
            for line in self.buffer:
                file_write.write(line)


if __name__ == '__main__':
    test_file_buf = FileBuf('f2.txt', 'f1.txt')
    test_file_buf.mark_diff()
    test_file_buf.write_file()
# Per-league table zones.  For each league code the value maps:
#   "rl": [first, last] table positions of the relegation zone
#   "cl": [first, last] positions for the Champions-League spots
#   "el": [first, last] positions for the Europa-League spots
# (zone meanings presumed from the key names -- TODO confirm; [0, 0]
# appears to mean "no such zone", e.g. FL2's "el".)

__all__ = ['LEAGUE_PROPERTIES']

LEAGUE_PROPERTIES = {
    "PL":  {"rl": [18, 20], "cl": [1, 4], "el": [5, 5]},
    "EL1": {"rl": [21, 24], "cl": [1, 2], "el": [3, 6]},
    "EL2": {"rl": [21, 24], "cl": [1, 2], "el": [3, 6]},
    "ELC": {"rl": [22, 24], "cl": [1, 2], "el": [3, 6]},
    "BL1": {"rl": [16, 18], "cl": [1, 4], "el": [5, 6]},
    "BL2": {"rl": [16, 18], "cl": [1, 2], "el": [3, 3]},
    "BL3": {"rl": [18, 20], "cl": [1, 2], "el": [3, 3]},
    "PD":  {"rl": [18, 20], "cl": [1, 3], "el": [4, 6]},
    "SD":  {"rl": [19, 22], "cl": [1, 2], "el": [3, 6]},
    "SA":  {"rl": [18, 20], "cl": [1, 3], "el": [4, 5]},
    "PPL": {"rl": [17, 18], "cl": [1, 3], "el": [4, 5]},
    "DED": {"rl": [17, 18], "cl": [1, 3], "el": [4, 5]},
    "FL1": {"rl": [19, 20], "cl": [1, 3], "el": [4, 4]},
    "FL2": {"rl": [18, 20], "cl": [1, 3], "el": [0, 0]},
    "SB":  {"rl": [19, 22], "cl": [1, 2], "el": [3, 6]},
    "ENL": {"rl": [22, 24], "cl": [1, 2], "el": [3, 6]},
}
#from interface.services.icontainer_agent import ContainerAgentClient
#from pyon.ion.endpoint import ProcessRPCClient
from pyon.public import Container, log, IonObject
from pyon.util.containers import DotDict
from pyon.util.int_test import IonIntegrationTestCase

from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from ion.services.sa.observatory.observatory_management_service import ObservatoryManagementService
from interface.services.sa.iobservatory_management_service import IObservatoryManagementService, ObservatoryManagementServiceClient
from interface.services.sa.iinstrument_management_service import InstrumentManagementServiceClient
from pyon.util.context import LocalContextMixin
from pyon.core.exception import BadRequest, NotFound, Conflict, Inconsistent
from pyon.public import RT, PRED
#from mock import Mock, patch
from pyon.util.unit_test import PyonTestCase
from nose.plugins.attrib import attr
import unittest

from ooi.logging import log
from ion.services.sa.test.helpers import any_old


class FakeProcess(LocalContextMixin):
    """Minimal stand-in process used as an RPC call context."""
    name = ''


@attr('INT', group='sa')
@unittest.skip('capabilities not yet available')
class TestObservatoryNegotiation(IonIntegrationTestCase):
    """Integration tests for observatory resource/config negotiation.

    The entire class is skipped until the required capabilities exist.
    """

    def setUp(self):
        # Start the capability container and deploy the standard services.
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Service clients used by the (future) test bodies.
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.omsclient = ObservatoryManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)

    @unittest.skip("TDB")
    def test_request_resource(self):
        # L4-CI-SA-RQ-348 : Marine facility shall provide capabilities to define instrument use policies
        # L4-CI-SA-RQ-115 : Marine facility shall present resource requests to the marine infrastructure
        #
        # Plan:
        # create an observatory with resources including platforms with instruments
        # create an instrument use policy for one of the defined instruments
        # request access to the instrument that aligns with defined policy, verify that access is granted
        # request access to the instrument that is in conflict with defined policy, verify that access is NOT granted
        pass

    @unittest.skip("TBD")
    def test_request_config_change(self):
        # L4-CI-SA-RQ-342 : Marine facility shall present platform configuration change requests to the marine infrastructure
        #
        # Plan:
        # create an observatory with resources including platforms with instruments
        # request a configuration change to the platform t, verify that the request is submitted to the
        # Observatory operator and that then access is granted when that operator approves
        pass
# coding=utf-8 import time import datetime __author__ = 'JIE' #! /usr/bin/env python #coding=utf-8 from tornado.tcpserver import TCPServer from tornado.ioloop import IOLoop class Connection(object): clients = set() def __init__(self, stream, address): Connection.clients.add(self) self._stream = stream self._address = address self._stream.set_close_callback(self.on_clo
se) self.read_message() print "A new user has entered the chat room.", address def read_message(self): self._stream.read_until('\n', self.broadcast_messages) def broadcast_messages(self, data): print "User said:", data[:-1], self._addr
ess for conn in Connection.clients: conn.send_message(data) self.read_message() def send_message(self, data): self._stream.write(data) def on_close(self): print "A user has left the chat room.", self._address Connection.clients.remove(self) class ChatServer(TCPServer): def handle_stream(self, stream, address): print "New connection :", address, stream Connection(stream, address) print "connection num is:", len(Connection.clients) if __name__ == '__main__': print "Server start ......" server = ChatServer() server.listen(8000) IOLoop.instance().start()
# -*- coding: utf-8 -*-

# BankCSVtoQif - Smart conversion of csv files from a bank to qif
# Copyright (C) 2015-2016 Nikolai Nowaczyk
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from setuptools import setup, find_packages

version = "0.0.1"

setup(
    name='bankcsvtoqif',
    version=version,
    description='Smart conversion of csv files from a bank to qif',
    author='Nikolai Nowaczyk',
    author_email='mail@nikno.de',
    license='GNU GPLv2',
    url='https://github.com/niknow/BankCSVtoQif/tree/master/bankcsvtoqif',
    packages=find_packages(),
    test_suite='bankcsvtoqif.tests',
    tests_require=['pytest'],
    install_requires=['monthdelta'],
)
import datetime

import unittest2
from google.appengine.ext import testbed

from consts.event_type import EventType
from datafeeds.usfirst_matches_parser import UsfirstMatchesParser
from helpers.match_helper import MatchHelper
from models.event import Event
from models.match import Match


class TestAddMatchTimes(unittest2.TestCase):
    """Tests for MatchHelper.add_match_times, including a DST-spanning event."""

    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()

        # NOTE(review): timezone_id is America/New_York although the event
        # is named "Silicon Valley Regional" -- kept as in the original.
        self.event = Event(
            id="2014casj",
            event_short="casj",
            event_type_enum=EventType.REGIONAL,
            name="Silicon Valley Regional",
            start_date=datetime.datetime(2014, 2, 27, 0, 0),
            end_date=datetime.datetime(2014, 3, 1, 0, 0),
            year=2014,
            timezone_id="America/New_York",
        )

        # Dates chosen to span the US DST change (March 9, 2014).
        self.event_dst = Event(
            id="2014casj",
            event_short="casj",
            event_type_enum=EventType.REGIONAL,
            name="Silicon Valley Regional",
            start_date=datetime.datetime(2014, 3, 8, 0, 0),
            end_date=datetime.datetime(2014, 3, 9, 0, 0),
            year=2014,
            timezone_id="America/Los_Angeles",
        )

    def match_dict_to_matches(self, match_dicts):
        """Build Match models (attached to self.event) from parsed dicts."""
        return [
            Match(
                id=Match.renderKeyName(
                    self.event.key.id(),
                    d.get("comp_level", None),
                    d.get("set_number", 0),
                    d.get("match_number", 0)),
                event=self.event.key,
                year=self.event.year,
                set_number=d.get("set_number", 0),
                match_number=d.get("match_number", 0),
                comp_level=d.get("comp_level", None),
                team_key_names=d.get("team_key_names", None),
                time_string=d.get("time_string", None),
                alliances_json=d.get("alliances_json", None),
            )
            for d in match_dicts
        ]

    def test_match_times(self):
        # Using matches from a random event as data.
        with open('test_data/usfirst_html/usfirst_event_matches_2013cama.html', 'r') as f:
            match_dicts, _ = UsfirstMatchesParser.parse(f.read())
            matches = self.match_dict_to_matches(match_dicts)
            MatchHelper.add_match_times(self.event, matches)

            self.assertEqual(len(matches), 92)

            # NOTE(review): -5 is the America/New_York (EST) offset despite
            # the PST name -- kept to match the original expectations.
            PST_OFFSET = -5
            self.assertEqual(
                matches[0].time,
                datetime.datetime(2014, 2, 28, 9, 0) - datetime.timedelta(hours=PST_OFFSET))
            self.assertEqual(
                matches[75].time,
                datetime.datetime(2014, 3, 1, 11, 50) - datetime.timedelta(hours=PST_OFFSET))

    def test_match_times_dst(self):
        # Using matches from a random event as data.
        with open('test_data/usfirst_html/usfirst_event_matches_2012ct.html', 'r') as f:
            match_dicts, _ = UsfirstMatchesParser.parse(f.read())
            matches = self.match_dict_to_matches(match_dicts)
            MatchHelper.add_match_times(self.event_dst, matches)

            self.assertEqual(len(matches), 125)

            PST_OFFSET = -8
            PDT_OFFSET = -7
            # First match is before the DST switch, last one after it.
            self.assertEqual(
                matches[0].time,
                datetime.datetime(2014, 3, 8, 9, 0) - datetime.timedelta(hours=PST_OFFSET))
            self.assertEqual(
                matches[-1].time,
                datetime.datetime(2014, 3, 9, 16, 5) - datetime.timedelta(hours=PDT_OFFSET))
from pymacy.db import get_db
from bson.json_util import dumps

db = get_db()

# Collect the first 100 "Ni" benchmark documents: the break fires on the
# 101st document, before it would be appended.
results = []
count = 0
for doc in db.benchmark.find({"element": "Ni"}):
    count += 1
    if count > 100:
        break
    results.append(doc)

print(results[0])

# Serialize with bson's json_util so BSON types survive the dump.
with open("Ni.json", 'w') as f:
    serialized = dumps(results)
    f.write(serialized)
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2017 Joshua Charles Campbell # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. from __future__ import print_function, division from six import string_types from runpy import run_path from inspect import isclass, getmembers, isroutine import logging logger = logging.getLogger(__name__) error = logger.error warn = logger.warn info = logger.info debug = logger.deb
ug class Config(object): def __init__(self, file_path): self._config = run_path(file_path) de
f __getattr__(self, attr): return self._config[attr] def restify_class(self, o): if isclass(o): d = {} for k, v in getmembers(o): if '__' not in k: d[k] = self.restify_class(v) return d else: assert (isinstance(o, dict), isinstance(o, float), isinstance(o, list), isinstance(o, int), isinstance(o, string_types) ), o return o def restify(self): d = {} for k, v in self._config.items(): if '__' not in k: x = self.restify_class(v) d[k] = x return d
# Exercise-style script (Learn Python the Hard Way, ex16 pattern), Python 2.
# Usage: python <script> <filename>
# Overwrites <filename> with three lines typed interactively by the user.
# NOTE: uses Python 2 print statements and raw_input; not Python 3 compatible.
from sys import argv

# argv[0] is the script name; argv[1] is the target file to overwrite.
script, filename = argv

print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."

# Pause so the user can abort before the file is destroyed.
raw_input("?")

print "Opening the file..."
target = open(filename,'w')

print "Truncating the file. Goodbye!"
# 'w' mode already truncates on open; this explicit truncate() is redundant
# but kept for demonstration purposes.
target.truncate()

print "Now I'm going to ask you for three lines."

line1 = raw_input("line 1:")
line2 = raw_input("line 2:")
line3 = raw_input("line 3:")

print "I'm going to write these to the file."

# Join the three lines, each terminated with a newline, and write them once.
content = "%s\n%s\n%s\n" % (line1, line2, line3)
target.write(content)

print "And finally, we close it."
target.close()
""" Testing hrf module """ from __future__ import absolute_import from os.path import dirname, join as pjoin import numpy as np from scipy.stats import gamma import scipy.io as sio from ..hrf import ( gamma_params, gamma_expr, lambdify_t, spm_hrf_compat, spmt, dspmt, ddspmt, ) from nose.tools import assert_raises from numpy.testing import assert_almost_equal def test_gamma(): t = np.linspace(0, 30, 5000) # make up some numbers pk_t = 5.0 fwhm = 6.0 # get the estimated parameters shape, scale, coef = gamma_params(pk_t, fwhm) # get distribution function g_exp = gamma_expr(pk_t, fwhm) # make matching standard distribution gf = gamma(shape, scale=scale).pdf # get values L1t = gf(t) L2t = lambdify_t(g_exp)(t) # they are the same bar a scaling factor nz = np.abs(L1t) > 1e-15 sf = np.mean(L1t[nz] / L2t[nz]) assert_almost_equal(L1t , L2t*sf) def test_spm_hrf(): # Regression tests for spm hrf, time derivative and dispersion derivative # Check that absolute values don't change (much) with different dt, and that # max values are roughly the same and in the same place in time for dt in 0.1, 0.01, 0.001: t_vec = np.arange(0, 32, dt) hrf = spmt(t_vec) assert_almost_equal(np.max(hrf), 0.21053, 5) assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2) dhrf = dspmt(t_vec) assert_almost_equal(np.max(dhrf), 0.08, 3) assert_almost_equal(t_vec[np.argmax(dhrf)], 3.3, 1) dhrf = ddspmt(t_vec) assert_almost_equal(np.max(dhrf), 0.10, 2) assert_almost_equal(t_vec[np.argmax(dhrf)], 5.7, 1) # Test reversed time vector to check that order of time values does not # affect result rt_vec = np.arange(0, 32, 0.01) rhrf = spmt(rt_vec) assert_almost_equal(np.max(rhrf), 0.21053, 5) assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2) def test_spm_hrf_octave(): # Test SPM hrf against output from SPM code running in Octave my_path = dirname(__file__) hrfs_path = pjoin(my_path, 'spm_hrfs.mat') # mat file resulting from make_hrfs.m hrfs_mat = sio.loadmat(hrfs_path, squeeze_me=True) params = hrfs_mat['params'] hrfs = 
hrfs_mat['hrfs'] for i, pvec in enumerate(params): dt, ppk, upk, pdsp, udsp, rat = pvec t_vec = np.arange(0, 32.1, dt) our_hrf = spm_hrf_compat(t_vec, peak_delay=ppk, peak_disp=pdsp,
under_delay=upk, under_disp=udsp, p_u_ratio=rat) # Normalize integral to match SPM assert_almost_equal(our_hrf, hrfs[i]) # Test basis functions # mat file resulting from get_td_dd.m bases_path = pjoin(my_path, 'spm_bases.mat') bases_mat = sio.loadmat(bases_path, squeeze_me=True
) dt = bases_mat['dt'] t_vec = np.arange(0, 32 + dt, dt) # SPM function divides by sum of values - revert with dt assert_almost_equal(spmt(t_vec), bases_mat['hrf'] / dt, 4) assert_almost_equal(dspmt(t_vec), bases_mat['dhrf'] / dt, 4) assert_almost_equal(ddspmt(t_vec), bases_mat['ddhrf'] / dt, 4) def test_spm_hrf_errors(): t_vec = np.arange(0, 32) # All 1s is fine res = spm_hrf_compat(t_vec, 1, 1, 1, 1) # 0 or negative raise error for other args args = [0] for i in range(4): assert_raises(ValueError, spm_hrf_compat, t_vec, *args) args[-1] = -1 assert_raises(ValueError, spm_hrf_compat, t_vec, *args) args[-1] = 1 args.append(0)
#!/usr/bin/env python
"""Plot raw nanopore signal against basecalled / resegmented event labels."""
########################################################################
# File: plot_raw_read_alignment.py
#  executable: plot_raw_read_alignment.py
#
# Author: Andrew Bailey
# History: Created 12/01/17
########################################################################

from __future__ import print_function
import sys
import os
from timeit import default_timer as timer
import pysam
import matplotlib.pyplot as plt
import matplotlib.patches as mplpatches
import numpy as np
import scipy.stats as stats
import seaborn as sns
from py3helpers.utils import list_dir
from PyPore.parsers import SpeedyStatSplit
from nanonet.eventdetection.filters import minknow_event_detect
from nanotensor.fast5 import Fast5
from nanotensor.event_detection import resegment_reads, create_anchor_kmers, index_to_time_rna_basecall


def raw_scatter_plot(signal_data, label_data, outpath, interval):
    """Scatter-plot the raw signal over [interval[0], interval[1]] and mark
    each label's start/end with vertical lines plus its base letter.

    NOTE(review): `outpath` is currently unused because plt.savefig is
    commented out in favor of plt.show(). `label_data` is assumed to expose
    parallel sequences .start, .length and .base -- confirm against callers.
    """
    # define figure size: width scales with the displayed interval
    size = (interval[1] - interval[0]) / 100
    plt.figure(figsize=(size, 4))
    panel1 = plt.axes([0.01, 0.1, .95, .9])
    # longest = max(data[0]) + data[1])
    # panel1.set_xlim(0, 1000)
    mean = np.mean(signal_data)
    stdv = np.std(signal_data)
    # clamp the y-range to +/- 3 standard deviations around the mean signal
    panel1.set_ylim(mean - (3 * stdv), mean + (3 * stdv))
    panel1.set_xlim(interval[0], interval[1])
    # panel1.set_xscale("log")
    plt.scatter(x=range(len(signal_data)), y=signal_data, s=1, c="k")
    plt.title('Nanopore Read')
    for i in range(len(label_data.start)):
        # only annotate labels that fall inside the displayed window
        if interval[0] < label_data.start[i] < interval[1]:
            panel1.text(label_data.start[i] + (label_data.length[i] / 2), 2,
                        "{}".format(label_data.base[i]), fontsize=10, va="bottom", ha="center")
            panel1.axvline(label_data.start[i])
            panel1.axvline(label_data.start[i] + label_data.length[i])
    plt.show()
    # plt.savefig(outpath)


def raw_scatter_plot_with_events(signal_data, label_data, outpath, interval, events):
    """Same as raw_scatter_plot, additionally drawing a dashed red vertical
    line at each event peak position inside the displayed interval.

    NOTE(review): `outpath` is unused here as well (savefig commented out).
    """
    # define figure size
    size = (interval[1] - interval[0]) / 75
    plt.figure(figsize=(size, 4))
    panel1 = plt.axes([0.01, 0.1, .95, .9])
    # longest = max(data[0]) + data[1])
    # panel1.set_xlim(0, 1000)
    mean = np.mean(signal_data)
    stdv = np.std(signal_data)
    panel1.set_ylim(mean - (3 * stdv), mean + (3 * stdv))
    panel1.set_xlim(interval[0], interval[1])
    # panel1.set_xscale("log")
    plt.scatter(x=range(len(signal_data)), y=signal_data, s=1, c="k")
    plt.title('Nanopore Read')
    for i in range(len(label_data.start)):
        if interval[0] < label_data.start[i] < interval[1]:
            panel1.text(label_data.start[i] + (label_data.length[i] / 2), 2,
                        "{}".format(label_data.base[i]), fontsize=10, va="bottom", ha="center")
            panel1.axvline(label_data.start[i])
            panel1.axvline(label_data.start[i] + label_data.length[i])
    for event_peak in events:
        # dashed red markers for detected event boundaries in the window
        if interval[0] < event_peak < interval[1]:
            panel1.axvline(event_peak, linestyle='--', color='r')
    plt.show()
    # plt.savefig(outpath)


def plot_raw_reads(current, old_events, resegment=None, dna=False, sampling_freq=4000, start_time=0,
                   window_size=None):
    """Plot raw reads using ideas from Ryan Lorig-Roach's script.

    Draws the raw current trace plus step plots of the original events
    (blue) and, optionally, resegmented events (red).

    NOTE(review): for DNA reads the event start/length fields are assumed to
    be in seconds and are converted to raw sample indices using start_time
    and sampling_freq -- confirm against the Fast5 event table schema. When
    window_size is set, a random window between the first and last event
    start is shown.
    """
    fig1 = plt.figure(figsize=(24, 3))
    panel = fig1.add_subplot(111)
    prevMean = 0
    handles = list()
    handle, = panel.plot(current, color="black", lw=0.2)
    handles.append(handle)
    start = 0
    if window_size:
        start = old_events[0]["start"]
        end = old_events[-1]["start"]
        if dna:
            # convert seconds -> raw sample index (presumably; see note above)
            start = (start - (start_time / sampling_freq)) * sampling_freq
            end = (end - (start_time / sampling_freq)) * sampling_freq
        start = np.random.randint(start, end - window_size)
    # print(start, end - window_size)
    # print(len(old_events), len(resegment))
    for j, segment in enumerate(old_events):
        x0 = segment["start"]
        x1 = x0 + segment["length"]
        if dna:
            x0 = (x0 - (start_time / sampling_freq)) * sampling_freq
            x1 = (x1 - (start_time / sampling_freq)) * sampling_freq
        if start < x0 < (start + window_size):
            kmer = segment["model_state"]
            mean = segment['mean']
            color = [.082, 0.282, 0.776]
            # horizontal line at the event mean over its extent
            handle1, = panel.plot([x0, x1], [mean, mean], color=color, lw=0.8)
            panel.plot([x0, x0], [prevMean, mean], color=color, lw=0.5)  # <-- uncomment for pretty square wave
            # panel.text(x0, mean - 2, bytes.decode(kmer), fontsize=5)
            prevMean = mean
            handles.append(handle1)
    panel.set_title("Signal")
    panel.set_xlabel("Time (ms)")
    panel.set_ylabel("Current (pA)")
    if resegment is not None:
        color = [1, 0.282, 0.176]
        prevMean = 0
        for indx, segment in enumerate(resegment):
            kmer = segment["model_state"]
            x0 = segment["raw_start"]
            x1 = x0 + segment["raw_length"]
            mean = segment['mean']
            if start < x0 < start + window_size:
                handle2, = panel.plot([x0, x1], [mean, mean], color=color, lw=0.8)
                panel.plot([x0, x0], [prevMean, mean], color=color, lw=0.5)  # <-- uncomment for pretty square wave
                panel.text(x0, mean + 2, bytes.decode(kmer), fontsize=5)
                prevMean = mean
                handles.append(handle2)
    # shrink the axes to leave room for the legend on the right
    box = panel.get_position()
    panel.set_position([box.x0, box.y0, box.width * 0.95, box.height])
    if len(handles) == 3:
        plt.legend(handles, ["Raw", "OriginalSegment", "New Segment"], loc='upper left', bbox_to_anchor=(1, 1))
    else:
        plt.legend(handles, ["Raw", "OriginalSegment"], loc='upper left', bbox_to_anchor=(1, 1))
    plt.show()


def plot_segmented_comparison(fast5_handle, window_size=None):
    """Plot read with segmented lines and kmers.

    :param fast5_handle: Fast5 instance where there is already a resegemented analysis table
    :param window_size: size of window to display instead of whole file
    """
    events = fast5_handle.get_basecall_data()
    signal = fast5_handle.get_read(raw=True, scale=True)
    resegment_events = fast5_handle.get_resegment_basecall()
    if fast5_handle.is_read_rna():
        # RNA reads: overlay the resegmented events; no time conversion needed
        plot_raw_reads(signal, events, resegment=resegment_events, window_size=window_size)
    else:
        # DNA reads: convert event times to raw indices inside plot_raw_reads
        start_time = fast5_handle.raw_attributes["start_time"]
        sampling_freq = fast5_handle.sample_rate
        plot_raw_reads(signal, events, resegment=None, dna=True, sampling_freq=sampling_freq,
                       start_time=start_time, window_size=window_size)


def main():
    """Load a local fast5 file and plot its segmentation comparison.

    NOTE(review): hard-coded developer paths; `minknow_params`,
    `speedy_params` and `rna_reads` are currently unused (related calls are
    commented out).
    """
    start = timer()
    minknow_params = dict(window_lengths=(5, 10), thresholds=(2.0, 1.1), peak_height=1.2)
    speedy_params = dict(min_width=5, max_width=30, min_gain_per_sample=0.008, window_width=800)
    dna_reads = "/Users/andrewbailey/CLionProjects/nanopore-RNN/test_files/minion-reads/canonical/"
    files = list_dir(dna_reads, ext='fast5')
    rna_reads = "/Users/andrewbailey/CLionProjects/nanopore-RNN/test_files/minion-reads/rna_reads"
    # files = list_dir(rna_reads, ext='fast5')
    print(files[0])
    f5fh = Fast5(files[0])
    # f5fh = resegment_reads(files[0], minknow_params, speedy=False, overwrite=True)
    plot_segmented_comparison(f5fh, window_size=3000)
    stop = timer()
    print("Running Time = {} seconds".format(stop - start), file=sys.stderr)


if __name__ == "__main__":
    main()
    raise SystemExit
f.identifier, self.datatype.subtype.__name__ )) # value is mutated into a new array value = self.datatype(value) # if it's an array, make sure it's valid regarding arrayIndex provided elif issubclass(self.datatype, List): if _debug: Property._debug(" - property is list, checking subtype") # changing a single element if arrayIndex is not None: raise ExecutionError(errorClass='property', errorCode='propertyIsNotAnArray') # replacing the array if not isinstance(value, list): raise InvalidParameterDatatype("elements of %s must be of type %s" % ( self.identifier, self.datatype.subtype.__name__ )) # check validity regarding subtype for item in value: # if it's atomic, make sure it's valid if issubclass(self.datatype.subtype, Atomic): if _debug: Property._debug(" - subtype is atomic, checking value") if not self.datatype.subtype.is_valid(item): raise InvalidParameterDatatype("elements of %s must be of type %s" % ( self.identifier, self.datatype.subtype.__name__, )) # constructed type elif not isinstance(item, self.datatype.subtype): raise InvalidParameterDatatype("elements of %s must be of type %s" % ( self.identifier, self.datatype.subtype.__name__ )) # value is mutated into a new list value = self.datatype(value) # some kind of constructed data elif not isinstance(value, self.datatype): if _debug: Property._debug(" - property is not atomic and wrong type") raise InvalidParameterDatatype("%s must be of type %s" % ( self.identifier, self.datatype.__name__, )) # local check if the property is monitored is_monitored = self.identifier in obj._property_monitors if arrayIndex is not None: if not issubclass(self.datatype, Array): raise ExecutionError(errorClass='property', errorCode='propertyIsNotAnArray') # check the array arry = obj._values[self.identifier] if arry is None: raise RuntimeError("%s uninitialized array" % (self.identifier,)) if is_monitored: old_value = _copy(arry) # seems to be OK, let the array object take over if _debug: Property._debug(" - forwarding to 
array") try: arry[arrayIndex] = value except IndexError: raise ExecutionError(errorClass='property', errorCode='invalidArrayIndex') except TypeError: raise ExecutionError(errorClass='property', errorCode='valueOutOfRange') # check for monitors, call each one with the old and new value if is_monitored: for fn in obj._property_monitors[self.identifier]: if _debug: Property._debug(" - monitor: %r", fn) fn(old_value, arry) else:
if is_monitored: old_value = obj._values.get
(self.identifier, None) # seems to be OK obj._values[self.identifier] = value # check for monitors, call each one with the old and new value if is_monitored: for fn in obj._property_monitors[self.identifier]: if _debug: Property._debug(" - monitor: %r", fn) fn(old_value, value) # # StandardProperty # @bacpypes_debugging class StandardProperty(Property): def __init__(self, identifier, datatype, default=None, optional=True, mutable=True): if _debug: StandardProperty._debug("__init__ %s %s default=%r optional=%r mutable=%r", identifier, datatype, default, optional, mutable ) # use one of the subclasses if not isinstance(self, (OptionalProperty, ReadableProperty, WritableProperty)): raise ConfigurationError(self.__class__.__name__ + " must derive from OptionalProperty, ReadableProperty, or WritableProperty") # validate the identifier to be one of the standard property enumerations if identifier not in PropertyIdentifier.enumerations: raise ConfigurationError("unknown standard property identifier: %s" % (identifier,)) # continue with the initialization Property.__init__(self, identifier, datatype, default, optional, mutable) # # OptionalProperty # @bacpypes_debugging class OptionalProperty(StandardProperty): """The property is optional and need not be present.""" def __init__(self, identifier, datatype, default=None, optional=True, mutable=False): if _debug: OptionalProperty._debug("__init__ %s %s default=%r optional=%r mutable=%r", identifier, datatype, default, optional, mutable ) # continue with the initialization StandardProperty.__init__(self, identifier, datatype, default, optional, mutable) # # ReadableProperty # @bacpypes_debugging class ReadableProperty(StandardProperty): """The property is required to be present and readable using BACnet services.""" def __init__(self, identifier, datatype, default=None, optional=False, mutable=False): if _debug: ReadableProperty._debug("__init__ %s %s default=%r optional=%r mutable=%r", identifier, datatype, default, 
optional, mutable ) # continue with the initialization StandardProperty.__init__(self, identifier, datatype, default, optional, mutable) # # WritableProperty # @bacpypes_debugging class WritableProperty(StandardProperty): """The property is required to be present, readable, and writable using BACnet services.""" def __init__(self, identifier, datatype, default=None, optional=False, mutable=True): if _debug: WritableProperty._debug("__init__ %s %s default=%r optional=%r mutable=%r", identifier, datatype, default, optional, mutable ) # continue with the initialization StandardProperty.__init__(self, identifier, datatype, default, optional, mutable) # # ObjectIdentifierProperty # @bacpypes_debugging class ObjectIdentifierProperty(ReadableProperty): def WriteProperty(self, obj, value, arrayIndex=None, priority=None, direct=False): if _debug: ObjectIdentifierProperty._debug("WriteProperty %r %r arrayIndex=%r priority=%r", obj, value, arrayIndex, priority) # make it easy to default if value is None: pass elif isinstance(value, int): value = (obj.objectType, value) elif isinstance(value, tuple) and len(value) == 2: if value[0] != obj.objectType: raise ValueError("%s required" % (obj.objectType,)) else: raise TypeError("object identifier") return Property.WriteProperty( self, obj, value, arrayIndex, priority, direct ) # # Object # @bacpypes_debugging class Object: _debug_contents = ('_app',) _object_supports_cov = False properties = \ [ ObjectIdentifierProperty('objectIdentifier', ObjectIdentifier, optional=False) , ReadableProperty('objectName', CharacterString, optional=False) , OptionalProperty('description', CharacterString) , OptionalProperty('profileName', CharacterString) , ReadableProperty('propertyList', ArrayOf(PropertyIdentifier)) , OptionalProperty('auditLevel', AuditLevel) , OptionalProperty('auditableOperations', AuditOperationFlags)
eng Liang. Date: 2012.09.24 Email: oriental-cds@163.com ############################################################################## """ # Core Library modules import string # Third party modules from rdkit import Chem # First party modules from PyBioMed.PyGetMol import Getmol as getmol from PyBioMed.PyMolecule import ( AtomTypes, basak, bcut, cats2d, charge, connectivity, constitution, estate, fingerprint, geary, ghosecrippen, kappa, moe, molproperty, moran, moreaubroto, topology, ) Version = 1.0 FingerprintName = [ "FP2", "FP3", "FP4", "topological", "Estate", "atompairs", "torsions", "morgan", "ECFP2", "ECFP4", "ECFP6", "MACCS", "FCFP2", "FCFP4", "FCFP6", "Pharm2D2point", "Pharm2D3point", "GhoseCrippen", "PubChem", ] ############################################################################## class PyMolecule: """ ################################################################# A PyDrug class used for computing drug descriptors. ################################################################# """ def __init__(self): """ ################################################################# constructor of PyMolecule. ################################################################# """ pass def ReadMolFromMOL(self, filename=""): """ ################################################################# Read a molecule by SDF or MOL file format. Usage: res=ReadMolFromFile(filename) Input: filename is a file name. Output: res is a molecule object. ################################################################# """ self.mol = Chem.MolFromMolMOL(filename) return self.mol def ReadMolFromSmile(self, smi=""): """ ################################################################# Read a molecule by SMILES string. Usage: res=ReadMolFromSmile(smi) Input: smi is a SMILES string. Output: res is a molecule object. 
################################################################# """ self.mol = Chem.MolFromSmiles(smi.strip()) return self.mol def ReadMolFromInchi(self, inchi=""): """ ################################################################# Read a molecule by Inchi string. Usage: res=ReadMolFromInchi(inchi) Input: inchi is a InChi string. Ou
tput: res is a molecule object. ################################################################# """ from openbabel import pybel temp = pybel.readstring("inchi", inchi) smi = temp.write("smi") self.mol = Chem.MolFromSmiles(smi.strip()) return self.mol def ReadMolFromMol(self, filename=""): """ ################################################################# Read a molecule with mol file format. Usage:
res=ReadMolFromMol(filename) Input: filename is a file name. Output: res is a molecule object. ################################################################# """ self.mol = Chem.MolFromMolFile(filename) return self.mol def GetMolFromNCBI(self, ID=""): """ ################################################################# Get a molecule by NCBI id (e.g., 2244). Usage: res=GetMolFromNCBI(ID) Input: ID is a compound ID (CID) in NCBI. Output: res is a SMILES string. ################################################################# """ res = getmol.GetMolFromNCBI(cid=ID) return res def GetMolFromEBI(self, ID=""): """ ################################################################# Get a molecule by EBI id. Usage: res=GetMolFromEBI(ID) Input: ID is a compound identifier in EBI. Output: res is a SMILES string. ################################################################# """ res = getmol.GetMolFromEBI(ID) return res def GetMolFromCAS(self, ID=""): """ ################################################################# Get a molecule by kegg id (e.g., 50-29-3). Usage: res=GetMolFromCAS(ID) Input: ID is a CAS identifier. Output: res is a SMILES string. ################################################################# """ res = getmol.GetMolFromCAS(casid=ID) return res def GetMolFromKegg(self, ID=""): """ ################################################################# Get a molecule by kegg id (e.g., D02176). Usage: res=GetMolFromKegg(ID) Input: ID is a compound identifier in KEGG. Output: res is a SMILES string. ################################################################# """ res = getmol.GetMolFromKegg(kid=ID) return res def GetMolFromDrugbank(self, ID=""): """ ################################################################# Get a molecule by drugbank id (e.g.,DB00133). Usage: res=GetMolFromDrugbank(ID) Input: ID is a compound identifier in Drugbank. Output: res is a SMILES string. 
################################################################# """ res = getmol.GetMolFromDrugbank(dbid=ID) return res def GetKappa(self): """ ################################################################# Calculate all kappa descriptors (7). Usage: res=GetKappa() res is a dict form. ################################################################# """ res = kappa.GetKappa(self.mol) return res def GetCharge(self): """ ################################################################# Calculate all charge descriptors (25). Usage: res=GetCharge() res is a dict form. ################################################################# """ res = charge.GetCharge(self.mol) return res def GetConnectivity(self): """ ################################################################# Calculate all conenctivity descriptors (44). Usage: res=GetConnectivity() res is a dict form. ################################################################# """ res = connectivity.GetConnectivity(self.mol) return res def GetConstitution(self): """ ################################################################# Calculate all constitutional descriptors (30). Usage: res=GetConstitution() res is a dict form. ################################################################# """ res = constitution.GetConstitutional(self.mol) return res def GetBasak(self): """ ################################################################# Calculate all basak's information content descriptors (21). Usage: res=GetBasak() res is a dict form. ################################################################# """ res = basak.Getbasak(self.mol) return res def GetBurden(self): """ ################################################################# Calculate all Burden descriptors (64). Usage: res=GetBurden() res is a dict form. ################################################################# """ res
# -*- coding: utf-8 -*- # # pymdstat documentation build configuration file, created by # sphinx-quickstart on Sat Dec 20 16:43:07 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General c
onfiguration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pymdstat' copyright = u'2014, Nicolargo' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. 
#show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pymdstatdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'pymdstat.tex', u'pymdstat Documentation', u'Nicolargo', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pymdstat', u'pymdstat Documentation', [u'Nicolargo'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'pymdstat', u'pymdstat Documentation', u'Nicolargo', 'pymdstat', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_memory.js

import struct
import sys
import unittest

import gdb_rsp
import test_files.test_memory as test_memory

# These are set up by Main().
COMMAND = None


class Tests(unittest.TestCase):
  """GDB remote-serial-protocol tests for Wasm memory, globals and stacks."""

  # Test that reading from an unreadable address gives a sensible error.
  def CheckReadMemoryAtInvalidAddr(self, connection):
    mem_addr = 0xffffffff
    result = connection.RspRequest('m%x,%x' % (mem_addr, 1))
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual.
    self.assertEqual(result, 'E02')

  def RunToWasm(self, connection, breakpoint_addr):
    """Run the debuggee until it traps at breakpoint_addr, then clean up."""
    # Set a breakpoint.
    reply = connection.RspRequest('Z0,%x,1' % breakpoint_addr)
    self.assertEqual(reply, 'OK')

    # When we run the program, we should hit the breakpoint.
    reply = connection.RspRequest('c')
    gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)

    # Remove the breakpoint.
    reply = connection.RspRequest('z0,%x,1' % breakpoint_addr)
    self.assertEqual(reply, 'OK')

  def test_reading_and_writing_memory(self):
    with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
      module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
      breakpoint_addr = module_load_addr + test_memory.FUNC0_START_ADDR
      self.RunToWasm(connection, breakpoint_addr)

      self.CheckReadMemoryAtInvalidAddr(connection)

      # Check reading code memory space.
      expected_data = b'\0asm'
      result = gdb_rsp.ReadCodeMemory(connection, module_load_addr, len(expected_data))
      self.assertEqual(result, expected_data)

      # Check reading instance memory at a valid range.
      reply = connection.RspRequest('qWasmMem:0;%x;%x' % (32, 4))
      value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
      # Deprecated assertEquals replaced with assertEqual.
      self.assertEqual(int(value), 0)

      # Check reading instance memory at an invalid range.
      reply = connection.RspRequest('qWasmMem:0;%x;%x' % (0xf0000000, 4))
      self.assertEqual(reply, 'E03')

  def test_wasm_global(self):
    with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
      module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
      breakpoint_addr = module_load_addr + test_memory.FUNC0_START_ADDR
      self.RunToWasm(connection, breakpoint_addr)

      # Check reading valid global.
      reply = connection.RspRequest('qWasmGlobal:0;0')
      value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
      self.assertEqual(0, value)

      # Check reading invalid global.
      reply = connection.RspRequest('qWasmGlobal:0;9')
      self.assertEqual("E03", reply)

  def test_wasm_call_stack(self):
    with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
      module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
      breakpoint_addr = module_load_addr + test_memory.FUNC0_START_ADDR
      self.RunToWasm(connection, breakpoint_addr)

      reply = connection.RspRequest('qWasmCallStack')
      stack = gdb_rsp.DecodeUInt64Array(reply)
      # At least two Wasm frames, plus one or more JS frames.
      # Use a unittest assertion (a bare assert is stripped under -O and
      # gives no diagnostic output on failure).
      self.assertGreater(len(stack), 2)
      self.assertEqual(stack[0], module_load_addr + test_memory.FUNC0_START_ADDR)
      self.assertEqual(stack[1], module_load_addr + test_memory.FUNC1_RETURN_ADDR)


def Main():
  index = sys.argv.index('--')
  args = sys.argv[index + 1:]
  # The remaining arguments go to unittest.main().
  global COMMAND
  COMMAND = args
  unittest.main(argv=sys.argv[:index])


if __name__ == '__main__':
  Main()
from django.db import models
from guardian.shortcuts import get_objects_for_user

from readthedocs.builds.constants import LATEST
from readthedocs.builds.constants import LATEST_VERBOSE_NAME
from readthedocs.builds.constants import STABLE
from readthedocs.builds.constants import STABLE_VERBOSE_NAME
from readthedocs.projects import constants


class ProjectManager(models.Manager):
    """Manager that filters Project querysets by per-user visibility."""

    def _add_user_repos(self, queryset, user):
        """Extend `queryset` with the projects `user` is allowed to view.

        NOTE(review): the authenticated branch returns the union without
        `.distinct()`, unlike the fall-through — looks inconsistent; confirm
        whether duplicates are possible here.
        """
        # Avoid circular import
        from readthedocs.projects.models import Project
        # Show all projects to super user
        if user.has_perm('projects.view_project'):
            return Project.objects.all().distinct()
        # Show user projects to user
        if user.is_authenticated():
            # Add in possible user-specific views
            user_queryset = get_objects_for_user(user, 'projects.view_project')
            return user_queryset | queryset
        # User has no special privs
        return queryset.distinct()

    def for_user_and_viewer(self, user, viewer, *args, **kwargs):
        """
        Show projects that a user owns, that another user can see.
        """
        queryset = self.filter(privacy_level=constants.PUBLIC)
        queryset = self._add_user_repos(queryset, viewer)
        queryset = queryset.filter(users__in=[user])
        return queryset

    def for_admin_user(self, user=None, *args, **kwargs):
        # Projects the user is a member of; anonymous users see nothing.
        if user.is_authenticated():
            return self.filter(users__in=[user])
        else:
            return self.none()

    def public(self, user=None, *args, **kwargs):
        """Public projects, plus anything `user` may additionally view."""
        queryset = self.filter(privacy_level=constants.PUBLIC)
        if user:
            return self._add_user_repos(queryset, user)
        else:
            return queryset

    def protected(self, user=None, *args, **kwargs):
        """Public and protected projects, plus user-visible extras."""
        queryset = self.filter(privacy_level__in=[constants.PUBLIC,
                                                  constants.PROTECTED])
        if user:
            return self._add_user_repos(queryset, user)
        else:
            return queryset

    # Aliases
    def dashboard(self, user=None, *args, **kwargs):
        return self.for_admin_user(user)

    def api(self, user=None, *args, **kwargs):
        return self.public(user)


class RelatedProjectManager(models.Manager):
    """Manager for models that point at a Project via a `project` FK."""

    def _add_user_repos(self, queryset, user=None, *args, **kwargs):
        """Widen `queryset` to rows whose project the user may view."""
        # Hack around get_objects_for_user not supporting global perms
        if user.has_perm('projects.view_project'):
            return self.get_queryset().all().distinct()
        if user.is_authenticated():
            # Add in possible user-specific views
            project_qs = get_objects_for_user(user, 'projects.view_project')
            pks = [p.pk for p in project_qs]
            queryset = self.get_queryset().filter(project__pk__in=pks) | queryset
        return queryset.distinct()

    def public(self, user=None, project=None, *args, **kwargs):
        """Rows on public projects, optionally narrowed to one project."""
        queryset = self.filter(project__privacy_level=constants.PUBLIC)
        if user:
            queryset = self._add_user_repos(queryset, user)
        if project:
            queryset = queryset.filter(project=project)
        return queryset

    def api(self, user=None, *args, **kwargs):
        return self.public(user)


class RelatedBuildManager(models.Manager):
    '''For models with association to a project through :py:cls:`Build`'''

    def _add_user_repos(self, queryset, user=None, *args, **kwargs):
        # Hack around get_objects_for_user not supporting global perms
        if user.has_perm('projects.view_project'):
            return self.get_queryset().all().distinct()
        if user.is_authenticated():
            # Add in possible user-specific views
            project_qs = get_objects_for_user(user,
                                              'projects.view_project')
            pks = [p.pk for p in project_qs]
            queryset = (self.get_queryset()
                        .filter(build__project__pk__in=pks) | queryset)
        return queryset.distinct()

    def public(self, user=None, project=None, *args, **kwargs):
        """Rows whose build's project is public, plus user-visible extras."""
        queryset = self.filter(build__project__privacy_level=constants.PUBLIC)
        if user:
            queryset = self._add_user_repos(queryset, user)
        if project:
            queryset = queryset.filter(build__project=project)
        return queryset

    def api(self, user=None, *args, **kwargs):
        return self.public(user)


class VersionManager(RelatedProjectManager):
    """Visibility manager for Versions: project perms AND version perms."""

    def _add_user_repos(self, queryset, user=None, *args, **kwargs):
        # First widen by project-level visibility, then by version-level perms.
        queryset = super(VersionManager, self)._add_user_repos(queryset, user)
        if user and user.is_authenticated():
            # Add in possible user-specific views
            user_queryset = get_objects_for_user(user, 'builds.view_version')
            queryset = user_queryset.distinct() | queryset
        elif user:
            # Hack around get_objects_for_user not supporting global perms
            global_access = user.has_perm('builds.view_version')
            if global_access:
                queryset = self.get_queryset().all().distinct()
        return queryset.distinct()

    def public(self, user=None, project=None, only_active=True,
               *args, **kwargs):
        """Public versions of public projects; `only_active` limits to active."""
        queryset = self.filter(project__privacy_level=constants.PUBLIC,
                               privacy_level=constants.PUBLIC)
        if user:
            queryset = self._add_user_repos(queryset, user)
        if project:
            queryset = queryset.filter(project=project)
        if only_active:
            queryset = queryset.filter(active=True)
        return queryset

    def api(self, user=None, *args, **kwargs):
        # The API also exposes inactive versions.
        return self.public(user, only_active=False)

    def create_stable(self, **kwargs):
        """Create the machine-managed 'stable' version; kwargs override defaults."""
        defaults = {
            'slug': STABLE,
            'verbose_name': STABLE_VERBOSE_NAME,
            'machine': True,
            'active': True,
            'identifier': STABLE,
            'type': 'tag',
        }
        defaults.update(kwargs)
        return self.create(**defaults)

    def create_latest(self, **kwargs):
        """Create the machine-managed 'latest' version; kwargs override defaults."""
        defaults = {
            'slug': LATEST,
            'verbose_name': LATEST_VERBOSE_NAME,
            'machine': True,
            'active': True,
            'identifier': LATEST,
            'type': 'branch',
        }
        defaults.update(kwargs)
        return self.create(**defaults)


class AdminPermission(object):
    """Centralized check for project-admin membership."""

    @classmethod
    def is_admin(cls, user, project):
        # Membership in project.users is what grants admin rights here.
        return user in project.users.all()


class AdminNotAuthorized(ValueError):
    """Raised when a user without admin rights attempts an admin action."""
    pass
#!/usr/bin/env python
# -*- coding: Latin-1 -*-
"""
@file    ParamEffectsOLD.py
@author  Sascha Krieg
@author  Daniel Krajzewicz
@author  Michael Behrisch
@date    2008-07-26
@version $Id: ParamEffectsOLD.py 22608 2017-01-17 06:28:54Z behrisch $

Creates files with a comparison of speeds for each edge
between the taxis and the average speed from the current edge.
Dependent of the frequency and the taxi quota.

SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors

This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
# NOTE(review): legacy Python 2 script (cPickle, dict.iteritems, list-returning
# dict.keys). Kept as-is; porting to Python 3 is out of scope here.
from __future__ import absolute_import
from __future__ import print_function
import random
import os.path
import profile
from cPickle import dump
from cPickle import load

# global vars
mainPath = "D:/Krieg/Projekte/Diplom/Daten/fcdQualitaet/"
# mainPath="F:/DLR/Projekte/Diplom/Daten/fcdQualitaet/"
edgeDumpPath = mainPath + "edgedumpFcdQuality_900_6Uhr.xml"
edgeDumpPicklePath = mainPath + "edgedumpFcdPickleDict.pickle"
vtypePath = mainPath + "vtypeprobeFcdQuality_1s_6Uhr.out.xml"
vtypePicklePath = mainPath + "vtypeprobePickleDict.pickle"
vehPicklePath = mainPath + "vehiclePickleList.pickle"
outputPath = mainPath + "output/simResult_"
simStartTime = 21600  # =6 o'clock ->begin in edgeDump
# period in seconds | single element or a hole list
period = [5, 10, 20, 30, 40, 50, 60, 90, 120]
# how many taxis in percent of the total vehicles | single element or a
# hole list
quota = [0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 3.0, 5.0, 10.0]


def main():
    """Drive one output file per (period, quota) combination.

    NOTE(review): rebinds the module-level `period` and `quota` inside the
    loops; the helper functions below read those globals implicitly.
    """
    global period, quota
    print("start program")
    # Parsed inputs are cached as pickles; `make` decides parse vs. load.
    edgeDumpDict = make(edgeDumpPicklePath, edgeDumpPath, readEdgeDump)
    vtypeDict = make(vtypePicklePath, vtypePath, readVtype)
    vehList = make(
        vehPicklePath, vtypePicklePath, getVehicleList, False, vtypeDict)
    vehSum = len(vehList)

    # Normalize scalars to one-element lists so the loops below always work.
    if type(period) != list:
        period = [period]
    if type(quota) != list:
        quota = [quota]
    pList = period
    qList = quota

    for period in pList:
        for quota in qList:
            print("create output for: period ", period, " quota ", quota)
            taxis = chooseTaxis(vehList)
            taxiSum = len(taxis)
            vtypeDictR = reduceVtype(vtypeDict, taxis)
            del taxis
            createOutput(edgeDumpDict, vtypeDictR, vehSum, taxiSum)
    print("end")


def readEdgeDump():
    """Get for each interval all edges with corresponding speed.

    Parses the edge-dump XML by splitting lines on '"' — field positions
    (words[1], words[13]) are tied to the exact attribute order in the file.
    """
    edgeDumpDict = {}
    begin = False
    interval = 0
    inputFile = open(edgeDumpPath, 'r')
    for line in inputFile:
        words = line.split('"')
        if not begin and words[0].find("<end>") != -1:
            words = words[0].split(">")
            interval = int(words[1][:-5])
            edgeDumpDict.setdefault(interval, [])
        elif words[0].find("<interval") != -1 and int(words[1]) >= simStartTime:
            interval = int(words[1])
            begin = True
        if begin and words[0].find("<edge id") != -1:
            edge = words[1]
            speed = float(words[13])
            edgeDumpDict.setdefault(interval, []).append((edge, speed))
    inputFile.close()
    return edgeDumpDict


def readVtype():
    """Gets all necessary information of all vehicles.

    Same split-on-'"' parsing approach as readEdgeDump; indices are
    position-dependent on the vtypeprobe XML attribute order.
    """
    vtypeDict = {}
    timestep = 0
    begin = False
    inputFile = open(vtypePath, 'r')
    for line in inputFile:
        words = line.split('"')
        if words[0].find("<timestep ") != -1 and int(words[1]) >= simStartTime:
            timestep = int(words[1])
            begin = True
        if begin and words[0].find("<vehicle id=") != -1:
            # time id edge speed
            vtypeDict.setdefault(timestep, []).append(
                (words[1], words[3][:-2], words[15]))
            # break
    inputFile.close()
    return vtypeDict


def getVehicleList(vtypeDict):
    """Collects all vehicles used in the simulation."""
    vehSet = set()
    for timestepList in vtypeDict.values():
        for elm in timestepList:
            vehSet.add(elm[0])
    return list(vehSet)


def make(source, dependentOn, builder, buildNew=False, *builderParams):
    """Fills the target (a variable) with Information of source (pickelt var).

    It Checks if the pickle file is up to date in comparison to the
    dependentOn file. If not the builder function is called. If buildNew is
    True the builder function is called anyway.
    """
    # check if pickle file exists
    if not os.path.exists(source):
        buildNew = True
    # check date
    # if source is newer
    if not buildNew and os.path.getmtime(source) > os.path.getmtime(dependentOn):
        print("load source: ", os.path.basename(source), "...")
        target = load(open(source, 'rb'))
    else:
        print("build source: ", os.path.basename(source), "...")
        target = builder(*builderParams)
        # pickle the target (protocol 1 for compactness)
        dump(target, open(source, 'wb'), 1)
    print("Done!")
    return target


def chooseTaxis(vehList):
    """ Chooses from the vehicle list random vehicles with should act as taxis.

    Reads the module-level `quota` (a percentage). NOTE(review): shuffles
    `vehList` in place, so the caller's list order changes.
    """
    # calc absolute amount of taxis
    taxiNo = int(round(quota * len(vehList) / 100))
    random.shuffle(vehList)
    return vehList[:taxiNo]


def reduceVtype(vtypeDict, taxis):
    """Reduces the vtypeDict to the relevant information.

    Keeps only timesteps aligned to the module-level `period` and only the
    chosen taxi vehicles.
    """
    newVtypeDict = {}
    for timestep in vtypeDict:
        # timesteps which are a multiple of the period
        if timestep % period == 0:
            newVtypeDict[timestep] = (
                [tup for tup in vtypeDict[timestep] if tup[0] in taxis])
    return newVtypeDict


def createOutput(edgeDumpDict, vtypeDict, vehSum, taxiSum):
    """Creates a file with a comparison of speeds for each edge
    between the taxis and the average speed from the current edge."""
    # Python 2: dict.keys() returns a list, sortable in place.
    intervalList = edgeDumpDict.keys()
    intervalList.sort()
    # Interval length inferred from the first two interval starts.
    interval = intervalList[1] - intervalList[0]
    outputFile = open(
        outputPath + str(period) + "s_" + str(quota) + "%.out.xml", 'w')

    outputFile.write('<?xml version="1.0"?>\n')
    outputFile.write('<results simStart="%d" interval="%d" taxiQuota="%.3f" period="%d" vehicles="%d" taxis="%d">\n' % (
        simStartTime, interval, quota, period, vehSum, taxiSum))
    for i in intervalList[:-1]:  # each interval
        outputFile.write('\t<interval begin="%d" end="%d">\n' %
                         (i, i + interval - 1))
        intEdges = {}
        for timestep, taxiList in vtypeDict.iteritems():
            # for each timestep in the interval
            if i < timestep < intervalList[intervalList.index(i) + 1]:
                for tup in taxiList:  # all elements in this timestep
                    # add speed entry to the relevant edge
                    intEdges.setdefault(tup[1], []).append(float(tup[2]))
        # wirte results for every founded edge
        for edge, v in edgeDumpDict[i]:
            if edge in intEdges:
                vList = intEdges[edge]
                meanV = sum(vList) / len(vList)
                # NOTE(review): `abs` shadows the builtin within this loop
                # body; harmless here but worth renaming upstream.
                abs = meanV - v
                rel = abs / v * 100
                outputFile.write(
                    '\t\t<edge id="%s" simSpeed="%.2f" fcdSpeed="%.2f" absDeviation="%.2f" relDeviation="%.2f"/>\n' % (edge, v, meanV, abs, rel))
        outputFile.write('\t</interval>\n')
    outputFile.write('</results>')
    outputFile.close()


# start the program
# profile.run('main()')
main()
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
from sys import version_info

if version_info[0:2] < (2, 6):
    # The built-in bin() first appeared in Python 2.6; provide a
    # behavior-compatible backport for older interpreters.
    def bin(value):
        """Return the binary string representation of an integer.

        Matches the built-in: '0b101' for 5, '-0b101' for -5, '0b0' for 0.
        """
        if value == 0:
            return '0b0'
        if value < 0:
            prefix = '-0b'
            value = abs(value)
        else:
            prefix = '0b'
        digits = []
        while value:
            digits.append('1' if value & 1 == 1 else '0')
            value >>= 1
        return prefix + ''.join(reversed(digits))

else:
    # Modern interpreters: just re-export the built-in under this name.
    bin = bin
# NOTE(review): this chunk starts mid-definition — the text below is the tail
# of MockSolrInterface.__init__ (its signature, presumably
# `def __init__(self, site_url=None):`, precedes the chunk), and the module
# imports (unittest, pytest, sunburnt, settings, SearchWrapper,
# InvalidFieldError, InvalidQueryError) are also out of view.
_url=None):
        self.site_url = site_url

    def query(self, *args, **kwargs):
        # NOTE(review): this rebinds the instance attribute `query` over the
        # method name on first call — later attribute access returns the
        # MockSolrQuery, not this method. Appears intentional so tests can
        # inspect `self.msi.query`, but confirm it is only ever called once.
        self.query = MockSolrQuery()
        return self.query


class MockSolrQuery:
    """Records the chained query-builder calls the code under test makes."""

    def __init__(self):
        self.query_call_count = 0
        self.query_args = []
        self.field_list = None
        self.sort_field = None
        self.has_free_text_query = False
        self.extra = {}
        self.score = False

    def query(self, *args, **kwargs):
        # Count and record every query() call; chainable like sunburnt.
        self.query_call_count += 1
        self.query_args.append([args, kwargs])
        return self

    def field_limit(self, fields=None, score=False):
        self.field_list = fields
        self.score = score
        return self

    def sort_by(self, sort_field):
        self.sort_field = sort_field
        return self

    def add_extra(self, **kwargs):
        self.extra.update(kwargs)
        return self


class SearchWrapperTests(unittest.TestCase):
    """Field-whitelist behaviour per user level (General/Partner/Unlimited)."""

    def setUp(self):
        self.msi = MockSolrInterface()

    def test_general_user_can_not_request_field_not_in_whitelist(self):
        sw = SearchWrapper('General User', 'eldis', self.msi)
        extra_field = 'contact_position'
        # Sanity-check the fixture field really is outside both whitelists.
        self.assertTrue(extra_field not in settings.GENERAL_FIELDS)
        self.assertTrue(extra_field not in settings.ADMIN_ONLY_FIELDS)
        self.assertRaises(InvalidFieldError, sw.restrict_fields_returned,
                          'short', {'extra_fields': extra_field})

    def test_partner_user_can_request_field_not_in_whitelist(self):
        sw = SearchWrapper('Partner', 'eldis', self.msi)
        extra_field = 'contact_position'
        self.assertTrue(extra_field not in settings.GENERAL_FIELDS)
        self.assertTrue(extra_field not in settings.ADMIN_ONLY_FIELDS)
        sw.restrict_fields_returned('short', {'extra_fields': extra_field})
        self.assertTrue(extra_field in self.msi.query.field_list)

    def test_partner_user_can_not_request_admin_only_field(self):
        sw = SearchWrapper('Partner', 'eldis', self.msi)
        extra_field = 'legacy_id'
        self.assertTrue(extra_field in settings.ADMIN_ONLY_FIELDS)
        self.assertRaises(InvalidFieldError, sw.restrict_fields_returned,
                          'short', {'extra_fields': extra_field})

    def test_admin_user_can_request_field_admin_only_field(self):
        sw = SearchWrapper('Unlimited', 'eldis', self.msi)
        extra_field = 'legacy_id'
        self.assertTrue(extra_field in settings.ADMIN_ONLY_FIELDS)
        sw.restrict_fields_returned('short', {'extra_fields': extra_field})
        self.assertTrue(extra_field in self.msi.query.field_list)

    def test_request_score_pseudo_field(self):
        # 'score' is not a stored field; it maps to field_limit(score=True).
        sw = SearchWrapper('Unlimited', 'eldis', self.msi)
        sw.restrict_fields_returned('short', {'extra_fields': 'score'})
        self.assertTrue(self.msi.query.score)


class SearchWrapperAddSortTests(unittest.TestCase):
    """Sort-parameter handling, including the SORT_MAPPING override table."""

    def setUp(self):
        self.msi = MockSolrInterface()
        # NOTE(review): mutating module-level settings here leaks across
        # tests unless every test (re)sets what it reads — they appear to.
        settings.SORT_MAPPING = {'dummy': 'dummy_sort'}

    def test_add_sort_method_disallows_mixed_asc_and_desc_sort(self):
        sw = SearchWrapper('General User', 'eldis', self.msi)
        search_params = {'sort_asc': 'title', 'sort_desc': 'title'}
        self.assertRaises(InvalidQueryError, sw.add_sort, search_params,
                          'assets')

    def test_add_descending_sort_inverts_field(self):
        sw = SearchWrapper('General User', 'eldis', self.msi)
        sw.add_sort({'sort_desc': 'title'}, 'assets')
        self.assertEqual(self.msi.query.sort_field, '-title')

    def test_add_sort_with_no_mapping(self):
        sw = SearchWrapper('General User', 'eldis', self.msi)
        sw.add_sort({'sort_asc': 'title'}, 'assets')
        self.assertEqual(self.msi.query.sort_field, 'title')

    def test_add_sort_with_mapping(self):
        """
        Sort parameters should be overridable by the user
        via a mapping dictionary.
        """
        settings.SORT_MAPPING = {'title': 'title_sort'}
        sw = SearchWrapper('General User', 'eldis', self.msi)
        sw.add_sort({'sort_asc': 'title'}, 'assets')
        self.assertEqual(self.msi.query.sort_field, 'title_sort')

    def test_add_sort_default_ordering_when_no_sort_params(self):
        """
        If there are no sort parameters in the request AND there is no
        free text query, the sort order is determined using the sort
        object mapping. Sort field mapping should still take place.
        """
        settings.DEFAULT_SORT_OBJECT_MAPPING = {
            'countries': {'field': 'title', 'ascending': True},
        }
        settings.SORT_MAPPING = {'title': 'title_sort'}
        sw = SearchWrapper('General User', 'eldis', self.msi)
        sw.add_sort(dict(), 'countries')
        self.assertEqual(self.msi.query.sort_field, 'title_sort')

    def test_add_sort_no_default_ordering_when_free_text_query(self):
        """
        Free text queries should have no default sort order set.
        """
        settings.DEFAULT_SORT_FIELD = 'title'
        settings.DEFAULT_SORT_ASCENDING = True
        settings.SORT_MAPPING = {'title': 'title_sort'}
        sw = SearchWrapper('General User', 'eldis', self.msi)
        sw.has_free_text_query = True
        sw.add_sort(dict(), 'assets')
        self.assertIsNone(self.msi.query.sort_field)

    def test_add_sort_allows_ordering_when_free_text_query(self):
        """
        Free text queries should still be sortable if a sort order
        is specified.
        """
        settings.DEFAULT_SORT_FIELD = 'title'
        settings.DEFAULT_SORT_ASCENDING = True
        settings.SORT_MAPPING = {'title': 'title_sort'}
        sw = SearchWrapper('General User', 'eldis', self.msi)
        sw.has_free_text_query = True
        sw.add_sort({'sort_desc': 'title'}, 'assets')
        self.assertEqual(self.msi.query.sort_field, '-title_sort')


@pytest.mark.xfail(reason="Already broken in tag idsapi_14")
class SearchWrapperAddFreeTextQueryTests(unittest.TestCase):
    # 2014-02-05, HD: we just pass through most of this untouched now
    # and let dismax sort it out

    @classmethod
    def setUpClass(cls):
        # TODO: there doesn't seem to be a easy way to just test the query
        # building behaviour with out building a real connection.
        cls.si = sunburnt.SolrInterface(settings.SOLR_SERVER_URLS['eldis'])

    def setUp(self):
        self.msi = MockSolrInterface()
        self.sw = SearchWrapper('General User', 'eldis',
                                SearchWrapperAddFreeTextQueryTests.si)

    def solr_q(self):
        # The raw 'q' string the wrapper would send to Solr.
        return self.sw.si_query.options()['q']

    def test_free_text_query_has_implicit_or(self):
        self.sw.add_free_text_query('brazil health ozone')
        self.assertEqual(self.solr_q(), 'brazil\\ health\\ ozone')

    def test_free_text_query_supports_single_and_operator(self):
        self.sw.add_free_text_query('brazil and health')
        self.assertEqual(self.solr_q(), 'brazil\\ and\\ health')

    def test_free_text_query_supports_single_and_operator_with_implicit_or(self):
        self.sw.add_free_text_query('brazil and health ozone')
        self.assertEqual(self.solr_q(), 'brazil\\ and\\ health\\ ozone')

    def test_free_text_query_supports_single_and_operator_alternative(self):
        self.sw.add_free_text_query('brazil & health ozone')
        self.assertEqual(self.solr_q(), 'brazil\\ \\&\\ health\\ ozone')

    def test_free_text_query_supports_single_and_operator_alternative_with_no_spaces(self):
        self.sw.add_free_text_query('brazil&health ozone')
        self.assertEqual(self.solr_q(), 'brazil\\&health\\ ozone')

    def test_free_text_query_supports_multiple_and_operator(self):
        self.sw.add_free_text_query('brazil and health and ozone')
        self.assertEqual(self.solr_q(), 'brazil\\ and\\ health\\ and\\ ozone')

    def test_free_text_query_ignores_disconnected_and(self):
        self.sw.add_free_text_query('brazil and health ozone and')
        self.assertEqual(self.solr_q(), 'brazil\\ and\\ health\\ ozone\\ and')

    def test_free_text_query_ignores_and_at_start_of_string(self):
        self.sw.add_free_text_query('and brazil and health ozone')
        self.assertEqual(self.solr_q(), 'and\\ brazil\\ and\\ health\\ ozone')

    # NOTE(review): the chunk is truncated here mid-definition; the body of
    # this test (and possibly further tests) lies beyond the visible text.
    def test_f
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#


class BaseFlowAPI:
    """Base class for Flow API endpoint groups.

    Holds a reference to the low-level HTTP client and delegates all
    requests to it, optionally passing responses through the client's
    result handler.
    """

    def __init__(self, client=None):
        self._client = client

    def _get(self, url, handle_result=True, **kwargs):
        """Issue a GET via the client; post-process unless handle_result is False."""
        response = self._client.get(url, **kwargs)
        return self._handle_result(response) if handle_result else response

    def _post(self, url, handle_result=True, **kwargs):
        """Issue a POST via the client; post-process unless handle_result is False."""
        response = self._client.post(url, **kwargs)
        return self._handle_result(response) if handle_result else response

    def _handle_result(self, response):
        """Delegate response post-processing to the underlying client."""
        return self._client._handle_result(response)

    # Read-only views onto the wrapped client's connection details.

    @property
    def session(self):
        return self._client.session

    @property
    def ip(self):
        return self._client.ip

    @property
    def port(self):
        return self._client.port

    @property
    def version(self):
        return self._client.version
#  Copyright (C) 2012,2013
#      Max Planck Institute for Polymer Research
#  Copyright (C) 2008,2009,2010,2011
#      Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

r"""
******************************
espressopp.integrator.CapForce
******************************

This class can be used to forcecap all particles or a group of particles.
Force capping means that the force vector of a particle is rescaled
so that the length of the force vector is <= capforce

Example Usage:

>>> capforce = espressopp.integrator.CapForce(system, 1000.0)
>>> integrator.addExtension(capforce)

CapForce can also be used to forcecap only a group of particles:

>>> particle_group = [45, 67, 89, 103]
>>> capforce = espressopp.integrator.CapForce(system, 1000.0, particle_group)
>>> integrator.addExtension(capforce)

.. function:: espressopp.integrator.CapForce(system, capForce, particleGroup)

		:param system: the system the extension belongs to
		:param capForce: maximum allowed force magnitude (forces are rescaled to this length)
		:param particleGroup: (default: None) optional group of particles to cap; all particles if omitted/empty
		:type system:
		:type capForce:
		:type particleGroup:
"""

from espressopp.esutil import cxxinit
from espressopp import pmi

from espressopp.integrator.Extension import *
from _espressopp import integrator_CapForce


class CapForceLocal(ExtensionLocal, integrator_CapForce):
    """Worker-rank (local) part of the CapForce extension."""

    def __init__(self, system, capForce, particleGroup = None):
        # Only construct on ranks that participate in the active PMI group
        # (or when no PMI communicator is active at all).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # An absent or empty particleGroup means "cap all particles".
            if (particleGroup == None) or (particleGroup.size() == 0):
                cxxinit(self, integrator_CapForce, system, capForce)
            else:
                cxxinit(self, integrator_CapForce, system, capForce, particleGroup)

if pmi.isController :
    class CapForce(Extension, metaclass=pmi.Proxy):
        """Controller-side PMI proxy forwarding calls to CapForceLocal."""
        pmiproxydefs = dict(
            cls =  'espressopp.integrator.CapForceLocal',
            pmicall = ['setCapForce', 'setAbsCapForce', 'getCapForce', 'getAbsCapForce'],
            pmiproperty = [ 'particleGroup', 'adress' ]
            )
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API credential library.

This library contains classes that define how to serialize the
different credential objects Timesketch supports.
"""

import json


class TimesketchCredentials:
    """Class to store and retrieve credentials for Timesketch.

    Serialized form is a 10-byte, space-padded type tag followed by the
    subclass-specific byte payload.
    """

    # The type of credential object; subclasses override (max 10 chars).
    TYPE = ''

    def __init__(self):
        """Initialize the credential object."""
        self._credential = None

    @property
    def credential(self):
        """Returns the credentials back."""
        return self._credential

    @credential.setter
    def credential(self, credential_obj):
        """Sets the credential object."""
        self._credential = credential_obj

    def serialize(self):
        """Return serialized bytes object (type tag + payload)."""
        data = self.to_bytes()
        # Pad/truncate the type name to exactly 10 bytes so deserialize can
        # split the tag off at a fixed offset.
        type_string = bytes(self.TYPE, 'utf-8').rjust(10)[:10]
        return type_string + data

    def deserialize(self, data):
        """Deserialize a credential object from bytes.

        Args:
            data (bytes): serialized credential object.

        Raises:
            TypeError: if the embedded type tag does not match this class.
        """
        type_data = data[:10]
        type_string = type_data.decode('utf-8').strip()
        if not self.TYPE.startswith(type_string):
            raise TypeError('Not the correct serializer.')

        self.from_bytes(data[10:])

    def to_bytes(self):
        """Convert the credential object into bytes for storage."""
        raise NotImplementedError

    def from_bytes(self, data):
        """Deserialize a credential object from bytes.

        Args:
            data (bytes): serialized credential object.
        """
        raise NotImplementedError


class TimesketchPwdCredentials(TimesketchCredentials):
    """Username and password credentials for Timesketch authentication."""

    TYPE = 'timesketch'

    def from_bytes(self, data):
        """Deserialize a credential object from bytes.

        Args:
            data (bytes): serialized credential object.

        Raises:
            TypeError: if the data is not in bytes, is not valid JSON, or
                is missing the username or password key.
        """
        if not isinstance(data, bytes):
            raise TypeError('Data needs to be bytes.')

        try:
            data_dict = json.loads(data.decode('utf-8'))
        except ValueError as exc:
            raise TypeError('Unable to parse the byte string.') from exc

        # Idiom fix: use "not in" rather than "not 'x' in d".
        if 'username' not in data_dict:
            raise TypeError('Username is not set.')

        if 'password' not in data_dict:
            raise TypeError('Password is not set.')

        self._credential = data_dict

    def to_bytes(self):
        """Convert the credential object into bytes for storage."""
        if not self._credential:
            return b''

        data_string = json.dumps(self._credential)
        return bytes(data_string, 'utf-8')


class TimesketchOAuthCredentials(TimesketchCredentials):
    """OAUTH credentials for Timesketch authentication."""

    TYPE = 'oauth'

    def from_bytes(self, data):
        """Deserialize a credential object from bytes.

        Args:
            data (bytes): serialized credential object.

        Raises:
            TypeError: if the data is not in bytes or cannot be parsed.
        """
        # Lazy import: google-auth is only required when OAuth credentials
        # are actually used, so the module stays importable without it.
        from google.oauth2 import credentials

        if not isinstance(data, bytes):
            raise TypeError('Data needs to be bytes.')

        try:
            token_dict = json.loads(data.decode('utf-8'))
        except ValueError as exc:
            raise TypeError('Unable to parse the byte string.') from exc

        self._credential = credentials.Credentials(
            token=token_dict.get('token'),
            refresh_token=token_dict.get('_refresh_token'),
            id_token=token_dict.get('_id_token'),
            token_uri=token_dict.get('_token_uri'),
            client_id=token_dict.get('_client_id'),
            client_secret=token_dict.get('_client_secret')
        )

    def to_bytes(self):
        """Convert the credential object into bytes for storage."""
        if not self._credential:
            return b''

        cred_obj = self._credential
        data = {
            'token': cred_obj.token,
            '_scopes': getattr(cred_obj, '_scopes', []),
            '_refresh_token': getattr(cred_obj, '_refresh_token', ''),
            '_id_token': getattr(cred_obj, '_id_token', ''),
            '_token_uri': getattr(cred_obj, '_token_uri', ''),
            '_client_id': getattr(cred_obj, '_client_id', ''),
            '_client_secret': getattr(cred_obj, '_client_secret', ''),
        }
        if cred_obj.expiry:
            data['expiry'] = cred_obj.expiry.isoformat()

        data_string = json.dumps(data)
        return bytes(data_string, 'utf-8')
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .sub_resource import SubResource


# NOTE(review): AutoRest-generated msrest model — _attribute_map keys must
# stay in sync with the service's wire format; do not hand-edit the mapping.
class IPConfiguration(SubResource):
    """IP configuration.

    :param id: Resource ID.
    :type id: str
    :param private_ip_address: The private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The private IP allocation method.
     Possible values are 'Static' and 'Dynamic'. Possible values include:
     'Static', 'Dynamic'
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2017_08_01.models.IPAllocationMethod
    :param subnet: The reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2017_08_01.models.Subnet
    :param public_ip_address: The reference of the public IP resource.
    :type public_ip_address:
     ~azure.mgmt.network.v2017_08_01.models.PublicIPAddress
    :param provisioning_state: Gets the provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Maps Python attribute names to their JSON paths for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    # NOTE(review): `id` shadows the builtin, but the name is fixed by the
    # generated API surface and cannot be changed without breaking callers.
    def __init__(self, *, id: str=None, private_ip_address: str=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        super(IPConfiguration, self).__init__(id=id, **kwargs)
        self.private_ip_address = private_ip_address
        self.private_ip_allocation_method = private_ip_allocation_method
        self.subnet = subnet
        self.public_ip_address = public_ip_address
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
'''Autogenerated by xml_generate script, do not edit!'''
# Raw ctypes binding for the GL_NV_register_combiners OpenGL extension.
# This module only declares the extension's enum constants and entry points;
# error checking is delegated to the shared PyOpenGL error-checker.
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C

import ctypes
_EXTENSION_NAME = 'GL_NV_register_combiners'

def _f( function ):
    # Wrap a stub function as a GL entry point belonging to this extension,
    # attaching the standard PyOpenGL error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_register_combiners',error_checker=_errors._error_checker)

# Extension enum constants (values fixed by the GL_NV_register_combiners spec).
GL_BIAS_BY_NEGATIVE_ONE_HALF_NV=_C('GL_BIAS_BY_NEGATIVE_ONE_HALF_NV',0x8541)
GL_COLOR_SUM_CLAMP_NV=_C('GL_COLOR_SUM_CLAMP_NV',0x854F)
GL_COMBINER0_NV=_C('GL_COMBINER0_NV',0x8550)
GL_COMBINER1_NV=_C('GL_COMBINER1_NV',0x8551)
GL_COMBINER2_NV=_C('GL_COMBINER2_NV',0x8552)
GL_COMBINER3_NV=_C('GL_COMBINER3_NV',0x8553)
GL_COMBINER4_NV=_C('GL_COMBINER4_NV',0x8554)
GL_COMBINER5_NV=_C('GL_COMBINER5_NV',0x8555)
GL_COMBINER6_NV=_C('GL_COMBINER6_NV',0x8556)
GL_COMBINER7_NV=_C('GL_COMBINER7_NV',0x8557)
GL_COMBINER_AB_DOT_PRODUCT_NV=_C('GL_COMBINER_AB_DOT_PRODUCT_NV',0x8545)
GL_COMBINER_AB_OUTPUT_NV=_C('GL_COMBINER_AB_OUTPUT_NV',0x854A)
GL_COMBINER_BIAS_NV=_C('GL_COMBINER_BIAS_NV',0x8549)
GL_COMBINER_CD_DOT_PRODUCT_NV=_C('GL_COMBINER_CD_DOT_PRODUCT_NV',0x8546)
GL_COMBINER_CD_OUTPUT_NV=_C('GL_COMBINER_CD_OUTPUT_NV',0x854B)
GL_COMBINER_COMPONENT_USAGE_NV=_C('GL_COMBINER_COMPONENT_USAGE_NV',0x8544)
GL_COMBINER_INPUT_NV=_C('GL_COMBINER_INPUT_NV',0x8542)
GL_COMBINER_MAPPING_NV=_C('GL_COMBINER_MAPPING_NV',0x8543)
GL_COMBINER_MUX_SUM_NV=_C('GL_COMBINER_MUX_SUM_NV',0x8547)
GL_COMBINER_SCALE_NV=_C('GL_COMBINER_SCALE_NV',0x8548)
GL_COMBINER_SUM_OUTPUT_NV=_C('GL_COMBINER_SUM_OUTPUT_NV',0x854C)
GL_CONSTANT_COLOR0_NV=_C('GL_CONSTANT_COLOR0_NV',0x852A)
GL_CONSTANT_COLOR1_NV=_C('GL_CONSTANT_COLOR1_NV',0x852B)
GL_DISCARD_NV=_C('GL_DISCARD_NV',0x8530)
GL_EXPAND_NEGATE_NV=_C('GL_EXPAND_NEGATE_NV',0x8539)
GL_EXPAND_NORMAL_NV=_C('GL_EXPAND_NORMAL_NV',0x8538)
GL_E_TIMES_F_NV=_C('GL_E_TIMES_F_NV',0x8531)
GL_FOG=_C('GL_FOG',0x0B60)
GL_HALF_BIAS_NEGATE_NV=_C('GL_HALF_BIAS_NEGATE_NV',0x853B)
GL_HALF_BIAS_NORMAL_NV=_C('GL_HALF_BIAS_NORMAL_NV',0x853A)
GL_MAX_GENERAL_COMBINERS_NV=_C('GL_MAX_GENERAL_COMBINERS_NV',0x854D)
GL_NONE=_C('GL_NONE',0)
GL_NUM_GENERAL_COMBINERS_NV=_C('GL_NUM_GENERAL_COMBINERS_NV',0x854E)
GL_PRIMARY_COLOR_NV=_C('GL_PRIMARY_COLOR_NV',0x852C)
GL_REGISTER_COMBINERS_NV=_C('GL_REGISTER_COMBINERS_NV',0x8522)
GL_SCALE_BY_FOUR_NV=_C('GL_SCALE_BY_FOUR_NV',0x853F)
GL_SCALE_BY_ONE_HALF_NV=_C('GL_SCALE_BY_ONE_HALF_NV',0x8540)
GL_SCALE_BY_TWO_NV=_C('GL_SCALE_BY_TWO_NV',0x853E)
GL_SECONDARY_COLOR_NV=_C('GL_SECONDARY_COLOR_NV',0x852D)
GL_SIGNED_IDENTITY_NV=_C('GL_SIGNED_IDENTITY_NV',0x853C)
GL_SIGNED_NEGATE_NV=_C('GL_SIGNED_NEGATE_NV',0x853D)
GL_SPARE0_NV=_C('GL_SPARE0_NV',0x852E)
GL_SPARE0_PLUS_SECONDARY_COLOR_NV=_C('GL_SPARE0_PLUS_SECONDARY_COLOR_NV',0x8532)
GL_SPARE1_NV=_C('GL_SPARE1_NV',0x852F)
GL_TEXTURE0_ARB=_C('GL_TEXTURE0_ARB',0x84C0)
GL_TEXTURE1_ARB=_C('GL_TEXTURE1_ARB',0x84C1)
GL_UNSIGNED_IDENTITY_NV=_C('GL_UNSIGNED_IDENTITY_NV',0x8536)
GL_UNSIGNED_INVERT_NV=_C('GL_UNSIGNED_INVERT_NV',0x8537)
GL_VARIABLE_A_NV=_C('GL_VARIABLE_A_NV',0x8523)
GL_VARIABLE_B_NV=_C('GL_VARIABLE_B_NV',0x8524)
GL_VARIABLE_C_NV=_C('GL_VARIABLE_C_NV',0x8525)
GL_VARIABLE_D_NV=_C('GL_VARIABLE_D_NV',0x8526)
GL_VARIABLE_E_NV=_C('GL_VARIABLE_E_NV',0x8527)
GL_VARIABLE_F_NV=_C('GL_VARIABLE_F_NV',0x8528)
GL_VARIABLE_G_NV=_C('GL_VARIABLE_G_NV',0x8529)
GL_ZERO=_C('GL_ZERO',0)

# Extension entry points. Each @_p.types(...) decorator declares the C
# signature (return type first); the Python bodies are stubs replaced by the
# real dispatch at createFunction time.
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glCombinerInputNV(stage,portion,variable,input,mapping,componentUsage):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLboolean,_cs.GLboolean,_cs.GLboolean)
def glCombinerOutputNV(stage,portion,abOutput,cdOutput,sumOutput,scale,bias,abDotProduct,cdDotProduct,muxSum):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLfloat)
def glCombinerParameterfNV(pname,param):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glCombinerParameterfvNV(pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glCombinerParameteriNV(pname,param):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLintArray)
def glCombinerParameterivNV(pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glFinalCombinerInputNV(variable,input,mapping,componentUsage):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetCombinerInputParameterfvNV(stage,portion,variable,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetCombinerInputParameterivNV(stage,portion,variable,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetCombinerOutputParameterfvNV(stage,portion,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetCombinerOutputParameterivNV(stage,portion,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetFinalCombinerInputParameterfvNV(variable,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetFinalCombinerInputParameterivNV(variable,pname,params):pass
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tracking of active requests."""

import ctypes
import threading


class RequestState(object):
  """State for a single request.

  Tracks the set of thread idents participating in one request so the
  request can be ended only after all of its threads finish, and so an
  exception can be injected into every thread of the request.
  """

  def __init__(self, request_id):
    """Creates the state, registering the creating thread as a member.

    Args:
      request_id: Opaque identifier for the request being tracked.
    """
    self.request_id = request_id
    # The creating thread is implicitly part of the request.
    self._threads = set([threading.current_thread().ident])
    # Guards _threads and signals end_request when a thread leaves.
    self._condition = threading.Condition()

  def start_thread(self):
    """Records the start of a user-created thread as part of this request."""
    thread_id = threading.current_thread().ident
    with self._condition:
      self._threads.add(thread_id)

  def end_thread(self):
    """Records the end of a user-created thread as part of this request.

    Raises:
      KeyError: if the calling thread was never registered via start_thread.
    """
    thread_id = threading.current_thread().ident
    with self._condition:
      self._threads.remove(thread_id)
      # Wake end_request, which may be waiting for the thread set to drain.
      self._condition.notify()

  def end_request(self):
    """Ends the request and blocks until all threads for this request finish.

    Must be called from the thread that originated the request (its ident is
    removed from the set before waiting).
    """
    thread_id = threading.current_thread().ident
    with self._condition:
      self._threads.remove(thread_id)
      while self._threads:
        self._condition.wait()

  def inject_exception(self, exception):
    """Injects an exception to all threads running as part of this request.

    Args:
      exception: The exception *class* to asynchronously raise in each thread.
    """
    with self._condition:
      # Snapshot under the lock so the set cannot mutate while we iterate.
      thread_ids = list(self._threads)
    for thread_id in thread_ids:
      # NOTE(review): the return value (number of thread states modified) is
      # ignored; a stale ident is silently a no-op — confirm that is intended.
      ctypes.pythonapi.PyThreadState_SetAsyncExc(
          ctypes.c_long(thread_id), ctypes.py_object(exception))


# Maps request_id -> RequestState for all in-flight requests.
_request_states = {}
_request_states_lock = threading.RLock()


def start_request(request_id):
  """Starts a request with the provided request id."""
  with _request_states_lock:
    _request_states[request_id] = RequestState(request_id)


def end_request(request_id):
  """Ends the request with the provided request id.

  Blocks until every thread registered for the request has ended. The lock
  is deliberately released while waiting so other requests can progress.

  Raises:
    KeyError: if request_id is not a currently tracked request.
  """
  with _request_states_lock:
    request_state = _request_states[request_id]
  request_state.end_request()
  with _request_states_lock:
    del _request_states[request_id]


def get_request_state(request_id):
  """Returns the RequestState for the provided request id.

  Raises:
    KeyError: if request_id is not a currently tracked request.
  """
  with _request_states_lock:
    return _request_states[request_id]


def get_request_states():
  """Returns a list of RequestState instances for all current requests."""
  with _request_states_lock:
    # Copy under the lock: in Python 3, dict.values() is a live view that
    # would escape the lock and could change size (raising at iteration
    # time) while the caller walks it. Returning a list matches the
    # documented contract and is safe.
    return list(_request_states.values())
fig, docker=Docker, assets=Assets, composer=DockerComposer, container_builder=ContainerBuilder)
    def __init__(self, config, docker, assets, composer, container_builder):
        # Injected collaborators: config store, docker client, flavour asset
        # locator, docker-compose wrapper and container-model builder.
        self._config = config
        self._assets = assets
        self._docker = docker
        self._composer = composer
        self._container_builder = container_builder

    @exception_safe(ConnectionError, False)
    def run(self, flavour, name, force=False, override=None, provision=None,
            no_recreate=False, verbose=False):
        """Spawn (or restart) a sandbox from a flavour definition.

        :param flavour: The flavour name
        :param name: The sandbox name
        :param force: Force restarting without asking
        :param override: An override path
        :param provision: A provisioning script path
        :param no_recreate: Do not recreate containers if they exist on restart
        :param verbose: Logs the output of the provisioning script
        :return: True if the sandbox has been started, False otherwise
        """
        if flavour not in self._assets.list_flavours():
            log.error('Cannot find flavour {}. Aborted'.format(flavour))
            # NOTE(review): returns None here, not False — callers that test
            # the result for identity with False will miss this path.
            return
        flavour_file = self._assets.get_abs_flavour_path(flavour)
        # Get provision and override from flavour; explicit arguments win over
        # values embedded in the flavour YAML.
        with open(flavour_file, 'rb') as _f_yml:
            flavour_content = load(_f_yml)
        if 'provision' in flavour_content and provision is None:
            provision = flavour_content['provision']
        if 'override' in flavour_content and override is None:
            override = flavour_content['override']
        override = os.path.abspath(override) if override else None
        message = 'Spawning {} sandbox'.format(flavour)
        if override:
            message += ' with override {}'.format(override)
        if provision:
            message += ' and with provision {}'.format(provision)
        log.info(message)
        restart = 'y'
        if not force:
            running_sandboxes = self.list_running_sandbox()
            if name in running_sandboxes:
                # NOTE(review): raw_input is Python 2 only — confirm the
                # project targets py2, otherwise this raises NameError.
                restart = raw_input(
                    "\nSandbox {} is already up. Restart? (Y/N): ".format(name))
        if force or restart.lower() == 'y':
            # Bring the compose project up under a prefixed project name.
            composer = \
                self._composer.up(flavour_file,
                                  '{}{}'.format(self.SANDBOX_PREFIX, name),
                                  override, no_recreate)
            composer.wait()
            if provision:
                provision = os.path.abspath(provision)
                if os.path.isfile(provision) and os.access(provision, os.X_OK):
                    log.info(
                        'Running provisioning script: {}'.format(provision))
                    provisioning_env = {
                        "SANDBOX_NAME": name
                    }
                    if verbose:
                        # Inherit stdout; fold stderr into it.
                        out = None
                        err = subprocess.STDOUT
                    else:
                        # Silence the script entirely.
                        out = open(os.devnull, 'w')
                        err = out
                    p = subprocess.Popen(
                        provision, stdout=out, stderr=err,
                        env=dict(os.environ, **provisioning_env))
                    p.wait()
                    log.info('Provisioning script completed')
                else:
                    log.error(
                        'File {} does not exist or it\'s not executable'.format(
                            provision
                        ))
                    return False
        return True

    @staticmethod
    def __get_sandbox_name(container_name):
        # Derive the sandbox name from a compose container name by dropping
        # the project prefix and any leading slash.
        return container_name.split('_')[0].replace(Composer.SANDBOX_PREFIX,
                                                    '').replace('/', '')

    @exception_safe(ConnectionError, [])
    def list_running_sandbox(self):
        """
        List all the running sandbox

        :return: The list of all the running sandbox
        """
        # NOTE(review): despite the docstring, this returns a set, and the
        # exception_safe fallback is a list — confirm callers accept both.
        sandoxes = set()
        containers = self._docker.list_containers(self.SANDBOX_PREFIX)
        for container_ref in containers:
            container = self._container_builder.for_container_ref(container_ref)
            sandoxes.add(self.__get_sandbox_name(container.name))
        return sandoxes

    @exception_safe(ConnectionError, None)
    def stop(self, sandboxes, remove=False):
        """ Stop the running sandbox """
        return self._map_stop_or_kill('stop', sandboxes, remove)

    @exception_safe(ConnectionError, None)
    def kill(self, sandboxes, remove=False):
        """ Kill the running sandbox """
        return self._map_stop_or_kill('kill', sandboxes, remove)

    @exception_safe(ConnectionError, [])
    def get_sandbox_detail(self, sandbox):
        """Collect [sandbox, name, image, ports, ip] rows for each container
        of the given sandbox.

        :param sandbox: The sandbox name.
        :return: A list of per-container detail rows.
        """
        containers = list()
        for container_ref in self._docker.list_containers(
                '{}{}_'.format(self.SANDBOX_PREFIX, sandbox)):
            container = self._container_builder.for_container_ref(container_ref)
            ip = container.ip
            name = container.name
            image = container.image
            ports = container.ports(pretty=True)
            containers.append([sandbox, name, image, ports, ip])
        return containers

    @exception_safe(FlavourNotFound, dict())
    def get_components_by_flavour(self, flavour):
        """Count the images referenced by a flavour, resolving `extends`
        indirections through the composer's variable substitutions."""
        flavour_path = self._assets.get_abs_flavour_path(flavour)
        components = list()
        with open(flavour_path, 'rb') as _f_yml:
            yml_content = load(_f_yml)
        for component, definition in yml_content.items():
            if 'image' in [c.lower() for c in definition]:
                components.append(definition['image'])
            else:
                extended = definition['extends']['file']
                for var, value in self._composer.VARS.items():
                    extended = extended.replace(var, value)
                service = definition['extends']['service']
                image = self._get_base_component_image(extended, service)
                # NOTE(review): if _get_base_component_image returns None,
                # the `':' not in image` test below raises TypeError before
                # the `if image` guard — confirm None is impossible here.
                if ':' not in image:
                    # Default to the master tag when none is pinned.
                    image = '{}:master'.format(image)
                if image:
                    components.append(image)
        return Counter(components)

    def _get_base_component_image(self, yml, service):
        """Resolve the image of *service* inside the referenced compose file,
        or None if the service is not defined there."""
        # If it's a relative path, search for it in the extra flavours directory
        if not os.path.isabs(yml):
            extra_flavours = self._config.get_sandbox_value('extra_flavours')
            if extra_flavours:
                yml = os.path.join(extra_flavours, yml)
        with open(yml, 'rb') as _f_yml:
            component_content = load(_f_yml)
        for component, definition in component_content.items():
            if component == service:
                return definition['image']
        return None

    def _map_stop_or_kill(self, op, sandboxes, remove=False):
        """ Stop/Kill the running sandbox """
        running_sandboxes = self.list_running_sandbox()
        for sandbox in sandboxes:
            if sandbox not in running_sandboxes:
                log.info('Sandbox {} not running. Skipping'.format(sandbox))
                continue
            containers = self._docker.list_containers(
                '{}{}_'.format(self.SANDBOX_PREFIX, sandbox))
            for container_ref in containers:
                container = self._container_builder.for_container_ref(
                    container_ref)
                service_name = container.service_name
                log.info('Sandbox {} - {}ing container {}'.format(sandbox, op,
                                                                  service_name))
                # Dispatch to docker.stop_container / docker.kill_container.
                docker_op = op + '_container'
                getattr(self._docker, docker_op)(container_ref)
                if remove:
                    log.info('Sandbox {} - Removing ' 'container {}'.
""" Consider this game: Write 8 blanks on a sheet of paper. Randomly pick a digit 0-9. After seeing the digit, choose one of the 8 blanks to place that digit in. Randomly choose another digit (with replacement) and then choose one of the 7 remaining blanks to place it in. Repeat until you've filled all 8 blanks. You win if the 8 digits written down ar
e in order from smallest to largest. Write a program that plays this game by itself and determines whether it won or not. Run it 1 million times and post your probability of winning. Assigning d
igits to blanks randomly lets you win about 0.02% of the time. Here's a python script that wins about 10.3% of the time. Can you do better? import random def trial(): indices = range(8) # remaining unassigned indices s = [None] * 8 # the digits in their assigned places while indices: d = random.randint(0,9) # choose a random digit index = indices[int(d*len(indices)/10)] # assign it an index s[index] = str(d) indices.remove(index) return s == sorted(s) print sum(trial() for _ in range(1000000)) thanks to cosmologicon for the challenge at /r/dailyprogrammer_ideas .. link [http://www.reddit.com/r/dailyprogrammer_ideas/comments/s30be/intermediate_digitassigning_game/] """ import random import itertools def que_sort(data): # print(data) return all(b >= a for a, b in zip(data, itertools.islice(data, 1, None))) TRIALS = 1 win = 0 for a in range(TRIALS): l = [None] * 8 p = list(range(8)) while p: d = random.randint(0,9) # i = random.choice(p) i = int(d * (len(p)) / 10) print(p[i]) l[p[i]] = d p.pop(i) print(l) if que_sort(l): win += 1 print('{}/{} - {}%'.format(win, TRIALS, win/TRIALS*100))
from spotdl.version import __version__
from spotdl.command_line
.core import Spotdl
f.check_permissions(request, "unstar", project)
        votes_service.remove_vote(project, user=request.user)
        return response.Ok()

    @detail_route(methods=["GET"])
    def fans(self, request, pk=None):
        # List the users who voted ("starred") this project.
        project = self.get_object()
        self.check_permissions(request, "fans", project)
        voters = votes_service.get_voters(project)
        voters_data = votes_serializers.VoterSerializer(voters, many=True)
        return response.Ok(voters_data.data)

    @detail_route(methods=["POST"])
    def create_template(self, request, **kwargs):
        # Snapshot the current project's configuration as a reusable template.
        template_name = request.DATA.get('template_name', None)
        template_description = request.DATA.get('template_description', None)

        if not template_name:
            raise response.BadRequest(_("Not valid template name"))
        if not template_description:
            raise response.BadRequest(_("Not valid template description"))

        template_slug = slugify_uniquely(template_name, models.ProjectTemplate)

        project = self.get_object()
        self.check_permissions(request, 'create_template', project)

        template = models.ProjectTemplate(
            name=template_name,
            slug=template_slug,
            description=template_description,
        )
        template.load_data_from_project(project)
        template.save()
        return response.Created(
            serializers.ProjectTemplateSerializer(template).data)

    @detail_route(methods=['post'])
    def leave(self, request, pk=None):
        # Remove the requesting user's membership from the project.
        project = self.get_object()
        self.check_permissions(request, 'leave', project)
        services.remove_user_from_project(request.user, project)
        return response.Ok()

    def _set_base_permissions(self, obj):
        # Reset the project's base permission set when it is created public
        # or when its public/private state flips on update.
        update_permissions = False
        if not obj.id:
            if not obj.is_private:
                # Creating a public project
                update_permissions = True
        else:
            if self.get_object().is_private != obj.is_private:
                # Changing project public state
                update_permissions = True
        if update_permissions:
            permissions_service.set_base_permissions_for_project(obj)

    def pre_save(self, obj):
        # On creation: attach owner and optional template from querystring.
        if not obj.id:
            obj.owner = self.request.user
        # TODO REFACTOR THIS
        if not obj.id:
            obj.template = self.request.QUERY_PARAMS.get('template', None)
        self._set_base_permissions(obj)
        super().pre_save(obj)

    def destroy(self, request, *args, **kwargs):
        # Bulk-delete the project and its children with the cascade signal
        # handlers disconnected, so per-row color/watcher updates are skipped.
        obj = self.get_object_or_none()
        self.check_permissions(request, 'destroy', obj)
        signals.post_delete.disconnect(
            sender=UserStory,
            dispatch_uid="user_story_update_project_colors_on_delete")
        signals.post_delete.disconnect(
            sender=Issue,
            dispatch_uid="issue_update_project_colors_on_delete")
        signals.post_delete.disconnect(
            sender=Task,
            dispatch_uid="tasks_milestone_close_handler_on_delete")
        signals.post_delete.disconnect(
            sender=Task,
            dispatch_uid="tasks_us_close_handler_on_delete")
        signals.post_delete.disconnect(
            sender=Task,
            dispatch_uid="task_update_project_colors_on_delete")
        signals.post_delete.disconnect(dispatch_uid="refprojdel")
        signals.post_delete.disconnect(
            dispatch_uid='update_watchers_on_membership_post_delete')
        # NOTE(review): obj is dereferenced here before the `if obj is None`
        # check below — a missing object raises AttributeError instead of
        # Http404. The None check should precede these deletes; confirm and
        # reorder.
        obj.tasks.all().delete()
        obj.user_stories.all().delete()
        obj.issues.all().delete()
        obj.memberships.all().delete()
        obj.roles.all().delete()

        if obj is None:
            raise Http404

        self.pre_delete(obj)
        self.pre_conditions_on_delete(obj)
        obj.delete()
        self.post_delete(obj)
        return response.NoContent()


######################################################
## Custom values for selectors
######################################################

# Declarative CRUD viewsets for per-project selector values. Each one names
# the related model/field that must be repointed (MoveOnDestroyMixin) when a
# value is deleted, plus the bulk reorder action it exposes.

class PointsViewSet(MoveOnDestroyMixin, ModelCrudViewSet, BulkUpdateOrderMixin):
    model = models.Points
    serializer_class = serializers.PointsSerializer
    permission_classes = (permissions.PointsPermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ('project',)
    bulk_update_param = "bulk_points"
    bulk_update_perm = "change_points"
    bulk_update_order_action = services.bulk_update_points_order
    move_on_destroy_related_class = RolePoints
    move_on_destroy_related_field = "points"
    move_on_destroy_project_default_field = "default_points"


class UserStoryStatusViewSet(MoveOnDestroyMixin, ModelCrudViewSet,
                             BulkUpdateOrderMixin):
    model = models.UserStoryStatus
    serializer_class = serializers.UserStoryStatusSerializer
    permission_classes = (permissions.UserStoryStatusPermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ('project',)
    bulk_update_param = "bulk_userstory_statuses"
    bulk_update_perm = "change_userstorystatus"
    bulk_update_order_action = services.bulk_update_userstory_status_order
    move_on_destroy_related_class = UserStory
    move_on_destroy_related_field = "status"
    move_on_destroy_project_default_field = "default_us_status"


class TaskStatusViewSet(MoveOnDestroyMixin, ModelCrudViewSet,
                        BulkUpdateOrderMixin):
    model = models.TaskStatus
    serializer_class = serializers.TaskStatusSerializer
    permission_classes = (permissions.TaskStatusPermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
    bulk_update_param = "bulk_task_statuses"
    bulk_update_perm = "change_taskstatus"
    bulk_update_order_action = services.bulk_update_task_status_order
    move_on_destroy_related_class = Task
    move_on_destroy_related_field = "status"
    move_on_destroy_project_default_field = "default_task_status"


class SeverityViewSet(MoveOnDestroyMixin, ModelCrudViewSet,
                      BulkUpdateOrderMixin):
    model = models.Severity
    serializer_class = serializers.SeveritySerializer
    permission_classes = (permissions.SeverityPermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
    bulk_update_param = "bulk_severities"
    bulk_update_perm = "change_severity"
    bulk_update_order_action = services.bulk_update_severity_order
    move_on_destroy_related_class = Issue
    move_on_destroy_related_field = "severity"
    move_on_destroy_project_default_field = "default_severity"


class PriorityViewSet(MoveOnDestroyMixin, ModelCrudViewSet,
                      BulkUpdateOrderMixin):
    model = models.Priority
    serializer_class = serializers.PrioritySerializer
    permission_classes = (permissions.PriorityPermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
    bulk_update_param = "bulk_priorities"
    bulk_update_perm = "change_priority"
    bulk_update_order_action = services.bulk_update_priority_order
    move_on_destroy_related_class = Issue
    move_on_destroy_related_field = "priority"
    move_on_destroy_project_default_field = "default_priority"


class IssueTypeViewSet(MoveOnDestroyMixin, ModelCrudViewSet,
                       BulkUpdateOrderMixin):
    model = models.IssueType
    serializer_class = serializers.IssueTypeSerializer
    permission_classes = (permissions.IssueTypePermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
    bulk_update_param = "bulk_issue_types"
    bulk_update_perm = "change_issuetype"
    bulk_update_order_action = services.bulk_update_issue_type_order
    move_on_destroy_related_class = Issue
    move_on_destroy_related_field = "type"
    move_on_destroy_project_default_field = "default_issue_type"


# NOTE(review): unlike its siblings, this class declares no bulk_update_* or
# move_on_destroy_* attributes in the visible source — confirm whether the
# definition is truncated here.
class IssueStatusViewSet(MoveOnDestroyMixin, ModelCrudViewSet,
                         BulkUpdateOrderMixin):
    model = models.IssueStatus
    serializer_class = serializers.IssueStatusSerializer
    permission_classes = (permissions.IssueStatusPermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
###################################################### # # MouseLook.py Blender 2.55 # # Tutorial for using MouseLook.py can be found at # # www.tutorialsforblender3d.com # # Released under the Creative Commons Attribution 3.0 Unported License. # # If you use this code, please include this information header. # ###################################################### # define main program def main(): # set default values Sensitivity = 0.0005 Invert = -1 Capped = False # get controller controller = bge.logic.getCurrentController() # get the object this script is attached to obj = controller.owner # get the size of the game screen gameScreen = gameWindow() # get mouse movement move = mouseMove(gameScreen, controller, obj) # change mouse sensitivity? sensitivity = mouseSen(Sensitivity, obj) # invert mouse pitch? invert = mousePitch(Invert, obj) # upDown mouse capped? capped = mouseCap(Capped, move, invert, obj) # use mouse look useMouseLook(controller, capped, move, invert, sensitivity) # Center mouse in game window centerCursor(controller, gameScreen) ##################################################### # define game window def gameWindow(): # get width and height of game window width = bge.render.getWindowWidth() height = bge.render.getWindowHeight() return (width, height) ####################################################### # define mouse movement function def mouseMove(gameScreen, controller, obj): # Get sensor named MouseLook mouse = controller.sensors["MouseLook"] # extract width and height from gameScreen width = gameScreen[0] height = gameScreen[1] # distance moved from screen center x = width/2 - mouse.position[0] y = height/2 - mouse.position[1] # initialize mouse so it doesn't jerk first time if not 'mouseInit' in obj: obj['mouseInit'] = True x = 0 y = 0 ######### stops drifting on mac osx # if sensor is deactivated don't move if not mouse.positive: x = 0 y = 0 ######### -- mac fix contributed by Pelle Johnsen # return mouse movement return (x, y) 
######################################################

# define Mouse Sensitivity
def mouseSen(sensitivity, obj):
    """Scale *sensitivity* by the optional 'Adjust' game property."""

    # check so see if property named Adjust was added
    if 'Adjust' in obj:

        # Don't want Negative values: reset bad Adjust to the base value
        if obj['Adjust'] < 0.0 or obj['Adjust'] == 0:
            obj['Adjust'] = sensitivity

        # adjust the sensitivity
        sensitivity = obj['Adjust'] * sensitivity

    # return sensitivity
    return sensitivity

#########################################################

# define Invert mouse pitch
def mousePitch(invert, obj):
    """Return -1 (inverted pitch) or +1 based on the 'Invert' property;
    falls back to the passed-in default when the property is absent."""

    # check to see if property named Invert was added
    if 'Invert' in obj:

        # pitch to be inverted?
        if obj['Invert'] == True:
            invert = -1
        else:
            invert = 1

    # return mouse pitch
    return invert

#####################################################

# define Cap vertical mouselook
def mouseCap(capped, move, invert, obj):
    """Return True when vertical look must be blocked because the camera
    has reached the angular limit set by the 'Cap' property (degrees)."""

    # check to see if property named Cap was added
    if 'Cap' in obj:

        # import mathutils
        import mathutils

        # limit cap to 0 - 180 degrees
        if obj['Cap'] > 180:
            obj['Cap'] = 180
        if obj['Cap'] < 0:
            obj['Cap'] = 0

        # get the orientation of the camera to parent
        camOrient = obj.localOrientation

        # get camera Z axis vector
        camZ = [camOrient[0][2], camOrient[1][2], camOrient[2][2]]

        # create a mathutils vector
        vec1 = mathutils.Vector(camZ)

        # get camera parent
        camParent = obj.parent

        # use Parent z axis
        parentZ = [ 0.0, 0.0, 1.0]

        # create a mathutils vector
        vec2 = mathutils.Vector(parentZ)

        # find angle in radians between two vectors
        rads = mathutils.Vector.angle(vec2, vec1)

        # convert to degrees (approximate)
        # NOTE(review): uses 3.14 rather than math.pi, so the cap angle is
        # off by ~0.05% — presumably acceptable for this tutorial; confirm.
        angle = rads * ( 180.00 / 3.14)

        # get amount to limit mouselook
        capAngle = obj['Cap']

        # get mouse up down movement
        moveY = move[1] * invert

        # check capped angle against against camera z-axis and mouse y movement
        # NOTE(review): the trailing `== True` binds only to the second
        # parenthesized term; that term is already boolean so the comparison
        # is redundant but harmless.
        if (angle > (90 + capAngle/2) and moveY > 0) or (angle < (90 - capAngle/2) and moveY < 0) == True:

            # no movement
            capped = True

    # return capped
    return capped

###############################################

# define useMouseLook
def useMouseLook(controller, capped, move, invert, sensitivity):
    """Convert the mouse offsets into dRot values on the two motion
    actuators: yaw on the world Z axis, pitch on the camera's local X."""

    # get up/down movement (zeroed while the vertical cap is active)
    if capped == True:
        upDown = 0
    else:
        upDown = move[1] * sensitivity * invert

    # get left/right movement
    leftRight = move[0] * sensitivity

    # Get the actuators
    act_LeftRight = controller.actuators["LeftRight"]
    act_UpDown = controller.actuators["UpDown"]

    # set the values
    act_LeftRight.dRot = [0.0, 0.0, leftRight]
    act_LeftRight.useLocalDRot = False

    act_UpDown.dRot = [ upDown, 0.0, 0.0]
    act_UpDown.useLocalDRot = True

    # Use the actuators
    controller.activate(act_LeftRight)
    controller.activate(act_UpDown)

#############################################

# define center mouse cursor
def centerCursor(controller, gameScreen):
    """Re-center the cursor each frame; once it is already centered,
    deactivate the motion actuators to stop residual rotation."""

    # extract width and height from gameScreen
    width = gameScreen[0]
    height = gameScreen[1]

    # Get sensor named MouseLook
    mouse = controller.sensors["MouseLook"]

    # get cursor position
    pos = mouse.position

    # if cursor needs to be centered
    # NOTE(review): compares mouse.position against a *list* — if the sensor
    # reports a tuple this is always unequal and the cursor is re-centered
    # every frame; confirm the sensor's position type.
    if pos != [int(width/2), int(height/2)]:

        # Center mouse in game window
        bge.render.setMousePosition(int(width/2), int(height/2))

    # already centered. Turn off actuators
    else:

        # Get the actuators
        act_LeftRight = controller.actuators["LeftRight"]
        act_UpDown = controller.actuators["UpDown"]

        # turn off the actuators
        controller.deactivate(act_LeftRight)
        controller.deactivate(act_UpDown)

##############################################

#import GameLogic
import bge

# Run program
main()
or): def __init__(self, plotly_name="colorbar", parent_name="bar.marker", **kwargs): super(ColorbarValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "ColorBar"), data_docs=kwargs.pop( "data_docs", """ bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. 
len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". orientation Sets the orientation of the colorbar. outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. 
to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very simila
r to those in Python. For numbers, see: h ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f ormat. And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.bar.mar ker.colorbar.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.dat a.bar.marker.colorbar.tickformatstopdefaults), sets the default property values to use for elements of bar.marker.colorbar.tickformatstops ticklabeloverflow Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. ticklabelposition Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". ticklabelstep Sets the spacing between tick labels as
ead.

    :param dict experiment_dict: the ``dict`` to update."""
    if pdb_dict.get("EXPDTA"):
        if pdb_dict["EXPDTA"][0].strip():
            # Drop the 6-char record name prefix; keep the technique text.
            experiment_dict["technique"] = pdb_dict["EXPDTA"][0][6:].strip()


def extract_source(pdb_dict, experiment_dict):
    """Takes a ``dict`` and adds source information to it by parsing SOURCE
    lines.

    :param dict pdb_dict: the ``dict`` to read.
    :param dict experiment_dict: the ``dict`` to update."""
    if pdb_dict.get("SOURCE"):
        # Continuation lines are merged before matching, so a value split
        # across records is still caught by one regex.
        data = merge_lines(pdb_dict["SOURCE"], 10)
        patterns = {
            "source_organism": r"ORGANISM_SCIENTIFIC\: (.+?);",
            "expression_system": r"EXPRESSION_SYSTEM\: (.+?);"
        }
        for attribute, pattern in patterns.items():
            matches = re.findall(pattern, data)
            if matches:
                experiment_dict[attribute] = matches[0]


def extract_missing_residues(pdb_dict, experiment_dict):
    """Takes a ``dict`` and adds missing residue information to it by parsing
    REMARK 465 lines.

    :param dict pdb_dict: the ``dict`` to read.
    :param dict experiment_dict: the ``dict`` to update."""
    for line in pdb_dict.get("REMARK", {}).get("465", []):
        chunks = line.strip().split()
        # Data rows have exactly 5 fields (M, resname, chain, seq num, code);
        # header/title rows are skipped by this length test.
        if len(chunks) == 5:
            experiment_dict["missing_residues"].append({
                "name": chunks[2], "id": f"{chunks[3]}.{chunks[4]}"
            })


def extract_resolution_remark(pdb_dict, quality_dict):
    """Takes a ``dict`` and adds resolution information to it by parsing
    REMARK 2 lines.

    :param dict pdb_dict: the ``dict`` to read.
    :param dict quality_dict: the ``dict`` to update."""
    if pdb_dict.get("REMARK") and pdb_dict["REMARK"].get("2"):
        for remark in pdb_dict["REMARK"]["2"]:
            # First remark whose second token parses as a float wins.
            # NOTE(review): bare except deliberately skips unparsable lines,
            # but it also hides e.g. KeyboardInterrupt — consider narrowing
            # to (ValueError, IndexError).
            try:
                quality_dict["resolution"] = float(remark[10:].strip().split()[1])
                break
            except: pass


def extract_rvalue_remark(pdb_dict, quality_dict):
    """Takes a ``dict`` and adds resolution information to it by parsing
    REMARK 3 lines.

    :param dict pdb_dict: the ``dict`` to read.
    :param dict quality_dict: the ``dict`` to update."""
    if pdb_dict.get("REMARK") and pdb_dict["REMARK"].get("3"):
        patterns = {
            "rvalue": r"R VALUE.+WORKING.+?: (.+)",
            "rfree": r"FREE R VALUE[ ]{2,}: (.+)",
        }
        for attribute, pattern in patterns.items():
            for remark in pdb_dict["REMARK"]["3"]:
                matches = re.findall(pattern, remark.strip())
                if matches:
                    # NOTE(review): bare except silently drops values such
                    # as "NULL" — narrowing to ValueError would be safer.
                    try:
                        quality_dict[attribute] = float(matches[0].strip())
                    except: pass
                    # Stop at the first matching remark for this attribute.
                    break


def extract_assembly_remark(pdb_dict, geometry_dict):
    """Takes a ``dict`` and adds assembly information to it by parsing
    REMARK 350 lines.

    :param dict pdb_dict: the ``dict`` to read.
    :param dict geometry_dict: the ``dict`` to update."""
    if pdb_dict.get("REMARK") and pdb_dict["REMARK"].get("350"):
        # Split the remark block on "BIOMOLECULE:" headers ("ECULE:" matches
        # the tail of that keyword); [1:] drops the preamble before the first
        # header, then header/body pairs are stitched back together.
        groups = [list(g) for k, g in groupby(
            pdb_dict["REMARK"]["350"], lambda x: "ECULE:" in x
        )][1:]
        assemblies = [list(chain(*a)) for a in zip(groups[::2], groups[1::2])]
        for a in assemblies:
            geometry_dict["assemblies"].append(
                assembly_lines_to_assembly_dict(a)
            )


def assembly_lines_to_assembly_dict(lines):
    """Takes the lines representing a single biological assembly and turns
    them into an assembly dictionary.

    :param list lines: The REMARK lines to read.
    :rtype: ``dict``"""
    assembly = {
        "transformations": [], "software": None, "buried_surface_area": None,
        "surface_area": None, "delta_energy": None, "id": 0
    }
    # [regex, target key, converter] triples for the scalar header fields.
    patterns = [[r"(.+)SOFTWARE USED: (.+)", "software", lambda x: x],
                [r"(.+)BIOMOLECULE: (.+)", "id", int],
                [r"(.+)SURFACE AREA: (.+) [A-Z]", "buried_surface_area", float],
                [r"(.+)AREA OF THE COMPLEX: (.+) [A-Z]", "surface_area", float],
                [r"(.+)FREE ENERGY: (.+) [A-Z]", "delta_energy", float]]
    # t accumulates the transformation currently being read (chains + 3x3
    # matrix rows + translation vector), flushed when a new one starts.
    t = None
    for line in lines:
        for p in patterns:
            matches = re.findall(p[0], line)
            if matches:
                assembly[p[1]] = p[2](matches[0][1].strip())
        if "APPLY THE FOLLOWING" in line:
            if t:
                assembly["transformations"].append(t)
            t = {"chains": [], "matrix": [], "vector": []}
        if "CHAINS:" in line:
            # NOTE(review): raises TypeError if a CHAINS: line precedes any
            # "APPLY THE FOLLOWING" header (t still None) — confirm the
            # format guarantees the ordering.
            t["chains"] += [c.strip() for c in
                            line.split(":")[-1].strip().split(",") if c.strip()]
        if "BIOMT" in line:
            values = [float(x) for x in line.split()[4:]]
            # A full matrix means BIOMT rows for a new operator with the same
            # chain set have started.
            if len(t["matrix"]) == 3:
                assembly["transformations"].append(t)
                t = {"chains": t["chains"], "matrix": [], "vector": []}
            t["matrix"].append(values[:3])
            t["vector"].append(values[-1])
    if t:
        assembly["transformations"].append(t)
    return assembly


def extract_crystallography(pdb_dict, geometry_dict):
    """Takes a ``dict`` and adds assembly information to it by parsing the
    CRYST1 record.

    :param dict pdb_dict: the ``dict`` to read.
    :param dict geometry_dict: the ``dict`` to update."""
    if pdb_dict.get("CRYST1"):
        line = pdb_dict["CRYST1"][0]
        values = line.split()
        # Space group lives in fixed columns 56-66 of the CRYST1 record.
        geometry_dict["crystallography"]["space_group"] = line[55:66].strip()
        # NOTE(review): values[1:7] needs 7 whitespace-separated fields to
        # yield all 6 cell parameters, but the guard only requires 6 — with
        # exactly 6 fields this stores a 5-element unit_cell. Confirm whether
        # the guard should be `>= 7`.
        geometry_dict["crystallography"]["unit_cell"] = [
            float(val) for val in values[1:7]
        ] if len(values) >= 6 else []


def make_sequences(pdb_dict):
    """Creates a mapping of chain IDs to sequences, by parsing SEQRES records.

    :param dict pdb_dict: the .pdb dictionary to read.
:rtype: ``dict``""" seq = {} if pdb_dict.get("SEQRES"): for line in pdb_dict["SEQRES"]: chain, residues = line[11], line[19:].strip().split() if chain not in seq: seq[chain] = [] seq[chain] += residues return {k: "".join([CODES.get(r, "X") for r in v]) for k, v in seq.items()} def make_secondary_structure(pdb_dict): """Creates a dictionary of helices and strands, with each having a list of start and end residues. :param pdb_dict: the .pdb dict to read. :rtype: ``dict``""" helices, strands = [], [] for helix in pdb_dict.get("HELIX", []): helices.append([ f"{helix[19]}.{helix[21:25].strip()}{helix[25].strip()}", f"{helix[31]}.{helix[33:37].strip()}{helix[37].strip() if len(helix) > 37 else ''}", ]) for strand in pdb_dict.get("SHEET", []): strands.append([ f"{strand[21]}.{strand[22:26].strip()}{strand[26].strip()}", f"{strand[32]}.{strand[33:37].strip()}{strand[37].strip() if len(strand) > 37 else ''}", ]) return {"helices": helices, "strands": strands} def get_full_names(pdb_dict): """Creates a mapping of het names to full English names. :param pdb_dict: the .pdb dict to read. :rtype: ``dict``""" full_names = {} for line in pdb_dict.get("HETNAM", []): try: full_names[line[11:14].strip()] += line[15:].strip() except: full_names[line[11:14].strip()] = line[15:].strip() return full_names def make_aniso(model_lines): """Creates a mapping of chain IDs to anisotropy, by parsing ANISOU records. :param dict pdb_dict: the .pdb dictionary to read. :rtype: ``dict``""" return {int(line[6:11].strip()): [ int(line[n * 7 + 28:n * 7 + 35]) / 10000 for n in range(6) ] for line in model_lines if line[:6] == "ANISOU"} def get_last_ter_line(model_lines): """Gets the index of the last TER record in a list of records. 0 will be returned if there are none. :param list model_lines: the lines to search. 
:rtype: ``int``""" last_ter = 0 for index, line in enumerate(model_lines[::-1]): if line[:3] == "TER": last_ter = len(model_lines) - index - 1 break return last_ter def id_from_line(line): """Creates a residue ID from an atom line. :param str line: the ATOM or HETATM line record. :rtype: ``str``""" return "{}.{}{
#
#  Copyright (C) 2001 Andrew T. Csillag <drew_csillag@geocities.com>
#
#      You may distribute under the terms of either the GNU General
#      Public License or the SkunkWeb License, as specified in the
#      README file.
#
# Command-line compiler/runner for SkunkWeb DT templates (Python 2).
# Usage: <script> <template-file>.  Renders the template and caches the
# compiled form next to it as "<template-file>.dtcc".

import os
import DT
import sys
import time
import marshal
import stat

def phfunc(name, obj):
    # Persist the compiled template; DT calls back into this so the next
    # run can skip recompilation.
    # NOTE(review): the handle is never closed and the mode is text ('w');
    # marshal data is binary, so 'wb' would be safer on Windows - confirm.
    marshal.dump(obj, open(name,'w'))

if __name__=='__main__':
    bt = time.time()  # wall-clock start for the elapsed-time report below
    fname=sys.argv[1]
    mtime=os.stat(fname)[stat.ST_MTIME]
    cform=sys.argv[1]+'.dtcc'  # path of the cached compiled form
    try:
        cmtime=os.stat(cform)[stat.ST_MTIME]
        comp_form=marshal.load(open(cform))
    except:
        # No usable cache - force a fresh compile.
        comp_form=None
        cmtime=-1
    # DT compares the two mtimes to decide whether the cache is stale, and
    # invokes the lambda to persist a newly compiled form.
    d=DT.DT(open(fname).read(), fname, comp_form, mtime, cmtime,
            lambda x, y=cform: phfunc(y, x))
    class dumb: pass  # minimal attribute holder used as the render namespace
    ns=dumb()
    text = d(ns)
    et = time.time()
    print text
    print 'elapsed time:', et - bt
import pytest
from pnc_cli import projects
from pnc_cli.swagger_client.apis.projects_api import ProjectsApi
from test import testutils
import pnc_cli.user_config as uc

# Integration tests for the PNC "projects" REST endpoint, driven through the
# generated swagger client.  The ``new_project`` fixture (defined in the
# shared conftest) provides a throwaway project where one is needed.


@pytest.fixture(scope='function', autouse=True)
def get_projects_api():
    # Rebuild the client before every test so each one gets a fresh,
    # authenticated API handle.
    global projects_api
    projects_api = ProjectsApi(uc.user.get_api_client())


def test_get_all_invalid_param():
    testutils.assert_raises_typeerror(projects_api, 'get_all')


def test_get_all():
    projs = projects_api.get_all(page_index=0, page_size=1000000, sort='', q='').content
    assert projs is not None


def test_create_new_invalid_param():
    testutils.assert_raises_typeerror(projects_api, 'create_new')


def test_create_new(new_project):
    # The fixture already created the project; verify it is listed.
    proj_ids = [x.id for x in projects_api.get_all(page_size=1000000).content]
    assert new_project.id in proj_ids


def test_get_specific_no_id():
    testutils.assert_raises_valueerror(projects_api, 'get_specific', id=None)


def test_get_specific_invalid_param():
    testutils.assert_raises_typeerror(projects_api, 'get_specific', id=1)


def test_get_specific(new_project):
    assert projects_api.get_specific(new_project.id) is not None


def test_update_no_id():
    testutils.assert_raises_valueerror(projects_api, 'update', id=None)


def test_update_invalid_param():
    testutils.assert_raises_typeerror(projects_api, 'update', id=1)


def test_update(new_project):
    # Update the fixture project, then read it back and check both fields.
    newname = 'newname' + testutils.gen_random_name()
    updated_project = projects._create_project_object(name=newname,
                                                      description="pnc-cli test updated description")
    projects_api.update(id=new_project.id, body=updated_project)
    retrieved_project = projects_api.get_specific(new_project.id).content
    assert retrieved_project.name == newname and retrieved_project.description == 'pnc-cli test updated description'


def test_delete_specific_no_id():
    testutils.assert_raises_valueerror(projects_api, 'delete_specific', id=None)


def test_delete_specific_invalid_param():
    testutils.assert_raises_typeerror(projects_api, 'delete_specific', id=1)


def test_delete_specific(new_project):
    # Present before deletion, absent afterwards.
    proj_ids = [x.id for x in projects_api.get_all(page_size=1000000).content]
    assert new_project.id in proj_ids
    projects_api.delete_specific(new_project.id)
    proj_ids = [x.id for x in projects_api.get_all(page_size=1000000).content]
    assert new_project.id not in proj_ids
'''
Non-original introduction script, added solely for the sake of familiarizing
myself with Tensorflow. I, Evan Kirsch, do not claim credit whatsoever for
this code. It is taken directly from https://www.tensorflow.org/
'''

import tensorflow as tf
import numpy as np

# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype("float32")
y_data = x_data * 0.1 + 0.3

# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but Tensorflow will
# figure that out for us.)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b

# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Before starting, initialize the variables.  We will 'run' this first.
init = tf.initialize_all_variables()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Fit the line.  (The original used Python 2's ``xrange``, which is a
# NameError on Python 3; ``range`` behaves identically here on both.)
for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))

# Learns best fit is W: [0.1], b: [0.3]
fro
m view import CView from template_view import CTemplateView from form_view import
CFormView
ticalLayout.addLayout(self.horizontalLayout)
        self.verticalLayout.addWidget(self.plot_layout)
        self.verticalLayout.addWidget(self.data_info_label)

        # Re-draw whenever the user changes what should be plotted.
        self.checkBox_useCorrected.stateChanged.connect(self.set_use_corrected)
        self.comboBox_primarySelector.currentIndexChanged.connect(self.update_plot)
        self.comboBox_unitsSelector.currentIndexChanged.connect(self.update_plot)
        self.comboBox_traceSelector.currentIndexChanged.connect(self.update_plot)

        self.plot = self.plot_layout.addPlot()  # type: pg.PlotItem

        # Raw and corrected networks; each may be a single skrf.Network, a
        # list/tuple of Networks, or None.
        self._ntwk = None
        self._ntwk_corrected = None
        self._corrected_data_enabled = True
        self._use_corrected = False
        self.corrected_data_enabled = kwargs.get('corrected_data_enabled', True)

        self.plot.addLegend()
        self.plot.showGrid(True, True)
        self.plot.setLabel("bottom", "frequency", units="Hz")
        self.last_plot = "rectangular"  # either "rectangular" or "smith"

    def get_use_corrected(self):
        return self._use_corrected

    def set_use_corrected(self, val):
        # Qt check-box states: 0 = unchecked, 1 = partially, 2 = checked.
        if val in (1, 2):
            self._use_corrected = True
        else:
            self._use_corrected = False
        self.update_plot()

    use_corrected = property(get_use_corrected, set_use_corrected)

    @property
    def ntwk(self):
        return self._ntwk

    @ntwk.setter
    def ntwk(self, ntwk):
        if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
            self.set_trace_items(ntwk)
            self._ntwk = ntwk
            self.update_plot()
        else:
            raise TypeError("must set to skrf.Network, list of Networks, or None")

    @property
    def ntwk_corrected(self):
        return self._ntwk_corrected

    @ntwk_corrected.setter
    def ntwk_corrected(self, ntwk):
        if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
            self.set_trace_items(ntwk)
            self._ntwk_corrected = ntwk
            self.update_plot()
        else:
            raise TypeError("must set to skrf.Network, list of Networks, or None")

    @property
    def corrected_data_enabled(self):
        return self._corrected_data_enabled

    @corrected_data_enabled.setter
    def corrected_data_enabled(self, enabled):
        # Disabling also forces "use corrected" off and greys out the box.
        if enabled is True:
            self._corrected_data_enabled = True
            self.checkBox_useCorrected.setEnabled(True)
        else:
            self._corrected_data_enabled = False
            self._use_corrected = False
            self.checkBox_useCorrected.setEnabled(False)

    def set_networks(self, ntwk, ntwk_corrected=None):
        # Set both networks at once with a single redraw at the end.
        if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
            self._ntwk = ntwk
            self.set_trace_items(self._ntwk)
            if ntwk is None:
                # Clearing the raw network clears the corrected one too.
                self._ntwk_corrected = None
                self.set_trace_items(self._ntwk)
                return
        else:
            raise TypeError("must set to skrf.Network, list of Networks, or None")

        if ntwk_corrected is None or isinstance(ntwk_corrected, skrf.Network) or type(ntwk_corrected) in (list, tuple):
            self._ntwk_corrected = ntwk_corrected
        else:
            raise TypeError("must set to skrf.Network, list of Networks, or None")

        self.update_plot()

    def _calc_traces(self):
        # Map the selector index to an (m, n) S-parameter pair; index 0
        # means "all traces".
        trace = self.comboBox_traceSelector.currentIndex()
        n_ = m_ = 0
        if trace > 0:
            mn = trace - 1
            nports = int(sqrt(self.comboBox_traceSelector.count() - 1))
            m_ = mn % nports
            n_ = int((mn - mn % nports) / nports)
        return m_, n_, trace

    def reset_plot(self, smith=False):
        # Clear the plot and, when switching to/from a Smith chart, rebuild
        # the plot item with the appropriate grid, aspect and ranges.
        self.plot.clear()

        if not smith and self.last_plot == "smith":
            self.plot.setAspectLocked(False)
            self.plot.autoRange()
            self.plot.enableAutoRange()
            self.plot.setLabel("bottom", "frequency", units="Hz")

        if smith and not self.last_plot == "smith":
            self.last_plot = "smith"
            self.ZGrid = smith_chart.gen_z_grid()
            self.s_unity_circle = smith_chart.gen_s_unity_circle()
            self.plot_layout.removeItem(self.plot)
            self.plot = self.plot_layout.addPlot()
            self.plot.setAspectLocked()
            self.plot.setXRange(-1, 1)
            self.plot.setYRange(-1, 1)

        if smith:
            self.plot.addItem(self.s_unity_circle)
            self.plot.addItem(self.ZGrid)
        if not smith:
            self.plot.setLabel("left", "")
        self.plot.setTitle(None)
        # Remove any stale legend before installing a fresh one.
        legend = self.plot.legend
        if legend is not None:
            legend.scene().removeItem(legend)
        self.plot.legend = None
        self.plot.addLegend()

    def clear_plot(self):
        self._ntwk = None
        self._ntwk_corrected = None
        self._ntwk_list = None
        self.reset_plot()

    def set_trace_items(self, ntwk=None):
        # Repopulate the trace selector ("all", "S11", "S21", ...) while
        # suppressing change signals, keeping the selection when possible.
        self.comboBox_traceSelector.blockSignals(True)
        current_index = self.comboBox_traceSelector.currentIndex()
        nports = 0

        if isinstance(ntwk, skrf.Network):
            nports = ntwk.nports
        elif type(ntwk) in (list, tuple):
            # For a collection, size the selector for the largest network.
            for n in ntwk:
                if n.nports > nports:
                    nports = n.nports

        self.comboBox_traceSelector.clear()
        self.comboBox_traceSelector.addItem("all")
        for n in range(nports):
            for m in range(nports):
                self.comboBox_traceSelector.addItem("S{:d}{:d}".format(m + 1, n + 1))

        if current_index <= self.comboBox_traceSelector.count():
            self.comboBox_traceSelector.setCurrentIndex(current_index)
        else:
            self.comboBox_traceSelector.setCurrentIndex(0)
        self.comboBox_traceSelector.blockSignals(False)

    def graph_clicked(self, ev):
        """
        :type ev: pg.GraphicsScene.mouseEvents.MouseClickEvent
        :return:
        """
        xy = self.plot.vb.mapSceneToView(ev.scenePos())
        if not ev.isAccepted():
            # Click on empty canvas: report the coordinates under the cursor
            # (converted to impedance on a Smith chart).
            if "smith" in self.comboBox_primarySelector.currentText().lower():
                S11 = xy.x() + 1j * xy.y()
                Z = (1 + S11) / (1 - S11)
                self.data_info_label.setText(
                    "Sre: {:g}, Sim: {:g} - R: {:g}, X: {:g}".format(xy.x(), xy.y(), Z.real, Z.imag))
            else:
                self.data_info_label.setText("x: {:g}, y: {:g}".format(xy.x(), xy.y()))
        elif isinstance(ev.acceptedItem, pg.PlotCurveItem):
            # Click on a curve: report the nearest data point on that curve.
            curve = ev.acceptedItem  # type: pg.PlotCurveItem
            spoint = xy.x() + 1j * xy.y()
            sdata = curve.xData + 1j * curve.yData
            index = np.argmin(np.abs(sdata - spoint))
            frequency = curve.ntwk.frequency.f_scaled[index]
            S11 = curve.xData[index] + 1j * curve.yData[index]
            Z = (1 + S11) / (1 - S11)
            self.data_info_label.setText(
                "Freq: {:g} ({:s}), S(re): {:g}, S(im): {:g} - R: {:g}, X: {:g}".format(
                    frequency, curve.ntwk.frequency.unit, S11.real, S11.imag, Z.real, Z.imag))

    def _plot_attr(self, ntwk, attr, colors, trace, n_, m_):
        # Plot the requested attribute for every (or one selected) S-parameter.
        for n in range(ntwk.s.shape[2]):
            for m in range(ntwk.s.shape[1]):
                if trace > 0:
                    # A specific trace was selected; skip all others.
                    if not n == n_ or not m == m_:
                        continue
                c = next(colors)

                label = ntwk.name
                param = "S{:d}{:d}".format(m + 1, n + 1)
                if ntwk.s.shape[1] > 1:
                    label += " - " + param

                if hasattr(ntwk, attr):
                    s = getattr(ntwk, attr)
                    if "db" in attr:
                        # dB data containing -inf cannot be drawn; add an
                        # empty item so colours stay consistent.
                        splot = pg.PlotDataItem(pen=pg.mkPen(c), name=label)
                        if not np.any(s[:, m, n] == -np.inf):
                            splot.setData(ntwk.f, s[:, m, n])
                        self.plot.addItem(splot)
                    else:
                        self.plot.plot(ntwk.f, s[:, m, n], pen=pg.mkPen(c), name=label)
                else:
                    s = getattr(ntwk, param.lower(
lass " "to just what you are wearing, but you sequence much faster in a combat turn. You have no natural Armor " "Class (Armor Class is therefore 0 regardless of Agility). You must wear armor to get an Armor Class.Your " "sequence gets a 5 point bonus.", ALL_RACES), Trait("Heavy Handed", "You swing harder, not better. Your attacks are very brutal, but lack finesse. You rarely cause a good " "critical hit, but you always do more melee damage. You get a 4 point bonus to Melee Damage, but your " "critical hits do 30% less damage, and are 30% less likely to cripple a limb or cause unconsciousness.", ALL_RACES), Trait("Fast Shot", "You don't have time for a targeted attack, because you attack faster than normal people. It costs you one " "less action point to use a weapon. You cannot perform targeted shots, but all weapons take one less action " "point to use. Note that the Fast Shot trait has no effect on HtH or Melee attacks. Animals cannot choose " "this trait.", ALL_RACES - ANIMALS), Trait("Bloody Mess", "By some strange twist of fate, people around you die violently. You always see the worst way a person can " "die. This does not mean you kill them any faster or slower, but when they do die, it will be dramatic. " "Just how dramatic is up to the Gamemaster.", ALL_RACES), Trait("Jinxed", "The good thing is that everyone around you has more critical failures in combat. The bad thing is: so do " "you! If you, a member of your party, or a non-player character have a failure in combat, there is a " "greater likelihood the failure will be upgraded (downgraded?) to a critical failure. Critical failures are " "bad: weapons explode, you may hit the wrong target, you could lose part of your turn, or any number of bad " "things. Failures are 50% more likely to become critical failures around the character or anyone else in " "combat.", ALL_RACES), Trait("Good Natured", "You studied less-combative skills as you were growing up. 
Your combat skills start at a lower level, but " "First Aid, Doctor, Speech, and Barter are substantially improved. Those skills get a 20% bonus. You get a " "10% penalty to starting combat skills (Small Guns, Big Guns, Energy Weapons, Unarmed, and Melee Weapons). " "This is a one-time bonus. Animals and robots cannot choose this trait.", ALL_RACES - ANIMALS - ROBOTS), Trait("Chem Reliant", "You are more easily addicted to chems. Your chance to be addicted is twice normal, but you recover in half " "the time from their ill effects. Robots cannot choose this trait.", ALL_RACES - ROBOTS), Trait("Chem Resistant", "Chems only effect you half as long as normal, but your chance to be addicted is only 50% the normal amount. " "Robots cannot choose this trait.", ALL_RACES - ROBOTS), Trait("Night Person", "As a night-time person, you are more awake when the sun goes down. Your Intelligence and Perception are " "improved at night but are dulled during the day. You get a 1 point penalty to these Statistics from 0601 " "to 1800, and a 1 point bonus to these Stats from 1801 to 0600. Robots cannot choose this trait. Note that " "the bonus cannot take IN and PE above the character’s racial maximum or below the character’s racial " "minimum.", ALL_RACES), Trait("Skilled", "Since you spend more time improving your skills than a normal person, you gain more skill points. The " "tradeoff is that you do not gain as many extra abilities. You will gain a perk at one level higher than " "normal. For example, if you normally gained a perk every 4 levels, you would now gain a perk every 5 " "levels. You will get an additional 5 skill points per new experience level, and a one-time bonus of +10% " "to your skills when you begin the game. Animals and robots cannot choose this trait.", ALL_RACES - ANIMALS - ROBOTS), Trait("Gifted", "You have more innate abilities than most, so you have not spent as much time honing your skills. 
Your " "statistics are better than the average person, but your skills are lacking. All Stats get a 1- point " "bonus, but all skills get a 10% penalty and you receive 5 less Skill Points per level. Robots cannot " "choose this trait.", ALL_RACES - ROBOTS), Trait("Sex Appeal", "This trait increases your chances of having a good reaction with members of the opposite sex. " "Unfortunately, this trait tends to annoy members of your sex. Jealous twits. When interacting with members " "of the opposite sex, you gain a 1 point bonus to Charisma for reactions only. When making Speech and Barter " "rolls, you gain a 40% bonus for each. When interacting with members of the same sex, you have a 1 point " "penalty to Charisma for reactions only and have a 40% penalty to both Speech and Barter rolls. Only humans " "can choose this trait.", ["Human"]), Trait("Glowing One", "Extreme radiation exposure has left you glowing in the dark. Your glow eliminates modifiers from light in " "combat for both you and your enemies. In addition, you gain a +50% bonus to Radiation Resistance, but " "everyone around you takes 10 rads per hour (see Radiation under Damage and Death, below). Only Ghouls " "can choose this trait.", ["Ghoul"]), Trait("Tech Wizard", "You spent your formative years hunched over a bench learning all about the way things work. The trouble " "is that you’ve ruined your eyes! You get a +15% bonus to Science, Repair, and Lockpick skills, but you " "lose 1 Perception. Deathclaws and Dogs cannot choose this trait.", ALL_RACES - ANIMALS, attr_mod=-1, attr_name="Perception"), Trait("Fear the Reaper", "You have cheated death! You gain perks as if you were a human, but you are now on death’s short list. " "This means that once a month, you must roll against Luck or else drop dead. Only Ghouls ca
n choose this " "trait.", ["Ghoul"]), Trait("Vat Skin", "Other people find you hideous to behold and disgusting to smell after your “dip” in the FEV vats. " "The good news is that you gain a +10 bonus to your Armor Class thanks to your extra-tough skin. The bad " "news is that everyone within ten hexes of your location, friend and foe, suffers a 1-point penalty to " "Perception (you are unaffected). Only Mutants c
an choose this trait.", ["Half Mutant", "Super Mutant"], attr_mod=-1, attr_name="Perception"), Trait("Ham Fisted", "Genetic engineering – or dumb luck – has endowed you with huge hands. You get a “free” tag skill in " "Unarmed, but you suffer a -20% penalty to Small Guns, First Aid, Doctor, Repair, Science, and Lockpick " "Skills (these numbers cannot go below 0%). Only Mutants can choose this trait.", ["Half Mutant", "Super Mutant"]), Trait("Domesticated", "You have undergone extensive house training and have developed an above average Intelligence. Your IN is " "raised by 1, and can even go above your racial maximum, but you get a –2 penalty to Melee Damage. Only " "Deathclaws and Dogs can choose this trait.", ANIMALS, attr_mod=1, attr_name="Intelligence"), Trait("Rabid", "You are a half-crazed, feral killing machine. You are not affected by crippled limbs (blindness still " "affects you normally), and every time you kill an opponent in combat, you get 5 more APs that round. " "Chems, including stimpaks, have no effect on you. Only Deathclaws and Dogs can choose this
import os
from flask import current_app
from flask.cli import FlaskGroup, run_command
from opsy.db import db
from opsy.app import create_app, create_scheduler
from opsy.utils import load_plugins

# Default config path: ./opsy.ini relative to the current working directory.
DEFAULT_CONFIG = '%s/opsy.ini' % os.path.abspath(os.path.curdir)


def create_opsy_app(info):
    # App factory used by the FlaskGroup below; ``info`` is the ScriptInfo
    # object Flask's CLI passes in (unused here).
    return create_app(config=os.environ.get('OPSY_CONFIG', DEFAULT_CONFIG))


cli = FlaskGroup(create_app=create_opsy_app,  # pylint: disable=invalid-name
                 add_default_commands=False,
                 help='The Opsy management cli.')
cli.add_command(run_command)


@cli.command('run-scheduler')
def run_scheduler():
    """Run the scheduler."""
    scheduler = create_scheduler(current_app)
    try:
        current_app.logger.info('Starting the scheduler')
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        # Shut down cleanly on Ctrl-C / interpreter exit.
        scheduler.shutdown()
        current_app.logger.info('Stopping the scheduler')


@cli.command('shell')
def shell():
    """Run a shell in the app context."""
    # NOTE(review): _app_ctx_stack is a private Flask API - confirm the
    # pinned Flask version still exposes it.
    from flask.globals import _app_ctx_stack
    banner = 'Welcome to Opsy!'
    app = _app_ctx_stack.top.app
    shell_ctx = {'create_app': create_app,
                 'create_scheduler': create_scheduler,
                 'db': db}
    # Let each plugin contribute names to the shell namespace.
    for plugin in load_plugins(current_app):
        plugin.register_shell_context(shell_ctx)
    shell_ctx.update(app.make_shell_context())
    try:
        # Prefer IPython when available, fall back to the stdlib REPL.
        from IPython import embed
        embed(user_ns=shell_ctx, banner1=banner)
        return
    except ImportError:
        import code
        code.interact(banner, local=shell_ctx)


@cli.command('init-cache')
def init_cache():
    """Drop everything in cache database and rebuild the schema."""
    current_app.logger.info('Creating cache database')
    db.drop_all(bind='cache')
    db.create_all(bind='cache')
    db.session.commit()


def main():
    # Register plugin CLI commands inside an app context, then dispatch.
    with create_opsy_app(None).app_context():
        for plugin in load_plugins(current_app):
            plugin.register_cli_commands(cli)
    cli()
# -*- coding: UTF-8 -*-
from HowOldWebsite.estimators.estimator_sex import EstimatorSex
from HowOldWebsite.models import RecordSex

__author__ = 'Hao Yu'


def sex_estimate(database_face_array, feature_jar):
    """Predict the sex of each detected face and build matching DB records.

    Returns a ``(success, records)`` pair; on any failure ``success`` is
    ``False`` and ``records`` may be ``None``.
    """
    ok = False
    records = None
    try:
        predictions = __do_estimate(feature_jar, len(database_face_array))
        records = __do_save_to_database(database_face_array, predictions)
        ok = True
    except Exception:
        # Best-effort: estimation failures are reported through the success
        # flag rather than propagated.
        pass
    return ok, records


def __do_estimate(feature_jar, n_faces):
    """Run the sex classifier on the combined, reduced features."""
    combined = EstimatorSex.feature_combine(feature_jar)
    reduced = EstimatorSex.feature_reduce(combined)
    return EstimatorSex.estimate(reduced)


def __do_save_to_database(database_face, sex):
    """Pair each face with its predicted sex as (unsaved) RecordSex rows."""
    return [RecordSex(original_face=database_face[i], value_predict=sex[i])
            for i in range(len(database_face))]
from .
import cmis_backend from . import ir_model_
fields
__author__ = 'suwelack'

from msml.io.mapper.base_mapping import *
import msml.model.generated.msmlScene as mod
import msml.model.generated.abaqus as ab
import msml.model.generated.msmlBase as mbase
from jinja2 import Template, Environment, PackageLoader


class Abaqus2StringMapping(BaseMapping):
    """Maps an in-memory Abaqus model tree to input-deck text.

    Each ``*_pre`` hook renders a Jinja2 template for its node type, appends
    the text to ``target`` and returns the child element type to visit next;
    each ``*_post`` hook returns ``(None, None)`` as no closing text is
    emitted.
    """

    def __init__(self):
        # Templates live in the package's templates/ directory.
        self._env = Environment(
            keep_trailing_newline=False, loader=PackageLoader('msml.io.mapper', 'templates'))

    @complete_map_pre(ab.InputDeck)
    def map_InputDeck_pre(self, element, parent_source, parent_target, source, target):
        template = self._env.get_template('InputDeck_template.html')
        returnStr = template.render()
        target.append(returnStr)
        # Descend into the part container next.
        return target, ab.PartContainer

    @complete_map_post(ab.InputDeck)
    def map_InputDeck_post(self, element, parent_source, parent_target, source, target):
        return None, None

    @complete_map_pre(ab.PartContainer)
    def map_PartContainer_pre(self, element, parent_source, parent_target, source, target):
        template = self._env.get_template('PartContainer_template.html')
        returnStr = template.render()
        target.append(returnStr)
        return target, ab.Part

    @complete_map_post(ab.PartContainer)
    def map_PartContainer_post(self, element, parent_source, parent_target, source, target):
        return None, None

    @complete_map_pre(ab.Part)
    def map_Part_pre(self, element, parent_source, parent_target, source, target):
        template = self._env.get_template('Part_template.html')
        returnStr = template.render(id=element.id)
        target.append(returnStr)
        return target, mod.MeshDataObject

    @complete_map_post(ab.Part)
    def map_Part_post(self, element, parent_source, parent_target, source, target):
        return None, None

    @complete_map_pre(mod.MeshDataObject)
    def map_MeshDataObject_pre(self, element, parent_source, parent_target, source, target):
        template = self._env.get_template('MeshDataObject_template.html')
        # Vertices are a flat x,y,z list: three values per vertex.
        # NOTE(review): under Python 3 this division yields a float - confirm
        # the template tolerates that (or that this runs under Python 2).
        vertNumber = len(element.value.vertices)/3
        returnStr = template.render(sizes=element.value.cell_sizes,
                                    connectivity=element.value.connectivity,
                                    vertices=element.value.vertices,
                                    vertNumber=vertNumber)
        target.append(returnStr)
        # Leaf node: nothing further to visit.
        return target, None

    @complete_map_post(mod.MeshDataObject)
    def map_MeshDataObject_post(self, element, parent_source, parent_target, source, target):
        return None, None

    @complete_map_pre(ab.Instance)
    def map_Instance_pre(self, element, parent_source, parent_target, source, target):
        template = self._env.get_template('Instance_template.html')
        returnStr = template.render(id=element.id, partId=element.partid)
        target.append(returnStr)
        return None, None

    @complete_map_post(ab.Instance)
    def map_Instance_post(self, element, parent_source, parent_target, source, target):
        return None, None

    @complete_map_pre(ab.Assembly)
    def map_Assembly_pre(self, element, parent_source, parent_target, source, target):
        template = self._env.get_template('Assembly_template.html')
        returnStr = template.render(id=element.id)
        target.append(returnStr)
        return None, None

    @complete_map_post(ab.Assembly)
    def map_Assembly_post(self, element, parent_source, parent_target, source, target):
        return None, None
"""Launcher shim that defers to the ``pw19`` package's entry point."""
from pw19 import __main__ as _entry_module

if __name__ == "__main__":
    _entry_module.main()
from flask import * import os from decorators import validate_account_and_region from aws import connect from sgaudit import get_reports, add_description from app.models import IPWhitelist elastatus = Blueprint('elastatus', __name__) @elastatus.route('/') def index(): default_account = current_app.config['CONFIG']['default_account'] default_region = current_app.config['CONFIG']['default_region'] default_service = curre
nt_app.config['CONFIG']['default_service'] return redirect(url_for('.'+default_service, account=default_account, region=default_region)) @elastatus.route('/<account>/<region>/ec2') @validate_account_and_region def ec2(account, region): c = connect(account, region, 'ec2') instances = c.get_only_instances() return render_template('ec2.html', r
egion=region, instances=instances) @elastatus.route('/<account>/<region>/ami') @validate_account_and_region def ami(account, region): c = connect(account,region, 'ec2') amis = c.get_all_images(owners = ['self']) ami_list = {ami: c.get_image(ami.id) for ami in amis} return render_template('ami.html', region=region, amis=ami_list) @elastatus.route('/<account>/<region>/ebs') @validate_account_and_region def ebs(account, region): c = connect(account, region, 'ebs') volumes = c.get_all_volumes() return render_template('ebs.html', volumes=volumes) @elastatus.route('/<account>/<region>/snapshots') @validate_account_and_region def snapshots(account, region): c = connect(account, region, 'ec2') snapshots = c.get_all_snapshots(owner='self') return render_template('snapshots.html', region=region, snapshots=snapshots) @elastatus.route('/<account>/<region>/autoscale') @validate_account_and_region def autoscale(account, region): c = connect(account, region, 'autoscale') asg = c.get_all_groups() return render_template('asg.html', region=region, asg=asg) @elastatus.route('/<account>/<region>/elb') @validate_account_and_region def elb(account, region): c = connect(account, region, 'elb') elb = c.get_all_load_balancers() return render_template('elb.html', region=region, elb=elb) @elastatus.route('/<account>/<region>/sg/<id>') @validate_account_and_region def sg(account, region, id): c = connect(account, region,'ec2') sg = c.get_all_security_groups(filters={'group-id': id}) sg = add_description(sg) return render_template('sg.html', region=region, sg=sg) @elastatus.route('/<account>/<region>/elasticache') @validate_account_and_region def elasticache(account, region): c = connect(account, region, 'elasticache') clusters = c.describe_cache_clusters(show_cache_node_info=True) clusters = clusters['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] return render_template('elasticache.html', region=region, clusters=clusters) 
@elastatus.route('/<account>/<region>/route53')
def route53(account, region):
    """List Route53 hosted zones and their resource record sets.

    NOTE(review): unlike the routes above, this group of routes is missing the
    @validate_account_and_region decorator -- confirm whether intentional.
    """
    # connect() apparently returns an iterable here: first element is the
    # connection, second the hosted-zone collection -- TODO confirm.
    c = connect(account, region, 'route53')
    c = list(c)
    conn = c.pop(0)
    d = list()
    r = list()
    for hzitem in c[0]:
        d.append(hzitem)
        try:
            records = conn.get_all_rrsets(hzitem)
            paginate = True
            # NOTE(review): this loop never fetches the next page -- it
            # re-iterates the same (already exhausted) `records` result set
            # whenever next_token is truthy; the follow-up get_all_rrsets call
            # with the pagination tokens is missing.
            while paginate:
                for item in records:
                    r.append(item)
                paginate = records.next_token
        # NOTE(review): bare except silently swallows all errors (including
        # KeyboardInterrupt); `domain` is assigned but never used.
        except:
            domain = None
    # NOTE(review): keeping every other entry -- presumably drops alternating
    # non-domain items from the zone listing; verify against connect().
    d = d[::2]
    return render_template('r53.html', domains=d, records=r)


@elastatus.route('/<account>/<region>/iam')
def iam(account, region):
    """List IAM users."""
    c = connect(account, region, 'iam')
    users = c.get_all_users()
    users = users['list_users_response']['list_users_result']['users']
    return render_template('iam.html', users=users)


@elastatus.route('/<account>/<region>/rds')
def rds(account, region):
    """List RDS database instances."""
    c = connect(account, region, 'rds')
    db_instances = c.get_all_dbinstances()
    return render_template('rds.html', db_instances=db_instances)


@elastatus.route('/<account>/<region>/dynamodb')
def dynamodb(account, region):
    """List DynamoDB tables with their full descriptions."""
    c = connect(account, region, 'dynamodb')
    tables = c.list_tables()
    if tables:
        tables = [c.describe_table(x) for x in tables]
    else:
        tables = list()
    return render_template('dynamodb.html', tables=tables)


@elastatus.route('/<account>/<region>/cloudformation')
def cloudformation(account, region):
    """List CloudFormation stacks."""
    c = connect(account, region, 'cloudformation')
    stacks = c.describe_stacks()
    return render_template('cloudformation.html', stacks=stacks)


@elastatus.route('/<account>/<region>/cloudformation/<stack_name>.json')
def get_template(account, region, stack_name):
    """Download a stack's template body as an attachment named <stack>.json."""
    c = connect(account, region, 'cloudformation')
    template = c.get_template(stack_name)
    template = template["GetTemplateResponse"]["GetTemplateResult"]["TemplateBody"]
    response = make_response(template)
    response.headers["Content-Disposition"] = "attachment; filename=%s.json" % stack_name
    return response


@elastatus.route('/<account>/<region>/cloudwatch')
def cloudwatch(account, region):
    """Static CloudWatch page (no data fetched server-side)."""
    return render_template('cloudwatch.html')


@elastatus.route('/<account>/<region>/sns')
def sns(account, region):
    """List SNS subscriptions."""
    c = connect(account, region, 'sns')
    subscriptions = c.get_all_subscriptions()
    subscriptions = subscriptions['ListSubscriptionsResponse']['ListSubscriptionsResult']['Subscriptions']
    return render_template('sns.html', subscriptions=subscriptions)


@elastatus.route('/<account>/<region>/redshift')
def redshift(account, region):
    """List Redshift clusters."""
    c = connect(account, region, 'redshift')
    clusters = c.describe_clusters()
    clusters = clusters['DescribeClustersResponse']['DescribeClustersResult']['Clusters']
    return render_template('redshift.html', clusters=clusters)


@elastatus.route('/<account>/<region>/sqs')
def sqs(account, region):
    """List SQS queues as attribute dicts, each augmented with its URL."""
    c = connect(account, region, 'sqs')
    queues = list()
    all_queues = c.get_all_queues()
    for q in all_queues:
        # q.id starts with '/', so simple concatenation yields a valid URL.
        url = 'https://sqs.%s.amazonaws.com%s' % (region, q.id)
        attributes = q.get_attributes()
        attributes['url'] = url
        queues.append(attributes)
    return render_template('sqs.html', queues=queues)


@elastatus.route('/<account>/<region>/sgaudit')
def sgaudit(account, region):
    """Render the security-group audit report."""
    c = connect(account, region, 'ec2')
    # NOTE(review): empty_groups is computed but never used or rendered.
    report, empty_groups = get_reports(c)
    return render_template('sgaudit.html', report=report)
# NOTE(review): truncated fragment -- interior of a method whose ``def`` line
# is outside this chunk.
        if len(groups) > 1:
            if all(x == 1 for x in map(len, groups.values())):
                # All groups are different, this is an simpler case
                print(ut.repr2(groups, nl=3))
            else:
                # Need to handle the multi-item groups first
                pass

    def consolodate_duplicates(self):
        # NOTE(review): method name misspelled ("consolodate"); it is part of
        # the public interface so it is not renamed here.
        """Group files that share a basename, then sub-group by content uuid.

        Currently only computes the grouping (and the unflat_take result is
        discarded) -- no files are moved or deleted.
        """
        fnames = map(basename, self.rel_fpath_list)
        duplicate_map = ut.find_duplicate_items(fnames)
        groups = []
        for dupname, idxs in duplicate_map.items():
            uuids = self.get_prop('uuids', idxs)
            unique_uuids, groupxs = ut.group_indices(uuids)
            groups.extend(ut.apply_grouping(idxs, groupxs))
        multitons = [g for g in groups if len(g) > 1]
        # singletons = [g for g in groups if len(g) <= 1]
        # NOTE(review): result is discarded -- dead statement?
        ut.unflat_take(list(self.fpaths()), multitons)

    def duplicates(self):
        """Return lists of relative paths that share the same content uuid."""
        uuid_to_dupxs = ut.find_duplicate_items(self.uuids)
        dup_fpaths = [ut.take(self.rel_fpath_list, idxs)
                      for idxs in uuid_to_dupxs.values()]
        return dup_fpaths

    def nbytes(self):
        """Total size in bytes of all tracked files."""
        return sum(self.attrs['nbytes'])

    def ext_hist(self):
        """Histogram of file extensions."""
        return ut.dict_hist(self.attrs['ext'])

    def fpaths(self):
        """Absolute paths of all tracked files."""
        return self._abs(self.rel_fpath_list)

    def __nice__(self):
        # Used by the utool NiceRepr convention for __repr__/__str__.
        return self.dpath

    def isect_info(self, other):
        """Print and return set comparisons of relative paths vs ``other``."""
        set1 = set(self.rel_fpath_list)
        set2 = set(other.rel_fpath_list)
        set_comparisons = ut.odict([
            ('s1', set1),
            ('s2', set2),
            ('union', set1.union(set2)),
            ('isect', set1.intersection(set2)),
            ('s1 - s2', set1.difference(set2)),
            # NOTE(review): bug -- set1.difference(set1) is always empty;
            # almost certainly meant set2.difference(set1).
            ('s2 - s1', set1.difference(set1)),
        ])
        stat_stats = ut.map_vals(len, set_comparisons)
        print(ut.repr4(stat_stats))
        return set_comparisons
        # NOTE(review): everything below is unreachable (after return) and
        # guarded by ``if False`` -- kept as scratch code.
        if False:
            idx_lookup1 = ut.make_index_lookup(self.rel_fpath_list)
            idx_lookup2 = ut.make_index_lookup(other.rel_fpath_list)
            uuids1 = ut.take(self.uuids, ut.take(idx_lookup1, set_comparisons['union']))
            uuids2 = ut.take(other.uuids, ut.take(idx_lookup2, set_comparisons['union']))
            uuids1 == uuids2

    def make_merge_bash_script(self, dest):
        """Build (and print) a bash ``find … -exec mv`` command that would
        flatten-move all files under this dir into ``dest``. Nothing is run."""
        import subprocess
        # find $SOURCE_DIR -name '*' -type f -exec mv -f {} $TARGET_DIR \;
        # bash_cmd = subprocess.list2cmdline(['mv', '--verbose', join(self.dpath, '*'), dest.dpath])
        bash_cmd = subprocess.list2cmdline(
            ['find', self.dpath, '-name', '\'*\'', '-type', 'f', '-exec',
             'mv', '-f', '{}', dest.dpath, '\;'])
        print(bash_cmd)
        return bash_cmd
        # # import shutil
        # move_tasks = [
        #     (join(self.dpath, rel_fpath), join(dest.dpath, rel_fpath))
        #     for rel_fpath in self.rel_fpath_list
        # ]
        # for src, dst in move_tasks:
        #     if exists(dst):
        #         raise Exception('dont overwrite yet')
        # bash_script = '\n'.join([subprocess.list2cmdline(('mv', src, dst)) for src, dst in move_tasks])
        # return bash_script

    def merge_into(self, dest):
        """Move every tracked file into ``dest`` preserving relative paths.

        Refuses to overwrite: raises if any destination already exists.
        Returns a list of 0/1 flags (1 = OSError during that move) and asserts
        that all moves succeeded.
        """
        import shutil
        print('Preparing merge %r into %r' % (self, dest))
        # import shutil
        move_tasks = [
            (join(self.dpath, rel_fpath), join(dest.dpath, rel_fpath))
            for rel_fpath in self.rel_fpath_list
        ]
        # Pre-flight: fail before moving anything if a collision exists.
        for src, dst in move_tasks:
            if exists(dst):
                raise Exception('dont overwrite yet')

        def trymove(src, dst):
            # Best-effort move; 1 signals failure so the batch can continue.
            try:
                shutil.move(src, dst)
            except OSError:
                return 1
            return 0
        error_list = [
            trymove(src, dst)
            for (src, dst) in ut.ProgIter(move_tasks, lbl='moving')
        ]
        assert not any(error_list), 'error merging'
        return error_list

    def find_empty_dirs(self):
        """ find dirs with only dirs in them """
        # Count, for every known directory, how many tracked files live
        # somewhere beneath it; zero-count dirs contain no files at all.
        self.rel_dpath_list = ut.glob(self.dpath, '*', recursive=True,
                                      fullpath=False, with_dirs=True,
                                      with_files=False)
        counts = {dpath: 0 for dpath in self.rel_dpath_list}
        for fpath in self.rel_fpath_list:
            tmp = dirname(fpath)
            while tmp:
                counts[tmp] += 1
                tmp = dirname(tmp)
        empty_dpaths = [dpath for dpath, count in counts.items() if count == 0]
        return empty_dpaths

    def delete_empty_directories(self):
        """
        ut.ensuredir(self.dpath + '/foo')
        ut.ensuredir(self.dpath + '/foo/bar')
        ut.ensuredir(self.dpath + '/foo/bar/baz')
        self.delete_empty_directories()
        """
        import os
        # for root, dirs, files in os.walk(self.dpath, topdown=False):
        #     if len(files) == 0 and len(os.listdir(root)) == 0:
        #         print('Remove %s' % root)
        #         os.rmdir(root)
        if True:
            # Find all directories with no files
            subdirs = ut.glob(self.dpath, '*', recursive=True, fullpath=False,
                              with_files=False, with_dirs=True)
            freq = {d: 0 for d in subdirs + ['']}
            for path in self.rel_fpath_list:
                while True:
                    path = dirname(path)
                    freq[path] += 1
                    if not path:
                        break
            to_delete = [k for k, v in freq.items() if v == 0]
            # Remove deep dirs first
            to_delete = ut.sortedby(to_delete, map(len, to_delete))[::-1]
            for d in to_delete:
                dpath = join(self.dpath, d)
                print('Remove %s' % dpath)
                # NOTE(review): os.rmdir raises if the dir still holds empty
                # subdirs not in to_delete -- relies on deepest-first order.
                os.rmdir(dpath)


def turtles2():
    """
    from utool.experimental.file_organizer import *
    """
    # Ad-hoc driver: populate a single hard-coded source directory.
    self = SourceDir('/raid/raw/RotanTurtles/Roatan HotSpotter Nov_21_2016')
    self.populate()
""" Goal: Organize media on a computer over multiple drives. Fix duplicate strategy: make graph where each file/directory is a node make a directed edge whenever <path1> -- contains --> <path
2> Find all files with the same contents make an undirected edge whever <file1.uuid> == <file2.uuid> For each pair of the same files we need to assign them both to either directory 1 or directory 2. Maybe do a min-st cut between directory 1 and directory 2. for each pair of directories with the same file. In this case dir1 should be merged into dir2 (cut all edges from dir1 to its files) dir1 dir2 ---- ---- a.jpg --- a.jpg b.jpg --- b.jpg c.jpg --- c.jpg d.jpg e.jpg We also need to know the association of each file with every other file in its directory (or more preciely every other file in the system, but we can ignore most setting thir association to 0) """ def turtles(): source_dpaths = sorted(ut.glob('/raid/raw/RotanTurtles/', '*', recusrive=False, with_dirs=True, with_files=False)) sources = [SourceDir(dpath) for dpath in source_dpaths] for self in ut.ProgIter(sources, label='populate'): self.populate() import fnmatch del_ext = set(['.npy', '.flann', '.npz']) for self in ut.ProgIter(sources, label='populate'): flags = [ext in del_ext for ext in self.attrs['ext']] to_delete = ut.compress(list(self.fpaths()), flags) ut.remove_file_list(to_delete) flags = [fnmatch.fnmatch(fpath, '*/_hsdb/computed/chips/*.png') for fpath in self.rel_fpath_list] to_delete = ut.compress(list(self.fpaths()), flags) ut.remove_file_list(to_delete) self.populate() for self in ut.ProgIter(sources, label='del empty'): self.populate() self.delete_empty_directories() print(ut.byte_str2(sum([self.nbytes() for self in sources]))) # [ut.byte_str2(self.nbytes()) for self in sources] # import numpy as np # num_isect = np.zeros((len(sources), len(sources))) # num_union = np.zeros((len(sourc
"""Tests of the builder registry.""" import unittest from bs4 import BeautifulSoup from bs4.builder import ( builder_registry as registry, HTMLParserTreeBuilder, TreeBuilderRegistry, ) try: from bs4.builder import HTML5TreeBuilder HTML5LIB_PRESENT = True except ImportError: HTML5LIB_PRESENT = False try: from bs4.builder import ( LXMLTreeBuilderForXML, LXMLTreeBuilder, ) LXML_PRESENT = True except ImportError: LXML_PRESENT = False class BuiltInRegistryTest(unittest.TestCase): """Test the built-in registry with the default builders registered.""" def test_combination(self): if LXML_PRESENT: self.assertEqual(registry.lookup('fast', 'html'), LXMLTreeBuilder) if LXML_PRESENT: self.assertEqual(registry.lookup('permissive', 'xml'), LXMLTreeBuilderForXML) self.assertEqual(registry.lookup('strict', 'html'), HTMLParserTreeBuilder) if HTML5LIB_PRESENT: self.assertEqual(registry.lookup('html5lib', 'html'), HTML5TreeBuilder) def test_lookup_by_markup_type(self): if LXML_PRESENT: self.assertEqual(registry.lookup('html'), LXMLTreeBuilder) self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML) else: self.assertEqual(registry.lookup('xml'), None) if HTML5LIB_PRESENT: self.assertEqual(registry.lookup('html'
), HTML5TreeBuilder) else: self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder) def test_named_library(self): if LXML_PRESENT: self.assertEqual(registry.lookup('lxml', 'xml'), LXMLTreeBuilderForXML) self.assertEqual(registry.lookup('lxml', 'html
'), LXMLTreeBuilder) if HTML5LIB_PRESENT: self.assertEqual(registry.lookup('html5lib'), HTML5TreeBuilder) self.assertEqual(registry.lookup('html.parser'), HTMLParserTreeBuilder) def test_beautifulsoup_constructor_does_lookup(self): # You can pass in a string. BeautifulSoup("", features="html") # Or a list of strings. BeautifulSoup("", features=["html", "fast"]) # You'll get an exception if BS can't find an appropriate # builder. self.assertRaises(ValueError, BeautifulSoup, "", features="no-such-feature") class RegistryTest(unittest.TestCase): """Test the TreeBuilderRegistry class in general.""" def setUp(self): self.registry = TreeBuilderRegistry() def builder_for_features(self, *feature_list): cls = type('Builder_' + '_'.join(feature_list), (object,), {'features' : feature_list}) self.registry.register(cls) return cls def test_register_with_no_features(self): builder = self.builder_for_features() # Since the builder advertises no features, you can't find it # by looking up features. self.assertEqual(self.registry.lookup('foo'), None) # But you can find it by doing a lookup with no features, if # this happens to be the only registered builder. 
self.assertEqual(self.registry.lookup(), builder) def test_register_with_features_makes_lookup_succeed(self): builder = self.builder_for_features('foo', 'bar') self.assertEqual(self.registry.lookup('foo'), builder) self.assertEqual(self.registry.lookup('bar'), builder) def test_lookup_fails_when_no_builder_implements_feature(self): builder = self.builder_for_features('foo', 'bar') self.assertEqual(self.registry.lookup('baz'), None) def test_lookup_gets_most_recent_registration_when_no_feature_specified(self): builder1 = self.builder_for_features('foo') builder2 = self.builder_for_features('bar') self.assertEqual(self.registry.lookup(), builder2) def test_lookup_fails_when_no_tree_builders_registered(self): self.assertEqual(self.registry.lookup(), None) def test_lookup_gets_most_recent_builder_supporting_all_features(self): has_one = self.builder_for_features('foo') has_the_other = self.builder_for_features('bar') has_both_early = self.builder_for_features('foo', 'bar', 'baz') has_both_late = self.builder_for_features('foo', 'bar', 'quux') lacks_one = self.builder_for_features('bar') has_the_other = self.builder_for_features('foo') # There are two builders featuring 'foo' and 'bar', but # the one that also features 'quux' was registered later. self.assertEqual(self.registry.lookup('foo', 'bar'), has_both_late) # There is only one builder featuring 'foo', 'bar', and 'baz'. self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'), has_both_early) def test_lookup_fails_when_cannot_reconcile_requested_features(self): builder1 = self.builder_for_features('foo', 'bar') builder2 = self.builder_for_features('foo', 'baz') self.assertEqual(self.registry.lookup('bar', 'baz'), None)
_backing_to_folder.""" m = self.mox m.StubOutWithMock(api.VMwareAPISession, 'vim') self._session.vim = self._vim m.StubOutWithMock(self._session, 'invoke_api') backing = FakeMor('VirtualMachine', 'my_back') folder = FakeMor('Folder', 'my_fol') task = FakeMor('Task', 'my_task') self._session.invoke_api(self._vim, 'MoveIntoFolder_Task', folder, list=[backing]).AndReturn(task) m.StubOutWithMock(self._session, 'wait_for_task') self._session.wait_for_task(task) m.ReplayAll() self._volumeops.move_backing_to_folder(backing, folder) m.UnsetStubs() m.VerifyAll() def test_init_conn_with_instance_and_backing(self): """Test initialize_connection with instance and backing.""" m = self.mox m.StubOutWithMock(self._driver.__class__, 'volumeops') self._driver.volumeops = self._volumeops m.StubOutWithMock(self._volumeops, 'get_backing') volume = FakeObject() volume['name'] = 'volume_name' volume['id'] = 'volume_id' volume['size'] = 1 connector = {'instance': 'my_instance'} backing = FakeMor('VirtualMachine', 'my_back') self._volumeops.get_backing(volume['name']).AndReturn(backing) m.StubOutWithMock(self._volumeops, 'get_host') host = FakeMor('HostSystem', 'my_host') self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host) datastore = FakeMor('Datastore', 'my_ds') resource_pool = FakeMor('ResourcePool', 'my_rp') m.StubOutWithMock(self._volumeops, 'get_dss_rp') self._volumeops.get_dss_rp(host).AndReturn(([datastore], resource_pool)) m.StubOutWithMock(self._volumeops, 'get_datastore') self._volumeops.get_datastore(backing).AndReturn(datastore) m.ReplayAll() conn_info = self._driver.initialize_connection(volume, connector) self.assertEqual(conn_info['driver_volume_type'], 'vmdk') self.assertEqual(conn_info['data']['volume'], 'my_back') self.assertEqual(conn_info['data']['volume_id'], 'volume_id') m.UnsetStubs() m.VerifyAll() def test_get_volume_group_folder(self): """Test _get_volume_group_folder.""" m = self.mox m.StubOutWithMock(self._driver.__class__, 'volumeops') 
self._driver.volumeops = self._volumeops datacenter = FakeMor('Datacenter', 'my_dc') m.StubOutWithMock(self._volumeops, 'get_vmfolder') self._volumeops.get_vmfolder(datacenter) m.StubOutWithMock(self._volumeops, 'create_folder') self._volumeops.create_folder(mox.IgnoreArg(), self._config.vmware_volume_folder) m.ReplayAll() self._driver._get_volume_group_folder(datacenter) m.UnsetStubs() m.VerifyAll() def test_init_conn_with_instance_and_backing_and_relocation(self): """Test initialize_connection with backing being relocated.""" m = self.mox m.StubOutWithMock(self._driver.__class__, 'volumeops') self._driver.volumeops = self._volumeops m.StubOutWithMock(self._volumeops, 'get_backing') volume = FakeObject() volume['name'] = 'volume_name' volume['id'] = 'volume_id' volume['size'] = 1 connector = {'instance': 'my_instance'} backing = FakeMor('VirtualMachine', 'my_back') self._volumeops.get_backing(volume['name']).AndReturn(backing) m.StubOutWithMock(self._volumeops, 'get_host') host = FakeMor('HostSystem', 'my_host') self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host) datastore1 = FakeMor('Datastore', 'my_ds_1') datastore2 = FakeMor('Datastore', 'my_ds_2') resource_pool = FakeMor('ResourcePool', 'my_rp') m.StubOutWithMock(self._volumeops, 'get_dss_rp') self._volumeops.get_dss_rp(host).AndReturn(([datastore1], resource_pool)) m.StubOutWithMock(self._volumeops, 'get_datastore') self._volumeops.get_datastore(backing).AndReturn(datastore2) m.StubOutWithMock(self._driver, '_get_folder_ds_summary') folder = FakeMor('Folder', 'my_fol') summary = FakeDatastoreSummary(1, 1, datastore1) size = 1 self._driver._get_folder_ds_summary(size, resource_pool, [datastore1]).AndReturn((folder, summary)) m.StubOutWithMock(self._volumeops, 'relocate_backing') self._volumeops.relocate_backing(backing, datastore1, resource_pool, host) m.StubOutWithMock(self._volumeops, 'move_backing_to_folder') self._volumeops.move_backing_to_folder(backing, folder) m.ReplayAll() conn_info = 
self._driver.initialize_connection(volume, connector) self.assertEqual(conn_info['driver_volume_type'], 'vmdk') self.assertEqual(conn_info['data']['volume'], 'my_back') self.assertEqual(conn_info['data']['volume_id'], 'volume_id') m.UnsetStubs() m.VerifyAll() def test_get_folder(self): """Test _get_folder.""" m = self.mox m.StubOutWithMock(self._volumeops, '_get_parent') self._volumeops._get_parent(mox.IgnoreArg(), 'Folder') m.ReplayAll() self._volumeops._get_folder(mox.IgnoreArg()) m.UnsetStubs() m.VerifyAll() def test_volumeops_clone_backing(self): """Test volumeops.clone_backing.""" m = self.mox m.StubOutWithMock(self._volumeops, '_get_parent') backing = FakeMor('VirtualMachine', 'my_back') folder = FakeMor('Folder', 'my_fol') self._volumeops._get_folder(backing).AndReturn(folder) m.StubOutWithMock(self._volumeops, '_get_clone_spec') name = 'name' snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap') datastore = FakeMor('Datastore', 'my_ds') self._volumeops._get_clone_spec(datastore, mox.IgnoreArg(), snapshot) m.StubOutWithMock(api.VMwareAPISession, 'vim') self._session.vim = self._vim m.StubOutWithMock(self._session, 'invoke_api') task = FakeMor('Task', 'my_task') self._session.invoke_api(self._vim, 'CloneVM_Task', backing, folder=f
older, name=name, spec=mox.IgnoreArg()).AndReturn(task) m.StubOutWithMock(self._session, 'wait_for_task') clone = FakeMor('Vi
rtualMachine', 'my_clone') task_info = FakeTaskInfo('success', clone) self._session.wait_for_task(task).AndReturn(task_info) m.ReplayAll() ret = self._volumeops.clone_backing(name, backing, snapshot, mox.IgnoreArg(), datastore) self.assertEqual(ret, clone) m.UnsetStubs() m.VerifyAll() def test_clone_backing_linked(self): """Test _clone_backing with clone type - linked.""" m = self.mox m.StubOutWithMock(self._driver.__class__, 'volumeops') self._driver.volumeops = self._volumeops m.StubOutWithMock(self._volumeops, 'clone_backing') volume = FakeObject() volume['name'] = 'volume_name' self._volumeops.clone_backing(volume['name'], mox.IgnoreArg(), mox.IgnoreArg(), volumeops.LINKED_CLONE_TYPE, mox.IgnoreArg()) m.ReplayAll() self._driver._clone_backing(volume, mox.IgnoreArg(), mox.IgnoreArg(), volumeops.LINKED_CLONE_TYPE) m.UnsetStubs() m.VerifyAll() def test_clone_backing_full(self): """Test _clone_backing with clone type - full.""" m = self.mox m.StubOutWithMock(self._driver.__class__, 'volumeops') self._driver.volumeops = self._volumeops m.
.rocstats( scores, labels )  # NOTE(review): truncated fragment -- tail of a function whose ``def`` line is outside this chunk.
    tpr = numpy.divide( tp, numpy.add( tp, fn ) )
    fpr = numpy.divide( fp, numpy.add( fp, tn ) )
    auroc = auc( fpr, tpr )
    confidence_surfaces = roc_ci.roc_surfaces( tp, fp, fn, tn, n=300 )
    plotROC( fpr, tpr, auroc, plot_title, plot, pdf_file, plotover, plotunder = lambda : roc_ci.plot_hulls( confidence_surfaces, invert_x = True ) )


def plotROCPDF(fpr, tpr,roc_auc, classifier_name,plot):
    """Write a single ROC figure (with fixed comparison markers for
    commercial tests) to a hard-coded PDF path.

    NOTE(review): output path is absolute and user-specific; `plot` is unused.
    The axis label '1 - Specificty' is a misspelled runtime string, left
    unchanged here.
    """
    with PdfPages('/Users/serge/Downloads/Summer/Presentation/q1_svm_cv.pdf') as pdf:
        fig= plt.figure(figsize=(8, 8))
        plt.grid()
        plt.plot(fpr, tpr, lw=2, label='AUC = %0.2f' % (roc_auc))
        # Chance diagonal.
        plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6))
        # Published operating points of commercial CRC screening tests.
        plt.plot(0.18,0.92,marker='o',label="Cologuard",markersize=10,linestyle="",markerfacecolor="k")
        plt.plot(0.05,0.74,marker='^',label="FIT",markersize=10,linestyle="",markerfacecolor="k")
        plt.plot(0.20,0.68,marker='s',label="Epi proColon",markersize=10,linestyle="",markerfacecolor="k")
        plt.plot(0.22,0.81,marker='p',label="SimplyPro Colon",markersize=10,linestyle="",markerfacecolor="k")
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.xticks(numpy.arange(0, 1.05, 0.1))
        plt.yticks(numpy.arange(0, 1.05, 0.1))
        plt.xlabel('1 - Specificty', fontsize=16)
        plt.ylabel('Sensitivity', fontsize=16)
        plt.title('Cross Validation ROC curve for '+classifier_name ,fontsize=16)
        plt.legend(loc="lower right",numpoints=1)
        pdf.savefig(fig)
        plt.close()


# Perform loo CV for all classifiers but SVMs
def generateROC(cv, classifier, features, labels, classifier_name, normal = None, plot = True, pdf_file = None, plotover = None ):
    """Leave-one-out CV using predict_proba; pools the per-fold positive-class
    probabilities and hands them to plotROCwithCRfromScores.

    normal: None | "log" | "scaled" -- optional feature preprocessing.
    NOTE(review): assumes each CV test fold has exactly one sample (LOO) --
    probas_[0,1] / labels[test] are scalar only in that case.
    """
    pool=numpy.zeros((len(labels), 2))
    normal_features=features
    if(normal=="log"):
        normal_features=numpy.log(normal_features)
    if(normal=="scaled"):
        scaler=preprocessing.StandardScaler()
        normal_features=scaler.fit_transform(normal_features)
    for i, (train, test) in enumerate(cv):
        classifier.fit(normal_features[train], labels[train])
        probas_ = classifier.predict_proba(normal_features[test])
        pool[i,0]=labels[test]
        pool[i,1]=probas_[0,1]
    plotROCwithCRfromScores( pool[:,1], [ x == 1 for x in pool[:,0] ], classifier_name, plot, pdf_file, plotover )


# Perform loo CV for all classifiers but SVMs
def generateROCcoef(cv, classifier, features, labels, classifier_name, normal=None,plot=True):
    """Same LOO CV as generateROC but also collects per-fold model
    coefficients; returns (coefs, roc_auc)."""
    pool=numpy.zeros((len(labels), 2))
    coefs=numpy.zeros((numpy.shape(features)[1], len(labels)))
    normal_features=features
    if(normal=="log"):
        normal_features=numpy.log(normal_features)
    if(normal=="scaled"):
        scaler=preprocessing.StandardScaler()
        normal_features=scaler.fit_transform(normal_features)
    for i, (train, test) in enumerate(cv):
        classifier.fit(normal_features[train], labels[train])
        coef= classifier.coef_
        for j in range(numpy.shape(features)[1]):
            coefs[j,i]=coef[0,j]
        probas_ = classifier.predict_proba(normal_features[test])
        pool[i,0]=labels[test]
        pool[i,1]=probas_[0,1]
    fpr, tpr, thresholds = roc_curve(pool[:,0], pool[:,1])
    roc_auc = auc(fpr, tpr)
    plotROC(fpr, tpr,roc_auc, classifier_name,plot)
    return (coefs,roc_auc)


# Perform loo CV for SVMs
def generateROCdf( cv, classifier, features, feature_names, labels, classifier_name, normal = None, plot = True, pdf_file = None, plotover = None):
    """LOO CV for margin classifiers: uses decision_function scores instead
    of probabilities. NOTE(review): feature_names is unused."""
    pool=numpy.zeros((len(labels), 2))
    normal_features=features
    if(normal=="log"):
        normal_features=numpy.log(normal_features)
    if(normal=="scaled"):
        scaler=preprocessing.StandardScaler()
        normal_features=scaler.fit_transform(normal_features)
    for i, (train, test) in enumerate(cv):
        classifier.fit(normal_features[train], labels[train])
        df = classifier.decision_function(normal_features[test])
        pool[i,0]=labels[test]
        pool[i,1]=df[0]
    plotROCwithCRfromScores( pool[:,1], [ x == 1 for x in pool[:,0] ], classifier_name, plot, pdf_file, plotover )


# Perform loo CV for SVMs
def generateROCdfcoef(cv, classifier, features, feature_names, labels, classifier_name, normal=None,plot=True):
    """decision_function LOO CV that also collects per-fold coefficients;
    returns (coefs, roc_auc)."""
    pool=numpy.zeros((len(labels), 2))
    coefs=numpy.zeros((len(feature_names), len(labels)))
    normal_features=features
    if(normal=="log"):
        normal_features=numpy.log(normal_features)
    if(normal=="scaled"):
        scaler=preprocessing.StandardScaler()
        normal_features=scaler.fit_transform(normal_features)
    for i, (train, test) in enumerate(cv):
        classifier.fit(normal_features[train], labels[train])
        coef= classifier.coef_
        for j in range(len(feature_names)):
            coefs[j,i]=coef[0,j]
        df = classifier.decision_function(normal_features[test])
        pool[i,0]=labels[test]
        pool[i,1]=df[0]
    fpr, tpr, thresholds = roc_curve(pool[:,0], pool[:,1])
    roc_auc = auc(fpr, tpr)
    plotROC(fpr, tpr,roc_auc, classifier_name,plot)
    return (coefs,roc_auc)


# Perform loo CV and get feature importance for random forests and extra trees
def generateROCTrees(cv, classifier, features, labels, classifier_name, normal=None,plot=True):
    """LOO CV for tree ensembles; collects per-fold feature_importances_ and
    returns the (n_features x n_folds) importance matrix."""
    feature_importance=numpy.zeros((numpy.shape(features)[1],len(labels)))
    pool=numpy.zeros((len(labels), 2))
    normal_features=features
    if(normal=="log"):
        normal_features=numpy.log(normal_features)
    if(normal=="scaled"):
        scaler=preprocessing.StandardScaler()
        normal_features=scaler.fit_transform(normal_features)
    for i, (train, test) in enumerate(cv):
        classifier.fit(normal_features[train], labels[train])
        importances = classifier.feature_importances_
        for j in range(numpy.shape(features)[1]):
            feature_importance[j,i]=importances[j]
        probas_ = classifier.predict_proba(normal_features[test])
        pool[i,0]=labels[test]
        pool[i,1]=probas_[0,1]
    fpr, tpr, thresholds = roc_curve(pool[:,0], pool[:,1])
    roc_auc = auc(fpr, tpr)
    plotROC(fpr, tpr,roc_auc, classifier_name,plot)
    return feature_importance


# Nested CV for Logistic Regression w/ l1 penalty
def nestedCVLR(features, labels, classifier_name, normal=None,plot=True):
    """Nested leave-one-out CV: the inner loop grid-searches C for an
    l1-penalized LogisticRegression; the outer loop scores held-out samples.

    NOTE(review): this function is truncated at the end of the visible chunk.
    """
    looOuter= LeaveOneOut(len(labels))
    poolOuter=numpy.zeros((len(labels), 2))
    Cs=numpy.zeros((len(labels)))
    normal_features=features
    if(normal=="log"):
        normal_features=numpy.log(normal_features)
    if(normal=="scaled"):
        scaler=preprocessing.StandardScaler()
        normal_features=scaler.fit_transform(normal_features)
    #How good is the method in the outer loop (LR) at predicting cancer?
    for i, (trainOuter, testOuter) in enumerate(looOuter):
        outerFeaturesTrain=normal_features[trainOuter]
        outerLabelsTrain=labels[trainOuter]
        #What is the lr model with the best hyperparameter settings to predict the
        #test sample from the training samples?
        best_auc=0
        best_c=0
        # Candidate C grid: 0.01..0.99 step 0.01, then 1..100 step 1.
        lessThanOneC=numpy.arange(0.01,1.0,0.01)
        greaterThanOneC=numpy.arange(1,101,1)
        for innerC in numpy.nditer(numpy.concatenate((lessThanOneC,greaterThanOneC))):
            #How good is the model with this hyperparameter?
            looInner=LeaveOneOut(len(outerLabelsTrain))
            poolInner=numpy.zeros((len(outerLabelsTrain), 2))
            for j, (trainInner, testInner) in enumerate(looInner):
                innerFeaturesTrain=outerFeaturesTrain[trainInner]
                innerLabelsTrain=outerLabelsTrain[trainInner]
                innerModel=linear_model.LogisticRegression(penalty="l1",C=float(innerC))
                innerModel.fit(innerFeaturesTrain,innerLabelsTrain)
                probInner = innerModel.predict_proba(outerFeaturesTrain[testInner])
                poolInner[j,0]=outerLabelsTrain[testInner]
                poolInner[j,1]=probInner[0,1]
            fpr, tpr, thresholds = roc_curve(poolInner[:,0], poolInner[:,1])
            roc_auc = auc(fpr, tpr)
# Use setuptools rather than distutils: distutils.core.setup() does not
# understand ``install_requires`` (it warns and silently drops it), so the
# declared dependencies were never actually enforced at install time.
from setuptools import setup

setup(
    name='archinfo',
    version='0.03',
    packages=['archinfo'],
    install_requires=[
        'capstone',
        'pyelftools',
        'pyvex',
    ],
)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Copyright 2017 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' metadatahandler.py '''
import traceback
import tornado.gen
import tornado.web

from heron.common.src.python.utils.log import Log
from heron.tools.tracker.src.python.handlers import BaseHandler


# pylint: disable=attribute-defined-outside-init
class MetaDataHandler(BaseHandler):
  """Serves GET /topologies/metadata.

  Query parameters:
    - cluster  (required)
    - environ  (required)
    - role     (optional) role used to submit the topology
    - topology (required) name of the requested topology

  Responds with a JSON dictionary holding the topology's static properties;
  runtime information is served by /topologies/runtimestate instead.

  Example response:

    {
      release_version: "foo/bar",
      cluster: "local",
      release_tag: "",
      environ: "default",
      submission_user: "foo",
      release_username: "foo",
      submission_time: 1489523952,
      viz: "",
      role: "foo",
      jobname: "EX"
    }
  """

  def initialize(self, tracker):
    """Keep a reference to the shared tracker used to look up topologies."""
    self.tracker = tracker

  @tornado.gen.coroutine
  def get(self):
    """Return the requested topology's metadata, or an error response."""
    try:
      # Pull the request arguments in the same order as before; each helper
      # raises on a missing required argument.
      requested_cluster = self.get_argument_cluster()
      requested_role = self.get_argument_role()
      requested_environ = self.get_argument_environ()
      requested_topology = self.get_argument_topology()
      info = self.tracker.getTopologyInfo(
          requested_topology, requested_cluster, requested_role,
          requested_environ)
      self.write_success_response(info["metadata"])
    except Exception as e:
      # Boundary handler: log and convert any failure into an error response.
      Log.error("Exception when handling GET request '/topologies/metadata'")
      Log.debug(traceback.format_exc())
      self.write_error_response(e)
#--*-- coding:utf-8 --*--
'''
Created on 2015-05-08  (original date string was garbled by a source-encoding
mishap)

@author: stm
'''
from utils import DebugLog
from base.msgcodec import MsgCodec
from abc import abstractmethod
from base.vdstate import CREATEVIOSLPAR_STAT, StateBase
from base.cmdmsg import CMDMsg


class EngineBase(object):
    '''
    The engine would deal with the command from UI or command line.
    And it would respond the result to UI or command listener.
    '''
    # NOTE(review): remnants of an abandoned singleton implementation; as
    # written, the commented-out __new__ would not return the newly created
    # instance on the cache-miss path.
#     objs = {}
#     def __new__(cls, *args, **kv):
#         if cls in cls.objs:
#             return cls.objs[cls]
#         cls.objs[cls] = super(EngineBase, cls).__new__(cls)
#
    def __init__(self, vd_comm_cnt, vd_config):
        '''
        Constructor.

        vd_comm_cnt -- communication connection handed to the engine
        vd_config   -- configuration object for this engine
        '''
        DebugLog.info_print("EngineBase is initialized")
        # Decoder/encoder for wire messages exchanged with the UI/listener.
        self.msg_decoder = MsgCodec()
        self.vd_comm_cnt = vd_comm_cnt
        self.vd_config = vd_config

    @abstractmethod
    def process_message(self, msg):
        '''
        virtual method -- subclasses must implement.

        NOTE(review): @abstractmethod is not enforced here because EngineBase
        uses the plain ``object`` metaclass (no ABCMeta / abc.ABC), so
        instantiating a subclass that forgets to override this will not raise.
        '''
        pass

    @staticmethod
    def get_post_phase_progress_msg(server_id, phase, progress, cmd):
        # Build the [phase-constant, progress-constant] state payload and
        # encode it together with the command id into a wire message.
        resp_state = [StateBase.get_state_const(phase),
                      StateBase.get_state_progress_const_name(progress)]
        msg = MsgCodec().encodeMsg(CMDMsg.getCMD(cmd), server_id, resp_state)
        DebugLog.debug_print_level1(msg)
        return msg
reakers with R>G>B. # Max values red_biggest = math_ops.cast((reds >= blues) & \ (reds >= greens), dtypes.float32) green_biggest = math_ops.cast((greens > reds) & \ (greens >= blues), dtypes.float32) blue_biggest = math_ops.cast((blues > reds) & \ (blues > greens), dtypes.float32) # Min values red_smallest = math_ops.cast((reds < blues) & \ (reds < greens), dtypes.float32) green_smallest = math_ops.cast((greens <= reds) & \ (greens < blues), dtypes.float32) blue_smallest = math_ops.cast((blues <= reds) & \ (blues <= greens), dtypes.float32) # Derivatives of R, G, B wrt Value slice dv_dr = red_biggest dv_dg = green_biggest dv_db = blue_biggest # Derivatives of R, G, B wrt Saturation slice # The first term in the addition is the case when the corresponding color # from (r,g,b) was "MAX" # -> derivative = MIN/square(MAX), MIN could be one of the other two colors # The second term is the case when the corresponding color from # (r,g,b) was "MIN" # -> derivative = -1/MAX, MAX could be one of the other two colours. ds_dr = math_ops.cast(reds > 0, dtypes.float32) * \ math_ops.add(red_biggest * \ math_ops.add(green_smallest * greens, blue_smallest * blues) * \ _CustomReciprocal(math_ops.square(reds)),\ red_smallest * -1 * _CustomReciprocal((green_biggest * \ greens) + (blue_biggest * blues))) ds_dg = math_ops.cast(greens > 0, dtypes.float32) * \ math_ops.add(green_biggest * \ math_ops.add(red_smallest * reds, blue_smallest * blues) * \ _CustomReciprocal(math_ops.square(greens)),\ green_smallest * -1 * _CustomReciprocal((red_biggest * \ reds) + (blue_biggest * blues))) ds_db = math_ops.cast(blues > 0, dtypes.float32) * \ math_ops.add(blue_biggest * \ math_ops.add(green_smallest * greens, red_smallest * reds) * \ _CustomReciprocal(math_ops.square(blues)),\ blue_smallest * -1 * _CustomReciprocal((green_biggest * \ greens) + (red_biggest * reds))) # Derivatives of R, G, B wrt Hue slice # Need to go case by case for each color. 
# for red, dh_dr -> dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5 # dh_dr_1 -> # if red was MAX, then derivative = 60 * -1 * (G-B)/square(MAX-MIN) == 60 *\ # -1 * (greens-blues) * reciprocal(square(saturation)) * \ # reciprical(square(value)) # elif green was MAX, there are two subcases # ie when red was MIN and when red was NOT MIN # dh_dr_2 -> # if red was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) +\ # (B-R)*(-1/square(MAX-MIN) * -1)) == 60 * (blues - greens) *\ # reciprocal(square(reds - greens)) # dh_dr_3 -> # if red was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(greens-blues) # elif blue was MAX, there are two subcases # dh_dr_4 -> # if red was MIN (similarly use the UV rule) -> 60 * (blues - greens) *\ # reciprocal(square(blues - reds)) # dh_dr_5 -> # if red was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(blues-greens) dh_dr_1 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \ -1 * \ (greens - blues) * \ _CustomReciprocal(math_ops.square(saturation)) *\ _CustomReciprocal(math_ops.square(value))) dh_dr_2 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \ red_smallest * (blues - greens) * \ _CustomReciprocal(math_ops.square(reds - greens))) dh_dr_3 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \ blue_smallest * -1 * _CustomReciprocal(greens - blues)) dh_dr_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \ red_smallest * (blues - greens) * \ _CustomReciprocal(math_ops.square(blues - reds))) dh_dr_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \ green_smallest * _CustomReciprocal(blues - greens)) dh_dr = dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5 # Converting from degrees to [0,1] scale as specified in # https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv dh_dr = dh_dr / 360 # for green, dh_dg -> dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5 # dh_dg_1 -> # if green was MAX, then derivative = 60 * -1 * (B-R)/square(MAX-MIN) == 60 *\ # -1 * 
(blues - reds) * reciprocal(square(saturation)) * \ # reciprocal(square(value)) # elif red was MAX, there are two subcases ie # when green was MIN and when green was NOT MIN # dh_dg_2 -> # if green was MIN (use UV rule) -> 60 * ((1 * 1/(MAX-MIN)) + \ # (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * \ # ((reciprocal(reds-greens) + (greens-blues) * \ # reciprocal(square(reds-greens)))) # dh_dg_3 -> # if green was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(reds - blues) # elif blue was MAX, there are two subcases # dh_dg_4 -> # if green was MIN (similarly use the UV rule) -> 60 * -1 * \ # (reciprocal(blues - greens) + (reds-greens)* -1 * \ # reciprocal(square(blues-greens))) # dh_dr_5 -> # if green was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(blues - reds) dh_dg_1 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \ -1 * (blues - reds) * \ _CustomReciprocal(math_ops.square(saturation))\ * _CustomReciprocal(math_ops.square(value))) dh_dg_2
= 60 * (math_ops.cast(reds > 0, dtype
s.float32) * red_biggest * \ green_smallest * (reds - blues) * \ _CustomReciprocal(math_ops.square(reds - greens))) dh_dg_3 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \ blue_smallest * _CustomReciprocal(reds - blues)) dh_dg_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \ green_smallest * (reds - blues) * \ _CustomReciprocal(math_ops.square(blues - greens))) dh_dg_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \ red_smallest * -1 * _CustomReciprocal(blues - reds)) dh_dg = dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5 # Converting from degrees to [0,1] scale as specified in # https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv dh_dg = dh_dg / 360 # for blue, dh_db -> dh_db_1 + dh_db_2 + dh_db_3 + dh_db_4 + dh_db_5 # dh_db_1 -> # if blue was MAX, then derivative = 60 * -1 * (R-G)/square(MAX-MIN) == 60 *\ # -1 * reciprocal(square(saturation)) * reciprocal(square(value)) # elif red was MAX, there are two subcases # ie when blue was MIN and when blue was NOT MIN # dh_dg_2 -> # if blue was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) + \ # (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * (greens - reds) *\ # reciprocal(square(reds - blues)) # dh_dg_3 -> # if blue was NOT MIN -> 60 * -1/MAX-MIN == 60 * -1 * \ # reciprocal(reds - greens) # elif green was MAX, there are two subcases # dh_dg_4 -> # if blue was MIN (similarly use the UV rule) -> 60 * -1 * \ # (reciprocal(greens - blues) + (blues - reds) * -1 * \ # reciprocal(square(greens - blues))) # dh_dr_5 -> # if blue was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(greens - reds) dh_db_1 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \ -1 * \ (reds - greens) * \ _CustomReciprocal(math_ops.square(saturation)) * \ _CustomReciprocal(math_ops.square(value))) dh_db_2 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest *\ blue_smallest * (greens - reds) * \ _CustomReciprocal(math_ops.square(reds - blues))) dh_db_3 = 60 
* (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \ green_smallest * -
#!/usr/bin/python
#
# Poll a symbolList via Pyrfa. RFA currently only supports refresh messages
# for symbolList, so polling is required; symbolListRequest is invoked
# internally by getSymbolList.
#
# Example IMAGE/REFRESH payload:
# ({'MTYPE':'REFRESH','RIC':'0#BMD','SERVICE':'NIP'},
#  {'ACTION':'ADD','MTYPE':'IMAGE','SERVICE':'NIP','RIC':'0#BMD','KEY':'FKLI'},
#  {'ACTION':'ADD','MTYPE':'IMAGE','SERVICE':'NIP','RIC':'0#BMD','KEY':'FKLL'},
#  {'ACTION':'ADD','MTYPE':'IMAGE','SERVICE':'NIP','RIC':'0#BMD','KEY':'FKLM'})
#
import pyrfa

# Bring up the consumer session: config, session, OMM consumer, login,
# then the directory/dictionary handshakes required before any data request.
consumer = pyrfa.Pyrfa()
consumer.createConfigDb("./pyrfa.cfg")
consumer.acquireSession("Session3")
consumer.createOMMConsumer()
consumer.login()
consumer.directoryRequest()
consumer.dictionaryRequest()

# Fetch the chain RIC's membership and print one constituent per line.
chain_ric = "0#BMD"
members = consumer.getSymbolList(chain_ric)
print("\n=======\n" + chain_ric + "\n=======")
print(members.replace(" ", "\n"))
><[^<>]*><[^<>]*><a href=\"mailto:") official_name_4_re = re.compile("<[br /p]*>([A-Za-z\. -]+?)<[^<>]*><[^<>]*><[^<>]*><a href=\"/files") official_name_5_re = re.compile(">([A-Za-z\. -]+?), [^<>]*?Director") official_name_6_re = re.compile("Fax .+?<[^<>]*><[^<>]*>([A-Za-z\. -]+?)<") website_re = re.compile("a href=\"(h.+?)\"") #email_re = re.compile("mailto:%*2*0*(.+?) *\".*?>") email_re = re.compile("[A-Za-z\.-]+?@[A-Za-z\.-]+") email_junk_re = re.compile("@[^<>]+?\.[cg]o[mv](.*?)<") font_re = re.compile("</*font.+?>") style_re = re.compile("(style.+?\")>") span_re = re.compile("</*span.+?>") w_re = re.compile("</*w:.+?>") u_re = re.compile("</*u>") m_re = re.compile("</*m:.+?>") set_re = re.compile("{.+?}") comment_re = re.compile("<!--.+?>") charleston_re = re.compile(" [A-Z][A-Z](.+?)\d{5}[\d-]*") richland_fix_re = re.compile("Military and Overseas Correspondence.+?</a>") address_re = re.compile("<[br p/]*>([^<>]*\d[^>]+?<.+?\d{5}[\d-]*) *<[brp/ ]*>") csz_re = re.compile("[\d>] *([A-Za-z \.]+?,* [A-Z][A-Z] +\d{5}[\d-]*)") po_re = re.compile("(P*o*s*t* *Of*i*c*e* .+?)<") city_re = re.compile("(.+?),* [A-Z][A-Z] ") state_re = re.compile(" ([A-Z][A-Z]) ") zip_re = re.compile("\d{5}[\d-]*") zip_mod_re = re.compile("\(\d{5}[\d-]*\)") mailing_region_re = re.compile("Mailing Address.+?[A-Z][A-Z] \d{5}[\d-]* *<[brp/ ]*>") for link in county_links: authority_name, first_name, last_name, county_name, town_name, fips, street, city, address_state, zip_code, po_street, po_city, po_state, po_zip_code, reg_authority_name, reg_first, reg_last, reg_street, reg_city, reg_state, reg_zip_code, reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code, reg_phone, reg_fax, reg_email, reg_website, reg_hours, phone, fax, email, website, hours, review = dogcatcher.begin(voter_state) link_name = county_link_names[county_links.index(link)] file_name = tmpdir + link_name + "-sc-clerks.html" url = "http://www.scvotes.org" + link data = urllib.urlopen(url).read() output = 
open(file_name,"w") output.write(data) output.close() county = open(file_name).read() #Trimming the county. county = relevant_re.findall(county)[0] #There are a tremendous number of useless HTML tags or county-specific fixes. This code cleans them up so we don't have to deal with them elsewhere. for junk in email_junk_re.findall(county): county = county.replace(junk,"") for font in font_re.findall(county): county = county.replace(font,"") for style in style_re.findall(county): county = county.replace(style,"") for span in span_re.findall(county): county = county.replace(span,"") for w in w_re.findall(county): county = county.replace(w,"") for u in u_re.findall(county): county = county.replace(u,"") for m in m_re.findall(county): county = county.replace(m,"") for comment in comment_re.findall(county): county = county.replace(comment,"") for s in set_re.findall(county): county = county.replace(s,"") for item in charleston_re.findall(county): county = county.replace(item," ") for item in richland_fix_re.findall(county): county = county.replace(item," ") #fixing errors in Dillon, Florence, and Newberry Counties county = county.replace("sedwardsvr17","<a href=\"mailto:sedwardsvr17@aol.com\"").replace("%3",":").replace("%40","@").replace("brogers","<a href=\"mailto:brogers@newberrycounty.net\"") county_name = county_name_re.findall(county)[0].replace(" County","").strip() print "__________________________________" #unique case in Aiken County: if county_name == "Aiken County": reg_email = "cholland@aikencountysc.gov" county.replace("cholland@aikencountysc.gov","") phone = dogcatcher.find_phone(phone_re, county) for item in phone_re.findall(county): county = county.replace(item, "") #Many of the fax numbers don't have area codes. So we grab the first area code we find in the block of phone numbers and give it to the fax number. 
area_code = area_code_re.findall(phone)[0] fax = dogcatcher.find_phone(fax_re, county, area_code) for item in fax_re.findall(county): county = county.replace(item, "") county = county.replace("Fax", "") #unique case in Greenwood County, which gives a separate phone number for registration-related contacts: if county_name == "Greenwood County": phone = "(864) 942-3152, (864) 942-3153, (864) 942-5667" fax = "(804) 942-5664" county = county.replace(phone,"").replace(fax,"") reg_phone = "(864) 942-8585" county.replace("(864) 942-8585","") reg_fax = "(846) 942-5664" county.replace("942-5664","") #Some counties have a registration-only email address. In those counties, the absentee email has "absentee" in it. #Websites have similar problems print county email = dogcatcher.find_emails(email_re, county) if "absentee" in email: emails = email.split(", ") email = "" for item in emails: county = county.replace(item, "") if "absentee" in item: email = email + ", " + item else: reg_email = reg_email + ", " + item email = email.strip(", ") reg_email = reg_email.strip(", ") else: for item in email_re.findall(county): county = county.replace(item, "") website = dogcatcher.find_website(website_re, county) if "absentee" in website: websites = website.split(", ") website = "" for item in websites: county = county.replace(item, "") if "absentee" in item: website = website + ", " + item else: reg_website = reg_website + ", " + item else: for item in website_re.findall(county): county = county.replace(item, "")
website = website.strip(", ") reg_website = reg_website.strip(", ") print [email] #There are many
forms the official's name can take. This tries all of them. if official_name_1_re.findall(county): official_name = official_name_1_re.findall(county)[0].strip() elif official_name_2_re.findall(county): official_name = official_name_2_re.findall(county)[0].strip() elif official_name_3_re.findall(county): official_name = official_name_3_re.findall(county)[0].strip() elif official_name_4_re.findall(county): official_name = official_name_4_re.findall(county)[0].strip() elif official_name_5_re.findall(county): official_name = official_name_5_re.findall(county)[0].strip() elif official_name_6_re.findall(county): official_name = official_name_6_re.findall(county)[0].strip() else: official_name = "" if official_name: first_name, last_name, review = dogcatcher.split_name(official_name, review) county = county.replace(official_name,"") print "++++++++++++++++++++++++++++++++++++++" if county_name == "Charleston County": county = county.replace("Post Office","Mailing Address:<> Post Office") #Some counties don't put a marked "Mailing Address" section, but do have a separate mailing address. #So first, we check whether the county has "Mailing Address" in it. if "Mailing Address" not in county: #This section finds the full address. After finding the address, it identifies a city/state/zip (csz) combination and a PO Box number if that exists. #It removes both the CSZ and the PO Address (if it exists) from the full address, leaving behind a street address with some garbage. #It then cleans up the street address and pulls the city, state, and zip out of the csz, and assigns them as appropriate to the street address and state. 
address = address_re.findall(county)[0] csz = csz_re.findall(address)[0] address = address.replace(csz,"") try: po_street = po_re.findall(address)[0].replace("</b><p>","") except: po_street = "" street = address.replace(po_street,"").replace(csz,"").replace("</b><p>","") street = street.replace("<p>",", ").replace("</p>",", ").replace("<br />",", ").replace(",,",", ").replace(" ,",",").replace(",,",", ").replace(", , ",", ").strip(" /,") if po_street: po_city = city_re.findall(csz)[0] po_state = state_re.findall(csz)[0] po_zip_code = zip_re.findall(csz)[0] if street: city = city_re.findall(csz)[0] address_state = state_re.findall(csz)[0] zip_code = zip_re.findall(csz)[0] else: #If there's an explicitly stated mailing address, we find it, and then pull the mailing address out of it.
#!/usr/bin/env python
#
# Copyright 2015 Martin Cochran
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from game_model import Game
from scores_messages import AgeBracket
from scores_messages import Division
from scores_messages import League


class ListIdBiMap:
  """Encapsulates mappings to and from list id and structured properties.

  A "list" is identified by a Twitter list ID string; each list corresponds
  to exactly one (league, division, age_bracket) triple. The tables below
  support lookup in both directions: triple -> list ID via LIST_ID_MAP, and
  list ID -> each component via the LIST_ID_TO_* dicts.
  """

  # List ID definitions corresponding to lists defined in the twitter account of
  # @martin_cochran.
  USAU_COLLEGE_OPEN_LIST_ID = '186814318'
  USAU_COLLEGE_WOMENS_LIST_ID = '186814882'
  USAU_CLUB_OPEN_LIST_ID = '186732484'
  USAU_CLUB_WOMENS_LIST_ID = '186732631'
  USAU_CLUB_MIXED_LIST_ID = '186815046'
  AUDL_LIST_ID = '186926608'
  MLU_LIST_ID = '186926651'

  # Every known list ID, in no particular order.
  ALL_LISTS = [
      USAU_COLLEGE_OPEN_LIST_ID,
      USAU_COLLEGE_WOMENS_LIST_ID,
      USAU_CLUB_OPEN_LIST_ID,
      USAU_CLUB_WOMENS_LIST_ID,
      USAU_CLUB_MIXED_LIST_ID,
      AUDL_LIST_ID,
      MLU_LIST_ID
  ]

  # Simple data structure to lookup lists if the league, division, and age
  # bracket were specified in the request. Shape: league -> division ->
  # age_bracket -> list ID. Combinations absent here have no backing list.
  LIST_ID_MAP = {
      League.USAU: {
          Division.OPEN: {
              AgeBracket.COLLEGE: USAU_COLLEGE_OPEN_LIST_ID,
              AgeBracket.NO_RESTRICTION: USAU_CLUB_OPEN_LIST_ID,
          },
          Division.WOMENS: {
              AgeBracket.COLLEGE: USAU_COLLEGE_WOMENS_LIST_ID,
              AgeBracket.NO_RESTRICTION: USAU_CLUB_WOMENS_LIST_ID,
          },
          Division.MIXED: {
              AgeBracket.NO_RESTRICTION: USAU_CLUB_MIXED_LIST_ID,
          },
      },
      League.AUDL: {
          Division.OPEN: {
              AgeBracket.NO_RESTRICTION: AUDL_LIST_ID,
          },
      },
      League.MLU: {
          Division.OPEN: {
              AgeBracket.NO_RESTRICTION: MLU_LIST_ID,
          },
      },
  }

  # Reverse lookup: list ID -> Division.
  LIST_ID_TO_DIVISION = {
      USAU_COLLEGE_OPEN_LIST_ID: Division.OPEN,
      USAU_COLLEGE_WOMENS_LIST_ID: Division.WOMENS,
      USAU_CLUB_OPEN_LIST_ID: Division.OPEN,
      USAU_CLUB_WOMENS_LIST_ID: Division.WOMENS,
      USAU_CLUB_MIXED_LIST_ID: Division.MIXED,
      AUDL_LIST_ID: Division.OPEN,
      MLU_LIST_ID: Division.OPEN,
  }

  # Reverse lookup: list ID -> AgeBracket.
  LIST_ID_TO_AGE_BRACKET = {
      USAU_COLLEGE_OPEN_LIST_ID: AgeBracket.COLLEGE,
      USAU_COLLEGE_WOMENS_LIST_ID: AgeBracket.COLLEGE,
      USAU_CLUB_OPEN_LIST_ID: AgeBracket.NO_RESTRICTION,
      USAU_CLUB_WOMENS_LIST_ID: AgeBracket.NO_RESTRICTION,
      USAU_CLUB_MIXED_LIST_ID: AgeBracket.NO_RESTRICTION,
      AUDL_LIST_ID: AgeBracket.NO_RESTRICTION,
      MLU_LIST_ID: AgeBracket.NO_RESTRICTION,
  }

  # Reverse lookup: list ID -> League.
  LIST_ID_TO_LEAGUE = {
      USAU_COLLEGE_OPEN_LIST_ID: League.USAU,
      USAU_COLLEGE_WOMENS_LIST_ID: League.USAU,
      USAU_CLUB_OPEN_LIST_ID: League.USAU,
      USAU_CLUB_WOMENS_LIST_ID: League.USAU,
      USAU_CLUB_MIXED_LIST_ID: League.USAU,
      AUDL_LIST_ID: League.AUDL,
      MLU_LIST_ID: League.MLU,
  }

  @staticmethod
  def GetListId(division, age_bracket, league):
    """Looks up the list_id which corresponds to the given division and league.

    Args:
      division: Division of interest
      age_bracket: AgeBracket of interest
      league: League of interest

    Returns:
      The list id corresponding to that league and division, or '' if no such
      list exists.
    """
    # Walk the nested map one level at a time; an empty dict at any level
    # means the combination has no backing list.
    d = ListIdBiMap.LIST_ID_MAP.get(league, {})
    if not d:
      return ''
    d = d.get(division, {})
    if not d:
      return ''
    return d.get(age_bracket, '')

  @staticmethod
  def GetStructuredPropertiesForList(list_id):
    """Returns the division, age_bracket, and league for the given list id.

    Defaults to Division.OPEN, AgeBracket.NO_RESTRICTION, and League.USAU, if
    the division, age_bracket, or league, respectively, does not exist in the
    map for the given list_id.

    Args:
      list_id: ID of list for which to retrieve properties.

    Returns:
      (division, age_bracket, league) tuple for the given list ID.
    """
    division = ListIdBiMap.LIST_ID_TO_DIVISION.get(list_id, Division.OPEN)
    age_bracket = ListIdBiMap.LIST_ID_TO_AGE_BRACKET.get(list_id,
        AgeBracket.NO_RESTRICTION)
    league = ListIdBiMap.LIST_ID_TO_LEAGUE.get(list_id, League.USAU)
    return (division, age_bracket, league)
aler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class appfwprofile_contenttype_binding(base_resource) : """ Binding class showing the contenttype that can be bound to appfwprofile. """ def __init__(self) : self._contenttype = "" self._state = "" self._comment = "" self._name = "" self.___count = 0 @property def state(self) : """Enabled.<br/>Possible values = ENABLED, DISABLED. """ try : retur
n self._state except Exception as e: raise e @state.setter def state(self, state) :
"""Enabled.<br/>Possible values = ENABLED, DISABLED """ try : self._state = state except Exception as e: raise e @property def name(self) : """Name of the profile to which to bind an exemption or rule.<br/>Minimum length = 1. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : """Name of the profile to which to bind an exemption or rule.<br/>Minimum length = 1 """ try : self._name = name except Exception as e: raise e @property def contenttype(self) : """A regular expression that designates a content-type on the content-types list. """ try : return self._contenttype except Exception as e: raise e @contenttype.setter def contenttype(self, contenttype) : """A regular expression that designates a content-type on the content-types list. """ try : self._contenttype = contenttype except Exception as e: raise e @property def comment(self) : """Any comments about the purpose of profile, or other useful information about the profile. """ try : return self._comment except Exception as e: raise e @comment.setter def comment(self, comment) : """Any comments about the purpose of profile, or other useful information about the profile. """ try : self._comment = comment except Exception as e: raise e def _get_nitro_response(self, service, response) : """ converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(appfwprofile_contenttype_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.appfwprofile_contenttype_binding except Exception as e : raise e def _get_object_name(self) : """ Returns the value of object identifier argument """ try : if (self.name) : return str(self.name) return None except Exception as e : raise e @classmethod def add(cls, client, resource) : try : if resource and type(resource) is not list : updateresource = appfwprofile_contenttype_binding() updateresource.name = resource.name updateresource.comment = resource.comment updateresource.state = resource.state updateresource.contenttype = resource.contenttype return updateresource.update_resource(client) else : if resource and len(resource) > 0 : updateresources = [appfwprofile_contenttype_binding() for _ in range(len(resource))] for i in range(len(resource)) : updateresources[i].name = resource[i].name updateresources[i].comment = resource[i].comment updateresources[i].state = resource[i].state updateresources[i].contenttype = resource[i].contenttype return cls.update_bulk_request(client, updateresources) except Exception as e : raise e @classmethod def delete(cls, client, resource) : try : if resource and type(resource) is not list : deleteresource = appfwprofile_contenttype_binding() deleteresource.name = resource.name deleteresource.contenttype = resource.contenttype return deleteresource.delete_resource(client) else : if resource and len(resource) > 0 : deleteresources = [appfwprofile_contenttype_binding() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].name = resource[i].name 
deleteresources[i].contenttype = resource[i].contenttype return cls.delete_bulk_request(client, deleteresources) except Exception as e : raise e @classmethod def get(cls, service, name) : """ Use this API to fetch appfwprofile_contenttype_binding resources. """ try : obj = appfwprofile_contenttype_binding() obj.name = name response = obj.get_resources(service) return response except Exception as e: raise e @classmethod def get_filtered(cls, service, name, filter_) : """ Use this API to fetch filtered set of appfwprofile_contenttype_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = appfwprofile_contenttype_binding() obj.name = name option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e @classmethod def count(cls, service, name) : """ Use this API to count appfwprofile_contenttype_binding resources configued on NetScaler. """ try : obj = appfwprofile_contenttype_binding() obj.name = name option_ = options() option_.count = True response = obj.get_resources(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e @classmethod def count_filtered(cls, service, name, filter_) : """ Use this API to count the filtered set of appfwprofile_contenttype_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". 
""" try : obj = appfwprofile_contenttype_binding() obj.name = name option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e class As_scan_location_xmlsql: ELEMENT = "ELEMENT" ATTRIBUTE = "ATTRIBUTE" class Xmlmaxelementdepthcheck: ON = "ON" OFF = "OFF" class Xmlmaxattachmentsizecheck: ON = "ON" OFF = "OFF" class Xmlsoaparraycheck: ON = "ON" OFF = "OFF" class State: ENABLED = "ENABLED" DISABLED = "DISABLED" class Xmlmaxelementnamelengthcheck: ON = "ON" OFF = "OFF" class Isregex_ff: REGEX = "REGEX" NOTREGEX = "NOTREGEX" class Xmlmaxelementscheck: ON = "ON" OFF = "OFF" class Xmlendpointcheck: ABSOLUTE = "ABSOLUTE" RELATIVE = "RELATIVE" class Xmlmaxnamespacescheck: ON = "ON" OFF = "OFF" class Xmlmaxfilesizecheck: ON = "ON" OFF = "OFF" class Xmlmaxattributenamelengthcheck: ON = "ON" OFF = "OFF" class Xmlblockdtd: ON = "ON" OFF = "OFF" class Xmlblockpi: ON = "ON" OFF = "OFF" class Isregex_sql: REGEX = "REGEX" NOTREGEX = "NOTREGEX" class Xmlvalidateresponse: ON = "ON" OFF = "OFF" class Xmlmaxelementchildrencheck: ON = "ON" OFF = "OFF" class Isregex: REGEX = "REGEX" NOTREGEX = "NOTREGEX" class Xmlmaxentityexpansionscheck: ON = "ON" OFF = "OFF" class Xmlmaxnamespaceurilengthcheck: ON = "ON" OFF = "OFF" class As_scan_location_xss: FORMFIELD = "FORMFIELD" HEADER = "HEADER" COOKIE = "COOKIE" class Xmlmaxentityexpansiondepthcheck: ON = "ON" OFF = "OFF" class As_scan_location_xmlxss: ELEMENT = "ELEMENT" ATTRIBUTE = "ATTRIBUTE" class Xmlmaxattributevaluelengthcheck: ON = "ON" OFF = "OFF" class As_scan_location_sql: FORMFIELD = "FORMFIELD" HEADER = "HEADER" COOKIE = "COOKIE" class Isregex_ffc: REGEX = "REGEX" NOTREGEX = "NOTREGEX" class Xmlattachmentcontenttypecheck: ON = "ON" OFF = "OFF" class Isregex_xmlsql: REGEX = "REGEX" NOTREGEX = "NOTREGEX" class Xmlvalidatesoapenvelope: ON = "ON" OFF = "OFF" class Xmlmaxchardatalengthcheck: ON = 
"ON" OFF = "OFF" class Xmlminfilesizecheck: ON = "ON" OFF = "OFF" class
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pubchem.py."""

import unittest

import numpy
import pytest

from openfermion.chem.pubchem import geometry_from_pubchem
from openfermion.testing.testing_utils import module_importable

# Skip the whole suite when the optional `pubchempy` dependency is missing.
# NOTE(review): geometry_from_pubchem presumably queries the live PubChem
# service through pubchempy, so these tests also need network access — confirm.
using_pubchempy = pytest.mark.skipif(module_importable('pubchempy') is False,
                                     reason='Not detecting `pubchempy`.')


@using_pubchempy
class OpenFermionPubChemTest(unittest.TestCase):
    """Sanity checks on geometries returned by geometry_from_pubchem.

    Each geometry is a list of (atom_symbol, coordinate) pairs; the tests
    below verify atom counts and loose geometric invariants (bond lengths
    and angles) rather than exact coordinates.
    """

    def test_water(self):
        # Water: expect 3 atoms, two near-equal O-H bonds of ~1 Angstrom,
        # and an H-O-H angle between 100 and 110 degrees.
        water_geometry = geometry_from_pubchem('water')
        self.water_natoms = len(water_geometry)

        self.water_atoms = [water_atom[0] for water_atom in water_geometry]
        water_oxygen_index = self.water_atoms.index('O')
        # Remove the oxygen so the two remaining entries are the hydrogens.
        water_oxygen = water_geometry.pop(water_oxygen_index)
        water_oxygen_coordinate = numpy.array(water_oxygen[1])
        water_hydrogen1_coordinate = numpy.array(water_geometry[0][1])
        water_hydrogen2_coordinate = numpy.array(water_geometry[1][1])
        # O->H displacement vectors.
        water_oxygen_hydrogen1 = \
            water_hydrogen1_coordinate - water_oxygen_coordinate
        water_oxygen_hydrogen2 = \
            water_hydrogen2_coordinate - water_oxygen_coordinate
        self.water_bond_length_1 = numpy.linalg.norm(water_oxygen_hydrogen1)
        self.water_bond_length_2 = numpy.linalg.norm(water_oxygen_hydrogen2)
        # Angle from the normalized dot product; dividing only the second
        # vector by both norms is algebraically equivalent since the norms
        # are scalars.
        self.water_bond_angle = \
            numpy.arccos(numpy.dot(water_oxygen_hydrogen1,
                                   water_oxygen_hydrogen2 /
                                   (numpy.linalg.norm(water_oxygen_hydrogen1) *
                                    numpy.linalg.norm(water_oxygen_hydrogen2))))

        water_natoms = 3
        self.assertEqual(water_natoms, self.water_natoms)
        self.assertAlmostEqual(self.water_bond_length_1,
                               self.water_bond_length_2, places=4)
        # Bond length window in the units returned by PubChem (Angstroms).
        water_bond_length_low = 0.9
        water_bond_length_high = 1.1
        self.assertTrue(water_bond_length_low <= self.water_bond_length_1)
        self.assertTrue(water_bond_length_high >= self.water_bond_length_1)
        # Angle window: 100-110 degrees, converted to radians.
        water_bond_angle_low = 100. / 360 * 2 * numpy.pi
        water_bond_angle_high = 110. / 360 * 2 * numpy.pi
        self.assertTrue(water_bond_angle_low <= self.water_bond_angle)
        self.assertTrue(water_bond_angle_high >= self.water_bond_angle)

    def test_helium(self):
        # Monatomic species: the geometry should contain exactly one atom.
        helium_geometry = geometry_from_pubchem('helium')
        self.helium_natoms = len(helium_geometry)

        helium_natoms = 1
        self.assertEqual(helium_natoms, self.helium_natoms)

    def test_none(self):
        # An unknown compound name yields None rather than raising.
        none_geometry = geometry_from_pubchem('none')

        self.assertIsNone(none_geometry)

    def test_water_2d(self):
        # A 2-D structure request should give planar coordinates: every
        # z-component is 0.
        water_geometry = geometry_from_pubchem('water', structure='2d')
        self.water_natoms = len(water_geometry)
        water_natoms = 3
        self.assertEqual(water_natoms, self.water_natoms)

        # z-components of the first two atoms; names say "oxygen" but these
        # are simply indices 0 and 1 of the geometry — planarity is what is
        # actually asserted.
        self.oxygen_z_1 = water_geometry[0][1][2]
        self.oxygen_z_2 = water_geometry[1][1][2]
        z = 0
        self.assertEqual(z, self.oxygen_z_1)
        self.assertEqual(z, self.oxygen_z_2)

        # Invalid `structure` values must be rejected.
        with pytest.raises(ValueError,
                           match='Incorrect value for the argument structure'):
            _ = geometry_from_pubchem('water', structure='foo')
import binascii
import datetime
import hashlib
import os

from pymongo import ReturnDocument

from vj4 import db
from vj4.util import argmethod

# Token types stored in the `token_type` field of each token document.
TYPE_REGISTRATION = 1
TYPE_SAVED_SESSION = 2
TYPE_UNSAVED_SESSION = 3
TYPE_LOSTPASS = 4
TYPE_CHANGEMAIL = 5


def _get_id(id_binary: bytes) -> bytes:
  """Return the SHA-256 digest used as the stored document `_id`.

  Only the digest of the raw token ID is persisted, so a leaked database
  does not expose usable token IDs.
  """
  return hashlib.sha256(id_binary).digest()


@argmethod.wrap
async def add(token_type: int, expire_seconds: int, **kwargs):
  """Add a token.

  Args:
    token_type: type of the token.
    expire_seconds: expire time, in seconds.
    **kwargs: extra data.

  Returns:
    Tuple of (token ID, token document). The token ID is the hex-encoded
    pre-image handed to the client; only its hash is stored.
  """
  # 32 random bytes, hashed once to form the raw ID; hashed again by
  # _get_id for storage.
  id_binary = hashlib.sha256(os.urandom(32)).digest()
  now = datetime.datetime.utcnow()
  doc = {**kwargs,
         '_id': _get_id(id_binary),
         'token_type': token_type,
         'create_at': now,
         'update_at': now,
         # `expire_at` drives the TTL index created in ensure_indexes().
         'expire_at': now + datetime.timedelta(seconds=expire_seconds)}
  coll = db.coll('token')
  await coll.insert_one(doc)
  return binascii.hexlify(id_binary).decode(), doc


@argmethod.wrap
async def get(token_id: str, token_type: int):
  """Get a token.

  Args:
    token_id: token ID (hex string as returned by add()).
    token_type: type of the token.

  Returns:
    The token document, or None.
  """
  id_binary = binascii.unhexlify(token_id)
  coll = db.coll('token')
  doc = await coll.find_one({'_id': _get_id(id_binary),
                             'token_type': token_type})
  return doc


@argmethod.wrap
async def get_most_recent_session_by_uid(uid: int):
  """Get the most recent session by uid."""
  coll = db.coll('token')
  # Both saved and unsaved sessions qualify; newest `update_at` wins.
  doc = await coll.find_one({'uid': uid,
                             'token_type': {'$in': [TYPE_SAVED_SESSION,
                                                    TYPE_UNSAVED_SESSION]}},
                            sort=[('update_at', -1)])
  return doc


@argmethod.wrap
async def get_session_list_by_uid(uid: int):
  """Get the session list by uid, oldest first (by create_at)."""
  coll = db.coll('token')
  return await coll.find({'uid': uid,
                          'token_type': {'$in': [TYPE_SAVED_SESSION,
                                                 TYPE_UNSAVED_SESSION]}},
                         sort=[('create_at', 1)]).to_list()


@argmethod.wrap
async def update(token_id: str, token_type: int, expire_seconds: int, **kwargs):
  """Update a token.

  Args:
    token_id: token ID.
    token_type: type of the token.
    expire_seconds: expire time, in seconds; resets `expire_at` from now.
    **kwargs: extra data.

  Returns:
    The token document after update, or None if no matching token exists.
  """
  id_binary = binascii.unhexlify(token_id)
  coll = db.coll('token')
  # The token type is part of the lookup key and must never be overwritten
  # through the extra data.
  assert 'token_type' not in kwargs
  now = datetime.datetime.utcnow()
  doc = await coll.find_one_and_update(
      filter={'_id': _get_id(id_binary), 'token_type': token_type},
      update={'$set': {**kwargs,
                       'update_at': now,
                       'expire_at': now + datetime.timedelta(
                           seconds=expire_seconds)}},
      return_document=ReturnDocument.AFTER)
  return doc


@argmethod.wrap
async def delete(token_id: str, token_type: int):
  """Delete a token.

  Args:
    token_id: token ID.
    token_type: type of the token.

  Returns:
    True if deleted, or False.
  """
  return await delete_by_hashed_id(_get_id(binascii.unhexlify(token_id)),
                                   token_type)


@argmethod.wrap
async def delete_by_hashed_id(hashed_id: str, token_type: int):
  """Delete a token by the hashed ID.

  NOTE(review): the annotation says str (likely for the argmethod CLI), but
  delete() passes the raw bytes digest from _get_id — confirm both call
  paths are intended.
  """
  coll = db.coll('token')
  result = await coll.delete_one({'_id': hashed_id, 'token_type': token_type})
  return bool(result.deleted_count)


@argmethod.wrap
async def delete_by_uid(uid: int):
  """Delete all session tokens (saved and unsaved) by uid.

  Non-session token types (registration, lostpass, changemail) are not
  touched by this function.
  """
  coll = db.coll('token')
  result = await coll.delete_many({'uid': uid,
                                   'token_type': {'$in': [TYPE_SAVED_SESSION,
                                                          TYPE_UNSAVED_SESSION]}})
  return bool(result.deleted_count)


@argmethod.wrap
async def ensure_indexes():
  """Create the indexes this module's queries rely on."""
  coll = db.coll('token')
  # Supports the per-uid session lookups sorted by update_at.
  await coll.create_index([('uid', 1),
                           ('token_type', 1),
                           ('update_at', -1)], sparse=True)
  # TTL index: MongoDB removes documents as soon as `expire_at` passes.
  await coll.create_index('expire_at', expireAfterSeconds=0)


if __name__ == '__main__':
  argmethod.invoke_by_args()
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo.tests.common import TransactionCase


class TestGetMailChannel(TransactionCase):
    """Check that livechat channel opening rotates over all available operators."""

    def setUp(self):
        super(TestGetMailChannel, self).setUp()
        self.operators = self.env['res.users'].create([{
            'name': 'Michel',
            'login': 'michel'
        }, {
            'name': 'Paul',
            'login': 'paul'
        }, {
            'name': 'Pierre',
            'login': 'pierre'
        }, {
            'name': 'Jean',
            'login': 'jean'
        }, {
            'name': 'Georges',
            'login': 'georges'
        }])

        self.livechat_channel = self.env['im_livechat.channel'].create({
            'name': 'The channel',
            # Bug fix: x2many fields must be written with a *list* of command
            # tuples; a bare (6, 0, ids) tuple is iterated element by element
            # by the ORM and rejected, so the operators were never linked.
            'user_ids': [(6, 0, self.operators.ids)],
        })

        operators = self.operators

        def get_available_users(self):
            # Bypass the presence check so every operator is always available.
            return operators

        self.patch(type(self.env['im_livechat.channel']),
                   '_get_available_users', get_available_users)

    def test_get_mail_channel(self):
        """For a livechat with 5 available operators, we open 5 channels 5 times (25 channels total).
        For every 5 channels opening, we check that all operators were assigned.
        """
        for i in range(5):
            mail_channels = self._get_mail_channels()
            channel_operators = [channel_info['operator_pid'] for channel_info in mail_channels]
            channel_operator_ids = [channel_operator[0] for channel_operator in channel_operators]
            self.assertTrue(all(partner_id in channel_operator_ids
                                for partner_id in self.operators.mapped('partner_id').ids))

    def _get_mail_channels(self):
        """Open 5 anonymous livechat channels and mark each one as active.

        Returns the list of channel-info dicts from _get_mail_channel().
        """
        mail_channels = []

        for i in range(5):
            mail_channel = self.livechat_channel._get_mail_channel('Anonymous')
            mail_channels.append(mail_channel)
            # send a message to mark this channel as 'active'
            self.env['mail.channel'].browse(mail_channel['id']).write({
                'channel_message_ids': [(0, 0, {'body': 'cc'})]
            })

        return mail_channels
h[0,:],path[1,:]) plt.axis([-100, 300, -100, 300]) plt.show() return path def compute_A_star_path(origin,goal,Map): path=A_star(origin,goal,Map) #path_array=np.array(path) #print(path_array.shape) path_rev=np.flip(path, axis=0) return path_rev def robot_control(pos_rob,target, K_x=1,K_y=1,K_an=1): #pos_rob is a 1x3 matrix with(x,y,teta) & target is a 1x2 matrix with(x,y) # Radius and wheel width in cm L = 14.5 R = 1.7 theta_star=np.arctan2(target[1]-pos_rob[1], target[0]-pos_rob[0])*180/np.pi if theta_star<0: theta_star=360-abs(theta_star) theta=pos_rob[2] err_theta=theta_star-theta # GET wheel velocities through curvature M_r2wheels= np.array([[1/R, -L/(2*R) ],[1/R, L/(2*R)]]) # --> (Vr,Vteta) = M * (w_rigth, w_left) vel_wheels = np.ones(2) distance_x = (target[0]-pos_rob[0])*np.sin(pos_rob[2]*pi/180) - (target[1]-pos_rob[1])*np.cos(pos_rob[2]*pi/180) l= np.sqrt(np.power(target[0]-pos_rob[0],2)+np.power(target[1]-pos_rob[1],2)) #print("L is: ",l)() C = -distance_x/np.power(l,2) w = 2*R; kt=0.05 #A = (1-(C*L)/2)/(1+(C*L)/2) #vel_wheels[0] = w*L/(R*(1+A)) #vel_wheels[1] = vel_wheels[0]*A if abs(err_theta)>60 and abs(err_theta)<300: vel_robot=np.array([0,60]) # print("JUST SPINNING",abs(err_theta),theta_star,theta) else: vel_robot = np.array([w, w*C]) #print("velocidad del robot",vel_robot) vel_wheels =np.matmul(M_r2wheels,vel_robot) vel_wheels[0] = 180/pi * vel_wheels[0] vel_wheels[1] = 180/pi * vel_wheels[1] #print(vel_wheels) if np.absolute(vel_wheels[0]) > 400 : vel_wheels[0] = np.sign(vel_wheels[0])*400 if np.absolute(vel_wheels[1]) > 400: vel_wheels[1] = np.sign(vel_wheels[1])*400 #print(vel_wheels) return vel_wheels def forward_localization(pos_rob, vel_wheels, Ts): # position of the robot (x,y,teta) , vel_wheels 1x2:(vel_right, vel_left) and Ts(sampling time) L = 14.5 R = 1.7 vel_wheels[0] = vel_wheels[0] * pi/180 vel_wheels[1] = vel_wheels[1] * pi/180 M_wheels2rob= np.array([[R/2,R/2],[-R/L,R/L]]) M_rob2w = 
np.array([[np.cos(pos_rob[2]*pi/180),0],[np.sin(pos_rob[2]*pi/180),0],[0,1]]) #print(M_rob2w) vel_robot = np.matmul(M_wheels2rob,vel_wheels) #print('vel_robot: ', vel_robot) vel_world = np.matmul(M_rob2w,vel_robot) new_pos_rob = np.zeros(3) #new_pos_rob[0] = pos_rob[0] + Ts*vel_world[0] #new_pos_rob[1] = pos_rob[1] + Ts*vel_world[1] #new_pos_rob[2] = pos_rob[2] + Ts*vel_world[2] incr_r = vel_robot[0]*Ts incr_teta = vel_robot[1]*Ts * 180/pi #print('radial increment:',incr_r,' angular increment: ',incr_teta) new_pos_rob[0] = pos_rob[0] + incr_r*np.cos((pos_rob[2]+incr_teta/2)*pi/180) new_pos_rob[1] = pos_rob[1] + incr_r*np.sin((pos_rob[2]+incr_teta/2)*pi/180) new_pos_rob[2] = pos_rob[2] + incr_teta #print('new pos: ', new_pos_rob) if new_pos_rob[2] >360: new_pos_rob[2] = new_pos_rob[2] - 360 elif new_pos_rob[2] < 0 : new_pos_rob[2] = 360 + new_pos_rob[2] #print(new_pos_rob) return new_pos_rob def odometry_localization(pos_rob, odom_r, odom_l, Ts): # position of the robot (x,y,teta) , vel_wheels 1x2:(vel_right, vel_left) and Ts(sampling time) L = 14.5 R = 1.7 M_wheels2rob= np.array([[R/2,R/2],[-R/L,R/L]]) M_rob2w = np.array([[np.cos(pos_rob[2]*pi/180),0],[np.sin(pos_rob[2]*pi/180),0],[0,1]]) #print(M_rob2w) odom_r = odom_r*pi/180 odom_l = odom_l*pi/180 vel_wheels = np.array([odom_r,odom_l]) vel_robot = np.matmul(M_wheels2rob,vel_wheels) #print('vel_robot: ', vel_robot) vel_world = np.matmul(M_rob2w,vel_robot) new_pos_rob = np.zeros(3) #new_pos_rob[0] = pos_rob[0] + Ts*vel_world[0] #new_pos_rob[1] = pos_rob[1] + Ts*vel_world[1] #new_pos_rob[2] = pos_rob[2] + Ts*vel_world[2] incr_r = vel_robot[0] incr_teta = vel_robot[1] * 180/pi #print(incr_teta) #print('radial increment:',incr_r,' angular increment: ',incr_teta) new_pos_rob[0] = pos_rob[0] + incr_r*np.cos((pos_rob[2]+incr_teta/2)*pi/180) new_pos_rob[1] = pos_rob[1] + incr_r*np.sin((pos_rob[2]+incr_teta/2)*pi/180) new_pos_rob[2] = pos_rob[2] + incr_teta if new_pos_rob[2] >360: new_pos_rob[2] = new_pos_rob[2] - 360 elif 
new_pos_rob[2] < 0 : new_pos_rob[2] = 360 + new_pos_rob[2] #print(new_pos_rob) return new_pos_rob def select_target(pos_rob,path): #print("path inside select target",path) #print(np.size(path)) shortest_dist = 100000000000; shd2 = 100000000000; output=0 num=2 if path.shape[0]<=num: num=path.shape[0] for i in range(num): #compute the euclidean distance for all the possible points to go #distance2 = np.sqrt(np.power(path[0,i]-pos_rob[0],2)+np.power(path[1,i]-pos_rob[1],2)) #distance = np.absolute((path[0,i]-pos_rob[0])*np.sin(pos_rob[2]*pi/180) - (path[1,i]-pos_rob[1])*np.cos(pos_rob[2]*pi/180)) distance = np.absolute((path[i,0]-pos_rob[0])*np.sin(pos_rob[2]*pi/180) - (path[i,1]-pos_rob[1])*np.cos(pos_rob[2]*pi/180)) #distance= np.sqrt(np.power(path[i,0]-pos_rob[0],2)+np.power(path[i,1]-pos_rob[1],2)) if distance <= shortest_dist : #print("distance",distance) shortest_dist = distance output = i if output == path.shape[0]-1: output = i-1 if shortest_dist<2: new_path = path[(output+1):,:] target = path[output+1,:] else: new_path = path[(output):,:] target = path[output,:] print('Point to go : ',target,'and new path',new_path.shape) #print('new path : ',new_path) return target , new_path def kalman_filter(odom_r,odom_l,pos_rob,marker_list, marker_map,Ts,P): L = 14.5 R = 1.7 #From degrees to radians odom_l = odom_l*pi/180 odom_r = odom_r*pi/180 # get increments incr_r = R/2*(odom_r+odom_l) incr_teta = R/L*(odom_l-odom_r) * 180/pi ## A and B matrixes increment_R = R/2*(odom_r + odom_l) increment_teta = R/L*(odom_l-odom_r) * 180/pi # We want the increment in teta in degrees A = np.identity(3) A[0,2] = -increment_R*np.sin((pos_rob[2]+increment_teta/2)*pi/180) A[1,2] = increment_R*np.cos((pos_rob[2]+increment_teta/2)*pi/180) c = np.cos((pos_rob[2]+increment_teta/2)*pi/180); s = np.sin((pos_rob[2]+increment_teta/2)*pi/180) B = np.zeros([3,2]) B[0,0] = R/2*c+R*increment_R*R/(2*L)*s B[0,1] = R/2*c-R*increment_R*R/(2*L)*s B[1,0] = R/2*s-increment_R*R/(2*L)*c B[1,1] = 
R/2*s+increment_R*R/(2*L)*c B[2,0] = -R/L B[2,1] = R/L # H Matrix marker_list=np.array(marker_list) markers = [] for i in range (0,marker_list.shape[0]): #print("marker list",marker_list) if marker_list[i,0] < 900: distance = np.power(marker_map[i,0]-pos_rob[0],2) + np.power(marker_map[i,1]-pos_rob[1],2) if distance != 0: markers.append(i) #The size of the H array is related with the number of markers we see #H = np.zeros([len(markers)*3,3]) H = np.zeros([len(markers)*2,3]) #R = np.zeros([3*len(markers),3*len(markers)]) R = np.zeros([2*len(markers),2*len(markers)]) for i in range(0,len(markers)): distance = np.power(marker_map[markers[i],0]
-pos_rob[0],2) + np.power(marker_map[markers[i],1]-pos_rob[1],2) ''' H[i*3,0] = (marker_map[markers[i],1]-pos_rob[1])/distance H[i*3,1] = -(marker_map[markers
[i],0]-pos_rob[0])/distance H[i*3,2] = -1 H[i*3+1,0] = (pos_rob[0]-marker_map[markers[i],0])/np.sqrt(distance) H[i*3+1,1]= (pos_rob[1]-marker_map[markers[i],1])/np.sqrt(distance) H[i*3+1,2] = 0 H[i*3+2,0] = 0 H[i*3+2,1] = 0 H[i*3+2,2] = -1 ''' H[i*2,0] = (marker_map[markers[i],1]-pos_rob[1])/distance H[i*2,1] = -(marker_map[markers[i],0]-pos_rob[0])/distance H[i*2,2] = -1 H[i*2+1,0] = (pos_rob[0]-marker_map[markers[i],0])/np.sqrt(distance) H[i*2+1,1]= (pos_rob[1]-marker_map[markers[i],1])/np.sqrt(distance) H[i*2+1,2] = 0 #Noise of the measuremenets #R[i*3,i*3] = 1/np.power(10,5) #R[i*3+1,i*3+1] = 1/np.power(10,6) #R[i*3+2,i*3+2] = 1/np.power(10,6) R[i*2,i*2] = 1/np.power(10,5) R[i*2+1,i*2+1] = 1/np.power(10,6) # Process noise #print(H) #noise variance of the encoders noise_enc = 1/np.power(10,7) var_noise_enc = np.power(noise_enc/Ts,2) #noise variance of the model Q = np.zeros([3,3]) Q[0,0] = 1/np.power(10,4) Q[1,1] = 1/np.power(10,4) Q[2,2] = 1/np.power(7.62,5) # Kalman init #Prediction step P_pred = np.add(np.add(np.multiply(A,np.multi
# This file is part of the django-environ.
#
# Copyright (c) 2021, Serghei Iakovlev <egrep@protonmail.ch>
# Copyright (c) 2013-2021, Daniele Faraglia <daniele.faraglia@gmail.com>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.

from environ.compat import json


class FakeEnv:
    """Canned connection strings plus a canned environment for the test suite."""

    URL = 'http://www.google.com/'
    POSTGRES = 'postgres://uf07k1:wegauwhg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722'
    MYSQL = 'mysql://bea6eb0:69772142@us-cdbr-east.cleardb.com/heroku_97681?reconnect=true'
    MYSQL_CLOUDSQL_URL = 'mysql://djuser:hidden-password@//cloudsql/arvore-codelab:us-central1:mysqlinstance/mydatabase'
    MYSQLGIS = 'mysqlgis://user:password@127.0.0.1/some_database'
    SQLITE = 'sqlite:////full/path/to/your/database/file.sqlite'
    ORACLE_TNS = 'oracle://user:password@sid/'
    ORACLE = 'oracle://user:password@host:1521/sid'
    CUSTOM_BACKEND = 'custom.backend://user:password@example.com:5430/database'
    REDSHIFT = 'redshift://user:password@examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com:5439/dev'
    MEMCACHE = 'memcache://127.0.0.1:11211'
    REDIS = 'rediscache://127.0.0.1:6379/1?client_class=django_redis.client.DefaultClient&password=secret'
    EMAIL = 'smtps://user@domain.com:password@smtp.example.com:587'
    JSON = {'one': 'bar', 'two': 2, 'three': 33.44}
    DICT = {'foo': 'bar', 'test': 'on'}
    PATH = '/home/dev'
    EXPORTED = 'exported var'

    @classmethod
    def generate_data(cls):
        """Return the complete fake environment mapping used by the tests."""
        return {
            'STR_VAR': 'bar',
            'MULTILINE_STR_VAR': 'foo\\nbar',
            'MULTILINE_QUOTED_STR_VAR': '---BEGIN---\\r\\n---END---',
            'MULTILINE_ESCAPED_STR_VAR': '---BEGIN---\\\\n---END---',
            'INT_VAR': '42',
            'FLOAT_VAR': '33.3',
            'FLOAT_COMMA_VAR': '33,3',
            'FLOAT_STRANGE_VAR1': '123,420,333.3',
            'FLOAT_STRANGE_VAR2': '123.420.333,3',
            'FLOAT_NEGATIVE_VAR': '-1.0',
            'BOOL_TRUE_STRING_LIKE_INT': '1',
            'BOOL_TRUE_INT': 1,
            'BOOL_TRUE_STRING_LIKE_BOOL': 'True',
            'BOOL_TRUE_STRING_1': 'on',
            'BOOL_TRUE_STRING_2': 'ok',
            'BOOL_TRUE_STRING_3': 'yes',
            'BOOL_TRUE_STRING_4': 'y',
            'BOOL_TRUE_STRING_5': 'true',
            'BOOL_TRUE_BOOL': True,
            'BOOL_FALSE_STRING_LIKE_INT': '0',
            'BOOL_FALSE_INT': 0,
            'BOOL_FALSE_STRING_LIKE_BOOL': 'False',
            'BOOL_FALSE_BOOL': False,
            'PROXIED_VAR': '$STR_VAR',
            'ESCAPED_VAR': r'\$baz',
            'INT_LIST': '42,33',
            'INT_TUPLE': '(42,33)',
            'STR_LIST_WITH_SPACES': ' foo, bar',
            'EMPTY_LIST': '',
            'DICT_VAR': 'foo=bar,test=on',
            'DATABASE_URL': cls.POSTGRES,
            'DATABASE_MYSQL_URL': cls.MYSQL,
            'DATABASE_MYSQL_GIS_URL': cls.MYSQLGIS,
            'DATABASE_SQLITE_URL': cls.SQLITE,
            'DATABASE_ORACLE_URL': cls.ORACLE,
            'DATABASE_ORACLE_TNS_URL': cls.ORACLE_TNS,
            'DATABASE_REDSHIFT_URL': cls.REDSHIFT,
            'DATABASE_CUSTOM_BACKEND_URL': cls.CUSTOM_BACKEND,
            'DATABASE_MYSQL_CLOUDSQL_URL': cls.MYSQL_CLOUDSQL_URL,
            'CACHE_URL': cls.MEMCACHE,
            'CACHE_REDIS': cls.REDIS,
            'EMAIL_URL': cls.EMAIL,
            'URL_VAR': cls.URL,
            'JSON_VAR': json.dumps(cls.JSON),
            'PATH_VAR': cls.PATH,
            'EXPORTED_VAR': cls.EXPORTED,
        }
# Post-syncdb hook for Django Evolution (Python 2 / pre-1.7 Django:
# uses `print` statements and the removed get_apps/get_app APIs).
try:
    import cPickle as pickle
except ImportError:
    import pickle as pickle

from django.core.management.color import color_style
from django.db.models import signals, get_apps, get_app

from django_evolution import is_multi_db, models as django_evolution
from django_evolution.evolve import get_evolution_sequence, get_unapplied_evolutions
from django_evolution.signature import create_project_sig
from django_evolution.diff import Diff

style = color_style()


def install_baseline(app, latest_version, using_args, verbosity):
    """Record an app's whole evolution sequence as already applied.

    Used when baselining a fresh database so existing evolutions are not
    re-run against schema that syncdb just created.
    """
    # NOTE(review): assumes the conventional '<package>.models' module path,
    # hence the [-2] component -- confirm for unusually nested apps.
    app_label = app.__name__.split('.')[-2]
    sequence = get_evolution_sequence(app)

    if sequence:
        if verbosity > 0:
            print 'Evolutions in %s baseline:' % app_label, \
                  ', '.join(sequence)

    for evo_label in sequence:
        evolution = django_evolution.Evolution(app_label=app_label,
                                               label=evo_label,
                                               version=latest_version)
        evolution.save(**using_args)


def evolution(app, created_models, verbosity=1, **kwargs):
    """
    A hook into syncdb's post_syncdb signal, that is used to notify the user
    if a model evolution is necessary.
    """
    default_db = None

    if is_multi_db():
        # Imported lazily: django.db.utils.DEFAULT_DB_ALIAS only exists on
        # multi-db-capable Django versions.
        from django.db.utils import DEFAULT_DB_ALIAS
        default_db = DEFAULT_DB_ALIAS

    db = kwargs.get('db', default_db)
    proj_sig = create_project_sig(db)
    signature = pickle.dumps(proj_sig)

    using_args = {}

    if is_multi_db():
        using_args['using'] = db

    try:
        if is_multi_db():
            latest_version = \
                django_evolution.Version.objects.using(db).latest('when')
        else:
            latest_version = django_evolution.Version.objects.latest('when')
    except django_evolution.Version.DoesNotExist:
        # We need to create a baseline version.
        if verbosity > 0:
            print "Installing baseline version"

        latest_version = django_evolution.Version(signature=signature)
        latest_version.save(**using_args)

        for a in get_apps():
            install_baseline(a, latest_version, using_args, verbosity)

    unapplied = get_unapplied_evolutions(app, db)

    if unapplied:
        print style.NOTICE('There are unapplied evolutions for %s.'
                           % app.__name__.split('.')[-2])

    # Evolutions are checked over the entire project, so we only need to check
    # once. We do this check when Django Evolutions itself is synchronized.
    if app == django_evolution:
        # Stored signatures are pickled strings; str() coerces possible
        # unicode coming back from the database before unpickling.
        old_proj_sig = pickle.loads(str(latest_version.signature))

        # If any models or apps have been added, a baseline must be set
        # for those new models
        changed = False
        new_apps = []

        for app_name, new_app_sig in proj_sig.items():
            if app_name == '__version__':
                # Ignore the __version__ tag
                continue

            old_app_sig = old_proj_sig.get(app_name, None)

            if old_app_sig is None:
                # App has been added
                old_proj_sig[app_name] = proj_sig[app_name]
                new_apps.append(app_name)
                changed = True
            else:
                for model_name, new_model_sig in new_app_sig.items():
                    old_model_sig = old_app_sig.get(model_name, None)

                    if old_model_sig is None:
                        # Model has been added
                        old_proj_sig[app_name][model_name] = \
                            proj_sig[app_name][model_name]
                        changed = True

        if changed:
            if verbosity > 0:
                print "Adding baseline version for new models"

            latest_version = \
                django_evolution.Version(signature=pickle.dumps(old_proj_sig))
            latest_version.save(**using_args)

            for app_name in new_apps:
                install_baseline(get_app(app_name), latest_version,
                                 using_args, verbosity)

        # TODO: Model introspection step goes here.
        # # If the current database state doesn't match the last
        # # saved signature (as reported by latest_version),
        # # then we need to update the Evolution table.
        # actual_sig = introspect_project_sig()
        # acutal = pickle.dumps(actual_sig)
        # if actual != latest_version.signature:
        #     nudge = Version(signature=actual)
        #     nudge.save()
        #     latest_version = nudge

        diff = Diff(old_proj_sig, proj_sig)

        if not diff.is_empty():
            print style.NOTICE(
                'Project signature has changed - an evolution is required')

            if verbosity > 1:
                old_proj_sig = pickle.loads(str(latest_version.signature))
                print diff


signals.post_syncdb.connect(evolution)
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.http.response import HttpResponse, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateView

from shuup.core import telemetry


class TelemetryView(TemplateView):
    """Admin page for inspecting and opting in/out of Shuup telemetry."""

    template_name = "shuup/admin/system/telemetry.jinja"

    def get_context_data(self, **kwargs):
        context = super(TelemetryView, self).get_context_data(**kwargs)
        context["opt_in"] = not telemetry.is_opt_out()
        context["is_grace"] = telemetry.is_in_grace_period()
        context["last_submission_time"] = telemetry.get_last_submission_time()
        context["submission_data"] = telemetry.get_telemetry_data(
            request=self.request, indent=2)
        context["title"] = _("Telemetry")
        return context

    def get(self, request, *args, **kwargs):
        # A "?last" query parameter serves the raw payload of the most
        # recent submission instead of the regular template page.
        if "last" not in request.GET:
            return super(TelemetryView, self).get(request, *args, **kwargs)
        return HttpResponse(telemetry.get_last_submission_data(),
                            content_type="text/plain; charset=UTF-8")

    def post(self, request, *args, **kwargs):
        # Opt the shop in or out, then redirect back (post/redirect/get).
        choice = request.POST.get("opt")
        if choice:
            telemetry.set_opt_out(choice == "out")
        return HttpResponseRedirect(request.path)
# Script: compare the analytical Battaglia tSZ power spectrum against the
# y-y spectrum computed by this repo, and plot both plus their fractional
# difference. Input paths are hard-coded to a local machine.
import numpy as np
import pylab as pl
from astropy.io import fits
from scipy.interpolate import interp1d

# Beam width converted FWHM(arcmin) -> sigma(radian).
# NOTE(review): the leading 0 makes sigma_y (and sigmasq) exactly zero, so
# every exp(-l^2*sigmasq) beam factor below is 1 -- presumably intentional
# for an unsmoothed comparison; confirm.
sigma_y = 0 * np.pi / 2.355 / 60. /180. #angle in radian
sigmasq = sigma_y * sigma_y

#f = fits.open('/media/luna1/flender/projects/gasmod/maps/OuterRim/cl_tsz150_Battaglia_c05_R13.fits')[1].data
#l = np.arange(10000)
#pl.semilogx(l, l*(l+1)*f['TEMPERATURE'][1:]/2./np.pi, label='Simulation')

# Analytical Battaglia spectrum: columns are (ell, D_ell).
bl, bcl = np.genfromtxt('/media/luna1/vinu/github/HaloModel/data/battaglia_analytical.csv', delimiter=',', unpack=True)
Bl = np.exp(-bl*bl*sigmasq)
bclsm = bcl*Bl
# D_ell -> C_ell-like normalization. NOTE(review): 6.7354 is an undocumented
# scale factor (also used below) -- confirm its origin.
bclsm = bclsm *2*np.pi/ bl / (bl+1) /6.7354
#pl.semilogx(bl, bclsm, label='Battaglia')
pl.loglog(bl, bclsm, label='Battaglia')

# Computed y-y spectrum from this repo: (ell, cl1, cl2, cl).
vl, vcl1, vcl2, vcl = np.genfromtxt('/media/luna1/vinu/github/HaloModel/data/cl_yy.dat', unpack=True)
# NOTE(review): the first Dl expression is immediately overwritten by the
# second and is dead code -- confirm which normalization was intended.
Dl = vl*(1.+vl)*vcl1*1e12*6.7354/2./np.pi
Dl = vcl1*1e12
Bl = np.exp(-vl*vl*sigmasq)
# Interpolator used to evaluate this spectrum at the Battaglia ell values.
spl = interp1d(vl, Dl*Bl)

pl.figure(1)
#pl.semilogx(vl, Dl*Bl, label='Vinu')
pl.loglog(vl, Dl*Bl, label='Vinu')
pl.xlim(500,10000)
pl.xlabel(r'$\ell$')
pl.ylabel(r'$D_\ell$')
pl.legend(loc=0)
pl.savefig('../figs/compare_battaglia_vinu_simulation.png', bbox_inches='tight')

# Fractional difference between the two spectra at the Battaglia ells.
pl.figure(2)
pl.plot(bl, (bclsm-spl(bl))/spl(bl), label='Battaglia/Vinu')
pl.xlabel(r'$\ell$')
pl.ylabel('Battaglia/Vinu')
pl.show()
"""modify Revision ID: 4cefae1354ee Revises: 51f5ccfba190 Create Date: 2016-07-23 13:16:09.932365 """ # revision identifiers, used by Alembic. revision = '4cefae1354ee' down_revision = '51f5ccfba1
90' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=128), nullable=True)) op.create_index('ix_posts_title', 'posts', ['title'], unique=False) ###
end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_index('ix_posts_title', 'posts') op.drop_column('posts', 'title') ### end Alembic commands ###
ance = fake_instance.fake_instance_obj(self.context) self._vmops.power_on(mock_instance, mock.sentinel.block_device_info) mock_fix_instance_vol_paths.assert_called_once_with( mock_instance.name, mock.sentinel.block_device_info) mock_set_vm_state.assert_called_once_with( mock_instance, constants.HYPERV_VM_STATE_ENABLED) @mock.patch.object(vmops.VMOps, 'log_vm_serial_output') @mock.patch.object(vmops.VMOps, '_delete_vm_console_log') def _test_set_vm_state(self, mock_delete_vm_console_log, mock_log_vm_output, state): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._set_vm_state(mock_instance, state) self._vmops._vmutils.set_vm_state.assert_called_once_with( mock_instance.name, state) if state in (constants.HYPERV_VM_STATE_DISABLED, constants.HYPERV_VM_STATE_REBOOT): mock_delete_vm_console_log.assert_called_once_with(mock_instance) if state in (constants.HYPERV_VM_STATE_ENABLED, constants.HYPERV_VM_STATE_REBOOT): mock_log_vm_output.assert_called_once_with(mock_instance.name, mock_instance.uuid) def test_set_vm_state_disabled(self): self._test_set_vm_state(state=constants.HYPERV_VM_STATE_DISABLED) def test_set_vm_state_enabled(self): self._test_set_vm_state(state=constants.HYPERV_VM_STATE_ENABLED) def test_set_vm_state_reboot(self): self._test_set_vm_state(state=constants.HYPERV_VM_STATE_REBOOT) def test_set_vm_state_exception(self): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._vmutils.set_vm_state.side_effect = vmutils.HyperVException self.assertRaises(vmutils.HyperVException, self._vmops._set_vm_state, mock_instance, mock.sentinel.STATE) def test_get_vm_state(self): summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED} with mock.patch.object(self._vmops._vmutils, 'get_vm_summary_info') as mock_get_summary_info: mock_get_summary_info.return_value = summary_info response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME) self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED) 
@mock.patch.object(vmops.VMOps, '_get_vm_state') def test_wait_for_power_off_true(self, mock_get_state): mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED result = self._vmops._wait_for_power_off( mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME) self.assertTrue(result) @mock.patch.object(vmops.etimeout, "with_timeout") def test_wait_for_power_off_false(self, mock_with_timeout): mock_with_timeout.side_effect = etimeout.Timeout() result = self._vmops._wait_for_power_off( mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) self.assertFalse(result) @mock.patch.object(ioutils, 'IOThread') def _test_log_vm_serial_output(self, mock_io_thread, worker_running=False, worker_exists=False): self._vmops._pathutils.get_vm_console_log_paths.return_value = ( mock.sentinel.log_path, ) fake_instance_uuid = 'fake-uuid' fake_existing_worker = mock.Mock() fake_existing_worker.is_active.return_value = worker_running fake_log_writers = {fake_instance_uuid: fake_existing_worker} self._vmops._vm_log_writers = ( fake_log_writers if worker_exists else {}) self._vmops.log_vm_serial_output(mock.sentinel.instance_name, fake_instance_uuid) if not (worker_exists and worker_running): expected_pipe_path = r'\\.\pipe\%s' % fake_instance_uuid expected_current_worker = mock_io_thread.return_value expected_current_worker.start.assert_called_once_with() mock_io_thread.assert_called_once_with( expected_pipe_path, mock.sentinel.log_path, self._vmops._MAX_CONSOLE_LOG_FILE_SIZE) else: expected_current_worker = fake_existing_worker self.assertEqual(expected_current_worker, self._vmops._vm_log_writers[fake_instance_uuid]) def test_log_vm_serial_output_unexisting_worker(self): self._test_log_vm_serial_output() def test_log_vm_serial_output_worker_stopped(self): self._test_log_vm_serial_output(worker_exists=True) def test_log_vm_serial_output_worker_running(self): self._test_log_vm_serial_output(worker_exists=True, 
worker_running=True) def test_copy_vm_console_logs(self): fake_local_paths = (mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_PATH_ARCHIVED) fake_remote_paths = (mock.sentinel.FAKE_REMOTE_PATH, mock.sentinel.FAKE_REMOTE_PATH_ARCHIVED) self._vmops._pathutils.get_vm_console_log_paths.side_effect = [ fake_local_paths, fake_remote_paths] self._vmops._pathutils.exists.side_effect = [True, False] self._vmops.copy_vm_console_logs(mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_DEST) calls = [mock.call(mock.sentinel.FAKE_VM_NAME), mock.call(mock.sentinel.FAKE_VM_NAME, remote_server=mock.sentinel.FAKE_DEST)] self._vmops._pathutils.get_vm_console_log_paths.assert_has_calls(calls) calls = [mo
ck.call(mock.sentinel.FAKE_PATH), mock.call(mock.sentinel.FAKE_PATH_ARCHIVED)] self._vmops._pathutils.exists.assert_has_calls(calls) self._vmops._pathutils.copy.assert_called_once_with( mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_REMOTE_PATH) @mock.patch.object(vmops.iouti
ls, 'IOThread') def test_log_vm_serial_output(self, fake_iothread): self._vmops._pathutils.get_vm_console_log_paths.return_value = [ mock.sentinel.FAKE_PATH] self._vmops.log_vm_serial_output(mock.sentinel.FAKE_VM_NAME, self.FAKE_UUID) pipe_path = r'\\.\pipe\%s' % self.FAKE_UUID fake_iothread.assert_called_once_with( pipe_path, mock.sentinel.FAKE_PATH, self._vmops._MAX_CONSOLE_LOG_FILE_SIZE) fake_iothread.return_value.start.assert_called_once_with() @mock.patch("os.path.exists") def test_get_console_output(self, fake_path_exists): mock_instance = fake_instance.fake_instance_obj(self.context) fake_path_exists.return_value = True self._vmops._pathutils.get_vm_console_log_paths.return_value = ( mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_PATH_ARCHIVED) with mock.patch('nova.virt.hyperv.vmops.open', mock.mock_open(read_data=self.FAKE_LOG), create=True): instance_log = self._vmops.get_console_output(mock_instance) # get_vm_console_log_paths returns 2 paths. self.assertEqual(self.FAKE_LOG * 2, instance_log) expected_calls = [mock.call(mock.sentinel.FAKE_PATH_ARCHIVED), mock.call(mock.sentinel.FAKE_PATH)] fake_path_exists.assert_has_calls(expected_calls, any_order=False) @mock.patch("__builtin__.open") @mock.patch("os.path.exists") def test_get_console_output_exception(self, fake_path_exists, fake_open): fake_vm = mock.MagicMock() fake_open.side_effect = vmutils.HyperVException fake_path_exists.return_value = True self._vmops._pathutils.get_vm_console_log_paths.return_value = ( mock.sentinel.fake_console_log_path, mock.sentinel.fake_console_log_archived) with mock.patch('nova.virt.hyperv.vmops.open', fak
ck_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled") def test_path_env_gets_set_from_main(self): """PATH environment should always have some tokens when main is run. We explicitly call main as we want to ensure it updates PATH.""" cust = copy.deepcopy(VALID_CFG['NoCloud']) rootd = self.tmp_dir() mpp = 'main-printpath' pre = "MYPATH=" cust['files'][mpp] = ( 'PATH="/mycust/path"; main; r=$?; echo ' + pre + '$PATH; exit $r;') ret = self._check_via_dict( cust, RC_FOUND, func=".", args=[os.path.join(rootd, mpp)], rootd=rootd) line = [l for l in ret.stdout.splitlines() if l.startswith(pre)][0] toks = line.replace(pre, "").split(":") expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"] self.assertEqual(expected, [p for p in expected if p in toks], "path did not have expected tokens") def test_zstack_is_ec2(self): """EC2: chassis asset tag ends with 'zstack.io'""" self._test_ds_found('Ec2-ZStack') def test_e24cloud_is_ec2(self): """EC2: e24cloud identified by sys_vendor""" self._test_ds_found('Ec2-E24Cloud') def test_e24clou
d_not_active(self): """EC2: bobrightbox.com in product_serial is not brightbox'""" self._test_ds_not_found('Ec2-E24Cloud-negative') class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identif
y.""" inst_log = "/root/swinstall.log" prov_cfg = "/root/provisioningConfiguration.cfg" boot_ref = "/proc/1/environ" funcname = "is_ibm_provisioning" def test_no_config(self): """No provisioning config means not provisioning.""" ret = self.call(files={}, func=self.funcname) self.assertEqual(shell_false, ret.rc) def test_config_only(self): """A provisioning config without a log means provisioning.""" ret = self.call(files={self.prov_cfg: "key=value"}, func=self.funcname) self.assertEqual(shell_true, ret.rc) def test_config_with_old_log(self): """A config with a log from previous boot is not provisioning.""" rootd = self.tmp_dir() data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), self.inst_log: ("log data\n", -30), self.boot_ref: ("PWD=/", 0)} populate_dir_with_ts(rootd, data) ret = self.call(rootd=rootd, func=self.funcname) self.assertEqual(shell_false, ret.rc) self.assertIn("from previous boot", ret.stderr) def test_config_with_new_log(self): """A config with a log from this boot is provisioning.""" rootd = self.tmp_dir() data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), self.inst_log: ("log data\n", 30), self.boot_ref: ("PWD=/", 0)} populate_dir_with_ts(rootd, data) ret = self.call(rootd=rootd, func=self.funcname) self.assertEqual(shell_true, ret.rc) self.assertIn("from current boot", ret.stderr) class TestOracle(DsIdentifyBase): def test_found_by_chassis(self): """Simple positive test of Oracle by chassis id.""" self._test_ds_found('Oracle') def test_not_found(self): """Simple negative test of Oracle.""" mycfg = copy.deepcopy(VALID_CFG['Oracle']) mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle" self._check_via_dict(mycfg, rc=RC_NOT_FOUND) def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" if disks is None: disks = [] lines = [] for disk in disks: if not disk["DEVNAME"].startswith("/dev/"): disk["DEVNAME"] = "/dev/" + disk["DEVNAME"] # devname needs to be first. 
lines.append("%s=%s" % ("DEVNAME", disk["DEVNAME"])) for key in [d for d in disk if d != "DEVNAME"]: lines.append("%s=%s" % (key, disk[key])) lines.append("") return '\n'.join(lines) def _print_run_output(rc, out, err, cfg, files): """A helper to print return of TestDsIdentify. _print_run_output(self.call())""" print('\n'.join([ '-- rc = %s --' % rc, '-- out --', str(out), '-- err --', str(err), '-- cfg --', util.json_dumps(cfg)])) print('-- files --') for k, v in files.items(): if "/_shwrap" in k: continue print(' === %s ===' % k) for line in v.splitlines(): print(" " + line) VALID_CFG = { 'AliYun': { 'ds': 'AliYun', 'files': {P_PRODUCT_NAME: 'Alibaba Cloud ECS\n'}, }, 'Azure-dmi-detection': { 'ds': 'Azure', 'files': { P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n', } }, 'Azure-seed-detection': { 'ds': 'Azure', 'files': { P_CHASSIS_ASSET_TAG: 'No-match\n', os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n', } }, 'Ec2-hvm': { 'ds': 'Ec2', 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}], 'files': { P_PRODUCT_SERIAL: 'ec23aef5-54be-4843-8d24-8c819f88453e\n', P_PRODUCT_UUID: 'EC23AEF5-54BE-4843-8D24-8C819F88453E\n', } }, 'Ec2-xen': { 'ds': 'Ec2', 'mocks': [MOCK_VIRT_IS_XEN], 'files': { 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n' }, }, 'Ec2-brightbox': { 'ds': 'Ec2', 'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'}, }, 'Ec2-brightbox-negative': { 'ds': 'Ec2', 'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'}, }, 'GCE': { 'ds': 'GCE', 'files': {P_PRODUCT_NAME: 'Google Compute Engine\n'}, 'mocks': [MOCK_VIRT_IS_KVM], }, 'GCE-serial': { 'ds': 'GCE', 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'}, 'mocks': [MOCK_VIRT_IS_KVM], }, 'NoCloud': { 'ds': 'NoCloud', 'mocks': [ MOCK_VIRT_IS_KVM, {'name': 'blkid', 'ret': 0, 'out': blkid_out( BLKID_UEFI_UBUNTU + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])}, ], 'files': { 'dev/vdb': 'pretend iso content for cidata\n', } }, 'NoCloudUpper': 
{ 'ds': 'NoCloud', 'mocks': [ MOCK_VIRT_IS_KVM, {'name': 'blkid', 'ret': 0, 'out': blkid_out( BLKID_UEFI_UBUNTU + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, ], 'files': { 'dev/vdb': 'pretend iso content for cidata\n', } }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { os.path.join(P_SEED_DIR, 'nocloud', 'user-data'): 'ud\n', os.path.join(P_SEED_DIR, 'nocloud', 'meta-data'): 'md\n', } }, 'NoCloud-seed-ubuntu-core': { 'ds': 'NoCloud', 'files': { os.path.join('writable/system-data', P_SEED_DIR, 'nocloud-net', 'user-data'): 'ud\n', os.path.join('writable/system-data', P_SEED_DIR, 'nocloud-net', 'meta-data'): 'md\n', } }, 'OpenStack': { 'ds': 'OpenStack', 'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'}, 'mocks': [MOCK_VIRT_IS_KVM], 'policy_dmi': POLICY_FOUND_ONLY, 'policy_no_dmi': POLICY_FOUND_ONLY, }, 'OpenStack-OpenTelekom': { # OTC gen1 (Xen) hosts use OpenStack datasource, LP: #1756471 'ds': 'OpenStack', 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'}, 'mocks': [MOCK_VIRT_IS_XEN], }, 'OpenStack-AssetTag-Nova': { # VMware vSphere can't modify product-name, LP: #1669875 'ds': 'OpenStack', 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'}, 'mocks': [MOCK_VIRT_IS_XEN], }, 'OpenStack-AssetTag-Compute': { # VMware vSphere can't modify product
self.close(handle) return [name for database in result.rows() for name in database] else: return [] def get_database(self, database): return self.client.get_database(database) def get_tables_meta(self, database='default', table_names='*'): identifier = self.to_matching_wildcard(table_names) return self.client.get_tables_meta(database, identifier) def get_tables(self, database='default', table_names='*'): identifier = self.to_matching_wildcard(table_names) hql = "SHOW TABLES IN `%s` '%s'" % (database, identifier) # self.client.get_tables(database, table_names) is too slow query = hql_query(hql) timeout = SERVER_CONN_TIMEOUT.get() handle = self.execute_and_wait(query, timeout_sec=timeout) if handle: result = self.fetch(handle, rows=5000) self.close(handle) return [name for table in result.rows() for name in table] else: return [] def get_table(self, database, table_name): return self.client.get_table(database, table_name) def get_column(self, database, table_name, column_name): table = self.client.get_table(database, table_name) for col in table.cols: if col.name == column_name: return col return None def execute_query(self, query, design): return self.execute_and_watch(query, design=design) def select_star_from(self, database, table): hql = "SELECT * FROM `%s`.`%s` %s" % (database, table.name, self._get_browse_limit_clause(table)) return self.execute_statement(hql) def execute_statement(self, hql): if self.server_name == 'impala': query = hql_query(hql, QUERY_TYPES[1]) else: query = hql_query(hql, QUERY_TYPES[0]) return self.execute_and_watch(query) def fetch(self, query_handle, start_over=False, rows=None): no_start_over_support = [config_variable for config_variable in self.get_default_configuration(False) if config_variable.key == 'support_start_over' and config_variable.value == 'false'] if no_start_over_support: start_over = False return self.client.fetch(query_handle, start_over, rows) def close_operation(self, query_handle): return 
self.client.close_operation(query_handle) def open_session(self, user): return self.client.open_session(user) def close_session(self, session): return self.client.close_session(session) def cancel_operation(self, query_handle): resp = self.client.cancel_operation(query_handle) if self.client.query_server['server_name'] == 'impala': resp = self.client.close_operation(query_handle) return resp def get_sample(self, database, table): """No samples if it's a view (HUE-526)""" if not table.is_view: limit = min(100, BROWSE_PARTITIONED_TABLE_LIMIT.get()) partition_query = "" if table.partition_keys: partitions = self.get_partitions(database, table, partition_spec=None, max_parts=1) partition_query = 'WHERE ' + ' AND '.join(["%s='%s'" % (table.partition_keys[idx].name, key) for idx, key in enumerate(partitions[0].values)]) hql = "SELECT * FROM `%s`.`%s` %s LIMIT %s" % (database, table.name, partition_query, limit) query = hql_query(hql) handle = self.execute_and_wait(query, timeout_sec=5.0) if handle: result = self.fetch(handle, rows=100) self.close(handle) return result def analyze_table(self, database, table): if self.server_name == 'impala': hql = 'COMPUTE STATS `%(databas
e)s`.`%(table)s`' % {'database': database, 'table': table} else: hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` COMPUTE STATISTICS' % {'database': database, 'table': table} return self.execute_statement(hql) def analyze_tab
le_columns(self, database, table): if self.server_name == 'impala': hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table} else: hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` COMPUTE STATISTICS FOR COLUMNS' % {'database': database, 'table': table} return self.execute_statement(hql) def get_table_stats(self, database, table): stats = [] if self.server_name == 'impala': hql = 'SHOW TABLE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table} query = hql_query(hql) handle = self.execute_and_wait(query, timeout_sec=5.0) if handle: result = self.fetch(handle, rows=100) self.close(handle) stats = list(result.rows()) else: table = self.get_table(database, table) stats = table.stats return stats def get_table_columns_stats(self, database, table, column): if self.server_name == 'impala': hql = 'SHOW COLUMN STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table} else: hql = 'DESCRIBE FORMATTED `%(database)s`.`%(table)s` %(column)s' % {'database': database, 'table': table, 'column': column} query = hql_query(hql) handle = self.execute_and_wait(query, timeout_sec=5.0) if handle: result = self.fetch(handle, rows=100) self.close(handle) data = list(result.rows()) if self.server_name == 'impala': data = [col for col in data if col[0] == column][0] return [ {'col_name': data[0]}, {'data_type': data[1]}, {'distinct_count': data[2]}, {'num_nulls': data[3]}, {'max_col_len': data[4]}, {'avg_col_len': data[5]}, ] else: return [ {'col_name': data[2][0]}, {'data_type': data[2][1]}, {'min': data[2][2]}, {'max': data[2][3]}, {'num_nulls': data[2][4]}, {'distinct_count': data[2][5]}, {'avg_col_len': data[2][6]}, {'max_col_len': data[2][7]}, {'num_trues': data[2][8]}, {'num_falses': data[2][9]} ] else: return [] def get_top_terms(self, database, table, column, limit=30, prefix=None): limit = min(limit, 100) prefix_match = '' if prefix: prefix_match = "WHERE CAST(%(column)s AS STRING) LIKE '%(prefix)s%%'" % {'column': 
column, 'prefix': prefix} hql = 'SELECT %(column)s, COUNT(*) AS ct FROM `%(database)s`.`%(table)s` %(prefix_match)s GROUP BY %(column)s ORDER BY ct DESC LIMIT %(limit)s' % { 'database': database, 'table': table, 'column': column, 'prefix_match': prefix_match, 'limit': limit, } query = hql_query(hql) handle = self.execute_and_wait(query, timeout_sec=60.0) # Hive is very slow if handle: result = self.fetch(handle, rows=limit) self.close(handle) return list(result.rows()) else: return [] def drop_table(self, database, table): if table.is_view: hql = "DROP VIEW `%s`.`%s`" % (database, table.name,) else: hql = "DROP TABLE `%s`.`%s`" % (database, table.name,) return self.execute_statement(hql) def load_data(self, database, table, form, design): hql = "LOAD DATA INPATH" hql += " '%s'" % form.cleaned_data['path'] if form.cleaned_data['overwrite']: hql += " OVERWRITE" hql += " INTO TABLE " hql += "`%s`.`%s`" % (database, table.name,) if form.partition_columns: hql += " PARTITION (" vals = [] for key, column_name in form.partition_columns.iteritems(): vals.append("%s='%s'" % (column_name, form.cleaned_data[key])) hql += ", ".join(vals) hql += ")" query = hql_query(hql, database) design.data = query.dumps() design.save() return self.execute_query(query, design) def drop_tables(self, database, tables, design): hql = [] for table in tables: if table.is_view: hql.append("DROP VIEW `%s`.`%s`" % (database, table.name,)) else: hql.append("DROP TABLE `%s`.`%s`" % (database, table.name,)) query = hql_query(';'.join(hql), database) design.data = query.dumps() design.save() return self.execute_query(query, design) def invalidate_tables(self, d
import copy


class Histogram(object):
    '''Histogram + a few things.

    This class does not inherit from a ROOT class as we could want to use it
    with a TH1D, TH1F, and even a 2D at some point.

    Histogram contains the original ROOT histogram, obj, and a weighted
    version, weighted, originally set equal to obj (weight == 1).

    - layer : can be used to order histograms
    - stack : to decide whether the histogram should be stacked or not
      (see the Stack class for more information)
    - name  : user defined histogram. Useful when manipulating several
      histograms with the same GetName(), coming from different TDirectories.
    '''

    def __init__(self, name, obj, layer=0., legendLine=None, stack=True):
        '''Wrap the ROOT histogram *obj* under the user-defined *name*.'''
        # name is a user defined name
        self.name = name
        # realName can be different if an alias is set
        self.realName = name
        if legendLine is None:
            self.legendLine = name
        else:
            self.legendLine = legendLine
        self.obj = obj
        self.layer = layer
        self.stack = stack
        self.on = True
        self.style = None
        # after construction, weighted histogram = base histogram
        self.SetWeight(1)

    def Clone(self, newName):
        '''Return a deep copy of this Histogram, renamed to newName.'''
        newHist = copy.deepcopy(self)
        newHist.name = newName
        newHist.legendLine = newName
        return newHist

    def __str__(self):
        fmt = '{self.name:<10} / {hname:<50},\t Layer ={self.layer:8.1f}, w = {weighted:8.1f}, u = {unweighted:8.1f}'
        tmp = fmt.format(self=self,
                         hname=self.realName,
                         weighted=self.Yield(weighted=True),
                         unweighted=self.Yield(weighted=False))
        return tmp

    def Yield(self, weighted=True):
        '''Return the weighted number of entries in the histogram
        (under- and overflow ARE counted: bins 0 .. nbins+1).

        Use weighted=False if you want the unweighted number of entries.'''
        hist = self.weighted
        if not weighted:
            hist = self.obj
        return hist.Integral(0, hist.GetNbinsX() + 1)

    def GetBinning(self):
        '''Return (nbins, xmin, xmax).'''
        return self.obj.GetNbinsX(), \
            self.obj.GetXaxis().GetXmin(), \
            self.obj.GetXaxis().GetXmax()

    def Rebin(self, factor):
        '''Rebin both histograms by factor.'''
        self.obj.Rebin(factor)
        self.weighted.Rebin(factor)

    def Divide(self, other):
        '''Divide both histograms by the corresponding one in other.'''
        self.obj.Divide(other.obj)
        self.weighted.Divide(other.weighted)

    def NormalizeToBinWidth(self):
        '''Divide each bin content and error by the bin size.'''
        for i in range(1, self.obj.GetNbinsX() + 1):
            self.obj.SetBinContent(i, self.obj.GetBinContent(i) / self.obj.GetBinWidth(i))
            self.obj.SetBinError(i, self.obj.GetBinError(i) / self.obj.GetBinWidth(i))
        for i in range(1, self.weighted.GetNbinsX() + 1):
            self.weighted.SetBinContent(i, self.weighted.GetBinContent(i) / self.weighted.GetBinWidth(i))
            self.weighted.SetBinError(i, self.weighted.GetBinError(i) / self.weighted.GetBinWidth(i))

    def SetWeight(self, weight):
        '''Set the weight and (re)create the weighted histogram.'''
        self.weighted = copy.deepcopy(self.obj)
        self.weight = weight
        self.weighted.Scale(weight)

    def Scale(self, scale):
        '''Scale the histogram (multiply the weight by scale).'''
        self.SetWeight(self.weight * scale)

    def SetStyle(self, style):
        '''Set the style for the original and weighted histograms.'''
        if style is None:
            return
        style.formatHisto(self.obj)
        style.formatHisto(self.weighted)
        self.style = style

    def AddEntry(self, legend, legendLine=None):
        '''Add this histogram to a TLegend.

        By default the legend entry is set to self.legendLine.'''
        if legendLine is None:
            legendLine = self.legendLine
        if legendLine is None:
            legendLine = self.name
        # filled box for stacked histograms, marker otherwise
        opt = 'f'
        if not self.stack:
            opt = 'p'
        legend.AddEntry(self.obj, legendLine, opt)

    def Draw(self, opt='hist', weighted=True):
        '''Draw the weighted (or original) histogram.'''
        if weighted is True:
            self.weighted.Draw(opt)
        else:
            self.obj.Draw(opt)

    def GetXaxis(self, opt='', weighted=True):
        '''Return the X axis of the weighted (or original) histogram.'''
        if weighted is True:
            return self.weighted.GetXaxis()
        else:
            return self.obj.GetXaxis()

    def GetYaxis(self, opt='', weighted=True):
        '''Return the Y axis of the weighted (or original) histogram.'''
        if weighted is True:
            return self.weighted.GetYaxis()
        else:
            return self.obj.GetYaxis()

    def GetMaximum(self, opt='', weighted=True):
        '''Return the maximum bin content of the weighted (or original) histogram.'''
        if weighted is True:
            return self.weighted.GetMaximum()
        else:
            return self.obj.GetMaximum()

    def Add(self, other, coeff=1):
        '''Add another histogram. Provide the optional coeff argument
        for the coefficient factor (e.g. -1 to subtract).'''
        self.obj.Add(other.obj, coeff)
        self.weighted.Add(other.weighted, coeff)
        # NOTE(review): the denominator excludes the overflow bin
        # (GetNbinsX()) while the numerator includes it (GetNbinsX()+1);
        # kept as-is to preserve behavior — confirm whether intentional.
        integral = self.obj.Integral(0, self.obj.GetNbinsX())
        if integral > 0.:
            self.weight = self.weighted.Integral(0, self.weighted.GetNbinsX() + 1) / integral
        return self

    def Integral(self, weighted=True, xmin=None, xmax=None):
        '''Return the weighted or unweighted integral of this histogram.

        If xmin and xmax are None, underflows and overflows are included.

        :raise ValueError: if weighted is not a bool, if [xmin, xmax) does
            not span an integer number of bins, or if only one boundary
            is given.'''
        if not isinstance(weighted, bool):
            raise ValueError('weighted should be a boolean')
        if xmin is not None:
            bmin = self.obj.FindFixBin(xmin)
        else:
            bmin = None
        if xmax is not None:
            # -1 so that xmax itself (a bin lower edge) is excluded
            bmax = self.obj.FindFixBin(xmax) - 1
        else:
            bmax = None
        hist = self.weighted
        if weighted is False:
            hist = self.obj
        if bmin is None and bmax is None:
            return hist.Integral(0, hist.GetNbinsX() + 1)
        elif bmin is not None and bmax is not None:
            if (xmax - xmin) % self.obj.GetBinWidth(1) != 0:
                raise ValueError(
                    'boundaries should define an integer number of bins.'
                    ' nbins=%d, xmin=%3.3f, xmax=%3.3f' % (
                        self.obj.GetNbinsX(),
                        self.obj.GetXaxis().GetXmin(),
                        self.obj.GetXaxis().GetXmax()))
            return hist.Integral(bmin, bmax)
        else:
            raise ValueError('if specifying one boundary, you must specify the other')

    def DrawNormalized(self):
        '''Draw a normalized version of this histogram.

        The original and weighted histograms stay untouched.'''
        self.obj.DrawNormalized()

    def Normalize(self):
        '''Set the weight to normalize the weighted histogram to 1.

        In other words, the original histogram stays untouched.'''
        self.Scale(1 / self.Integral())

    def RemoveNegativeValues(self, hist=None):
        '''Clip negative bin contents to zero (errors left unchanged).

        Called with no argument, processes both the weighted and the
        original histogram.'''
        # what about errors??
        if hist is None:
            self.RemoveNegativeValues(self.weighted)
            self.RemoveNegativeValues(self.obj)
        else:
            for ibin in range(1, hist.GetNbinsX() + 1):
                if hist.GetBinContent(ibin) < 0:
                    hist.SetBinContent(ibin, 0)

    def Blind(self, minx, maxx):
        '''Zero out bin contents and errors in [minx, maxx) for BOTH the
        weighted and the unweighted histogram.'''
        minbin = self.weighted.FindBin(minx)
        maxbin = min(self.weighted.FindBin(maxx), self.weighted.GetNbinsX() + 1)
        # BUGFIX: the original assigned uwhist = self.weighted, so the
        # unweighted histogram (self.obj) was never blinded and the
        # weighted one was zeroed twice.
        for hist in (self.weighted, self.obj):
            for ibin in range(minbin, maxbin):
                hist.SetBinContent(ibin, 0)
                hist.SetBinError(ibin, 0)
#!/usr/bin/env python

import os
import sys
# make the shared testing harness (one directory up) importable
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness


class StatepointTestHarness(TestHarness):
    """Test harness variant that checks several statepoint batches.

    Overrides the output check to verify that a statepoint file exists
    for each of the expected batch numbers (3, 6 and 9).
    """

    def __init__(self):
        # NOTE(review): TestHarness.__init__ is NOT called here; these
        # attributes appear to replace whatever the parent would set —
        # confirm against the parent class that nothing else is required.
        self._sp_name = None    # statepoint glob pattern, set per batch below
        self._tallies = False   # no tally files expected for this test
        self._opts = None
        self._args = None

    def _test_output_created(self):
        """Make sure statepoint files have been created."""
        sps = ('statepoint.03.*', 'statepoint.06.*', 'statepoint.09.*')
        for sp in sps:
            # reuse the parent's single-file check once per expected batch
            self._sp_name = sp
            TestHarness._test_output_created(self)


if __name__ == '__main__':
    harness = StatepointTestHarness()
    harness.main()
from pyramid.response import Response
from pyramid.view import view_config

from sqlalchemy.exc import DBAPIError

from .models import (
    DBSession,
    MyModel,
    )


@view_config(route_name='home', renderer='templates/mytemplate.pt')
def my_view(request):
    """Home page view.

    Fetches the ``MyModel`` row named ``'one'`` and hands it to the
    template; on any database driver error, returns a plain-text 500
    response with troubleshooting hints instead of raising.
    """
    try:
        one = DBSession.query(MyModel).filter(MyModel.name == 'one').first()
    except DBAPIError:
        # database unreachable or not initialized — show help text
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {'one': one, 'project': 'mypyramid'}


# Plain-text body served on database errors (see my_view above).
# NOTE(review): original line wrapping of this message was lost in
# extraction; reflowed to the standard Pyramid scaffold layout — confirm.
conn_err_msg = """\
Pyramid is having a problem using your SQL database.  The problem
might be caused by one of the following things:

1.  You may need to run the "initialize_mypyramid_db" script
    to initialize your database tables.  Check your virtual
    environment's "bin" directory for this script and try to run it.

2.  Your database server may not be running.  Check that the
    database server referred to by the "sqlalchemy.url" setting in
    your "development.ini" file is running.

After you fix the problem, please restart the Pyramid application to
try it again.
"""
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated pkg-config context for the hector_imu_tools catkin
# package. Do not edit by hand; values are substituted at build time
# (empty strings mean the package declares no entries for that field).
CATKIN_PACKAGE_PREFIX = ""
# include directories exported by this package (';'-separated at generation)
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
# space-separated list of catkin run dependencies
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
# libraries exported by this package, with linker prefix
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_imu_tools"
# devel-space path on the machine that generated this file
PROJECT_SPACE_DIR = "/home/trevor/ROS/catkin_ws/devel"
PROJECT_VERSION = "0.3.3"
#!/usr/bin/env python
# pylint: disable=C0103,W0622
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015 Leandro Toledo de Souza <leandrotoeldodesouza@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program.  If not, see [http://www.gnu.org/licenses/].

"""This module contains a object that represents a Telegram User"""

from telegram import TelegramObject


class User(TelegramObject):
    """This object represents a Telegram User.

    Attributes:
        id (int):
        first_name (str):
        last_name (str):
        username (str):

    Args:
        id (int):
        first_name (str):
        **kwargs: Arbitrary keyword arguments.

    Keyword Args:
        last_name (Optional[str]):
        username (Optional[str]):
    """

    def __init__(self, id, first_name, **kwargs):
        # Required fields
        self.id = int(id)
        self.first_name = first_name

        # Optional fields default to the empty string when absent
        self.last_name = kwargs.get('last_name', '')
        self.username = kwargs.get('username', '')

    @property
    def name(self):
        """str: Best available display name for this user.

        Prefers the @username handle, then "first last", then first name.
        """
        if self.username:
            return '@%s' % self.username
        elif self.last_name:
            return '%s %s' % (self.first_name, self.last_name)
        else:
            return self.first_name

    @staticmethod
    def de_json(data):
        """Build a User from a decoded JSON dict.

        Args:
            data (str):

        Returns:
            telegram.User: the new instance, or None for empty input.
        """
        return User(**data) if data else None
fos = dict() #this might take quite a long time self.configreg.foreach_language(self._get_language_variants, None) self.configreg.foreach_country(self._get_country_variants, None) #'grp' means that we want layout (group) switching options self.configreg.foreach_option('grp', self._get_switch_option, None) def _get_lang_variant(self, c_reg, item, subitem, lang): if subitem: name = item.get_name() + " (" + subitem.get_name() + ")" description = subitem.get_description() else: name = item.get_name() description = item.get_description() #if this layout has already been added for some other language, #do not add it again (would result in duplicates in our lists) if name not in self._layout_infos: self._layout_infos[name] = LayoutInfo(lang, description) def _get_country_variant(self, c_reg, item, subitem, country): if subitem: name = item.get_name() + " (" + subitem.get_name() + ")" description = subitem.get_description() else: name = item.get_name() description = item.get_description() # if the layout was not added with any language, add it with a country if name not in self._layout_infos: self._layout_infos[name] = LayoutInfo(country, description) def _get_language_variants(self, c_reg, item, user_data=None): lang_name, lang_desc = item.get_name(), item.get_description() c_reg.foreach_language_variant(lang_name, self._get_lang_variant, lang_desc) def _get_country_variants(self, c_reg, item, user_data=None): country_name, country_desc = item.get_name(), item.get_description() c_reg.foreach_country_variant(country_name, self._get_country_variant, country_desc) def _get_switch_option(self, c_reg, item, user_data=None): """Helper function storing layout switching options in foreach cycle""" desc = item.get_description() name = item.get_name() self._switch_opt_infos[name] = desc def get_current_layout(self): """ Get current activated X layout and variant :return: current activated X layout and variant (e.g. 
"cz (qwerty)") """ # ported from the widgets/src/LayoutIndicator.c code self._engine.start_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE) state = self._engine.get_current_state() cur_group = state.group num_groups = self._engine.get_num_groups() # BUG?: if the last layout in the list is activated and removed, # state.group may be equal to n_groups if cur_group >= num_groups: cur_group = num_groups - 1 layout = self._rec.layouts[cur_group] try: variant = self._rec.variants[cur_group] except IndexError: # X server may have forgotten to add the "" variant for its default layout variant = "" self._engine.stop_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE) return join_layout_variant(layout, variant) def get_available_layouts(self): """A generator yielding layouts (no need to store them as a bunch)""" return self._layout_infos.iterkeys() def get_switching_options(self): """Method returning list of available layout switching options""" return self._switch_opt_infos.iterkeys() def get_layout_variant_description(self, layout_variant, with_lang=True, xlated=True): """ Get description of the given layout-variant. :param layout_variant: layout-variant specification (e.g. 'cz (qwerty)') :type layout_variant: str :param with_lang: whether to include language of the layout-variant (if defined) in the description or not :type with_lang: bool :param xlated: whethe to return translated or english version of the description :type xlated: bool :return: description of the layout-variant specification (e.g. 
'Czech (qwerty)') :rtype: str """ layout_info = self._layout_infos[layout_variant] # translate language and upcase its first letter, translate the # layout-variant description if xlated: lang = iutil.upcase_first_letter(iso_(layout_info.lang).decode("utf-8")) description = Xkb_(layout_info.desc).decode("utf-8") else: lang = iutil.upcase_first_letter(layout_info.lang) description = layout_info.desc if with_lang and lang and not description.startswith(lang): return "%s (%s)" % (lang, description) else: return description def get_switch_opt_description(self, switch_opt): """ Get description of the given layout switching option. :param switch_opt: switching option name/ID (e.g. 'grp:alt_shift_toggle') :type switch_opt: str :return: description of the layout switching option (e.g. 'Alt + Shift') :rtype: str """ # translate the description of the switching option return Xkb_(self._switch_opt_infos[switch_opt]) @gtk_action_wait def activate_default_layout(self): """ Activates default layout (the first one in the list of configured layouts). """ self._engine.lock_group(0) def is_valid_layout(self, layout): """Return if given layout is valid layout or not""" return layout in self._layout_infos @gtk_action_wait def add_layout(self, layout): """ Method that tries to add a given layout to the current X configuration. The X layouts configuration is handled by two lists. A list of layouts and a list of variants. Index-matching items in these lists (as if they were zipped) are used for the construction of real layouts (e.g. 'cz (qwerty)'). 
:param layout: either 'layout' or 'layout (variant)' :raise XklWrapperError: if the given layout is invalid or cannot be added """ try: #we can get 'layout' or 'layout (variant)' (layout, variant) = parse_layout_variant(layout) except InvalidLayoutVariantSpec as ilverr: raise XklWrapperError("Failed to add layout: %s" % ilverr) #do not add the same layout-variant combinanion multiple times if (layout, variant) in zip(self._rec.layouts, self._rec.vari
ants): return self._rec.set_layouts(self._rec.layouts + [layout]) self._rec.set_variants(self._rec.variants + [variant]) if not self._rec.activate(self._engine): raise XklWrapperError("Failed to add layout '%s (%s)'" % (layout, variant)) @gtk_action_wait def remove_layout(self, layout): """ Method that tries to remove a given layout fro
m the current X configuration. See also the documentation for the add_layout method. :param layout: either 'layout' or 'layout (variant)' :raise XklWrapperError: if the given layout cannot be removed """ #we can get 'layout' or 'layout (variant)' (layout, variant) = parse_layout_variant(layout) layouts_variants = zip(self._rec.layouts, self._rec.variants) if not (layout, variant) in layouts_variants: msg = "'%s (%s)' not in the list of added layouts" % (layout, variant) raise XklWrapperError(msg) idx = layouts_variants.index((layout, variant)) new_layouts = self._rec.layouts[:idx] + self._rec.layouts[(idx + 1):] new_variants = self._rec.variants[:idx] + self._rec.variants[(idx + 1):] self._rec.set_layouts(new_layouts) self._rec.set_variants(new_variants) if not self._rec.activate(self._engine): raise XklWrapperError("Failed to
tyNotImplemented def dark_maze(): return AbilityNotImplemented return dark_maze, dark_maze, @card("Samite Alchemist") def samite_alchemist(card, abilities): def samite_alchemist(): return AbilityNotImplemented return samite_alchemist, @card("Grandmother Sengir") def grandmother_sengir(card, abilities): def grandmother_sengir(): return AbilityNotImplemented return grandmother_sengir, @card("Winter Sky") def winter_sky(card, abilities): def winter_sky(): return AbilityNotImplemented return winter_sky, @card("Memory Lapse") def memory_lapse(card, abilities): def memory_lapse(): return AbilityNotImplemented return memory_lapse, @card("Roots") def roots(card, abilities): def roots(): return AbilityNotImplemented def roots(): return AbilityNotImplemented def roots(): return AbilityNotImplemented return roots, roots, roots, @card("Joven's Tools") def jovens_tools(card, abilities): def jovens_tools(): return AbilityNotImplemented return jovens_tools, @card("Serra Aviary") def serra_aviary(card, abilities): def serra_aviary(): return AbilityNotImplemented return serra_aviary, @card("Baki's Curse") def bakis_curse(card, abilities): def bakis_curse(): return AbilityNotImplemented return bakis_curse, @card("Cemetery Gate") def cemetery_gate(card, abilities): def cemetery_gate(): return AbilityNotImplemented def cemetery_gate(): return AbilityNotImplemented return cemetery_gate, cemetery_gate, @card("Hazduhr the Abbot") def hazduhr_the_abbot(card, abilities): def hazduhr_the_abbot(): return AbilityNotImplemented return hazduhr_the_abbot, @card("An-Havva Constable") def anhavva_constable(card, abilities): def anhav
va_constable(): return AbilityNotImplemented return anhavva_constable, @card("Jinx") def jinx(card, abilities): def jinx(): return AbilityNotImplem
ented def jinx(): return AbilityNotImplemented return jinx, jinx, @card("Serra Inquisitors") def serra_inquisitors(card, abilities): def serra_inquisitors(): return AbilityNotImplemented return serra_inquisitors, @card("Roterothopter") def roterothopter(card, abilities): def roterothopter(): return AbilityNotImplemented def roterothopter(): return AbilityNotImplemented return roterothopter, roterothopter, @card("Aysen Bureaucrats") def aysen_bureaucrats(card, abilities): def aysen_bureaucrats(): return AbilityNotImplemented return aysen_bureaucrats, @card("Sengir Bats") def sengir_bats(card, abilities): def sengir_bats(): return AbilityNotImplemented def sengir_bats(): return AbilityNotImplemented return sengir_bats, sengir_bats, @card("Wizards' School") def wizards_school(card, abilities): def wizards_school(): return AbilityNotImplemented def wizards_school(): return AbilityNotImplemented def wizards_school(): return AbilityNotImplemented return wizards_school, wizards_school, wizards_school, @card("Dry Spell") def dry_spell(card, abilities): def dry_spell(): return AbilityNotImplemented return dry_spell, @card("Hungry Mist") def hungry_mist(card, abilities): def hungry_mist(): return AbilityNotImplemented return hungry_mist, @card("Baron Sengir") def baron_sengir(card, abilities): def baron_sengir(): return AbilityNotImplemented def baron_sengir(): return AbilityNotImplemented def baron_sengir(): return AbilityNotImplemented return baron_sengir, baron_sengir, baron_sengir, @card("Serra Paladin") def serra_paladin(card, abilities): def serra_paladin(): return AbilityNotImplemented def serra_paladin(): return AbilityNotImplemented return serra_paladin, serra_paladin, @card("Clockwork Swarm") def clockwork_swarm(card, abilities): def clockwork_swarm(): return AbilityNotImplemented def clockwork_swarm(): return AbilityNotImplemented def clockwork_swarm(): return AbilityNotImplemented def clockwork_swarm(): return AbilityNotImplemented return clockwork_swarm, 
clockwork_swarm, clockwork_swarm, clockwork_swarm, @card("Shrink") def shrink(card, abilities): def shrink(): return AbilityNotImplemented return shrink, @card("Greater Werewolf") def greater_werewolf(card, abilities): def greater_werewolf(): return AbilityNotImplemented return greater_werewolf, @card("Rashka the Slayer") def rashka_the_slayer(card, abilities): def rashka_the_slayer(): return AbilityNotImplemented def rashka_the_slayer(): return AbilityNotImplemented return rashka_the_slayer, rashka_the_slayer, @card("Ihsan's Shade") def ihsans_shade(card, abilities): def ihsans_shade(): return AbilityNotImplemented return ihsans_shade, @card("Daughter of Autumn") def daughter_of_autumn(card, abilities): def daughter_of_autumn(): return AbilityNotImplemented return daughter_of_autumn, @card("Feast of the Unicorn") def feast_of_the_unicorn(card, abilities): def feast_of_the_unicorn(): return AbilityNotImplemented def feast_of_the_unicorn(): return AbilityNotImplemented return feast_of_the_unicorn, feast_of_the_unicorn, @card("Ambush Party") def ambush_party(card, abilities): def ambush_party(): return AbilityNotImplemented return ambush_party, @card("Black Carriage") def black_carriage(card, abilities): def black_carriage(): return AbilityNotImplemented def black_carriage(): return AbilityNotImplemented def black_carriage(): return AbilityNotImplemented return black_carriage, black_carriage, black_carriage, @card("Sengir Autocrat") def sengir_autocrat(card, abilities): def sengir_autocrat(): return AbilityNotImplemented def sengir_autocrat(): return AbilityNotImplemented return sengir_autocrat, sengir_autocrat, @card("Anaba Spirit Crafter") def anaba_spirit_crafter(card, abilities): def anaba_spirit_crafter(): return AbilityNotImplemented return anaba_spirit_crafter, @card("Irini Sengir") def irini_sengir(card, abilities): def irini_sengir(): return AbilityNotImplemented return irini_sengir, @card("Leaping Lizard") def leaping_lizard(card, abilities): def 
leaping_lizard(): return AbilityNotImplemented return leaping_lizard, @card("Mesa Falcon") def mesa_falcon(card, abilities): def mesa_falcon(): return AbilityNotImplemented def mesa_falcon(): return AbilityNotImplemented return mesa_falcon, mesa_falcon, @card("Wall of Kelp") def wall_of_kelp(card, abilities): def wall_of_kelp(): return AbilityNotImplemented def wall_of_kelp(): return AbilityNotImplemented return wall_of_kelp, wall_of_kelp, @card("Spectral Bears") def spectral_bears(card, abilities): def spectral_bears(): return AbilityNotImplemented return spectral_bears, @card("Coral Reef") def coral_reef(card, abilities): def coral_reef(): return AbilityNotImplemented def coral_reef(): return AbilityNotImplemented def coral_reef(): return AbilityNotImplemented return coral_reef, coral_reef, coral_reef, @card("Orcish Mine") def orcish_mine(card, abilities): def orcish_mine(): return AbilityNotImplemented def orcish_mine(): return AbilityNotImplemented def orcish_mine(): return AbilityNotImplemented return orcish_mine, orcish_mine, orcish_mine, @card("Sea Troll") def sea_troll(card, abilities): def sea_troll(): return AbilityNotImplemented return sea_troll, @card("Folk of An-Havva") def folk_of_anhavva(card, abilities): def folk_of_anhavva(): ret
from rezgui.qt import QtGui
from rezgui.util import create_pane
from rezgui.mixins.ContextViewMixin import ContextViewMixin
from rezgui.models.ContextModel import ContextModel
from rez.config import config
from rez.vendor import yaml
from rez.vendor.yaml.error import YAMLError
from rez.vendor.schema.schema import Schema, SchemaError, Or, And, Use
from functools import partial


class ContextSettingsWidget(QtGui.QWidget, ContextViewMixin):
    """Editable YAML view of a subset of a context's settings.

    Shows the settings named in `schema_dict` as a YAML document in a text
    edit, with buttons to apply the edited text back to the context model,
    reset to the config defaults, or discard unsaved edits.
    """

    # Human-readable titles rendered as '# ...' comments above each
    # setting in the YAML text.
    titles = {
        "packages_path": "Search path for Rez packages",
        "implicit_packages": "Packages that are implicitly added to the request",
        "package_filter": "Package exclusion/inclusion rules"}

    # Validation schema for the editable settings. 'package_filter'
    # normalizes None -> [] and a single dict -> [dict].
    schema_dict = {
        "packages_path": [basestring],
        "implicit_packages": [basestring],
        "package_filter": Or(And(None, Use(lambda x: [])),
                             And(dict, Use(lambda x: [x])),
                             [dict])}

    def __init__(self, context_model=None, attributes=None, parent=None):
        """
        Args:
            attributes (list of str): Select only certain settings to expose.
                If None, all settings are exposed.
        """
        super(ContextSettingsWidget, self).__init__(parent)
        ContextViewMixin.__init__(self, context_model)

        # Restrict the exposed settings to the requested attributes, if any.
        self.schema_keys = set(self.schema_dict.iterkeys())
        if attributes:
            self.schema_keys &= set(attributes)
            assert self.schema_keys

        # Build the validation schema covering only the exposed settings.
        schema_dict = dict((k, v) for k, v in self.schema_dict.iteritems()
                           if k in self.schema_keys)
        self.schema = Schema(schema_dict)

        self.edit = QtGui.QTextEdit()
        # Monospace font, since the edit holds YAML source text.
        self.edit.setStyleSheet("font: 12pt 'Courier'")
        self.default_btn = QtGui.QPushButton("Set To Defaults")
        self.discard_btn = QtGui.QPushButton("Discard Changes...")
        self.apply_btn = QtGui.QPushButton("Apply")
        # Discard/apply only become enabled once the text is modified
        # (see _settingsChanged).
        self.discard_btn.setEnabled(False)
        self.apply_btn.setEnabled(False)
        btn_pane = create_pane([None, self.default_btn, self.discard_btn,
                                self.apply_btn], True)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.edit)
        layout.addWidget(btn_pane)
        self.setLayout(layout)

        self.apply_btn.clicked.connect(self.apply_changes)
        self.default_btn.clicked.connect(self.set_defaults)
        # Discarding via the button prompts for confirmation first.
        self.discard_btn.clicked.connect(partial(self.discard_changes, True))
        self.edit.textChanged.connect(self._settingsChanged)

        self._update_text()

    def _contextChanged(self, flags=0):
        # ContextViewMixin callback; refresh the text only when the
        # context itself changed.
        if not (flags & ContextModel.CONTEXT_CHANGED):
            return
        self._update_text()

    def apply_changes(self):
        """Parse, validate and apply the edited YAML to the context model."""
        def _content_error(title, text):
            # Offer to discard the bad edits, or cancel back to editing.
            ret = QtGui.QMessageBox.warning(self, title, text,
                                            QtGui.QMessageBox.Discard,
                                            QtGui.QMessageBox.Cancel)
            if ret == QtGui.QMessageBox.Discard:
                self.discard_changes()

        # load new content
        try:
            txt = self.edit.toPlainText()
            data = yaml.load(str(txt))
        except YAMLError as e:
            _content_error("Invalid syntax", str(e))
            return

        # check against schema
        if self.schema:
            try:
                data = self.schema.validate(data)
            except SchemaError as e:
                _content_error("Settings validation failure", str(e))
                return

        # apply to context model
        # NOTE(review): only packages_path and package_filter are written
        # back here; edits to implicit_packages appear to be silently
        # dropped even though the field is shown and validated -- confirm
        # whether that is intentional.
        self.context_model.set_packages_path(data["packages_path"])
        self.context_model.set_package_filter(data["package_filter"])
        self._update_text()

    def discard_changes(self, prompt=False):
        """Revert the text to the model's current settings.

        Args:
            prompt (bool): If True, ask for confirmation first.
        """
        if prompt:
            ret = QtGui.QMessageBox.warning(
                self,
                "The context settings have been modified.",
                "Your changes will be lost. Are you sure?",
                QtGui.QMessageBox.Ok,
                QtGui.QMessageBox.Cancel)
            if ret != QtGui.QMessageBox.Ok:
                return
        self._update_text()

    def set_defaults(self):
        """Fill the editor with the settings from the global rez config.

        Leaves the changes un-applied: discard/apply are enabled so the
        user can confirm or back out.
        """
        packages_path = config.packages_path
        implicits = [str(x) for x in config.implicit_packages]
        package_filter = config.package_filter
        data = {"packages_path": packages_path,
                "implicit_packages": implicits,
                "package_filter": package_filter}
        data = dict((k, v) for k, v in data.iteritems()
                    if k in self.schema_keys)
        self._set_text(data)
        self.discard_btn.setEnabled(True)
        self.apply_btn.setEnabled(True)

    def _update_text(self):
        # Regenerate the YAML text from the context model's current values,
        # and mark the editor as clean (discard/apply disabled).
        model = self.context_model
        implicits = [str(x) for x in model.implicit_packages]
        data = {"packages_path": model.packages_path,
                "implicit_packages": implicits,
                "package_filter": model.package_filter}
        data = dict((k, v) for k, v in data.iteritems()
                    if k in self.schema_keys)
        self._set_text(data)
        self.discard_btn.setEnabled(False)
        self.apply_btn.setEnabled(False)

    def _set_text(self, data):
        # Render each setting as its own YAML document fragment, preceded
        # by a '# <title>' comment line where a title is defined.
        lines = []
        for key, value in data.iteritems():
            lines.append('')
            txt = yaml.dump({key: value}, default_flow_style=False)
            title = self.titles.get(key)
            if title:
                lines.append("# %s" % title)
            lines.append(txt.rstrip())
        txt = '\n'.join(lines) + '\n'
        txt = txt.lstrip()
        self.edit.setPlainText(txt)

    def _settingsChanged(self):
        # Any edit to the text marks the widget dirty.
        self.discard_btn.setEnabled(True)
        self.apply_btn.setEnabled(True)


# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.  If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# module gio._gio
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gio/_gio.so
# by generator 1.135
# no doc

# NOTE: this is an auto-generated IDE stub for a C extension module;
# the method bodies are placeholders and the real signatures are unknown.

# imports
import gio as __gio
import glib as __glib
import gobject as __gobject
import gobject._gobject as __gobject__gobject


class InetSocketAddress(__gio.SocketAddress):
    """
    Object GInetSocketAddress

    Properties from GInetSocketAddress:
      address -> GInetAddress: Address
        The address
      port -> guint: Port
        The port
      flowinfo -> guint: Flow info
        IPv6 flow info
      scope-id -> guint: Scope ID
        IPv6 scope ID

    Properties from GSocketAddress:
      family -> GSocketFamily: Address family
        The family of the socket address

    Signals from GObject:
      notify (GParam)
    """
    def get_address(self, *args, **kwargs): # real signature unknown
        pass

    def get_port(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    # GObject type placeholder; the real value lives in the .so.
    __gtype__ = None # (!) real value is ''
#!/usr/bin/env python
#
# MountDirectories.py: this file is part of the GRS suite
# Copyright (C) 2015 Anthony G. Basile
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os
from copy import deepcopy
from grs.Constants import CONST
from grs.Execute import Execute


class MountDirectories():
    """ This controls the mounting/unmounting of directories under the
        system's portage configroot.

        Each entry of self.directories[] is one of:
          * str                       -- bind mount /<str> at <configroot>/<str>
          * [source, target]          -- bind mount abspath `source` at
                                         <configroot>/<target>
          * {target: (vfstype, name)} -- mount -t vfstype name at
                                         <configroot>/<target> (e.g. tmpfs)
    """

    def __init__(self, portage_configroot=CONST.PORTAGE_CONFIGROOT, \
        package=CONST.PACKAGE, portage=CONST.PORTAGE, logfile=CONST.LOGFILE):
        # The order is respected.  Note that 'dev' needs to be mounted
        # before 'dev/pts'.  (Fixed typo: was "beore".)
        self.directories = [
            'dev',
            'dev/pts',
            {'dev/shm': ('tmpfs', 'shm')},
            'proc',
            'sys',
            [portage, 'usr/portage'],
            [package, 'usr/portage/packages']
        ]
        # Once initiated, we only work with one portage_configroot
        self.portage_configroot = portage_configroot
        self.package = package
        self.portage = portage
        self.logfile = logfile
        # We need to umount in the reverse order
        self.rev_directories = deepcopy(self.directories)
        self.rev_directories.reverse()

    @staticmethod
    def _target_of(mount):
        """ Return the target directory (relative to the configroot) encoded
            in one self.directories[] entry.  See the class docstring for the
            three entry forms.
        """
        if isinstance(mount, str):
            return mount
        if isinstance(mount, list):
            return mount[1]
        if isinstance(mount, dict):
            return list(mount.keys())[0]
        raise TypeError('unsupported mount entry: %r' % (mount,))

    def ismounted(self, mountpoint):
        """ Return True if `mountpoint` is currently mounted.

            python's os.path.ismount() fails for bind mounts, so we obtain
            the current mountpoints ourselves from /proc/mounts.
        """
        # 'with' guarantees the file handle is closed (was previously leaked).
        with open('/proc/mounts', 'r') as mounts:
            mountpoints = [line.split()[1] for line in mounts.readlines()]
        # Let's make sure mountpoint is a canonical real path, no sym links,
        # since that's what /proc/mounts reports.  Otherwise we can get a
        # false negative on matching.
        mountpoint = os.path.realpath(mountpoint)
        return mountpoint in mountpoints

    def are_mounted(self):
        """ Return (some_mounted, all_mounted) booleans describing how many
            of the self.directories[] are currently mounted.
        """
        some_mounted = False
        all_mounted = True
        for mount in self.directories:
            target_directory = os.path.join(self.portage_configroot,
                                            self._target_of(mount))
            if self.ismounted(target_directory):
                some_mounted = True
            else:
                all_mounted = False
        return some_mounted, all_mounted

    def mount_all(self):
        """ Mount all the self.directories[] under the system's portage
            configroot, in order.
        """
        # If any are mounted, let's first unmount all, then mount all,
        # so the mount order is always respected.
        some_mounted, all_mounted = self.are_mounted()
        if some_mounted:
            self.umount_all()
        # Now go through each of the self.directories[] to be mounted in order.
        for mount in self.directories:
            # Let's make sure the target_directory exists.
            target_directory = os.path.join(self.portage_configroot,
                                            self._target_of(mount))
            os.makedirs(target_directory, mode=0o755, exist_ok=True)
            if isinstance(mount, str):
                # The source directory is assumed to exist relative to /
                # and we just bind mount it into the configroot.
                cmd = 'mount --bind /%s %s' % (mount, target_directory)
            elif isinstance(mount, list):
                # The source directory is an abspath; create it if it
                # doesn't already exist.
                source_directory = mount[0]
                os.makedirs(source_directory, mode=0o755, exist_ok=True)
                cmd = 'mount --bind %s %s' % (source_directory, target_directory)
            elif isinstance(mount, dict):
                # We are given the mountpoint, type and name, so we just go
                # right ahead and mount -t type name mountpoint.  This is
                # useful for tmpfs filesystems.
                vfstype, vfsname = list(mount.values())[0]
                cmd = 'mount -t %s %s %s' % (vfstype, vfsname, target_directory)
            Execute(cmd, timeout=60, logfile=self.logfile)

    def umount_all(self):
        """ Unmount all the self.directories[].

            We must unmount in the opposite order that we mounted.
        """
        for mount in self.rev_directories:
            target_directory = os.path.join(self.portage_configroot,
                                            self._target_of(mount))
            if self.ismounted(target_directory):
                cmd = 'umount --force %s' % target_directory
                Execute(cmd, timeout=60, logfile=self.logfile)
ration file, created by # sphinx-quickstart on Thu Aug 28 15:17:47 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------
--------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.2' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'epytext', ] # Add any paths that contain templates he
re, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PySpark' copyright = u'' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = 'master' # The full version, including alpha/beta/rc tags. release = os.environ.get('RELEASE_VERSION', version) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
#keep_warnings = False # -- Options for autodoc -------------------------------------------------- # Look at the first line of the docstring for function and method signatures. autodoc_docstring_signature = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'nature' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "../../docs/img/spark-logo-hd.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
#html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pysparkdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'pyspark.tex', u'pyspark Documentation', u'Author', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
#latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pyspark', u'pyspark Documentation', [u'Author'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'pyspark', u'pyspark Documentation', u'Author', 'pyspark', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to
#-*- coding: utf-8 -*-

import sys
from abc import ABCMeta, abstractmethod

from src.shell.std import Std


class ICommand(Std):
    """
        Interface for a scat command
    """
    # NOTE(review): ABCMeta is imported but never installed as this class's
    # metaclass, so @abstractmethod is only enforced if Std already uses
    # ABCMeta -- confirm against src.shell.std before relying on it.

    def __init__(self, verbose=2):
        # Verbosity level: 0 = silent, 1 = errors only (stderr),
        # >1 = info (stdout) and errors.
        self.__verbose = verbose
        return

    def stdout(self, msg, crlf=True):
        """
            Print an informational message on standard output, with
            '[*] ' formatting.  Shown only when verbosity > 1.

            @param msg  message to print
            @param crlf append a newline after the message (default True)
        """
        if self.__verbose > 1:
            sys.stdout.write("[*] " + msg)
            if crlf:
                sys.stdout.write("\n")

    def stderr(self, msg):
        """
            Print message on standard error, with formatting.
            Shown only when verbosity > 0.

            @param msg message to print
        """
        if self.__verbose > 0:
            sys.stderr.write("*** " + msg + "\n")

    @abstractmethod
    def run(self, *args, **kwargs):
        # BUGFIX: was `raise NotImplemented` -- NotImplemented is a constant,
        # not an exception, so raising it produced
        # "TypeError: exceptions must derive from BaseException" instead of
        # the intended NotImplementedError.
        raise NotImplementedError

    @abstractmethod
    def help(self, *args, **kwargs):
        # Re-flow the 8-space-indented class docstring for display;
        # [1:-5] trims the leading newline and trailing indentation
        # (presumably the closing-quote line) -- verify against the
        # docstring layout of concrete commands.
        print(self.__doc__.replace("\n" + 8 * " ", "\n")[1:-5])

    @abstractmethod
    def complete(self, text, line, begidx, endidx):
        # Shell completion hook; concrete commands override this.
        return
_DIRECTION, SUPPORT_OSCILLATE, SUPPORT_PRESET_MODE, SUPPORT_SET_SPEED, FanEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType PRESET_MODE_AUTO = "auto" PRESET_MODE_SMART = "smart" PRESET_MODE_SLEEP = "sleep" PRESET_MODE_ON = "on" FULL_SUPPORT = SUPPORT_SET_SPEED | SUPPORT_OSCILLATE | SUPPORT_DIRECTION LIMITED_SUPPORT = SUPPORT_SET_SPEED async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the demo fan platform.""" async_add_entities( [ DemoPercentageFan( hass, "fan1", "Living Room Fan", FULL_SUPPORT, [ PRESET_MODE_AUTO, PRESET_MODE_SMART, PRESET_MODE_SLEEP, PRESET_MODE_ON, ], ), DemoPercentageFan( hass, "fan2", "Ceiling Fan", LIMITED_SUPPORT, None, ), AsyncDemoPercentageFan( hass, "fan3", "Percentage Full Fan", FULL_SUPPORT, [ PRESET_MODE_AUTO, PRESET_MODE_SMART, PRESET_MODE_SLEEP, PRESET_MODE_ON, ], ), DemoPercentageFan( hass, "fan4", "Percentage Limited Fan", LIMITED_SUPPORT, [ PRESET_MODE_AUTO, PRESET_MODE_SMART, PRESET_MODE_SLEEP, PRESET_MODE_ON, ], ), AsyncDemoPercentageFan( hass, "fan5", "Preset Only Limited Fan", SUPPORT_PRESET_MODE, [ PRESET_MODE_AUTO, PRESET_MODE_SMART, PRESET_MODE_SLEEP, PRESET_MODE_ON, ], ), ] ) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the Demo config entry.""" await async_setup_platform(hass, {}, async_add_entities) class BaseDemoFan(FanEntity): """A demonstration fan component that uses legacy fan speeds.""" def __init__( self, hass, unique_id: str, name: str, supported_features: int, preset_modes: list[str] | None, ) -> None: """Initialize the entity.""" self.hass = hass self._unique_id = unique_id 
self._supported_features = supported_features self._percentage: int | None = None self._preset_modes = preset_modes self._preset_mode: str | None = None self._oscillating: bool | None = None self._direction: str | None = None self._name = name if supported_features & SUPPORT_OSCILLATE: self._oscillating = False if supported_features & SUPPORT_DIRECTION: self._direction = "forward" @property def unique_id(self): """Return the unique id.""" return self._unique_id @property def name(self) -> str: """Get entity name.""" return self._name @property def should_poll(self): """No polling needed for a demo fan.""" return False @property def current_direction(self) -> str | None: """Fan direction.""" return self._direction @property def oscillating(self) -> bool | None: """Oscillating.""" retu
rn self._oscillating @property def supported_features(self) -> int: """Flag supported features.""" return self._supported_features class DemoPercentageFan(BaseDemoFan, FanEntity): """A demonstration fan component that uses percentages.""" @property def perce
ntage(self) -> int | None: """Return the current speed.""" return self._percentage @property def speed_count(self) -> int: """Return the number of speeds the fan supports.""" return 3 def set_percentage(self, percentage: int) -> None: """Set the speed of the fan, as a percentage.""" self._percentage = percentage self._preset_mode = None self.schedule_update_ha_state() @property def preset_mode(self) -> str | None: """Return the current preset mode, e.g., auto, smart, interval, favorite.""" return self._preset_mode @property def preset_modes(self) -> list[str] | None: """Return a list of available preset modes.""" return self._preset_modes def set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" if self.preset_modes and preset_mode in self.preset_modes: self._preset_mode = preset_mode self._percentage = None self.schedule_update_ha_state() else: raise ValueError(f"Invalid preset mode: {preset_mode}") def turn_on( self, speed: str = None, percentage: int = None, preset_mode: str = None, **kwargs, ) -> None: """Turn on the entity.""" if preset_mode: self.set_preset_mode(preset_mode) return if percentage is None: percentage = 67 self.set_percentage(percentage) def turn_off(self, **kwargs) -> None: """Turn off the entity.""" self.set_percentage(0) def set_direction(self, direction: str) -> None: """Set the direction of the fan.""" self._direction = direction self.schedule_update_ha_state() def oscillate(self, oscillating: bool) -> None: """Set oscillation.""" self._oscillating = oscillating self.schedule_update_ha_state() class AsyncDemoPercentageFan(BaseDemoFan, FanEntity): """An async demonstration fan component that uses percentages.""" @property def percentage(self) -> int | None: """Return the current speed.""" return self._percentage @property def speed_count(self) -> int: """Return the number of speeds the fan supports.""" return 3 async def async_set_percentage(self, percentage: int) -> None: """Set the speed of the fan, as a 
percentage.""" self._percentage = percentage self._preset_mode = None self.async_write_ha_state() @property def preset_mode(self) -> str | None: """Return the current preset mode, e.g., auto, smart, interval, favorite.""" return self._preset_mode @property def preset_modes(self) -> list[str] | None: """Return a list of available preset modes.""" return self._preset_modes async def async_set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" if self.preset_modes is None or preset_mode not in self.preset_modes: raise ValueError( "{preset_mode} is not a valid preset_mode: {self.preset_modes}" ) self._preset_mode = preset_mode self._percentage = None self.async_write_ha_state() async def async_turn_on( self, speed: str = None, percentage: int = None, preset_mode: str = None, **kwargs, ) -> None: """Turn on the entity.""" if preset_mode: await self.async_set_preset_mode(preset_mode) return if percentage is None: percentage = 67 await self.async_set_percentage(percentage) async def async_turn_off(self, **kwargs) -> None: """Turn
'''
Copyright 2013 Sven Reissmann <sven@0x80.io>

This file is part of ddserver.

ddserver is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ddserver is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public
License along with ddserver. If not, see <http://www.gnu.org/licenses/>.
'''

from ddserver.utils.deps import require

# The following modules are imported purely for their side effects:
# importing each page/updater module registers its routes with the web
# application, which is why every one is marked @UnusedImport.
import ddserver.interface.pages.index  # @UnusedImport: for web application
import ddserver.interface.pages.signup  # @UnusedImport: for web application
import ddserver.interface.pages.lostpasswd  # @UnusedImport: for web application
import ddserver.interface.pages.login  # @UnusedImport: for web application

import ddserver.interface.pages.user.account  # @UnusedImport: for web application
import ddserver.interface.pages.user.hosts  # @UnusedImport: for web application
import ddserver.interface.pages.user.host  # @UnusedImport: for web application

import ddserver.interface.pages.admin.users  # @UnusedImport: for web application
import ddserver.interface.pages.admin.suffixes  # @UnusedImport: for web application

import ddserver.updater.nic  # @UnusedImport: for web application


@require(web = 'ddserver.web:Web')
def main(web):
  """Application entry point.

  The `web` argument is not passed by the caller: the @require decorator
  presumably resolves 'ddserver.web:Web' and injects the instance --
  confirm against ddserver.utils.deps.
  """
  # Set up web server and run it
  web.run()


if __name__ == '__main__':
  main()
from tests.base import TestCase
from vilya.models.project import CodeDoubanProject
from vilya.models.project_conf import PROJECT_CONF_FILE

from nose.tools import raises


class TestProjectConf(TestCase):
    """Tests for per-project configuration read from PROJECT_CONF_FILE.

    Refactored: the repeated clean-up/creation and commit boilerplate in
    every test is extracted into the _new_project/_commit_conf helpers;
    test names and assertions are unchanged.
    """

    def _new_project(self):
        """Create a fresh 'tp' project, deleting any leftover one first."""
        self.clean_up()
        return CodeDoubanProject.add(
            'tp', owner_id="test1", create_trac=False)

    def _commit_conf(self, project, content):
        """Commit `content` as the project's configuration file."""
        u = self.addUser()
        project.git.commit_one_file(PROJECT_CONF_FILE, content, 'm', u)

    def test_create_project_without_conf(self):
        project = self._new_project()
        assert project.conf['docs'], "enabled by default"

    def test_conf_add_wrong_keys(self):
        # Unknown top-level keys must be silently ignored.
        project = self._new_project()
        self._commit_conf(
            project, 'unexisting_key_argl1: 1\nunexisting_key_argl2: 2')
        assert 'unexisting_key_argl1' not in project.conf

    def test_conf(self):
        # A committed conf file overrides the default docs settings.
        project = self._new_project()
        self._commit_conf(project, 'docs: {Docs: {dir: other_dir}}')
        assert project.conf['docs']['Docs']['dir'] == 'other_dir'

    @raises(Exception)
    def test_broken_conf(self):
        # 'docs {dir: other_dir' is invalid YAML, so reading the conf
        # should raise; the assert is never reached.
        project = self._new_project()
        self._commit_conf(project, 'docs {dir: other_dir')
        assert project.conf['docs']['dir'] == 'other_dir'

    def test_cannot_set_undefined_first_level_entry(self):
        project = self._new_project()
        self._commit_conf(project, 'unexisting_key: 123')
        # First level key need to be defined in default_code_config.yaml
        assert 'unexisting_key' not in project.conf

    def test_can_set_undefined_second_level_entry(self):
        project = self._new_project()
        self._commit_conf(project, 'docs: {unexisting_key: aaa}')
        assert project.conf['docs']['unexisting_key'] == 'aaa'

    def clean_up(self):
        """Delete the 'tp' project if a previous run left it behind."""
        prj = CodeDoubanProject.get_by_name('tp')
        if prj:
            prj.delete()
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import tools
from statsmodels.sandbox.regression.gmm import IV2SLS, IVGMM, DistQuantilesGMM, spec_hausman
from statsmodels.sandbox.regression import gmm


# Example script: compares GMM, IV2SLS and OLS estimators on simulated
# data with (optionally) endogenous regressors, and demonstrates
# estimating distribution parameters from quantiles via GMM.
if __name__ == '__main__':

    import statsmodels.api as sm

    examples = ['ivols', 'distquant'][:]

    if 'ivols' in examples:
        # Pick which data-generating process to use: plain OLS,
        # a true IV setup, or "fake" instruments (exog used directly).
        exampledata = ['ols', 'iv', 'ivfake'][1]
        nobs = nsample = 500
        sige = 3          # error standard deviation
        corrfactor = 0.025  # strength of the endogeneity in the iv example

        x = np.linspace(0,10, nobs)
        X = tools.add_constant(np.column_stack((x, x**2)), prepend=False)
        beta = np.array([1, 0.1, 10])

        def sample_ols(exog):
            # No endogeneity: instruments are not needed (returned as None).
            endog = np.dot(exog, beta) + sige*np.random.normal(size=nobs)
            return endog, exog, None

        def sample_iv(exog):
            # First regressor is contaminated with the error term, so it is
            # endogenous; several noisy instruments are constructed from X.
            print('using iv example')
            X = exog.copy()
            e = sige * np.random.normal(size=nobs)
            endog = np.dot(X, beta) + e
            exog[:,0] = X[:,0] + corrfactor * e
            z0 = X[:,0] + np.random.normal(size=nobs)
            z1 = X.sum(1) + np.random.normal(size=nobs)
            z2 = X[:,1]
            z3 = (np.dot(X, np.array([2,1, 0])) +
                  sige/2. * np.random.normal(size=nobs))
            z4 = X[:,1] + np.random.normal(size=nobs)
            instrument = np.column_stack([z0, z1, z2, z3, z4, X[:,-1]])
            return endog, exog, instrument

        def sample_ivfake(exog):
            # Regressors stay exogenous; instrument matrix mixes the
            # regressors themselves with two noisy combinations.
            X = exog
            e = sige * np.random.normal(size=nobs)
            endog = np.dot(X, beta) + e
            #X[:,0] += 0.01 * e
            #z1 = X.sum(1) + np.random.normal(size=nobs)
            #z2 = X[:,1]
            z3 = (np.dot(X, np.array([2,1, 0])) +
                  sige/2. * np.random.normal(size=nobs))
            z4 = X[:,1] + np.random.normal(size=nobs)
            instrument = np.column_stack([X[:,:2], z3, z4, X[:,-1]])  #last is constant
            return endog, exog, instrument

        if exampledata == 'ols':
            endog, exog, _ = sample_ols(X)
            instrument = exog
        elif exampledata == 'iv':
            endog, exog, instrument = sample_iv(X)
        elif exampledata == 'ivfake':
            endog, exog, instrument = sample_ivfake(X)

        #using GMM and IV2SLS classes
        #----------------------------

        mod = gmm.IVGMM(endog, exog, instrument, nmoms=instrument.shape[1])
        res = mod.fit()
        modgmmols = gmm.IVGMM(endog, exog, exog, nmoms=exog.shape[1])
        resgmmols = modgmmols.fit()
        #the next is the same as IV2SLS, (Z'Z)^{-1} as weighting matrix
        modgmmiv = gmm.IVGMM(endog, exog, instrument,
                             nmoms=instrument.shape[1])  #same as mod
        resgmmiv = modgmmiv.fitgmm(
            np.ones(exog.shape[1], float),
            weights=np.linalg.inv(np.dot(instrument.T, instrument)))
        modls = gmm.IV2SLS(endog, exog, instrument)
        resls = modls.fit()
        modols = OLS(endog, exog)
        resols = modols.fit()

        # Compare parameter estimates across estimators.
        print('\nIV case')
        print('params')
        print('IV2SLS', resls.params)
        print('GMMIV ', resgmmiv)  # .params
        print('GMM ', res.params)
        print('diff ', res.params - resls.params)
        print('OLS ', resols.params)
        print('GMMOLS', resgmmols.params)

        # Compare standard errors across estimators.
        print('\nbse')
        print('IV2SLS', resls.bse)
        print('GMM ', res.bse)  #bse currently only attached to model not results
        print('diff ', res.bse - resls.bse)
        print('%-diff', resls.bse / res.bse * 100 - 100)
        print('OLS ', resols.bse)
        print('GMMOLS', resgmmols.bse)
        #print 'GMMiv', modgmmiv.bse

        # Hausman test for endogeneity: compares the efficient (OLS/GMM-OLS)
        # estimator against the consistent IV/GMM estimator.
        print("Hausman's specification test")
        print(resls.spec_hausman())
        print(spec_hausman(resols.params, res.params, resols.cov_params(),
                           res.cov_params()))
        print(spec_hausman(resgmmols.params, res.params,
                           resgmmols.cov_params(), res.cov_params()))

    if 'distquant' in examples:

        #estimating distribution parameters from quantiles
        #-------------------------------------------------

        #example taken from distribution_estimators.py
        gparrvs = stats.genpareto.rvs(2, size=5000)
        x0p = [1., gparrvs.min()-5, 1]

        moddist = gmm.DistQuantilesGMM(gparrvs, None, None,
                                       distfn=stats.genpareto)
        #produces non-sense because optimal weighting matrix calculations don't
        #apply to this case
        #resgp = moddist.fit() #now with 'cov': LinAlgError: Singular matrix
        pit1, wit1 = moddist.fititer([1.5,0,1.5], maxiter=1)
        print(pit1)
        p1 = moddist.fitgmm([1.5,0,1.5])
        print(p1)
        moddist2 = gmm.DistQuantilesGMM(gparrvs, None, None,
                                        distfn=stats.genpareto,
                                        pquant=np.linspace(0.01,0.99,10))
        pit1a, wit1a = moddist2.fititer([1.5,0,1.5], maxiter=1)
        print(pit1a)
        p1a = moddist2.fitgmm([1.5,0,1.5])
        print(p1a)
        #Note: pit1a and p1a are the same and almost the same (1e-5) as
        #      fitquantilesgmm version (functions instead of class)
        res1b = moddist2.fitonce([1.5,0,1.5])
        print(res1b.params)
        print(res1b.bse)  #they look much too large
        print(np.sqrt(np.diag(res1b._cov_params)))
#!/usr/bin/python """checks bugle trace log for OpenGL problems""" from __future__ import print_function import sys count = 0 lineNo = 0 inList = False inBlock = False legalLists = {} setLists = {} usedInList = {} usedInBlock = {} usedOutBlock = {} def error(error, lineNo, *args): print("Error:", error.format(*args), lineNo, file=sys.stderr) exit(-1) for line in sys.stdin: line=line[line.find('gl'):] # split line into functional parts op=line.find('(') # the function mane function=line[0:op] rest=line[op+1:-1] cl=rest.find(')') # the argument list args=rest[0:cl] rest=rest[cl+1:] # the result result='' eq=rest.find('= ') if eq >= 0: result = rest[eq+2:] lineNo=lineNo+1 if False and function.find( 'List' ) >= 0 and function.find( 'Call' ) < 0: print(" ".join((count, line[:-1], function, args, result))) count = count + 1 if count > 100: exit(-1) if function == 'glBegin': if inBlock: print("Error: Still in block.", lineNo) exit(-1) inBlock = True elif function == 'glEn
d': if not inBlock: print("Error: Not in block.", lineNo) exit(-1) inBlock = False else: blockDict=usedOutBlock if inBlock: blockDict=usedInBl
ock if not function in blockDict: blockDict[function]=lineNo if function == 'glGenLists': legalLists[result] = True if inList: error("Still in list generation.", lineNo) if function == 'glEndList': if not inList: error("Not in list generation.", lineNo) if inBlockAtListStart != inBlock: error("glBegin/glEnd mismatch in list.", lineNo) inList=False if function == 'glNewList': inBlockAtListStart=inBlock l=args[0:args.find(',')] currentList=l if inList: error("Still in list generation.", lineNo) if not legalLists[l]: error("list {} used, but not generated.", lineNo, l) setLists[l]=True inList=True elif inList: if not function in usedInList: usedInList[function]=lineNo #print lineNo, function if function == 'glCallList': l=args if not legalLists[l]: error("list {} used, but not generated.", lineNo, l) if inList and currentList == l: error("list {} used, but it's just getting generated.", lineNo, l) if not setLists[l]: error("list {} used, but not set.", lineNo, l) if function == 'glDeleteLists': l=args[0:args.find(',')] if not legalLists[l]: error("list {} used, but not generated.", lineNo, l) legalLists[l]=False setLists[l]=False print("Used in display lists:") for f in usedInList: print(f, usedInList[f]) print() print("Used in glBegin/End:") for f in usedInBlock: print(f, usedInBlock[f]) print() print("Used outside glBegin/End:") for f in usedOutBlock: print(f, usedOutBlock[f])
""" Course info page. """ from .course_page import CoursePage class CourseInfoPage(CoursePage): """ Course info. """ url_path = "info" def is_browser_on_page(self): return self.is_css_present('section.updates') @property def num_upda
tes(self): """ Return the number of updates on the page. """ return self.css_count('section.updates section article') @property def handout_links(self):
""" Return a list of handout assets links. """ return self.css_map('section.handouts ol li a', lambda el: el['href'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Loaders for polyglot's downloadable per-language resources
# (embeddings, vocabularies, NER/POS models, morfessor models, ...).
# All loaders are memoized, so each resource is read from disk once.

from os import path
import os
from tempfile import NamedTemporaryFile

import numpy as np
import morfessor
from six import PY2
from six.moves import cPickle as pickle

from . import polyglot_path
from .decorators import memoize
from .downloader import downloader
from .mapping import Embedding, CountedVocabulary, CaseExpander, DigitExpander
from .utils import _open

# Maps a logical resource name to its directory name under polyglot_path.
resource_dir = {
  "cw_embeddings":"embeddings2",
  "sgns_embeddings":"sgns2",
  "ue_embeddings":"uniemb",
  "visualization": "tsne2",
  "wiki_vocab": "counts2",
  "sentiment": "sentiment2",
}


def locate_resource(name, lang, filter=None):
  """Return filename that contains specific language resource name.

  Args:
    name (string): Name of the resource.
    lang (string): language code to be loaded.

  Note: `filter` is currently unused (and shadows the builtin).
  Raises ValueError if the resource is indexed but not downloaded.
  """
  task_dir = resource_dir.get(name, name)
  package_id = u"{}.{}".format(task_dir, lang)
  p = path.join(polyglot_path, task_dir, lang)
  if not path.isdir(p):
    if downloader.status(package_id) != downloader.INSTALLED:
      raise ValueError("This resource is available in the index "
                       "but not downloaded, yet. Try to run\n\n"
                       "polyglot download {}".format(package_id))
  # Resource directories contain a single file; return its path.
  return path.join(p, os.listdir(p)[0])


@memoize
def load_embeddings(lang="en", task="embeddings", type="cw"):
  """Return a word embeddings object for `lang` and of type `type`

  Args:
    lang (string): language code.
    task (string): parameters that define task.
    type (string): skipgram, cw, cbow ...
  """
  src_dir = "_".join((type, task)) if type else task
  p = locate_resource(src_dir, lang)
  e = Embedding.load(p)
  # Expanders let lookups match case/digit variants of stored words.
  if type == "cw":
    e.apply_expansion(CaseExpander)
    e.apply_expansion(DigitExpander)
  if type == "sgns":
    e.apply_expansion(CaseExpander)
  if type == "ue":
    e.apply_expansion(CaseExpander)
  return e


@memoize
def load_vocabulary(lang="en", type="wiki"):
  """Return a CountedVocabulary object.

  Args:
    lang (string): language code.
    type (string): wiki,...
  """
  src_dir = "{}_vocab".format(type)
  p = locate_resource(src_dir, lang)
  return CountedVocabulary.from_vocabfile(p)


@memoize
def load_ner_model(lang="en", version="2"):
  """Return a named entity extractor parameters for `lang` and of version `version`

  Args:
    lang (string): language code.
    version (string): version of the parameters to be used.
  """
  src_dir = "ner{}".format(version)
  p = locate_resource(src_dir, lang)
  fh = _open(p)
  try:
    return pickle.load(fh)
  except UnicodeDecodeError:
    # Pickle written by Python 2; reload with latin1 for Python 3.
    fh.seek(0)
    return pickle.load(fh, encoding='latin1')


@memoize
def load_pos_model(lang="en", version="2"):
  """Return a part of speech tagger parameters for `lang` and of version `version`

  Args:
    lang (string): language code.
    version (string): version of the parameters to be used.
  """
  src_dir = "pos{}".format(version)
  p = locate_resource(src_dir, lang)
  fh = _open(p)
  return dict(np.load(fh))


@memoize
def load_unified_pos_model(lang="en"):
  """Return universal POS tagger parameters for `lang` as a dict."""
  src_dir = "unipos"
  p = locate_resource(src_dir, lang)
  return dict(np.load(p))


@memoize
def load_morfessor_model(lang="en", version="2"):
  """Return a morfessor model for `lang` and of version `version`

  Args:
    lang (string): language code.
    version (string): version of the parameters to be used.
  """
  src_dir = "morph{}".format(version)
  p = locate_resource(src_dir, lang)
  file_handler = _open(p)
  # morfessor.MorfessorIO reads from a named file, so copy the
  # (possibly compressed) resource into a temporary file first.
  tmp_file_ = NamedTemporaryFile(delete=False)
  tmp_file_.write(file_handler.read())
  tmp_file_.close()
  io = morfessor.MorfessorIO()
  model = io.read_any_model(tmp_file_.name)
  os.remove(tmp_file_.name)
  return model


@memoize
def load_transliteration_table(lang="en", version="2"):
  """Return a transliteration table for `lang` and of version `version`

  Args:
    lang (string): language code.
    version (string): version of the parameters to be used.
  """
  src_dir = "transliteration{}".format(version)
  p = locate_resource(src_dir, lang)
  file_handler = _open(p)
  return pickle.load(file_handler)
# Demo: apply two hand-built edge filters to sample images with a
# single conv2d op.  Uses the TensorFlow 1.x API (placeholder/Session).

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_sample_images

# Utility functions
def plot_image(image):
    plt.imshow(image, cmap="gray", interpolation="nearest")
    plt.axis("off")

def plot_color_image(image):
    plt.imshow(image.astype(np.uint8),interpolation="nearest")
    plt.axis("off")

# Load sample images
china = load_sample_image('china.jpg')
flower = load_sample_image('flower.jpg')
# Crop a region and build a single-image grayscale batch (NHWC, C=1).
image = china[150:220, 130:250]
height, width, channels = image.shape
image_grayscale = image.mean(axis=2).astype(np.float32)
images = image_grayscale.reshape(1, height, width, 1)

# Full color batch of both sample images; shapes redefine height/width.
dataset = np.array(load_sample_images().images, dtype=np.float32)
batchsize, height, width, channels = dataset.shape

# Create 2 filters: 7x7 kernels, a vertical and a horizontal line
# (only the first input channel is used).
fmap = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
fmap[:, 3, 0, 0] = 1
fmap[3, :, 0, 1] = 1
plot_image(fmap[:,:,0,0])
plt.show()
plot_image(fmap[:,:,0,1])
plt.show()

# Convolve with stride 2 in both spatial dimensions, SAME padding.
X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
convolution = tf.nn.conv2d(X, fmap, strides=[1,2,2,1], padding='SAME')

with tf.Session() as sess:
    output = sess.run(convolution, feed_dict={X: dataset})

# Show the horizontal-line feature map of the first image.
plt.imshow(output[0,:,:,1])
plt.show()
""" Provides a set of pluggable permission policies. """ from __future__ import unicode_literals from django.http import Http404 from rest_framework import exceptions from rest_framework.compat import is_authenticated SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS') class BasePermission(object): """ A base class from which all permission classes should inherit. """ def has_permission(self, request, view): """ Return `True` if permission is granted, `False` otherwise. """ return True def has_object_permission(self, request, view, obj): """ Return `True` if permission is granted, `False` otherwise. """ return True class AllowAny(BasePermission): """ Allow any access. This isn't strictly required, since you could use an empty permission_classes list, but it's useful because it makes the intention more explicit. """ def has_permission(self, request, view): return True class IsAuthenticated(BasePermission): """ Allows access only to authenticated users. """ def has_permission(self, request, view): return request.user and is_authenticated(request.user) class IsAdminUser(BasePermission): """ Allows access only to admin users. """ def has_permission(self, request, view): return request.user and request.user.is_staff class IsAuthenticatedOrReadOnly(BasePermission): """ The request is authenticated as a user, or is a read-only request. """ def has_permission(self, request, view): return ( request.method in SAFE_METHODS or request.user and is_authenticated(request.user) ) class DjangoModelPermissions(BasePermission): """ The request is authenticated using `django.contrib.auth` permissions. See: https://docs.djangoproject.com/en/dev/topics/auth/#permissions It ensures that the user is authenticated, and has the appropriate `add`/`change`/`delete` permissions on the model. This permission can only be applied against view classes that provide a `.queryset` attribute. """ # Map methods into required permission codes. 
# Override this if you need to also provide 'view' permissions, # or if you want to provide custom permission codes. perms_map = { 'GET': [], 'OPTIONS': [], 'HEAD': [], 'POST': ['%(app_label)s.add_%(model_name)s'], 'PUT': ['%(app_label)s.change_%(model_name)s'], 'PATCH': ['%(app_label)s.change_%(model_name)s'], 'DELETE': ['%(app_label)s.delete_%(model_name)s'], } authenticated_users_only = True def get_required_permissions(self, method, model_cls): """ Given a model and an HTTP method, return the list of permission codes that the user is required to have. """ kwargs = { 'app_label': model_cls._meta.app_label, 'model_name': model_cls._meta.model_name } if method not in self.perms_map: raise exceptions.MethodNotAllowed(method) return [perm % kwargs for perm in self.perms_map[method]] def has_permission(self, request, view): # Workaround to ensure DjangoModelPermissions are not applied # to the root view when using DefaultRouter. if getattr(view, '_ignore_model_permissions', False): return True if hasattr(view, 'get_queryset'): queryset = view.get_queryset() else: queryset = getattr(view, 'queryset', None) assert queryset is not None, ( 'Cannot apply DjangoModelPermissions on a view that ' 'does not set `.queryset` or have a `.get_queryset()` method.' ) perms = self.get_required_permissions(request.method, queryset.model) return ( request.user and (is_authenticated(request.user) or not self.authenticated_users_only) and request.user.has_perms(perms) ) class DjangoModelPermissionsOrAnonReadOnly(DjangoModelPermissions): """ Similar to DjangoModelPermissions, except that anonymous users are allowed read-only access. """ authenticated_users_only = False class DjangoObjectPermissions(DjangoModelPermissions): """ The request is authenticated using Django's object-level permissions. It requires an object-permissions-enabled backend, such as Django Guardian. 
It ensures that the user is authenticated, and has the appropriate `add`/`change`/`delete` permissions on the object using .has_perms. This permission can only be applied against view classes that provide a `.queryset` attribute. """ perms_map = { 'GET': [], 'OPTIONS':
[], 'HEAD': [], 'POST': ['%(app_label)s.add_%(model_name)s'], 'PUT': ['%(app_label)s.change_%(model_name)s'], 'PATCH': ['%(app_label)s.change_%(model_name)s'], 'DELETE': ['%(app_label)s.delete_%(model_na
me)s'], } def get_required_object_permissions(self, method, model_cls): kwargs = { 'app_label': model_cls._meta.app_label, 'model_name': model_cls._meta.model_name } if method not in self.perms_map: raise exceptions.MethodNotAllowed(method) return [perm % kwargs for perm in self.perms_map[method]] def has_object_permission(self, request, view, obj): if hasattr(view, 'get_queryset'): queryset = view.get_queryset() else: queryset = getattr(view, 'queryset', None) assert queryset is not None, ( 'Cannot apply DjangoObjectPermissions on a view that ' 'does not set `.queryset` or have a `.get_queryset()` method.' ) model_cls = queryset.model user = request.user perms = self.get_required_object_permissions(request.method, model_cls) if not user.has_perms(perms, obj): # If the user does not have permissions we need to determine if # they have read permissions to see 403, or not, and simply see # a 404 response. if request.method in SAFE_METHODS: # Read permissions already checked and failed, no need # to make another lookup. raise Http404 read_perms = self.get_required_object_permissions('GET', model_cls) if not user.has_perms(read_perms, obj): raise Http404 # Has read permissions. return False return True
'''
The tests in this package are to ensure the proper resultant dtypes of
set operations.
'''
import itertools as it

import numpy as np
import pytest

from pandas.core.dtypes.common import is_dtype_equal

import pandas as pd
from pandas import Int64Index, RangeIndex
from pandas.tests.indexes.conftest import indices_list
import pandas.util.testing as tm

# Index-type pairs whose unions are allowed to keep either operand's
# dtype rather than falling back to object, plus factories to build them.
COMPATIBLE_INCONSISTENT_PAIRS = {
    (Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex)
}


@pytest.fixture(params=list(it.combinations(indices_list, 2)),
                ids=lambda x: type(x[0]).__name__ + type(x[1]).__name__)
def index_pair(request):
    """
    Create all combinations of 2 index types.
    """
    return request.param


def test_union_same_types(indices):
    # Union of two indexes of the same type keeps that dtype.
    # Union with a non-unique, non-monotonic index raises error
    # Only needed for bool index factory
    idx1 = indices.sort_values()
    idx2 = indices.sort_values()
    assert idx1.union(idx2).dtype == idx1.dtype


def test_union_different_types(index_pair):
    # Union of indexes with non-matching, incompatible dtypes falls
    # back to object dtype, in both orders.
    # GH 23525
    idx1, idx2 = index_pair
    type_pair = tuple(sorted([type(idx1), type(idx2)], key=lambda x: str(x)))
    if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
        pytest.xfail('This test only considers non compatible indexes.')

    if any(isinstance(idx, pd.MultiIndex) for idx in index_pair):
        pytest.xfail('This test doesn\'t consider multiindixes.')

    if is_dtype_equal(idx1.dtype, idx2.dtype):
        pytest.xfail('This test only considers non matching dtypes.')

    # A union with a CategoricalIndex (even as dtype('O')) and a
    # non-CategoricalIndex can only be made if both indices are monotonic.
    # This is true before this PR as well.

    # Union with a non-unique, non-monotonic index raises error
    # This applies to the boolean index
    idx1 = idx1.sort_values()
    idx2 = idx2.sort_values()

    assert idx1.union(idx2).dtype == np.dtype('O')
    assert idx2.union(idx1).dtype == np.dtype('O')


@pytest.mark.parametrize('idx_fact1,idx_fact2',
                         COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
    # Compatible pairs may resolve to either operand's dtype.
    # GH 23525
    idx1 = idx_fact1(10)
    idx2 = idx_fact2(20)

    res1 = idx1.union(idx2)
    res2 = idx2.union(idx1)

    assert res1.dtype in (idx1.dtype, idx2.dtype)
    assert res2.dtype in (idx1.dtype, idx2.dtype)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012, Intel, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Client side of the volume RPC API.
"""

from oslo.config import cfg

from cinder.openstack.common import rpc
import cinder.openstack.common.rpc.proxy


CONF = cfg.CONF


class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
    '''Client side of the volume rpc API.

    Methods use `cast` for fire-and-forget operations and `call` when a
    result is needed; each message is routed to the queue of the volume's
    host and tagged with the API version that introduced it.

    API version history:

        1.0 - Initial version.
        1.1 - Adds clone volume option to create_volume.
        1.2 - Add publish_service_capabilities() method.
        1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
        1.4 - Add request_spec, filter_properties and
              allow_reschedule arguments to create_volume().
        1.5 - Add accept_transfer.
        1.6 - Add extend_volume.
        1.7 - Adds host_name parameter to attach_volume()
              to allow attaching to host rather than instance.
        1.8 - Add migrate_volume, rename_volume.
    '''

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic=None):
        super(VolumeAPI, self).__init__(
            topic=topic or CONF.volume_topic,
            default_version=self.BASE_RPC_API_VERSION)

    def create_volume(self, ctxt, volume, host,
                      request_spec, filter_properties,
                      allow_reschedule=True,
                      snapshot_id=None, image_id=None,
                      source_volid=None):
        # Async: scheduler/host creates the volume in the background.
        self.cast(ctxt,
                  self.make_msg('create_volume',
                                volume_id=volume['id'],
                                request_spec=request_spec,
                                filter_properties=filter_properties,
                                allow_reschedule=allow_reschedule,
                                snapshot_id=snapshot_id,
                                image_id=image_id,
                                source_volid=source_volid),
                  topic=rpc.queue_get_for(ctxt, self.topic, host),
                  version='1.4')

    def delete_volume(self, ctxt, volume):
        self.cast(ctxt,
                  self.make_msg('delete_volume',
                                volume_id=volume['id']),
                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))

    def create_snapshot(self, ctxt, volume, snapshot):
        self.cast(ctxt, self.make_msg('create_snapshot',
                                      volume_id=volume['id'],
                                      snapshot_id=snapshot['id']),
                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))

    def delete_snapshot(self, ctxt, snapshot, host):
        self.cast(ctxt, self.make_msg('delete_snapshot',
                                      snapshot_id=snapshot['id']),
                  topic=rpc.queue_get_for(ctxt, self.topic, host))

    def attach_volume(self, ctxt, volume, instance_uuid, host_name,
                      mountpoint):
        # Sync: returns the attach result from the volume host.
        return self.call(ctxt, self.make_msg('attach_volume',
                                             volume_id=volume['id'],
                                             instance_uuid=instance_uuid,
                                             host_name=host_name,
                                             mountpoint=mountpoint),
                         topic=rpc.queue_get_for(ctxt,
                                                 self.topic,
                                                 volume['host']),
                         version='1.7')

    def detach_volume(self, ctxt, volume):
        return self.call(ctxt, self.make_msg('detach_volume',
                                             volume_id=volume['id']),
                         topic=rpc.queue_get_for(ctxt,
                                                 self.topic,
                                                 volume['host']))

    def copy_volume_to_image(self, ctxt, volume, image_meta):
        self.cast(ctxt, self.make_msg('copy_volume_to_image',
                                      volume_id=volume['id'],
                                      image_meta=image_meta),
                  topic=rpc.queue_get_for(ctxt,
                                          self.topic,
                                          volume['host']),
                  version='1.3')

    def initialize_connection(self, ctxt, volume, connector):
        return self.call(ctxt, self.make_msg('initialize_connection',
                                             volume_id=volume['id'],
                                             connector=connector),
                         topic=rpc.queue_get_for(ctxt,
                                                 self.topic,
                                                 volume['host']))

    def terminate_connection(self, ctxt, volume, connector, force=False):
        return self.call(ctxt, self.make_msg('terminate_connection',
                                             volume_id=volume['id'],
                                             connector=connector,
                                             force=force),
                         topic=rpc.queue_get_for(ctxt,
                                                 self.topic,
                                                 volume['host']))

    def publish_service_capabilities(self, ctxt):
        # Broadcast to every volume service, not a single host queue.
        self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'),
                         version='1.2')

    def accept_transfer(self, ctxt, volume):
        self.cast(ctxt,
                  self.make_msg('accept_transfer',
                                volume_id=volume['id']),
                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
                  version='1.5')

    def extend_volume(self, ctxt, volume, new_size):
        self.cast(ctxt,
                  self.make_msg('extend_volume',
                                volume_id=volume['id'],
                                new_size=new_size),
                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
                  version='1.6')

    def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
        # dest_host is an object; flatten it into a serializable dict.
        host_p = {'host': dest_host.host,
                  'capabilities': dest_host.capabilities}
        self.cast(ctxt,
                  self.make_msg('migrate_volume',
                                volume_id=volume['id'],
                                host=host_p,
                                force_host_copy=force_host_copy),
                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
                  version='1.8')

    def rename_volume(self, ctxt, volume, new_name_id):
        self.call(ctxt,
                  self.make_msg('rename_volume',
                                volume_id=volume['id'],
                                new_name_id=new_name_id),
                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
                  version='1.8')
from backend import app

# Run the Flask development server when executed directly.
if __name__ == '__main__':
    # Bind on all interfaces, port 8080; threaded dev server.
    # NOTE(review): debug=True enables the interactive debugger — do not
    # use this entry point in production.
    app.run('0.0.0.0', port=8080, threaded=True, debug=True)
    InvalidFrameError,
    InternalError,
)
from collections import deque
from .util import take_from_deque

__all__ = [
    'Frame',
    'PingFrame',
    'DataFrame',
    'ResetFrame',
    'WindowFrame',
    'SettingsFrame',
    'FrameReader',
]

# Wire header: 4-byte stream id, 4 bytes packing payload size (high 24
# bits) with flags (low 8 bits), 1-byte frame kind.  Big-endian.
# NOTE: this module is Python 2 code (cmp, basestring, __metaclass__).
FRAME_HEADER_FMT = struct.Struct('>IIB')
assert FRAME_HEADER_FMT.size == 9

# Per-frame-kind flag bits.
FLAG_PING_ACK = 1
FLAG_DATA_EOF = 1
FLAG_DATA_OPEN = 2
FLAG_DATA_ACK = 4
FLAG_RESET_READ = 1
FLAG_RESET_WRITE = 2
FLAG_SETTINGS_ACK = 1


class Header(object):
    """Parsed 9-byte frame header."""

    __slots__ = ('stream_id', 'payload_size', 'flags', 'kind')

    def __init__(self, stream_id, payload_size, flags, kind):
        self.stream_id = stream_id
        self.payload_size = payload_size
        self.flags = flags
        self.kind = kind

    @classmethod
    def load(cls, reader):
        # size and flags share one 32-bit field: size << 8 | flags.
        stream_id, size_flags, kind = FRAME_HEADER_FMT.unpack(reader.read(9))
        return cls(stream_id, size_flags >> 8, size_flags & 0xff, kind)

    def dump(self, writer):
        writer.write(FRAME_HEADER_FMT.pack(
            self.stream_id,
            self.flags | (self.payload_size << 8),
            self.kind))


class FrameMeta(type):
    """Metaclass that registers each Frame subclass by its ID byte.

    Raises TypeError if two subclasses claim the same type id.
    """

    def __new__(meta, name, bases, bodydict):
        cls = type.__new__(meta, name, bases, bodydict)
        frame_id = bodydict.get('ID')
        frame_classes = cls.frame_classes
        if frame_id is not None:
            prev = frame_classes.get(frame_id)
            if prev is not None:
                raise TypeError(
                    'Frames %s and %s have the same type id %r' % (
                        prev.__name__, name, frame_id))
            frame_classes[frame_id] = cls
        return cls


class Frame(object):
    """Base class for all frame types; dispatches load() by header kind."""

    __slots__ = ()
    __metaclass__ = FrameMeta

    # Registry of frame kind id -> subclass, populated by FrameMeta.
    frame_classes = {}

    @classmethod
    def load(cls, reader):
        # Returns None at clean EOF, otherwise the decoded subclass frame.
        if reader.eof:
            return None
        header = Header.load(reader)
        impl = cls.frame_classes.get(header.kind)
        if impl is None:
            raise UnknownFrameError()
        return impl.load_frame_data(header, reader)

    def __repr__(self):
        # NOTE(review): the trailing comma after the genexp iterable below
        # looks unintended and may not parse on all interpreters — confirm.
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(
                '%s=%r' % (name, getattr(self, name))
                for name in self.__class__.__slots__,
            ),
        )


class PingFrame(Frame):
    """Kind 0: 8-byte ping payload on stream 0."""

    __slots__ = ('flags', 'value')

    ID = 0
    FMT = struct.Struct('>q')

    def __init__(self, flags, value):
        self.flags = flags
        self.value = value

    def __cmp__(self, other):
        # Value comparison for equal types; identity ordering otherwise.
        if other is self:
            return 0
        if isinstance(other, PingFrame):
            return cmp(
                (self.flags, self.value),
                (other.flags, other.value),
            )
        return cmp(id(self), id(other))

    @classmethod
    def load_frame_data(cls, header, reader):
        if header.stream_id != 0 or header.payload_size != 8:
            raise InvalidFrameError()
        value, = cls.FMT.unpack(reader.read(8))
        return cls(header.flags, value)

    def dump(self, writer):
        Header(0, 8, self.flags, self.ID).dump(writer)
        writer.write(self.FMT.pack(self.value))


class DataFrame(Frame):
    """Kind 1: opaque payload bytes for a stream."""

    __slots__ = ('stream_id', 'flags', 'data')

    ID = 1

    def __init__(self, stream_id, flags, data):
        self.stream_id = stream_id
        self.flags = flags
        self.data = data

    def __cmp__(self, other):
        if other is self:
            return 0
        if isinstance(other, DataFrame):
            return cmp(
                (self.stream_id, self.flags, self.data),
                (other.stream_id, other.flags, other.data),
            )
        return cmp(id(self), id(other))

    @classmethod
    def load_frame_data(cls, header, reader):
        if header.payload_size > 0:
            data = reader.read(header.payload_size)
        else:
            data = ''
        return cls(header.stream_id, header.flags, data)

    def dump(self, writer):
        Header(self.stream_id, len(self.data), self.flags,
               self.ID).dump(writer)
        if self.data:
            writer.write(self.data)


class ResetFrame(Frame):
    """Kind 2: error code plus optional utf8 message for a stream."""

    __slots__ = ('stream_id', 'flags', 'error')

    ID = 2
    FMT = struct.Struct('>I')

    def __init__(self, stream_id, flags, error):
        self.stream_id = stream_id
        self.flags = flags
        self.error = error

    def __cmp__(self, other):
        if other is self:
            return 0
        if isinstance(other, ResetFrame):
            return cmp(
                (self.stream_id, self.flags, self.error),
                (other.stream_id, other.flags, other.error),
            )
        return cmp(id(self), id(other))

    @classmethod
    def load_frame_data(cls, header, reader):
        if header.payload_size < 4:
            raise InvalidFrameError()
        error_code, = cls.FMT.unpack(reader.read(4))
        if header.payload_size > 4:
            message = reader.read(header.payload_size - 4)
        else:
            message = ''
        return cls(header.stream_id, header.flags,
                   CopperError.from_error_code(error_code, message))

    def dump(self, writer):
        # Unknown errors serialize as InternalError's code with the
        # stringified exception; known errors keep their own message.
        error_code = getattr(self.error, 'copper_error',
                             InternalError.copper_error)
        if error_code == -1:
            message = '%s' % (self.error,)
        else:
            message = self.error.message or ''
        # Coerce any message to utf8 bytes before writing.
        if not isinstance(message, basestring):
            try:
                message = str(message)
            except UnicodeError:
                message = unicode(message)
        if isinstance(message, unicode):
            message = message.encode('utf8')
        Header(self.stream_id, len(message) + 4, self.flags,
               self.ID).dump(writer)
        writer.write(self.FMT.pack(error_code))
        if message:
            writer.write(message)


class WindowFrame(Frame):
    """Kind 3: flow-control window increment for a stream."""

    __slots__ = ('stream_id', 'flags', 'increment')

    ID = 3
    FMT = struct.Struct('>I')

    def __init__(self, stream_id, flags, increment):
        self.stream_id = stream_id
        self.flags = flags
        self.increment = increment

    def __cmp__(self, other):
        if other is self:
            return 0
        if isinstance(other, WindowFrame):
            return cmp(
                (self.stream_id, self.flags, self.increment),
                (other.stream_id, other.flags, other.increment),
            )
        return cmp(id(self), id(other))

    @classmethod
    def load_frame_data(cls, header, reader):
        if header.payload_size != 4:
            raise InvalidFrameError()
        increment, = cls.FMT.unpack(reader.read(4))
        return cls(header.stream_id, header.flags, increment)

    def dump(self, writer):
        Header(self.stream_id, 4, self.flags, self.ID).dump(writer)
        writer.write(self.FMT.pack(self.increment))


class SettingsFrame(Frame):
    """Kind 4: {setting id -> value} map on stream 0; ACKs are empty."""

    __slots__ = ('flags', 'values')

    ID = 4
    FMT = struct.Struct('>HI')

    def __init__(self, flags, values):
        self.flags = flags
        self.values = values

    def __cmp__(self, other):
        if other is self:
            return 0
        if isinstance(other, SettingsFrame):
            return cmp(
                (self.flags, self.values),
                (other.flags, other.values),
            )
        return cmp(id(self), id(other))

    @classmethod
    def load_frame_data(cls, header, reader):
        if header.stream_id != 0:
            raise InvalidFrameError()
        if header.flags & FLAG_SETTINGS_ACK:
            # An ACK carries no payload.
            if header.payload_size != 0:
                raise InvalidFrameError()
            values = {}
        else:
            # Payload is a sequence of 6-byte (id, value) entries.
            if (header.payload_size % 6) != 0:
                raise InvalidFrameError()
            count = header.payload_size // 6
            values = {}
            while count > 0:
                sid, value = cls.FMT.unpack(reader.read(6))
                values[sid] = value
                count -= 1
        return cls(header.flags, values)

    def dump(self, writer):
        Header(0, 6 * len(self.values), self.flags, self.ID).dump(writer)
        # Sorted for a deterministic wire encoding.
        for sid, value in sorted(self.values.items()):
            writer.write(self.FMT.pack(sid, value))


class FrameReader(object):
    # NOTE(review): class continues beyond this excerpt; only the start
    # of __init__ is visible here.
    def __init__(self, sock, chunk_size=8192):
        self.sock = sock
        self.size = 0