prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
# NOTE(review): this chunk begins mid-file — "rom" below is the tail of what
# was presumably "from unittest import TestCase"; the imports that bind
# QuoteChecker, subprocess and get_absolute_path are outside the visible chunk.
rom unittest import TestCase


class TestChecks(TestCase):
    """Checks on QuoteChecker helpers that are independent of quote style."""

    def test_get_noqa_lines(self):
        # data/no_qa.py is expected to carry a "# noqa" marker on line 2 only.
        checker = QuoteChecker(None, filename=get_absolute_path('data/no_qa.py'))
        self.assertEqual(checker.get_noqa_lines(checker.get_file_contents()), [2])


class TestFlake8Stdin(TestCase):
    """End-to-end check: run the real flake8 executable with a fixture on stdin."""

    def test_stdin(self):
        """Test using stdin."""
        filepath = get_absolute_path('data/doubles.py')
        with open(filepath, 'rb') as f:
            p = subprocess.Popen(['flake8', '--select=Q', '-'], stdin=f,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()

        stdout_lines = stdout.splitlines()
        self.assertEqual(stderr, b'')
        self.assertEqual(len(stdout_lines), 3)
        # The reported column may be 24 or 25 depending on the flake8 version,
        # hence the regex alternation instead of an exact match.
        self.assertRegex(
            stdout_lines[0],
            b'stdin:1:(24|25): Q000 Double quotes found but single quotes preferred')
        self.assertRegex(
            stdout_lines[1],
            b'stdin:2:(24|25): Q000 Double quotes found but single quotes preferred')
        self.assertRegex(
            stdout_lines[2],
            b'stdin:3:(24|25): Q000 Double quotes found but single quotes preferred')


class DoublesTestChecks(TestCase):
    """Configure single quotes as preferred; double-quoted fixtures get flagged."""

    def setUp(self):
        # parse_options mutates QuoteChecker's class-level configuration, so
        # each TestCase class re-applies its own options in setUp.
        class DoublesOptions():
            inline_quotes = "'"
            multiline_quotes = "'"
        QuoteChecker.parse_options(DoublesOptions)

    def test_multiline_string(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_multiline_string.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
        ])

    def test_multiline_string_using_lines(self):
        # Same expectation as test_multiline_string, but feeding the checker
        # pre-read lines instead of a filename.
        with open(get_absolute_path('data/doubles_multiline_string.py')) as f:
            lines = f.readlines()
        doubles_checker = QuoteChecker(None, lines=lines)
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
        ])

    def test_wrapped(self):
        # Wrapped (outer-quote-swapped) strings are acceptable: no errors.
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_wrapped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])

    def test_doubles(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 24, 'line': 1, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 2, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 3, 'message': 'Q000 Double quotes found but single quotes preferred'},
        ])

    def test_noqa_doubles(self):
        # NOTE(review): filename is passed positionally here but by keyword
        # everywhere else in this module.
        checker = QuoteChecker(None, get_absolute_path('data/doubles_noqa.py'))
        self.assertEqual(list(checker.run()), [])

    def test_escapes(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
        ])

    def test_escapes_allowed(self):
        # With avoid_escape disabled, Q003 must not be reported.
        class Options():
            inline_quotes = "'"
            avoid_escape = False
        QuoteChecker.parse_options(Options)

        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])


class DoublesAliasTestChecks(TestCase):
    """Same expectations as DoublesTestChecks, configured via the 'single' alias."""

    def setUp(self):
        class DoublesAliasOptions():
            inline_quotes = 'single'
            multiline_quotes = 'single'
        QuoteChecker.parse_options(DoublesAliasOptions)

    def test_doubles(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_wrapped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])

        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 24, 'line': 1, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 2, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 3, 'message': 'Q000 Double quotes found but single quotes preferred'},
        ])


class SinglesTestChecks(TestCase):
    """Configure double quotes as preferred; single-quoted fixtures get flagged."""

    def setUp(self):
        class SinglesOptions():
            inline_quotes = '"'
            multiline_quotes = '"'
        QuoteChecker.parse_options(SinglesOptions)

    def test_multiline_string(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_multiline_string.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Single quote multiline found but double quotes preferred'},
        ])

    def test_wrapped(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_wrapped.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])

    def test_singles(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
            {'col': 24, 'line': 1, 'message': 'Q000 Single quotes found but double quotes preferred'},
            {'col': 24, 'line': 2, 'message': 'Q000 Single quotes found but double quotes preferred'},
            {'col': 24, 'line': 3, 'message': 'Q000 Single quotes found but double quotes preferred'},
        ])

    def test_noqa_singles(self):
        checker = QuoteChecker(None, get_absolute_path('data/singles_noqa.py'))
        self.assertEqual(list(checker.run()), [])

    def test_escapes(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_escaped.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
            {'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
        ])

    def test_escapes_allowed(self):
        class Options():
            inline_quotes = '"'
            avoid_escape = False
        QuoteChecker.parse_options(Options)

        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_escaped.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])


class SinglesAliasTestChecks(TestCase):
    """Same expectations as SinglesTestChecks, configured via the 'double' alias."""

    def setUp(self):
        class SinglesAliasOptions():
            inline_quotes = 'double'
            multiline_quotes = 'double'
        QuoteChecker.parse_options(SinglesAliasOptions)

    def test_singles(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_wrapped.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])

        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
            {'col': 24, 'line': 1, 'message': 'Q000 Single quotes found but double quotes preferred'},
            {'col': 24, 'line': 2, 'message': 'Q000 Single quotes found but double quotes preferred'},
            {'col': 24, 'line': 3, 'message': 'Q000 Single quotes found but double quotes preferred'},
        ])


# NOTE(review): chunk is truncated here — the body of MultilineTestChecks
# lies outside the visible region.
class MultilineTestChecks(TestCase):
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages


def _read(path):
    """Return the entire contents of the text file at *path*."""
    with open(path) as handle:
        return handle.read()


# Long description shown on PyPI: the README followed by the changelog.
readme = _read('README.rst')
history = _read('HISTORY.rst')

requirements = [
    # TODO: put package requirements here
]

setup_requirements = [
    # TODO(nbargnesi): put setup requirements (distutils extensions, etc.) here
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='proxme',
    version='0.1.0',
    description="Serves your proxy auto-config (PAC) content.",
    long_description=readme + '\n\n' + history,
    author="Nick Bargnesi",
    author_email='nick@den-4.com',
    url='https://github.com/nbargnesi/proxme',
    packages=find_packages(include=['proxme']),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='proxme',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
    entry_points={
        'console_scripts': [
            'proxme = proxme.__main__:main'
        ],
    },
)
""" [2016-11-09] Challenge #291 [Intermediate] Reverse Polish Notation Calculator https://www.reddit.com/r/dailyprogrammer/comments/5c5jx9/20161109_challenge_291_intermediate_reverse/ A little while back we had a programming [challenge](https://www.reddit.com/r/dailyprogrammer/comments/2yquvm/20150311_challenge_205_intermediate_rpn/) to convert an infix expression (also known as "normal" math) to a postfix expression (also known as [Reverse Polish Notation](https://en.wikipedia.org/wiki/Reverse_Polish_notation)). Today we'll do something a little different: We will write a calculator that takes RPN input, and outputs the result. # Formal input The input will be a whitespace-delimited RPN expression. The supported operators will be: * `+` - addition * `-` - subtraction * `*`, `x` - multiplication * `/` - division (floating point, e.g. `3/2=1.5`, not `3/2=1`) * `//` - integer division (e.g. `3/2=1`) * `%` - modulus, or "remainder" division (e.g. `14%3=2` and `21%7=0`) * `^` - power * `!` - factorial (unary operator) **Sample input:** 0
.5 1 2 ! * 2 1 ^ + 10 + * # Formal output The output is a single number: the result of the calculation. The output should also indicate if the input is not a valid RPN expression. **Sample output:** 7 Explanation: the sample input translates to `0.5 * ((1 * 2!) + (2 ^ 1) + 10)`, w
hich comes out to `7`. ## Challenge 1 **Input:** `1 2 3 4 ! + - / 100 *` **Output:** `-4` ## Challenge 2 **Input:** `100 807 3 331 * + 2 2 1 + 2 + * 5 ^ * 23 10 558 * 10 * + + *` # Finally... Hope you enjoyed today's challenge! Have a fun problem or challenge of your own? Drop by /r/dailyprogrammer_ideas and share it with everyone! """ def main(): pass if __name__ == "__main__": main()
ature", native_unit_of_measurement=TEMP_CELSIUS, value_getter=lambda api: api.getOutsideTemperature(), device_class=DEVICE_CLASS_TEMPERATURE, ), ViCareSensorEntityDescription( key=SENSOR_RETURN_TEMPERATURE, name="Return Temperature",
native_unit_of_measurement=TEMP_CELSIUS, value_getter=lambda api: api.getReturnTemperature(), device_class=DEVICE_CLASS_TEMPERATURE, ), ViCareSensorEntityDescription( key=SENSOR_BOILER_TEMPERATURE, name="Boiler Temperature", native_unit_of_measurement=TEMP_CELSIUS, value_
getter=lambda api: api.getBoilerTemperature(), device_class=DEVICE_CLASS_TEMPERATURE, ), ViCareSensorEntityDescription( key=SENSOR_DHW_GAS_CONSUMPTION_TODAY, name="Hot water gas consumption today", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionDomesticHotWaterToday(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK, name="Hot water gas consumption this week", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisWeek(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH, name="Hot water gas consumption this month", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisMonth(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR, name="Hot water gas consumption this year", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisYear(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_GAS_CONSUMPTION_TODAY, name="Heating gas consumption today", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionHeatingToday(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_GAS_CONSUMPTION_THIS_WEEK, name="Heating gas consumption this week", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionHeatingThisWeek(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( 
key=SENSOR_GAS_CONSUMPTION_THIS_MONTH, name="Heating gas consumption this month", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionHeatingThisMonth(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_GAS_CONSUMPTION_THIS_YEAR, name="Heating gas consumption this year", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getGasConsumptionHeatingThisYear(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_POWER_PRODUCTION_CURRENT, name="Power production current", native_unit_of_measurement=POWER_WATT, value_getter=lambda api: api.getPowerProductionCurrent(), device_class=DEVICE_CLASS_POWER, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_POWER_PRODUCTION_TODAY, name="Power production today", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getPowerProductionToday(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_POWER_PRODUCTION_THIS_WEEK, name="Power production this week", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getPowerProductionThisWeek(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_POWER_PRODUCTION_THIS_MONTH, name="Power production this month", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getPowerProductionThisMonth(), device_class=DEVICE_CLASS_ENERGY, state_class=STATE_CLASS_TOTAL_INCREASING, ), ViCareSensorEntityDescription( key=SENSOR_POWER_PRODUCTION_THIS_YEAR, name="Power production this year", native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value_getter=lambda api: api.getPowerProductionThisYear(), device_class=DEVICE_CLASS_ENERGY, 
state_class=STATE_CLASS_TOTAL_INCREASING, ), ) CIRCUIT_SENSORS: tuple[ViCareSensorEntityDescription, ...] = ( ViCareSensorEntityDescription( key=SENSOR_SUPPLY_TEMPERATURE, name="Supply Temperature", native_unit_of_measurement=TEMP_CELSIUS, value_getter=lambda api: api.getSupplyTemperature(), ), ) BURNER_SENSORS: tuple[ViCareSensorEntityDescription, ...] = ( ViCareSensorEntityDescription( key=SENSOR_BURNER_STARTS, name="Burner Starts", icon="mdi:counter", value_getter=lambda api: api.getStarts(), ), ViCareSensorEntityDescription( key=SENSOR_BURNER_HOURS, name="Burner Hours", icon="mdi:counter", native_unit_of_measurement=TIME_HOURS, value_getter=lambda api: api.getHours(), ), ViCareSensorEntityDescription( key=SENSOR_BURNER_MODULATION, name="Burner Modulation", icon="mdi:percent", native_unit_of_measurement=PERCENTAGE, value_getter=lambda api: api.getModulation(), ), ) COMPRESSOR_SENSORS: tuple[ViCareSensorEntityDescription, ...] = ( ViCareSensorEntityDescription( key=SENSOR_COMPRESSOR_STARTS, name="Compressor Starts", icon="mdi:counter", value_getter=lambda api: api.getStarts(), ), ViCareSensorEntityDescription( key=SENSOR_COMPRESSOR_HOURS, name="Compressor Hours", icon="mdi:counter", native_unit_of_measurement=TIME_HOURS, value_getter=lambda api: api.getHours(), ), ViCareSensorEntityDescription( key=SENSOR_COMPRESSOR_HOURS_LOADCLASS1, name="Compressor Hours Load Class 1", icon="mdi:counter", native_unit_of_measurement=TIME_HOURS, value_getter=lambda api: api.getHoursLoadClass1(), ), ViCareSensorEntityDescription( key=SENSOR_COMPRESSOR_HOURS_LOADCLASS2, name="Compressor Hours Load Class 2", icon="mdi:counter", native_unit_of_measurement=TIME_HOURS, value_getter=lambda api: api.getHoursLoadClass2(), ), ViCareSensorEntityDescription( key=SENSOR_COMPRESSOR_HOURS_LOADCLASS3, name="Compressor Hours Load Class 3", icon="mdi:counter", native_unit_of_measurement=TIME_HOURS, value_getter=lambda api: api.getHoursLoadClass3(), ), ViCareSensorEntityDescription( 
key=SENSOR_COMPRESSOR_HOURS_LOADCLASS4, name="Compressor Hours Load Class 4", icon="mdi:counter", native_unit_of_measurement=TIME_HOURS, value_getter=lambda api: api.getHoursLoadClass4(), ), ViCareSensorEntityDescription( key=SENSOR_COMPRESSOR_HOURS_LOADCLASS5, name="Compressor Hours Load Class 5", icon="mdi:counter", native_unit_of_measurement=TIME_HOURS, value_ge
#!/usr/bin/env python
'''
Generate the main window for the pi-gui program.

The interface shows the last played item with cover, title and supplemental
information (interactive), plus two buttons: one opening the library screen
and one exiting the program itself.
'''
# @author: Philipp Sehnert
# @contact: philipp.sehnert[a]gmail.com

# python imports
import sys, os  # NOTE(review): neither sys nor os is used in this chunk
import pygame
# internal imports
from interfaces import Interface

# Colour of the feedback circle drawn where the user clicks/touches.
YELLOW = (255, 255, 0)


class MainMenu():
    '''Generate the start interface for accessing all other screens.'''

    def __init__(self, screen, funcs, hardware_instance, book):
        # declare important variables
        self.screen = screen
        # important for framerate
        self.clock = pygame.time.Clock()
        # contain all interface methods
        self.interface = Interface()
        # callbacks for the menu items, keyed by menu label
        self.funcs = funcs
        # cached book shown in the "last played" window
        self.book = book
        # NOTE(review): hardware_instance is accepted but never stored or used.

    # define function that checks for mouse location
    def on_click(self):
        click_pos = (pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
        # select last played item (hit box: x 10..310, y 120..185)
        if 10 <= click_pos[0] <= 310 and 120 <= click_pos[1] <= 185:
            self.funcs['Continue'](self.book)
        # go to library screen (hit box: x 10..205, y 190..230)
        if 10 <= click_pos[0] <= 205 and 190 <= click_pos[1] <= 230:
            self.funcs['Select Book']()
        # exit gui (hit box: x 265..315, y 190..230)
        if 265 <= click_pos[0] <= 315 and 190 <= click_pos[1] <= 230:
            self.interface.exit_interface(self.screen)

    def run(self):
        '''Run method for drawing the screen to display.'''
        mainloop = True
        # use infinite loop for showing the screen
        # NOTE(review): mainloop is never set to False here; the loop is only
        # left via exit_interface (or an exception) — confirm that is intended.
        while mainloop:
            # Limit frame speed to 30 FPS
            self.clock.tick(30)
            self.interface.main_interface(self.screen, self.book)
            # wait for a pressed button or exit infinite loop
            for event in pygame.event.get():
                # recognize mouse and touchscreen activity
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = (pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
                    # visual feedback: draw a filled circle at the click point
                    pygame.draw.circle(self.screen, YELLOW, pos, 10, 0)
                    self.on_click()
            # update the screen
            pygame.display.flip()
ITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Ce fichier définit la classe BaseObj définie plus bas.""" import sys import traceback import time from collections import OrderedDict from bases.collections.dictionnaire import * from bases.collections.liste import Liste objets_base = {} # dictionnaire des différents BaseObj {nom_cls:cls} # Objets chargés objets = {} objets_par_type = {} ids = {} statut_gen = 0 # 0 => OK, 1 => en cours classes_base = {} class MetaBaseObj(type): """Métaclasse des objets de base. Cette métaclasse est là pour gérer les versions des différents objets BaseObj : Si un objet BaseObj change de structure, pour X raison (par exemple un attribut change de nom ou de type), à la récupération l'objet sera mis à jour grâce à une fonction définie dans le convertisseur (voir BaseObj.update). La fonction se trouvera dans un fichier identifiant le nom de la classe. On s'assure grâce à cette métaclasse que deux classes héritées de BaseObj n'ont pas un nom identique et on attribut un numéro de version (0) par défaut aux objets issus de ces classes hérités. """ def __init__(cls, nom, bases, contenu): """Constructeur de la métaclasse""" type.__init__(cls, nom, bases, contenu) classes_base[cls.__module__ + "." 
+ cls.__name__] = cls # Si on trouve les attributs _nom et _version, # c'est que la classe est versionnée if "_nom" in contenu and "_version" in contenu: cls._version = contenu["_version"] cls._nom = contenu["_nom"] # Pas de doublons ! if cls._nom in objets_base: if objets_base[cls._nom] == cls: return raise RuntimeError("La classe {0} héritée de BaseObj " \ "possède le même nom que la classe {1}".format( \ str(cls), str(objets_base[cls._nom]))) objets_base[cls._nom] = cls # On décore la méthode __init__ de la classe ancien_init = cls.__init__
def new_init(self, *args, **kwargs): ancien_init(self, *args, **kwargs) self.set_version(cls, cls._version) cls.__init__ = new_init else: cls._version = None cls._nom = None INIT, CONSTRUIT = 0, 1 class BaseObj(metaclass=MetaBaseObj): """Classe devant être héritée de la grande majorité des classes de Kassie. Le test est simple : si l'objet issu de
la classe doit être enregistré, l'hériter de BaseObj. """ importeur = None enregistrer = False _nom = "base_obj" _version = 1 def __init__(self): """Instancie un simple statut""" self._statut = INIT # On initialise le dictionnaire des versions de l'objet self._dict_version = {} self.e_existe = True self.ajouter_enr() def __getnewargs__(self): raise NotImplementedError( "la classe " + str(type(self)) + " n'a pas de méthode " \ "__getnewargs__") def ajouter_enr(self): if self.e_existe and type(self).enregistrer and statut_gen == 0 and \ id(self) not in objets: objets[id(self)] = self liste = objets_par_type.get(type(self), []) liste.append(self) objets_par_type[type(self)] = liste def version_actuelle(self, classe): """Retourne la version actuelle de l'objet. Cette version est celle enregistrée dans l'objet. Elle peut donc être différence de la classe (c'est le cas au chargement d'un objet à mettre à jour). """ if classe._nom in self._dict_version: return self._dict_version[classe._nom] else: return 0 def set_version(self, classe, version): """Met le numéro de version dans le dictionnaire de version.""" self._dict_version[classe._nom] = version def _construire(self): """Construit l'objet""" self._statut = CONSTRUIT def detruire(self): """Marque l'objet comme détruit.""" self.e_existe = False importeur.supenr.detruire_objet(self) if id(self) in objets: del objets[id(self)] @property def construit(self): return hasattr(self, "_statut") and self._statut == CONSTRUIT def __setstate__(self, dico_attrs): """Méthode appelée lors de la désérialisation de l'objet""" global statut_gen statut_gen = 1 # On récupère la classe classe = type(self) # On appel son constructeur try: classe.__init__(self, *self.__getnewargs__()) except NotImplementedError: print("Méthode __getnewargs__ non définie pour", classe) sys.exit(1) except TypeError as err: print("Erreur lors de l'appel au constructeur de", classe, err) print(traceback.format_exc()) sys.exit(1) self.__dict__.update(dico_attrs) 
# On vérifie s'il a besoin d'une vraie mis à jour self._update(classe) statut_gen = 0 self.ajouter_enr() def _update(self, classe): """Méthode appelée pendant la désérialisation de l'objet, destinée à vérifier si l'objet doit être mis à jour et, le cas échéant, le mettre à jour. """ # Mise à jour récursive par rapport aux classes-mères for base in classe.__bases__: # Inutile d'essayer de mettre à jour 'object' if base is not object: base._update(self, base) if classe._nom in objets_base: # On importe le convertisseur dédié à la classe en cours try: convertisseur = getattr(__import__( \ "primaires.supenr.convertisseurs." + classe._nom, \ globals(), locals(), ["Convertisseur"]), \ "Convertisseur") except ImportError as error: print("La classe {0} suivie en version ne possède pas de " \ "fichier de convertisseurs dans primaires.supenr." \ "convertisseurs".format(classe._nom)) exit() except AttributeError as error: print("Le fichier {0}.py dans primaires.supenr." \ "convertisseurs ne possède pas de classe " \ "Convertisseur".format(classe._nom)) exit() # On vérifie la version de la classe et celle de l'objet # Rappel : # self.version_actuelle() retourne la version enregistrée # classe._version retourne la version de la classe while self.version_actuelle(classe) < classe._version: try: # On appelle la version de conversion getattr(convertisseur, "depuis_version_" + \ str(self.version_actuelle(classe)))(self, classe) except AttributeError as error: print("Le fichier {0}.py dans primaires.supenr." \ "convertisseurs ne comporte pas de méthode " \ "depuis_version_".format(classe._nom) + str( \ self.version_actuelle(classe))) print(traceback.format_exc()) exit() def __getattribute__(self, nom_attr): """Méthode appelé quand on cherche à réc
# Copyright (c) 2007 Enough Project.
# See LICENSE for details.

## /* Copyright 2007, Eyal Lotem, Noam Lewis, enoughmail@googlegroups.com */
## /*
##     This file is part of Enough.
##
##     Enough is free software; you can redistribute it and/or modify
##     it under the terms of the GNU General Public License as published by
##     the Free Software Foundation; either version 3 of the License, or
##     (at your option) any later version.
##
##     Enough is distributed in the hope that it will be useful,
##     but WITHOUT ANY WARRANTY; without even the implied warranty of
##     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##     GNU General Public License for more details.
##
##     You should have received a copy of the GNU General Public License
##     along with this program.  If not, see <http://www.gnu.org/licenses/>.
## */

# Parses DOT "plain" output
# graph scale width height
# node name x y width height label style shape color fillcolor
# edge tail head n x1 y1 .. xn yn [label xl yl] style color
# stop
#
# NOTE: this module targets Python 2 (see dict.iteritems() in Dot.__init__
# below); LineReceiver's str delimiter matches Python-2 Twisted semantics.

from twisted.internet import protocol, defer
from twisted.protocols.basic import LineReceiver


# Raised when a queued graph request is superseded by a newer one before
# the running layout finishes.
class OutOfDate(Exception): pass


# Raised on protocol-level surprises (output arriving with no parser active).
class Error(Exception): pass


class _ProtocolWrapper(protocol.ProcessProtocol):
    """
    This class wraps a L{Protocol} instance in a L{ProcessProtocol} instance.
    """
    def __init__(self, proto):
        self.proto = proto

    def connectionMade(self):
        self.proto.connectionMade()

    def outReceived(self, data):
        # Child's stdout is fed to the wrapped protocol as received data.
        self.proto.dataReceived(data)

    def errReceived(self, data):
        # Child's stderr is passed straight through to our own stderr.
        import sys
        sys.stderr.write(data)
        sys.stderr.flush()

    def processEnded(self, reason):
        self.proto.connectionLost(reason)


class _DotProtocol(LineReceiver):
    """Line protocol speaking to a `dot -Tplain` child process.

    At most one graph is being parsed at a time (_current_graph_parser);
    at most one further request is queued (_waiting) — submitting a third
    request errbacks the queued one with OutOfDate.
    """
    delimiter = '\n'

    def __init__(self):
        self._waiting = None                # (dot_graph_text, Deferred) or None
        self._current_graph_parser = None   # _GraphParser currently consuming lines
        self._process = None                # transport of the active layout process

    def set_process(self, process):
        self._process = process

    def lineReceived(self, line):
        # Every output line belongs to the in-flight parse; anything else
        # means the child is talking out of turn.
        if self._current_graph_parser is None:
            raise Error("Dot outputs stuff, we're not expecting it", line)
        self._current_graph_parser.lineReceived(line)

    def _completed_current(self, result):
        # Fired (via addBoth) when the current parse callbacks/errbacks;
        # promotes the queued request, if any, and passes the result through.
        self._current_graph_parser = None
        if self._waiting:
            dot_graph_text, d = self._waiting
            self._waiting = None
            self._start(dot_graph_text, d)
        return result

    def get_graph_data(self, dot_graph_text):
        """Submit DOT source; returns a Deferred firing with (graph, nodes, edges)."""
        d = defer.Deferred()
        if self._current_graph_parser:
            # Let the current result finish computing, "queue" this
            # one.
            if self._waiting:
                self._waiting[1].errback(OutOfDate())
            self._waiting = dot_graph_text, d
        else:
            self._start(dot_graph_text, d)
        return d

    def _start(self, dot_graph_text, d):
        # Write the graph source to the child and arm a fresh parser for
        # the "plain" output it will produce.
        self._process.write(dot_graph_text + '\n')
        d.addBoth(self._completed_current)
        self._current_graph_parser = _GraphParser(d)


class _GraphParser(object):
    """Accumulates `dot -Tplain` lines; fires dresult with (graph, nodes, edges)
    on 'stop', or errbacks on an unrecognized statement."""

    def __init__(self, dresult):
        self.dresult = dresult
        self.graph = {}
        self.nodes = {}
        self.edges = {} # by heads

    def lineReceived(self, line):
        graph, nodes, edges = self.graph, self.nodes, self.edges
        words = line.split()
        # "graph scale width height"
        if words[0] == 'graph':
            graph['scale'], graph['width'], graph['height'] = map(float, words[1:])
            return
        # "node name x y width height label style shape color fillcolor"
        if words[0] == 'node':
            node = {}
            node['name'] = words[1]
            start = 2
            # four numeric fields followed by five string fields
            for i,attr in enumerate(('x', 'y','width', 'height',)):
                node[attr] = float(words[i+start])
            start += 4
            for i,attr in enumerate(('label', 'style', 'shape', 'color', 'fillcolor')):
                node[attr] = (words[i+start])
            nodes[node['name']] = node
            return
        # "edge tail head n x1 y1 .. xn yn [label xl yl] style color"
        if words[0] == 'edge':
            edge = {}
            edge['tail'] = words[1]
            edge['head'] = words[2]
            n = int(words[3])
            points = []
            i = 4
            # collect the n control points; relies on Python 2 integer
            # division of (i - 4) / 2
            while (i - 4) / 2 < n:
                points.append((float(words[i]), float(words[i+1])))
                i += 2
            edge['points'] = points
            # 6+2n words = no label; 9+2n words = label + its x/y position
            if len(words) == 6+n*2:
                edge['label'] = edge['lx'] = edge['ly'] = None
            elif len(words) == 9+n*2:
                edge['label'] = words[-5]
                edge['lx'], edge['ly'] = float(words[-4]), float(words[-3])
            else:
                assert False, "Cannot understand %r" % (line,)
            edge['style'] = words[-2]
            edge['color'] = words[-1]
            # edges are grouped by their tail node name
            edges.setdefault(edge['tail'], []).append(edge)
            return
        # "stop" terminates one graph's output
        if words[0] == 'stop':
            self.dresult.callback((graph, nodes, edges))
            return
        self.dresult.errback(ValueError("Unexpected statement", line))


class Dot(object):
    """Facade: spawns one child process per layout program and routes graph
    requests to the currently selected one (default 'dot')."""

    layout_programs = ('dot', 'neato', 'twopi')

    def __init__(self):
        from twisted.internet import reactor
        self.protocol = _DotProtocol()
        self.processes = {}
        # One long-lived child per layout program; '-y' flips the y axis,
        # '-Tplain' selects the line-oriented output format parsed above.
        for prog, command_line in find_dot(self.layout_programs).iteritems():
            process = reactor.spawnProcess(_ProtocolWrapper(self.protocol),
                                           command_line,
                                           [command_line, '-Tplain', '-y'])
            self.processes[prog] = process
        self.set_process('dot')

    def set_process(self, prog):
        # Select which spawned layout program future requests go to.
        self.protocol.set_process(self.processes[prog])

    def get_graph_data(self, dot_graph_text):
        return self.protocol.get_graph_data(dot_graph_text)


def find_dot(layout_programs):
    """Return {program_name: executable_path} for the given layout programs.

    On win32 probes c:/d: for the AT&T Graphviz install dir; elsewhere the
    programs are assumed to be on PATH with no suffix.
    """
    import sys
    import os
    if sys.platform == 'win32':
        DOT_PATH = r'\program files\att\graphviz\bin'
        DOT_SUFFIX = '.exe'
        for drive in ('c', 'd'):
            if os.path.isdir(drive + ':' + DOT_PATH):
                break
        else:
            raise Exception("Couldn't find DOT installation path")
        DOT_PATH = drive + ':' + DOT_PATH
    else:
        # Assume dot programs have no suffix and are in the PATH
        DOT_PATH = ''
        DOT_SUFFIX = ''
    res_paths = {}
    for prog in layout_programs:
        res_paths[prog] = os.path.join(DOT_PATH, prog+DOT_SUFFIX)
    return res_paths
is None: timestamp_format = lambda: int(time.time()) def alert(*params): formatted_msg = msg_type + "\t" + msg_template % params timestamped_msg = prepend_timestamp(formatted_msg, timestamp_format) print >> warnfile, timestamped_msg return alert def build_alert_hooks(patterns_file, warnfile): """Parse data in patterns file and transform into alert_hook list. Args: patterns_file: file; File to read alert pattern definitions from. warnfile: file; File to configure alert function to write warning to. Returns: list; Regex to alert function mapping. [(regex, alert_function), ...] """ pattern_lines = patterns_file.readlines() # expected pattern format: # <msgtype> <newline> <regex> <newline> <alert> <newline> <newline> # msgtype = a string categorizing the type of the message - used for # enabling/disabling specific categories of warnings # regex = a python regular expression # alert = a string describing the alert message # if the regex matches the line, this displayed warning will # be the result of (alert % match.groups()) patterns = zip(pattern_lines[0::4], pattern_lines[1::4], pattern_lines[2::4]) # assert that the patterns are separated by empty lines if sum(len(line.strip()) for line in pattern_lines[3::4]) > 0: raise ValueError('warning patterns are not separated by blank lines') hooks = [] for msgtype, regex, alert in patterns: regex = re.compile(regex.rstrip('\n')) alert_function = make_alert(warnfile, msgtype.rstrip('\n'), alert.rstrip('\n')) hooks.append((regex, alert_function)) return hooks def process_input( input, logfile, log_timestamp_format=None, alert_hooks=()): """Continuously read lines from input stream and: - Write them to log, possibly prefixed by timestamp. - Watch for alert patterns. Args: input: file; Stream to read from. logfile: file; Log file to write to log_timestamp_format: str; Format to use for timestamping entries. No timestamp is added if None. alert_hooks: list; Generated from build_alert_hooks. [(regex, alert_function), ...] 
""" while True: line = input.readline() if len(line) == 0: # this should only happen if the remote console unexpectedly # goes away. terminate this process so that we don't spin # forever doing 0-length reads off of input write_logline(logfile, TERM_MSG, log_timestamp_format) break if line == '\n': # If it's just an empty line we discard and continue. continue write_logline(logfile, line, log_timestamp_format) for regex, callback in alert_hooks: match = re.match(regex, line.strip()) if match: callback(*match.groups()) def lookup_lastlines(lastlines_dirpath, path): """Retrieve last lines seen for path. Open corresponding lastline file for path If there isn't one or isn't a match return None Args: lastlines_dirpath: str; Dirpath to store lastlines files to. path: str; Filepath to source file that lastlines came from. Returns: str; Last lines seen if they exist - Or - None; Otherwise """ underscored = path.replace('/', '_') try: lastlines_file = open(os.path.join(lastlines_dirpath, underscored)) except (OSError, IOError): return lastlines = lastlines_file.read() lastlines_file.close() os.remove(lastlines_file.name) if not lastlines: return try: target_file = open(path) except (OSError, IOError): return # Load it all in for now target_data = target_file.read() target_file.close() # Get start loc in the target_data string, scanning from right loc = target_data.rfind(lastlines) if loc == -1: return # Then translate this into a reverse line number # (count newlines that occur afterward) reverse_lineno = target_data.count('\n', loc + len(lastlines)) return reverse_lineno def write_lastlines_file(lastlines_dirpath, path, data): """Write data to lastlines file for path. Args: lastlines_dirpath: str; Dirpath to store lastlines files to. path: str; Filepath to source file that data comes from. data: str; Returns: str; Filepath that lastline data was written to. 
""" underscored = path.replace('/', '_') dest_path = os.path.join(lastlines_dirpath, underscored) open(dest_path, 'w').write(data) return dest_path def nonblocking(pipe): """Set python file object to nonblocking mode. This allows us to take advantage of pipe.read() where we don't have to specify a buflen. Cuts down on a few lines we'd have to maintain. Args: pipe: file; File object to modify Returns: pipe """ flags = fcntl.fcntl(pipe, fcntl.F_GETFL) fcntl.fcntl(pipe, fcntl.F_SETFL, flags| os.O_NONBLOCK) return pipe def launch_tails(follow_paths, lastlines_dirpath=None): """Launch a tail process for each follow_path. Args: follow_paths: list; lastlines_dirpath: str; Returns: tuple; (procs, pipes) or ({path: subprocess.Popen, ...}, {file: path, ...}) """ if lastlines_dirpath and not os.path.exists(lastlines_dirpath): os.makedirs(lastlines_dirpath) tail_cmd = ('/usr/bin/tail', '--retry', '--follow=name') procs = {} # path -> tail_proc pipes = {} # tail_proc.stdout -> path for path in follow_paths: cmd = list(tail_cmd) if lastlines_dirpath: reverse_lineno = lookup_lastlines(lastlines_dirpath, path) if reverse_lineno is None: reverse_lineno = 1 cmd.append('--lines=%d' % reverse_lineno) cmd.append(path) tail_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) procs[path] = tail_proc pipes[nonblocking(tail_proc.stdout)] = path return procs, pipes def poll_tail_pipes(pipes, lastlines_dirpath=None, waitsecs=5): """Wait on tail pipes for new data for waitsecs, return any new lines. Args: pipes: dict; {subprocess.Popen: follow_path, ...} lastlines_dirpath: str; Path to write lastlines to. 
waitsecs: int; Timeout to pass to select Returns: tuple; (lines, bad_pipes) or ([line, ...], [subprocess.Popen, ...]) """ lines = [] bad_pipes = [] # Block until at least one is ready to read or waitsecs elapses ready, _, _ = select.select(pipes.keys(), (), (), waitsecs) for fi in ready: path = pipes[fi] data = fi.read() if len(data) == 0: # If no data, process is probably dead, add to bad_pipes bad_pipes.append(fi) continue if lastlines_dirpath: # Overwr
ite the lastlines file for this source path # Probably just want to write the last 1-3 lines. write_lastlines_file(lastlines_dirpath, path, data) for line in data.splitlines(): lines.append('[%s]\t%s\n' % (path, line)) return lines, bad_pipes def snuff(subprocs): """Helper for killing off remaining live su
bprocesses. Args: subprocs: list; [subprocess.Popen, ...] """ for proc in subprocs: if proc.poll() is None: os.kill(proc.pid, signal.SIGKILL) proc.wait() def follow_files(follow_paths, outstream, lastlines_dirpath=None, waitsecs=5): """Launch tail on a set of files and merge their output into outstream. Args: follow_paths: list; Local paths to launch tail on. outstream: file; Output stream to write aggregated lines to. lastlines_dirpath: Local dirpath to record last lines seen in. waitsecs: int; Timeout for poll_tail_pipes. """ procs, pipes = launch_tails(follow_paths, lastlines_dirpath) while pipes: lines, bad
# Flask helper around the Check Point Management API (cpapilib): wraps
# command execution, task monitoring and access-rulebase retrieval, and
# resolves object UIDs to display names for the UI.
import ast
import base64
import itertools
from functools import lru_cache

import cpapilib
from flask import session

from app import app

# Most recent 'objects-dictionary' from a show access-rulebase response;
# rebound by API._filter_rule before uid_name lookups.
OBJECTS_DICTIONARY = None


@lru_cache(maxsize=5000)
def uid_name(uid_obj):
    """Resolve an object UID to its name via OBJECTS_DICTIONARY.

    Returns None when the UID is not present.
    NOTE(review): the cache key is only the UID while the lookup reads the
    mutable global — a cached name could be returned after OBJECTS_DICTIONARY
    has been replaced. Presumably UIDs map to stable names; verify.
    """
    for obj in OBJECTS_DICTIONARY:
        if uid_obj == obj['uid']:
            return obj['name']


class API(cpapilib.Management):
    """Management-API session with convenience helpers used by the views."""

    def pre_data(self):
        """Pre-fetch static choices (colors, commands, targets, layers)."""
        self.all_colors = [
            'aquamarine', 'blue', 'crete blue', 'burlywood', 'cyan',
            'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
            'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
            'forest green', 'gold', 'dark gold', 'gray', 'dark gray',
            'light green', 'lemon chiffon', 'coral', 'sea green', 'sky blue',
            'magenta', 'purple', 'slate blue', 'violet red', 'navy blue',
            'olive', 'orange', 'red', 'sienna', 'yellow'
        ]
        self.all_commands = [command['name'] for command in self.show('commands')['commands']]
        self.all_targets = [
            target['name'] for batch in self.show_all('gateways-and-servers')
            for target in batch['objects']
        ]
        self.all_layers = [(layer['name'], layer['uid'])
                           for batch in self.show_all('access-layer')
                           for layer in batch['access-layers']]

    def customcommand(self, command, payload):
        """Validate payload and send command to server."""
        try:
            # payload arrives as user-typed text; parse it as a Python literal
            payload = ast.literal_eval(payload)
        except ValueError:
            return 'Invalid input provided.'
        except Exception as e:
            # any other parse failure is surfaced to the caller as-is
            return e
        return self._api_call(command, **payload)

    def runcommand(self, targets, script):
        """Issue command against Check Point targets, verify task is complete
        on each gateway and return the response for each target."""
        taskreturn = []
        payload = {
            'script-name': 'cpapi',
            'script': script,
            'targets': targets
        }
        response = self.run('script', **payload)
        if 'tasks' in response:
            for task in response['tasks']:
                target = task['target']
                taskid = task['task-id']
                taskresponse = self.monitortask(target, taskid)
                taskreturn.append(taskresponse)
        return taskreturn

    @staticmethod
    def base64_ascii(base64resp):
        """Converts base64 to ascii for run command/showtask."""
        return base64.b64decode(base64resp).decode('utf-8')

    def monitortask(self, target, taskid):
        """Poll the task until complete; return {target, status, response}."""
        if self.monitor_task(taskid, timeout=30):
            response = self.show('task', **{'task-id': taskid, 'details-level': 'full'})
            if response['tasks'][0]['task-details'][0]['responseMessage']:
                # task produced output — decode it from base64
                base64resp = response['tasks'][0]['task-details'][0]['responseMessage']
                asciiresp = self.base64_ascii(base64resp)
                taskresponse = {
                    'target': target,
                    'status': response['tasks'][0]['status'],
                    'response': asciiresp
                }
            else:
                taskresponse = {
                    'target': target,
                    'status': response['tasks'][0]['status'],
                    'response': 'Not Available'
                }
        else:
            # timed out waiting for the task to finish
            app.logger.warn('Script did not finish within time limit on {}.'.format(target))
            taskresponse = {
                'target': target,
                'status': 'Task did not complete within 30 seconds.',
                'response': 'Unavailable.'
            }
        return taskresponse

    def show_object(self, objuid):
        """Fetch an object generically, then re-fetch with its concrete type
        to get full details."""
        show_obj_response = self.show('object', uid=objuid)
        payload = {
            'uid': objuid,
            'details-level': 'full'
        }
        type_obj_response = self.show(show_obj_response['object']['type'], **payload)
        return type_obj_response

    def show_rules(self, **kwargs):
        """Receives Layer UID, limit, offset; returns the filtered rulebase."""
        all_rules = {'rulebase': []}
        app.logger.info('Retrieving rules for - {}'.format(kwargs))
        response = self.show('access-rulebase', **kwargs)
        all_rules.update({'to': response['to'], 'total': response['total']})
        self._filter_rules(all_rules, response)
        return all_rules

    def _filter_rules(self, all_rules, response):
        """Receives show_rules response and flattens sections and their
        nested rules into all_rules['rulebase']."""
        for rule in response['rulebase']:
            if 'type' in rule:
                if rule['type'] == 'access-rule':
                    final = self._filter_rule(rule, response['objects-dictionary'])
                    all_rules['rulebase'].append(final)
                elif rule['type'] == 'access-section':
                    # sections may be unnamed
                    if 'name' in rule:
                        section = rule['name']
                    else:
                        section = ''
                    all_rules['rulebase'].append({'type': 'accesssection', 'name': section})
                    # rules nested under the section are flattened in order
                    if 'rulebase' in rule:
                        for subrule in rule['rulebase']:
                            final = self._filter_rule(subrule, response['objects-dictionary'])
                            all_rules['rulebase'].append(final)
        return all_rules

    @staticmethod
    def _filter_rule(rule, object_dictionary):
        """Receives a rule and replaces each UID with (name, uid) pairs."""
        # rebind the module-level dictionary used by uid_name lookups
        global OBJECTS_DICTIONARY
        OBJECTS_DICTIONARY = object_dictionary
        src = rule['source']
        src_all = []
        dst = rule['destination']
        dst_all = []
        srv = rule['service']
        srv_all = []
        act = rule['action']
        trg = rule['install-on']
        trg_all = []
        # track may be a plain UID or a {'type': uid, ...} mapping
        if rule['track']['type']:
            trc = rule['track']['type']
        else:
            trc = rule['track']
        # walk the four lists in lockstep; zip_longest pads the shorter
        # ones with None, which the truthiness checks below skip
        for srcobj, dstobj, srvobj, trgobj in itertools.zip_longest(src, dst, srv, trg):
            if srcobj:
                src_all.append((uid_name(srcobj), srcobj))
            if dstobj:
                dst_all.append((uid_name(dstobj), dstobj))
            if srvobj:
                srv_all.append((uid_name(srvobj), srvobj))
            if trgobj:
                trg_all.append((uid_name(trgobj), trgobj))
        return {
            'type': 'accessrule',
            'number': rule['rule-number'],
            'name': rule.get('name', ''),
            'source': src_all,
            'source-negate': rule['source-negate'],
            'destination': dst_all,
            'destination-negate': rule['destination-negate'],
            'service': srv_all,
            'service-negate': rule['service-negate'],
            'action': uid_name(act),
            'track': uid_name(trc),
            'target': trg_all,
            'enabled': rule['enabled']
        }
)'.format(name) ) path = os.path.join('/usr/ports', name) if not os.path.isdir(path): raise SaltInvocationError('Path {0!r} does not exist'.format(path)) return path def _options_dir(name): ''' Retrieve the path to the dir containing OPTIONS file for a given port ''' _check_portname(name) _root = '/var/db/ports' # New path: /var/db/ports/category_portname new_dir = os.path.join(_root, name.replace('/', '_')) # Old path: /var/db/ports/portname old_dir = os.path.join(_root, name.split('/')[-1]) if os.path.isdir(old_dir): return old_dir return new_dir def _options_file_exists(name): ''' Returns True/False based on whether or not the options file for the specified port exists. ''' return os.path.isfile(os.path.join(_options_dir(name), 'options')) def _write_options(name, configuration): ''' Writes a new OPTIONS file ''' _check_portname(name) pkg = next(iter(configuration)) conf_ptr = configuration[pkg] dirname = _options_dir(name) if not os.path.isdir(dirname): try: os.makedirs(dirname) except OSError as exc: raise CommandExecutionError( 'Unable to make {0}: {1}'.format(dirname, exc) ) with salt.utils.fopen(os.path.join(dirname, 'options'), 'w') as fp_: sorted_options = list(conf_ptr.keys()) sorted_options.sort() fp_.write( '# This file was auto-generated by Salt (http://saltstack.com)\n' '# Options for {0}\n' '_OPTIONS_READ={0}\n' '_FILE_COMPLETE_OPTIONS_LIST={1}\n' .format(pkg, ' '.join(sorted_options)) ) opt_tmpl = 'OPTIONS_FILE_{0}SET+={1}\n' for opt in sorted_options: fp_.write( opt_tmpl.format( '' if conf_ptr[opt] == 'on' else 'UN', opt ) ) def _normalize(val): ''' Fix Salt's yaml-ification of on/off, and otherwise normalize the on/off values to be used in writing the options file ''' if isinstance(val, bool): return 'on' if val else 'off' return str(val).lower() def install(name, clean=True): ''' Install a port from the ports tree. Installs using ``BATCH=yes`` for non-interactive building. 
To set config options for a given port, use :mod:`ports.config <salt.modules.freebsdports.config>`. clean : True If ``True``, cleans after installation. Equivalent to running ``make install clean BATCH=yes``. .. note:: It may be helpful to run this function using the ``-t`` option to set a higher timeout, since compiling a port may cause the Salt command to exceed the default timeout. CLI Example: .. code-block:: bash salt -t 1200 '*' ports.install security/nmap ''' portpath = _check_portname(name) old = __salt__['pkg.list_pkgs']() if old.get(name.rsplit('/')[-1]): deinstall(name) result = __salt__['cmd.run_all']( 'make install{0} BATCH=yes'.format(' clean' if clean else ''), cwd=portpath, reset_system_locale=False ) if result['retcode'] != 0: __context__['ports.install_error'] = result['stderr'] __context__.pop('pkg.list_pkgs', None) new = __salt__['pkg.list_pkgs']() ret = salt.utils.compare_dicts(old, new) if not ret and result['retcode'] == 0: # No change in package list, but the make install was successful. # Assume that the installation was a recompile with new options, and # set return dict so that changes are detected by the ports.installed # state. ret = {name: {'old': old.get(name, ''), 'new': new.get(name, '')}} return ret def deinstall(name): ''' De-install a port. CLI Example: .. code-block:: bash salt '*' ports.deinstall security/nmap ''' portpath = _check_portname(name) old = __salt__['pkg.list_pkgs']() __salt__['cmd.run']('make deinstall BATCH=yes', cwd=portpath) __context__.pop('pkg.list_pkgs', None) new = __salt__['pkg.list_pkgs']() return salt.utils.compare_dicts(old, new) def rmconfig(name): ''' Clear the cached options for the specified port; run a ``make rmconfig`` name The name of the port to clear CLI Example: .. 
code-block:: bash salt '*' ports.rmconfig security/nmap ''' portpath = _check_portname(name) return __salt__['cmd.run']('make rmconfig', cwd=portpath) def showconfig(name, default=False, dict_return=False): ''' Show the configuration options for a given port. default : False Show the default options for a port (not necessarily the same as the current configuration) dict_return : False Instead of returning the output of ``make showconfig``, return the data in an dictionary CLI Example: .. code-block:: bash salt '*' ports.showconfig security/nmap salt '*' ports.showconfig security/nmap default=True ''' portpath = _check_portname(name) if default and _options_file_exists(name): saved_config = showconfig(name, default=False, dict_return=True) rmconfig(name) if _options_file_exists(name): raise CommandExecutionError('Unable to get default configuration') default_config = showconfig(name, default=False, dict_return=dict_return) _write_options(name, saved_config) return default_config try: result = __salt__['cmd.run_all']('make showconfig', cwd=portpath) output = result['stdout'].splitlines() if result['retcode'] != 0: error = result['stderr'] else: error = '' except TypeError: error = result if error: msg = ('Error running \'make showconfig\' for {0}: {1}' .format(name, error)) log.error(msg) raise SaltInvocationError(msg) if not dict_return: return '\n'.join(output) if (not output) or ('configuration options' not in output[0]): return {} try: pkg = output[0].split()[-1].rstrip(':') except (IndexError, AttributeError, TypeError) as exc: log.error( 'Unable to get pkg-version string: {0}'.format(exc) ) return {}
ret = {pkg: {}} output = output[1:] for line in output: try: opt, val, desc = re.match( r'\s+([^=]+)=(off|on): (.+)', line ).groups() except AttributeError: contin
ue ret[pkg][opt] = val if not ret[pkg]: return {} return ret def config(name, reset=False, **kwargs): ''' Modify configuration options for a given port. Multiple options can be specified. To see the available options for a port, use :mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`. name The port name, in ``category/name`` format reset : False If ``True``, runs a ``make rmconfig`` for the port, clearing its configuration before setting the desired options CLI Examples: .. code-block:: bash salt '*' ports.config security/nmap IPV6=off ''' portpath = _check_portname(name) if reset: rmconfig(name) configuration = showconfig(name, dict_return=True) if not configuration: raise CommandExecutionError( 'Unable to get port configuration for {0!r}'.format(name) ) # Get top-level key for later reference pkg = next(iter(configuration)) conf_ptr = configuration[pkg] opts = dict( (str(x), _normalize(kwargs[x])) for x in kwargs if not x.startswith('_') ) bad_opts = [x for x in opts if x not in conf_ptr] if bad_opts: raise SaltInvocationError( 'The following opts are not valid for port {0}: {1}' .format(name, ', '.join(bad_opts)) ) bad_vals = [ '{0}={1}'
# Copyright 2016 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

# This plugin subclasses CurrencyProviderPlugin to provide additional currencies, whose rates are
# stale, and thus never updated. If you want to add your own fancy weird currency, this is the
# best place.

from datetime import date

from core.plugin import CurrencyProviderPlugin


class StaleProviderPlugin(CurrencyProviderPlugin):
    """Provider for obsolete currencies with frozen ("stale") exchange rates.

    Each currency is registered with its historical start/stop dates and its
    last known rate; these rates are never refreshed online. To add a new
    stale currency, append a row to STALE_CURRENCIES.
    """
    NAME = 'Stale currencies provider'
    AUTHOR = "Virgil Dupras"

    # One row per currency: (code, name, keyword args forwarded verbatim to
    # register_currency). A data table replaces sixteen near-identical call
    # sites; registration order is preserved from the original code.
    STALE_CURRENCIES = [
        ('ATS', 'Austrian schilling', dict(
            start_date=date(1998, 1, 2), start_rate=0.1123,
            stop_date=date(2001, 12, 31), latest_rate=0.10309)),
        ('BEF', 'Belgian franc', dict(
            start_date=date(1998, 1, 2), start_rate=0.03832,
            stop_date=date(2001, 12, 31), latest_rate=0.03516)),
        ('DEM', 'German deutsche mark', dict(
            start_date=date(1998, 1, 2), start_rate=0.7904,
            stop_date=date(2001, 12, 31), latest_rate=0.7253)),
        ('ESP', 'Spanish peseta', dict(
            exponent=0,
            start_date=date(1998, 1, 2), start_rate=0.009334,
            stop_date=date(2001, 12, 31), latest_rate=0.008526)),
        ('FIM', 'Finnish markka', dict(
            start_date=date(1998, 1, 2), start_rate=0.2611,
            stop_date=date(2001, 12, 31), latest_rate=0.2386)),
        ('FRF', 'French franc', dict(
            start_date=date(1998, 1, 2), start_rate=0.2362,
            stop_date=date(2001, 12, 31), latest_rate=0.2163)),
        ('GHC', 'Ghanaian cedi (old)', dict(
            start_date=date(1998, 1, 2), start_rate=0.00063,
            stop_date=date(2007, 6, 29), latest_rate=0.000115)),
        ('GRD', 'Greek drachma', dict(
            start_date=date(1998, 1, 2), start_rate=0.005,
            stop_date=date(2001, 12, 31), latest_rate=0.004163)),
        ('IEP', 'Irish pound', dict(
            start_date=date(1998, 1, 2), start_rate=2.0235,
            stop_date=date(2001, 12, 31), latest_rate=1.8012)),
        ('ITL', 'Italian lira', dict(
            exponent=0,
            start_date=date(1998, 1, 2), start_rate=0.000804,
            stop_date=date(2001, 12, 31), latest_rate=0.000733)),
        ('NLG', 'Netherlands guilder', dict(
            start_date=date(1998, 1, 2), start_rate=0.7013,
            stop_date=date(2001, 12, 31), latest_rate=0.6437)),
        ('PTE', 'Portuguese escudo', dict(
            exponent=0,
            start_date=date(1998, 1, 2), start_rate=0.007726,
            stop_date=date(2001, 12, 31), latest_rate=0.007076)),
        ('SIT', 'Slovenian tolar', dict(
            start_date=date(2002, 3, 1), start_rate=0.006174,
            stop_date=date(2006, 12, 29), latest_rate=0.006419)),
        ('TRL', 'Turkish lira', dict(
            exponent=0,
            start_date=date(1998, 1, 2), start_rate=7.0e-06,
            stop_date=date(2004, 12, 31), latest_rate=8.925e-07)),
        ('VEB', 'Venezuelan bolivar', dict(
            exponent=0,
            start_date=date(1998, 1, 2), start_rate=0.002827,
            stop_date=date(2007, 12, 31), latest_rate=0.00046)),
        ('SKK', 'Slovak koruna', dict(
            start_date=date(2002, 3, 1), start_rate=0.03308,
            stop_date=date(2008, 12, 31), latest_rate=0.05661)),
    ]

    def register_currencies(self):
        """Register every stale currency with the currency registry."""
        for code, name, rates in self.STALE_CURRENCIES:
            self.register_currency(code, name, **rates)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Koan exercising Python 2 method-binding semantics: this file relies on
# Python-2-only attributes (im_self, im_func) and on indexing exception
# objects (ex[0]), so it will not run under Python 3.

from runner.koan import *


def function():
    return "pineapple"


def function2():
    return "tractor"


class Class(object):
    def method(self):
        return "parrot"


class AboutMethodBindings(Koan):
    def test_methods_are_bound_to_an_object(self):
        obj = Class()
        # im_self is the instance a bound method is attached to (Python 2)
        self.assertEqual(True, obj.method.im_self == obj)

    def test_methods_are_also_bound_to_a_function(self):
        obj = Class()
        # im_func is the underlying plain function; calling it requires
        # passing the instance explicitly
        self.assertEqual('parrot', obj.method())
        self.assertEqual('parrot', obj.method.im_func(obj))

    def test_functions_have_attributes(self):
        self.assertEqual(31, len(dir(function)))
        self.assertEqual(True, dir(function) == dir(Class.method.im_func))

    def test_bound_methods_have_different_attributes(self):
        obj = Class()
        # bound methods expose a smaller attribute surface than functions
        self.assertEqual(23, len(dir(obj.method)))

    def test_setting_attributes_on_an_unbound_function(self):
        function.cherries = 3
        self.assertEqual(3, function.cherries)

    def test_setting_attributes_on_a_bound_method_directly(self):
        obj = Class()
        # bound-method objects reject attribute assignment
        try:
            obj.method.cherries = 3
        except AttributeError as ex:
            self.assertMatch('object has no attribute', ex[0])

    def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
        obj = Class()
        # ...but the inner im_func accepts them, and they show through
        # the bound method
        obj.method.im_func.cherries = 3
        self.assertEqual(3, obj.method.cherries)

    def test_functions_can_have_inner_functions(self):
        function2.get_fruit = function
        self.assertEqual('pineapple', function2.get_fruit())

    def test_inner_functions_are_unbound(self):
        function2.get_fruit = function
        # a function stored as an attribute is not a bound method, so it
        # has no im_self
        try:
            cls = function2.get_fruit.im_self
        except AttributeError as ex:
            self.assertMatch('object has no attribute', ex[0])

    # ------------------------------------------------------------------

    class BoundClass(object):
        def __get__(self, obj, cls):
            return (self, obj, cls)

    binding = BoundClass()

    def test_get_descriptor_resolves_attribute_binding(self):
        bound_obj, binding_owner, owner_type = self.binding
        # Look at BoundClass.__get__():
        #   bound_obj = self
        #   binding_owner = obj
        #   owner_type = cls
        self.assertEqual('BoundClass', bound_obj.__class__.__name__)
        self.assertEqual('AboutMethodBindings', binding_owner.__class__.__name__)
        self.assertEqual(AboutMethodBindings, owner_type)

    # ------------------------------------------------------------------

    class SuperColor(object):
        def __init__(self):
            self.choice = None

        def __set__(self, obj, val):
            # data descriptor: assignment is redirected into the
            # descriptor itself
            self.choice = val

    color = SuperColor()

    def test_set_descriptor_changes_behavior_of_attribute_assignment(self):
        self.assertEqual(None, self.color.choice)
        self.color = 'purple'
        self.assertEqual('purple', self.color.choice)
'''Autogenerated by xml_generate script, do not edit!'''
# PyOpenGL raw binding for the GL_SGIX_shadow extension. This module is
# generated — change the generator, not this file.
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C

import ctypes
_EXTENSION_NAME = 'GL_SGIX_shadow'
def _f( function ):
    # Decorator used by generated entry points: wraps a function as a GL
    # platform call with this extension's error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_SGIX_shadow',error_checker=_errors._error_checker)
# Extension enum constants (values fixed by the GL registry).
GL_TEXTURE_COMPARE_OPERATOR_SGIX=_C('GL_TEXTURE_COMPARE_OPERATOR_SGIX',0x819B)
GL_TEXTURE_COMPARE_SGIX=_C('GL_TEXTURE_COMPARE_SGIX',0x819A)
GL_TEXTURE_GEQUAL_R_SGIX=_C('GL_TEXTURE_GEQUAL_R_SGIX',0x819D)
GL_TEXTURE_LEQUAL_R_SGIX=_C('GL_TEXTURE_LEQUAL_R_SGIX',0x819C)
#!/usr/bin/env python
import csv
import sys

from EPPs.common import StepEPP


class GenerateHamiltonInputUPL(StepEPP):
    """Generate a CSV containing the necessary information to batch up to 9 User Prepared Library
    receipt plates into one DCT plate. The Hamilton requires input and output plate containers and
    well positions from the LIMS as well as the volume to be pipetted, which is taken from the step
    UDF "DNA Volume (uL)" - this is a constant and can only be updated with a LIMS configuration
    change."""

    # additional argument required for the location of the Hamilton input file so def __init__ customised
    def __init__(self, argv=None):
        super().__init__(argv)
        # filename prefix Clarity LIMS uses to recognise and attach the generated file
        self.hamilton_input = self.cmd_args.hamilton_input

    @staticmethod
    def add_args(argparser):
        argparser.add_argument(
            '-i', '--hamilton_input', type=str, required=True,
            help='Hamilton input file generated by the LIMS'
        )

    def _run(self):
        """Build and write the Hamilton input CSV for the current step.

        Exits the process with status 1 when the step has replicate outputs,
        more than 9 input plates, or more than 1 output plate.
        """
        # csv_dict maps "<container><well>" keys to CSV lines so they can be
        # sorted by input container, then column, then row; csv_array holds
        # the lines in final write order.
        csv_dict = {}
        csv_array = []

        # column headers for the Hamilton input file
        csv_column_headers = ['Input Plate', 'Input Well', 'Output Plate', 'Output Well',
                              'DNA Volume', 'TE Volume']
        csv_array.append(csv_column_headers)

        # sets collecting the unique input and output containers
        unique_input_containers = set()
        unique_output_containers = set()

        # obtain all of the inputs for the step
        all_inputs = self.process.all_inputs()

        for artifact in all_inputs:
            # only analytes (samples) are relevant, not associated files
            if artifact.type == 'Analyte':
                output = self.process.outputs_per_input(artifact.id, Analyte=True)
                # the script is only compatible with 1 output for each input
                # i.e. replicates are not allowed
                if len(output) > 1:
                    print('Multiple outputs found for an input %s. '
                          'This step is not compatible with replicates.' % artifact.name)
                    sys.exit(1)

                # track unique containers: no more than 9 input plates (Hamilton
                # deck limit) and no more than 1 output plate are allowed
                unique_input_containers.add(artifact.container.name)
                unique_output_containers.add(output[0].container.name)

                # one Hamilton input line per sample, in the structure the
                # Hamilton expects
                csv_line = [artifact.container.name, artifact.location[1],
                            output[0].container.name, output[0].location[1],
                            self.process.udf['DNA Volume (uL)'], '0']

                # key "<container><well>" (well is e.g. "A:1") facilitates
                # sorting by input container, then column, then row
                csv_dict[artifact.container.name + artifact.location[1]] = csv_line

        # check the number of input containers
        if len(unique_input_containers) > 9:
            # BUG FIX: this message previously said "output plates" although it
            # reports the input plate count
            print('Maximum number of input plates is 9. There are %s input plates in the step.'
                  % len(unique_input_containers))
            sys.exit(1)

        # check the number of output containers
        if len(unique_output_containers) > 1:
            print('Maximum number of output plates is 1. There are %s output plates in the step.'
                  % len(unique_output_containers))
            sys.exit(1)

        # standard 96-well plate pattern
        rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        columns = [str(i) for i in range(1, 13)]

        # emit the lines sorted by input container, then column, then row
        for unique_input_container in sorted(unique_input_containers):
            for column in columns:
                for row in rows:
                    key = unique_input_container + row + ":" + column
                    if key in csv_dict:
                        csv_array.append(csv_dict[key])

        # the hamilton_input argument must prefix the filename as Clarity LIMS
        # uses it to recognise the file and attach it to the step
        with open(self.hamilton_input + '-hamilton_input.csv', 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(csv_array)


if __name__ == '__main__':
    GenerateHamiltonInputUPL().run()
# Decide whether the first two input strings can jointly cover the third:
# every character of the target must be available across both sources, and
# each source must be able to supply at least half of the target's length.
from collections import Counter


def _read_counter():
    """Read one line from stdin and return its character multiset."""
    return Counter(input())


def main():
    first = _read_counter()
    second = _read_counter()
    target = _read_counter()

    # Keep only the characters each source shares with the target.
    first &= target
    second &= target
    target_len = sum(target.values())

    combined = first + second
    short_of_chars = any(combined[ch] < need for ch, need in target.items())
    first_too_small = sum(first.values()) * 2 < target_len
    second_too_small = sum(second.values()) * 2 < target_len

    print('NO' if short_of_chars or first_too_small or second_too_small else 'YES')


main()
"""Add search tokens. Revision ID: 482338e7a7d6 Revises: 41a7e825d108 Create Date: 2014-03-18 00:16:49.525732 """ # revision identifiers, used by Alembic. revision = '482338e7a7d6' down_revision = 'adc646e1f11' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table(
'searchtoken', sa.Column('id', sa.Integer(), nullable=False), sa.Column('token', sa.String(length=255), nullable=True), sa.Column('source', sa.Enum('name', 'email_address'), nullab
le=True), sa.Column('contact_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id') ) def downgrade(): op.drop_table('searchtoken')
            # --- tail of get_data_bytes(); the start of this definition lies
            # outside this chunk: serialized fee/symbol/name/owner/decimals ---
            + self.fee.to_bytes(8, byteorder='big', signed=False)
            + self.symbol
            + self.name
            + self.owner
            + self._data.token.decimals.to_bytes(8, byteorder='big', signed=False))

        # Append every initial balance entry: address followed by amount.
        for initial_balance in self._data.token.initial_balances:
            data_bytes += initial_balance.address
            data_bytes += initial_balance.amount.to_bytes(8, byteorder='big', signed=False)

        return data_bytes

    @staticmethod
    def create(symbol: bytes,
               name: bytes,
               owner: bytes,
               decimals: int,
               initial_balances: list,
               fee: int,
               xmss_pk: bytes,
               master_addr: bytes = None):
        """Build a new TokenTransaction and run custom validation.

        Signature verification is skipped because the transaction is not
        signed yet; validate_or_raise() raises on any invalid field.
        """
        transaction = TokenTransaction()

        # Optional master address for slave-key transactions.
        if master_addr:
            transaction._data.master_addr = master_addr

        transaction._data.public_key = bytes(xmss_pk)

        transaction._data.token.symbol = symbol
        transaction._data.token.name = name
        transaction._data.token.owner = owner
        transaction._data.token.decimals = decimals

        for initial_balance in initial_balances:
            transaction._data.token.initial_balances.extend([initial_balance])

        transaction._data.fee = int(fee)

        transaction.validate_or_raise(verify_signature=False)

        return transaction

    def _validate_custom(self):
        """Field-level validation: symbol/name lengths, balances, decimals, fee.

        Returns False on the first failed check (logging the reason); raises
        ValueError for a negative fee.
        """
        if len(self.symbol) > config.dev.max_token_symbol_length:
            logger.warning('Token Symbol Length exceeds maximum limit')
            logger.warning('Found Symbol Length %s', len(self.symbol))
            logger.warning('Expected Symbol length %s', config.dev.max_token_symbol_length)
            return False

        if len(self.name) > config.dev.max_token_name_length:
            logger.warning('Token Name Length exceeds maximum limit')
            # NOTE(review): this logs len(self.symbol) in the *name* branch --
            # probably meant len(self.name); code left unchanged here.
            logger.warning('Found Name Length %s', len(self.symbol))
            logger.warning('Expected Name length %s', config.dev.max_token_name_length)
            return False

        if len(self.symbol) == 0:
            logger.warning('Missing Token Symbol')
            return False

        if len(self.name) == 0:
            logger.warning('Missing Token Name')
            return False

        if len(self.initial_balances) == 0:
            logger.warning('Invalid Token Transaction, without any initial balance')
            return False

        sum_of_initial_balances = 0
        for initial_balance in self.initial_balances:
            sum_of_initial_balances += initial_balance.amount
            if initial_balance.amount <= 0:
                logger.warning('Invalid Initial Amount in Token Transaction')
                logger.warning('Address %s | Amount %s', initial_balance.address, initial_balance.amount)
                return False

        # Decimal places are capped relative to the whole-unit supply.
        allowed_decimals = self.calc_allowed_decimals(sum_of_initial_balances // 10 ** self.decimals)

        if self.decimals > allowed_decimals:
            logger.warning('Decimal is greater than maximum allowed decimal')
            logger.warning('Allowed Decimal %s', allowed_decimals)
            logger.warning('Decimals Found %s', self.decimals)
            return False

        # NOTE(review): unlike the checks above, a negative fee raises
        # instead of returning False -- preserved as-is.
        if self.fee < 0:
            raise ValueError('TokenTransaction [%s] Invalid Fee = %d', bin2hstr(self.txhash), self.fee)

        return True

    # checks new tx validity based upon node statedb and node mempool.
    def validate_extended(self, addr_from_state: AddressState, addr_from_pk_state: AddressState):
        """Stateful validation: address formats, balance vs. fee, OTS reuse."""
        if not self.validate_slave(addr_from_state, addr_from_pk_state):
            return False

        tx_balance = addr_from_state.balance

        if not AddressState.address_is_valid(self.addr_from):
            logger.warning('Invalid address addr_from: %s', bin2hstr(self.addr_from))
            return False

        if not AddressState.address_is_valid(self.owner):
            logger.warning('Invalid address owner_addr: %s', bin2hstr(self.owner))
            return False

        for address_balance in self.initial_balances:
            if not AddressState.address_is_valid(address_balance.address):
                logger.warning('Invalid address in initial_balances: %s', bin2hstr(address_balance.address))
                return False

        if tx_balance < self.fee:
            logger.info('TokenTxn State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
            logger.info('balance: %s, Fee: %s', tx_balance, self.fee)
            return False

        # Reusing a one-time-signature key index is rejected outright.
        if addr_from_pk_state.ots_key_reuse(self.ots_key):
            logger.info('TokenTxn State validation failed for %s because: OTS Public key re-use detected', bin2hstr(self.txhash))
            return False

        return True

    def apply_state_changes(self, addresses_state):
        """Credit initial token balances, debit the fee, record tx hashes.

        The *_processed flags track which roles (owner / sender / signer)
        already received the tx hash via an initial-balance entry so it is
        not appended twice for the same address.
        """
        addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
        owner_processed = False
        addr_from_processed = False
        addr_from_pk_processed = False

        for initial_balance in self.initial_balances:
            if initial_balance.address == self.owner:
                owner_processed = True
            if initial_balance.address == self.addr_from:
                addr_from_processed = True
            if initial_balance.address == addr_from_pk:
                addr_from_pk_processed = True
            if initial_balance.address in addresses_state:
                addresses_state[initial_balance.address].update_token_balance(self.txhash, initial_balance.amount)
                addresses_state[initial_balance.address].transaction_hashes.append(self.txhash)

        if self.owner in addresses_state and not owner_processed:
            addresses_state[self.owner].transaction_hashes.append(self.txhash)

        if self.addr_from in addresses_state:
            # The sender pays the fee.
            addresses_state[self.addr_from].balance -= self.fee
            if not addr_from_processed and self.addr_from != self.owner:
                addresses_state[self.addr_from].transaction_hashes.append(self.txhash)

        if addr_from_pk in addresses_state:
            if self.addr_from != addr_from_pk and addr_from_pk != self.owner:
                if not addr_from_pk_processed:
                    addresses_state[addr_from_pk].transaction_hashes.append(self.txhash)
            # The signing address consumes a nonce and an OTS key index.
            addresses_state[addr_from_pk].increase_nonce()
            addresses_state[addr_from_pk].set_ots_key(self.ots_key)

    def revert_state_changes(self, addresses_state, chain_manager):
        """Inverse of apply_state_changes(). (Truncated at the chunk edge.)"""
        addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
        owner_processed = False
        addr_from_processed = False
        addr_from_pk_processed = False

        for initial_balance in self.initial_balances:
            if initial_balance.address == self.owner:
                owner_processed = True
            if initial_balance.address == self.addr_from:
                addr_from_processed = True
            if initial_balance.address == addr_from_pk:
                addr_from_pk_processed = True
            if initial_balance.address in addresses_state:
                # Negative delta undoes the credited token balance.
                addresses_state[initial_balance.address].update_token_balance(self.txhash, initial_balance.amount * -1)
                addresses_state[initial_balance.address].transaction_hashes.remove(self.txhash)

        if self.owner in addresses_state and not owner_processed:
            addresses_state[self.owner].transaction_hashes.remove(self.txhash)

        if self.addr_from in addresses_state:
            addresses_state[self.addr_from].balance += self.fee
            if not addr_from_processed and self.addr_from != self.owner:
                addresses_state[self.addr_from].transaction_hashes.remove(self.txhash)

        if addr_from_pk in addresses_state:
            if self.addr_from != addr_from_pk and addr_from_pk != self.owner:
                if not addr_from_pk_processed:
                    addresses_state[addr_from_pk].transaction_hashes.remove(self.txhash)
            addresses_state[addr_from_pk].decrease_nonce()
            addresses_sta
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.

"""Action to fingerprint files on the client."""


import hashlib

from grr.parsers import fingerprint
from grr.client import vfs
from grr.client.client_actions import standard
from grr.lib import rdfvalue


class FingerprintFile(standard.ReadBuffer):
  """Apply a set of fingerprinting methods to a file."""
  in_rdfvalue = rdfvalue.FingerprintRequest
  out_rdfvalue = rdfvalue.FingerprintResponse

  # Maps the hash enum in a FingerprintTuple to its hashlib constructor.
  _hash_types = {
      rdfvalue.FingerprintTuple.Hash.MD5: hashlib.md5,
      rdfvalue.FingerprintTuple.Hash.SHA1: hashlib.sha1,
      rdfvalue.FingerprintTuple.Hash.SHA256: hashlib.sha256,
  }

  # Maps the fingerprint-type enum to the (unbound) Fingerprinter method
  # implementing it.
  _fingerprint_types = {
      rdfvalue.FingerprintTuple.Type.FPT_GENERIC: (
          fingerprint.Fingerprinter.EvalGeneric),
      rdfvalue.FingerprintTuple.Type.FPT_PE_COFF: (
          fingerprint.Fingerprinter.EvalPecoff),
  }

  def Run(self, args):
    """Fingerprint a file.

    Args:
      args: A FingerprintRequest; pathspec selects the file, tuples select
        which fingerprint types and hashers to apply (empty = all types
        with default hashers).

    Raises:
      RuntimeError: If a tuple requests an unknown fingerprint type.
    """
    with vfs.VFSOpen(args.pathspec,
                     progress_callback=self.Progress) as file_obj:
      fingerprinter = fingerprint.Fingerprinter(file_obj)
      response = rdfvalue.FingerprintResponse()
      response.pathspec = file_obj.pathspec
      if args.tuples:
        tuples = args.tuples
      else:
        # There are none selected -- we will cover everything
        # NOTE(review): iterkeys() is Python 2 only.
        tuples = list()
        for k in self._fingerprint_types.iterkeys():
          tuples.append(rdfvalue.FingerprintTuple(fp_type=k))

      for finger in tuples:
        # An empty hasher list collapses to None, meaning "use the
        # Fingerprinter's default hashers".
        hashers = [self._hash_types[h] for h in finger.hashers] or None

        if finger.fp_type in self._fingerprint_types:
          invoke = self._fingerprint_types[finger.fp_type]
          res = invoke(fingerprinter, hashers)
          if res:
            response.matching_types.append(finger.fp_type)
        else:
          raise RuntimeError("Encountered unknown fingerprint type. %s" %
                             finger.fp_type)

      # Structure of the results is a list of dicts, each containing the
      # name of the hashing method, hashes for enabled hash algorithms,
      # and auxilliary data where present (e.g. signature blobs).
      # Also see Fingerprint:HashIt()
      response.results = fingerprinter.HashIt()
    self.SendReply(response)
import argparse

# ---- command line --------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--mat", type=str, help="mat file with observations X and side info", required=True)
parser.add_argument("--epochs", type=int, help="number of epochs", default = 2000)
parser.add_argument("--hsize", type=int, help="size of the hidden layer", default = 30)
parser.add_argument("--batch-size", type=int, help="batch size", default = 512)
args = parser.parse_args()

# Heavy imports are delayed until after argument parsing.
import tensorflow as tf
import scipy.io
import numpy as np
import chemblnet as cn
import chemblnet.vbutils as vb

# ---- data ----------------------------------------------------------------
# X: sparse observation matrix; Fu/Fv: row/column side-information features.
data = scipy.io.matlab.loadmat(args.mat)
label = data["X"]
Fu = data["Fu"].todense()
Fv = data["Fv"].todense()

# 109, 167, 168, 204, 214, 215

# 50/50 split of the observed entries into train and test.
Ytrain, Ytest = cn.make_train_test(label, 0.5)
Ytrain = Ytrain.tocsr()
Ytest = Ytest.tocsr()

# learning parameters
Y_prec = 1.5          # fixed observation-noise precision
h1_size = args.hsize  # latent dimension

batch_size = args.batch_size
lrate = 1e-1
lrate_decay = 1.0     # NOTE(review): defined but never applied below

print("Data file: %s" % args.mat)
print("Y size: [%d, %d]" % (label.shape[0], label.shape[1]))
print("Num row feat: %d" % Fu.shape[1])
print("Num col feat: %d" % Fv.shape[1])
print("Test stdev: %.4f" % np.std( Ytest.data ))
print("-----------------------")
print("Num epochs: %d" % args.epochs)
print("Hidden size: %d" % args.hsize)
print("Learning rate: %.1e" % lrate)
print("Batch size: %d" % batch_size)
print("-----------------------")

extra_info = False

## y_val is a vector of values and y_coord gives their coordinates
y_val = tf.placeholder(tf.float32, name="y_val")
y_coord = tf.placeholder(tf.int32, shape=[None, 2], name="y_coord")
#y_idx_u = tf.placeholder(tf.int64)
#y_idx_v = tf.placeholder(tf.int64)
x_u = tf.placeholder(tf.float32, shape=[None, Fu.shape[1]], name="x_u")
x_v = tf.placeholder(tf.float32, shape=[None, Fv.shape[1]], name="x_v")
u_idx = tf.placeholder(tf.int64, name="u_idx")
#v_idx = tf.placeholder(tf.int64, name="v_idx")
learning_rate = tf.placeholder(tf.float32, name = "learning_rate")

## ratio of total training points to mini-batch training points, for the current batch
tb_ratio = tf.placeholder(tf.float32, name = "tb_ratio")
bsize = tf.placeholder(tf.float32, name = "bsize")

## model
# Variational matrix factorization: U, V are Normal-Gamma variational
# posteriors over the row/column latent factors (side-info branches are
# commented out).
#beta_u = vb.NormalGammaUni("beta_u", shape = [Fu.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
#beta_v = vb.NormalGammaUni("beta_v", shape = [Fv.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
U = vb.NormalGammaUni("U", shape = [Ytrain.shape[0], h1_size], initial_stdev = 1.0, fixed_prec = False)
V = vb.NormalGammaUni("V", shape = [Ytrain.shape[1], h1_size], initial_stdev = 1.0, fixed_prec = False)
global_mean = tf.constant(Ytrain.data.mean(), dtype=tf.float32)

## means
Umean_b = tf.gather(U.mean, u_idx)
Vmean_b = V.mean
#h_u = tf.matmul(x_u, beta_u.mean) + Umean_b
#h_u = tf.matmul(x_u, beta_u.mean) + Umean_b
h_u = Umean_b
h_v = Vmean_b
y_pred = tf.matmul(h_u, h_v, transpose_b=True)
y_pred_b = global_mean + tf.gather_nd(y_pred, y_coord)
y_sse = tf.reduce_sum( tf.square(y_val - y_pred_b) )
y_loss = Y_prec / 2.0 * y_sse

## variance
Uvar_b = tf.exp(tf.gather(U.logvar, u_idx))
Vvar_b = V.var
#h_u_var = tf.matmul(tf.square(x_u), beta_u.var) + Uvar_b
#h_v_var = tf.matmul(tf.square(x_v), beta_v.var) + Vvar_b
h_u_var = Uvar_b
h_v_var = Vvar_b

# Variance contribution of the predictive distribution at each coordinate.
y_var = Y_prec / 2.0 * tf.matmul(h_u_var, h_v_var + tf.square(h_v), transpose_b=True) + Y_prec / 2.0 * tf.matmul(tf.square(h_u), h_v_var, transpose_b=True)
var_loss = tf.gather_nd(y_var, y_coord)

# Data term is rescaled by tb_ratio so a mini-batch estimates the full sum.
L_D = tb_ratio * (y_loss + var_loss)
#L_prior = beta_u.prec_div() + beta_v.prec_div() + U.prec_div() + V.prec_div() + beta_u.normal_div() + beta_v.normal_div() + U.normal_div_partial(Umean_b, Uvar_b, bsize) + V.normal_div()
L_prior = U.prec_div() + V.prec_div() + U.normal_div() + V.normal_div()
loss = L_D + L_prior

train_op = tf.train.AdagradOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.MomentumOptimizer(1e-7, 0.90).minimize(loss)

######################################################

def select_y(X, row_idx):
    # Returns (coords, float32 values, dummy shape) of the non-zero
    # entries in X[row_idx].
    Xtmp = X[row_idx]
    return np.column_stack(Xtmp.nonzero()), Xtmp.data.astype(np.float32), [0, 0]

rIdx = np.random.permutation(Ytrain.shape[0])

# ---------- test data ------------- #
Yte_coord, Yte_values, Yte_shape = select_y(Ytest, np.arange(Ytest.shape[0]))

# ------- train data (all) --------- #
Ytr_coord, Ytr_values, Ytr_shape = select_y(Ytrain, np.arange(Ytrain.shape[0]))

sess = tf.Session()

if True:
    sess.run(tf.global_variables_initializer())

    for epoch in range(args.epochs):
        # Fresh row permutation each epoch.
        rIdx = np.random.permutation(Ytrain.shape[0])

        ## mini-batch loop
        for start in np.arange(0, Ytrain.shape[0], batch_size):
            # Drop the final partial batch.
            if start + batch_size > Ytrain.shape[0]:
                break
            idx = rIdx[start : start + batch_size]
            by_coord, by_values, by_shape = select_y(Ytrain, idx)

            sess.run(train_op, feed_dict={x_u: Fu[idx,:],
                                          x_v: Fv,
                                          y_coord: by_coord,
                                          y_val: by_values,
                                          u_idx: idx,
                                          tb_ratio: Ytrain.shape[0] / float(len(idx)),#Ytrain.nnz / float(by_values.shape[0]),
                                          learning_rate: lrate,
                                          bsize: batch_size
                                          })

        ## TODO: check from here
        ## epoch's Ytest error
        # NOTE(review): indentation below is reconstructed; the original
        # layout of this reporting block should be confirmed.
        if epoch % 1 == 0:
            test_y_pred = sess.run(y_pred_b, feed_dict = {x_u: Fu,
                                                          x_v: Fv,
                                                          y_coord: Yte_coord,
                                                          y_val: Yte_values,
                                                          u_idx: np.arange(Ytrain.shape[0])})
            test_rmse = np.sqrt(np.mean(np.square(test_y_pred - Yte_values)))

            train_y_pred = sess.run(y_pred_b, feed_dict = {x_u: Fu,
                                                           x_v: Fv,
                                                           y_coord: Ytr_coord,
                                                           y_val: Ytr_values,
                                                           u_idx: np.arange(Ytrain.shape[0])})
            train_rmse = np.sqrt(np.mean(np.square(train_y_pred - Ytr_values)))

            #L_D_tr, loss_tr, beta_u, beta_v = sess.run([L_D, loss, beta.prec_div(), beta.normal_div()],
            #                                           feed_dict={x_indices: Xi,
            #                                                      x_shape: Xs,
            #                                                      x_ids_val: Xv,
            #                                                      x_idx_comp: Xindices,
            #                                                      y_idx_comp: Ytr_idx_comp,
            #                                                      y_idx_prot: Ytr_idx_prot,
            #                                                      y_val: Ytr_val,
            #                                                      tb_ratio: 1.0,
            #                                                      bsize: Ytrain.shape[0]
            #                                                      })
            # beta_l2 = np.sqrt(sess.run(tf.nn.l2_loss(beta.mean)))
            # beta_std_min = np.sqrt(sess.run(tf.reduce_min(beta.var)))
            # beta_prec = sess.run(beta.prec)
            # V_prec = sess.run(V.prec)
            # V_l2 = np.sqrt(sess.run(tf.nn.l2_loss(V.mean)))
            # Z_prec = sess.run(Z.prec)
            # #W2_l2 = sess.run(tf.nn.l2_loss(W2))
            # test_rmse = np.sqrt( test_sse / Yte_val.shape[0])
            # train_rmse = np.sqrt( train_sse / Ytr_val.shape[0])

            if epoch % 20 == 0:
                print("Epoch\tRMSE(te, tr)\t\t|")
            print("%3d.\t%.5f %.5f\t|" % (epoch, test_rmse, train_rmse))

            if extra_info:
                #print("beta: [%s]" % beta.summarize(sess))
                #print("Z: [%s]" % Z.summarize(sess))
                print("V: [%s]" % V.summarize(sess))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import datetime


class Migration(migrations.Migration):
    """Adjusts ingreso timestamp defaults and the sala choice list."""

    dependencies = [
        ('historias', '0006_auto_20150413_0001'),
    ]

    operations = [
        # NOTE(review): these defaults are the literal datetime captured when
        # makemigrations ran (2015-04-25), not a callable -- typical of
        # autogenerated snapshots of a `default=datetime.now` model field.
        # Confirm the model itself uses a callable default.
        migrations.AlterField(
            model_name='historias',
            name='fecha_ingreso',
            field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468359), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='historias',
            name='hora_ingreso',
            field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468307), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='ubicaciones',
            name='sala',
            # NOTE(review): 'GAURDIA' looks like a typo for 'GUARDIA', but the
            # value is persisted data -- do not change it retroactively here.
            field=models.CharField(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')]),
            preserve_default=True,
        ),
    ]
__author__ = 'oskyar'

from django.db import models
from django.utils.translation import ugettext as _
from s3direct.fields import S3DirectField
from smart_selects.db_fields import ChainedManyToManyField


# Manager for Subject (asignatura).
class SubjectManager(models.Manager):
    """Query helpers for Subject objects."""

    def owner(self, pk_subject):
        # The teacher who owns the given subject.
        return self.get(pk=pk_subject).teacher

    def by_owner(self, userProfile):
        # All subjects taught by the given user profile.
        return self.filter(teacher=userProfile)

    def get_num_questions(self, subject, type=None):
        # Total question count across all subtopics of all topics,
        # optionally restricted to one question type.
        # NOTE(review): `type` shadows the builtin, but renaming it would
        # change the keyword interface -- left as-is.
        num_questions = 0
        for topic in subject.topics.all():
            if type:
                for subtopic in topic.subtopics.all():
                    num_questions += subtopic.questions.filter(type=type).count()
            else:
                for subtopic in topic.subtopics.all():
                    num_questions += subtopic.questions.all().count()
        return num_questions

    def get_all_questions(self, subject, type=None):
        # Flat list of every question in the subject, optionally filtered
        # by question type.
        questions = list()
        for topic in subject.topics.all():
            if type:
                for subtopic in topic.subtopics.all():
                    questions += subtopic.questions.filter(type=type)
            else:
                for subtopic in topic.subtopics.all():
                    questions += subtopic.questions.all()
        return questions


# Subject (asignatura) model.
class Subject(models.Model):
    # id: primary key is created automatically by Django.
    teacher = models.ForeignKey(
        'user.UserProfile',
        related_name='subjects')

    # Enrolled students; chained to the teacher's user relation.
    students = ChainedManyToManyField(
        'user.UserProfile',
        chained_field='student',
        chained_model_field='user',
        auto_choose=True,
        related_name="my_subjects")

    name = models.CharField(
        max_length=128,
        blank=False,
        null=False,
        verbose_name=_("Nombre de la asignatura"))

    description = models.CharField(
        max_length=512,
        blank=False,
        null=False,
        verbose_name=_("Breve descripción, máximo 512 caracteres"))

    category = models.CharField(
        max_length=75,
        blank=False,
        null=False,
        verbose_name=_("Categoría"))

    # Whether the final exam can be taken directly.
    test_opt = models.BooleanField(
        blank=False,
        null=False,
        verbose_name=_("Examen final directo"))

    capacity = models.IntegerField(
        null=True,
        verbose_name=_("Nº máx. alumnos"))

    image = S3DirectField(
        dest='subjects',
        blank=True,
        null=True,
        verbose_name="Imagen de la asignatura")

    # NOTE(review): blank=True with null=False and no default means saving
    # without an explicit value will fail at the database level -- confirm
    # callers always set created_on.
    created_on = models.DateTimeField(blank=True, null=False)

    # pos_image = models.CharField(blank=True, null=True, max_length=250)

    objects = SubjectManager()

    class Meta:
        permissions = (
            ('view_subject', 'View detail Subject'),
            ('register_subject', 'Student registers of subject'),
            ('unregister_subject', 'Student unregisters of subject')
        )

    def __str__(self):
        return self.name + " (" + self.category + ")"
# Module author tag. NOTE(review): the value 'ing' looks truncated --
# verify the intended author string upstream.
__author__ = 'ing'
# -*- coding: utf-8 -*-
from src.constant import *
import unittest

from src.game import Game


class TestPlayers(unittest.TestCase):
    """Unit tests for player creation and lookup on Game."""

    # Init a player
    def test_initPlayer(self):
        game = Game()
        player = game.createPlayer()
        # The first created player gets id 0.
        self.assertEqual(player._id, 0)

    # Get a valid player
    def test_getPlayer(self):
        game = Game()
        player0 = game.createPlayer()
        player1 = game.createPlayer()
        self.assertEqual(player0._id, 0)
        self.assertEqual(player1._id, 1)
        playerN = game.getPlayer(0)
        self.assertEqual(playerN._id, 0)
        playerN = game.getPlayer(1)
        self.assertEqual(playerN._id, 1)

    # Get an invalid player
    def test_getUnvalidPlayer(self):
        game = Game()
        # No players were created, so lookup must return None.
        player = game.getPlayer(0)
        self.assertIsNone(player)

    # Set too many players.
    # NOTE(review): range(1, 5) creates only 4 players; presumably the game
    # caps the player count -- confirm the intended cap.
    def test_tooManyPlayers(self):
        game = Game()
        for i in range(1,5):
            game.createPlayer()
        player = game.getPlayer(2)
        self.assertEqual(player._id, 2)
        # Id 5 was never created, so it must be None.
        player = game.getPlayer(5)
        self.assertIsNone(player)
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
  """Computes the Swish activation: `x * sigmoid(x)`.

  Unlike `tf.nn.swish`, which uses a custom gradient to save memory, this
  version relies on default TensorFlow gradient computation. Custom
  gradients cannot currently be saved in SavedModel, so this wrapper keeps
  exported TF-Hub modules fine-tunable, at the cost of extra memory.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  x = tf.convert_to_tensor(features)
  return x * tf.nn.sigmoid(x)


@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
  """Computes the hard-Swish approximation: `x * relu6(x + 3) / 6`.

  A piecewise-linear stand-in for Swish that lowers computational cost and
  quantizes better on edge devices.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  x = tf.convert_to_tensor(features)
  gate = tf.nn.relu6(x + tf.constant(3.))
  return x * gate * (1. / 6.)


@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
  """Returns its input unchanged; a quantization-friendly no-op activation.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  x = tf.convert_to_tensor(features)
  return tf.identity(x)
# Package version string (PEP 440 development release).
__version__ = "0.1.dev0"
import unittest
import socket
import os

from shapy.framework.netlink.constants import *
from shapy.framework.netlink.message import *
from shapy.framework.netlink.tc import *
from shapy.framework.netlink.htb import *
from shapy.framework.netlink.connection import Connection

from tests import TCTestCase


class TestClass(TCTestCase):
    """Exercises adding an HTB traffic-control class over rtnetlink."""

    def test_add_class(self):
        """Adds an HTB class under a fresh root qdisc and expects an ACK."""
        self.qhandle = 0x1 << 16  # | 0x1 # major:minor, 1:
        self.add_htb_qdisc()
        # Class handle 1:1 under the 1: qdisc.
        handle = 0x1 << 16 | 0x1
        rate = 256*1000
        mtu = 1600
        this_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(this_dir, 'htb_add_class.data'), 'rb') as f:
            data = f.read()
        # NOTE(review): `data` is only used by the commented-out variant
        # below; the live code builds the rate/ceil tables itself.
        #init = Attr(TCA_HTB_INIT, HTBParms(rate, rate).pack()+data[36+8+4+48:])
        init = Attr(TCA_HTB_INIT, HTBParms(rate, rate).pack() + RTab(rate, mtu).pack() + CTab(rate, mtu).pack())
        tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, handle, self.qhandle, 0, [Attr(TCA_KIND, 'htb\0'), init])
        msg = Message(type=RTM_NEWTCLASS,
                      flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
                      service_template=tcm)
        self.conn.send(msg)
        # Kernel replies with a netlink ACK on success.
        self.check_ack(self.conn.recv())
        # Clean up so later tests start from a pristine interface.
        self.delete_root_qdisc()

    def add_htb_qdisc(self):
        """Installs a root HTB qdisc (default class 0x1ff) and returns the reply."""
        tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, self.qhandle, TC_H_ROOT, 0,
                    [Attr(TCA_KIND, 'htb\0'), HTBQdiscAttr(defcls=0x1ff)])
        msg = Message(type=RTM_NEWQDISC,
                      flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
                      service_template=tcm)
        self.conn.send(msg)
        r = self.conn.recv()
        self.check_ack(r)
        return r
from django.apps import AppConfig


class PagesConfig(AppConfig):
    """App configuration for the precision.pages Django app."""

    # Full Python path of the application.
    name = 'precision.pages'
    # Human-readable name shown e.g. in the admin.
    verbose_name = "Pages"
# coding: utf-8
# Auto-generated-style SQLAlchemy reflections of the `demand` schema
# (eGo electricity-demand tables).
from sqlalchemy import Column, Float, Integer, Numeric, String, Table, Text
from geoalchemy2.types import Geometry
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()
metadata = Base.metadata


class EgoDemandFederalstate(Base):
    """Electricity consumption and population per German federal state."""

    __tablename__ = 'ego_demand_federalstate'
    __table_args__ = {'schema': 'demand'}

    eu_code = Column(String(7), primary_key=True)
    federal_states = Column(String)
    elec_consumption_households = Column(Float(53))
    elec_consumption_industry = Column(Float(53))
    elec_consumption_tertiary_sector = Column(Float(53))
    population = Column(Integer)
    elec_consumption_households_per_person = Column(Float(53))


class EgoDpLoadarea(Base):
    """Versioned load areas with per-sector area/consumption/peak-load data."""

    __tablename__ = 'ego_dp_loadarea'
    __table_args__ = {'schema': 'demand'}

    # Composite key: dataset version + load-area id.
    version = Column(Text, primary_key=True, nullable=False)
    id = Column(Integer, primary_key=True, nullable=False)
    subst_id = Column(Integer)
    area_ha = Column(Float(53))
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    otg_id = Column(Integer)
    un_id = Column(Integer)
    # Census / IOER population and density statistics.
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Float(53))
    ioer_sum = Column(Float(53))
    ioer_count = Column(Integer)
    ioer_density = Column(Float(53))
    # Per-sector land areas.
    sector_area_residential = Column(Float(53))
    sector_area_retail = Column(Float(53))
    sector_area_industrial = Column(Float(53))
    sector_area_agricultural = Column(Float(53))
    sector_area_sum = Column(Float(53))
    # Per-sector area shares.
    sector_share_residential = Column(Float(53))
    sector_share_retail = Column(Float(53))
    sector_share_industrial = Column(Float(53))
    sector_share_agricultural = Column(Float(53))
    sector_share_sum = Column(Float(53))
    # Per-sector building counts.
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    # Per-sector electricity consumption.
    sector_consumption_residential = Column(Float(53))
    sector_consumption_retail = Column(Float(53))
    sector_consumption_industrial = Column(Float(53))
    sector_consumption_agricultural = Column(Float(53))
    sector_consumption_sum = Column(Float(53))
    # Per-sector peak loads.
    sector_peakload_retail = Column(Float(53))
    sector_peakload_residential = Column(Float(53))
    sector_peakload_industrial = Column(Float(53))
    sector_peakload_agricultural = Column(Float(53))
    # Geometries in ETRS89-LAEA (EPSG:3035).
    geom_centroid = Column(Geometry('POINT', 3035))
    geom_surfacepoint = Column(Geometry('POINT', 3035))
    geom_centre = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035), index=True)


# Materialized view of load areas for dataset version 0.4.3.
t_ego_dp_loadarea_v0_4_3_mview = Table(
    'ego_dp_loadarea_v0_4_3_mview', metadata,
    Column('version', Text),
    Column('id', Integer, unique=True),
    Column('subst_id', Integer),
    Column('area_ha', Numeric),
    Column('nuts', String(5)),
    Column('rs_0', String(12)),
    Column('ags_0', String(12)),
    Column('otg_id', Integer),
    Column('un_id', Integer),
    Column('zensus_sum', Integer),
    Column('zensus_count', Integer),
    Column('zensus_density', Numeric),
    Column('ioer_sum', Numeric),
    Column('ioer_count', Integer),
    Column('ioer_density', Numeric),
    Column('sector_area_residential', Numeric),
    Column('sector_area_retail', Numeric),
    Column('sector_area_industrial', Numeric),
    Column('sector_area_agricultural', Numeric),
    Column('sector_area_sum', Numeric),
    Column('sector_share_residential', Numeric),
    Column('sector_share_retail', Numeric),
    Column('sector_share_industrial', Numeric),
    Column('sector_share_agricultural', Numeric),
    Column('sector_share_sum', Numeric),
    Column('sector_count_residential', Integer),
    Column('sector_count_retail', Integer),
    Column('sector_count_industrial', Integer),
    Column('sector_count_agricultural', Integer),
    Column('sector_count_sum', Integer),
    Column('sector_consumption_residential', Float(53)),
    Column('sector_consumption_retail', Float(53)),
    Column('sector_consumption_industrial', Float(53)),
    Column('sector_consumption_agricultural', Float(53)),
    Column('sector_consumption_sum', Float(53)),
    Column('sector_peakload_retail', Float(53)),
    Column('sector_peakload_residential', Float(53)),
    Column('sector_peakload_industrial', Float(53)),
    Column('sector_peakload_agricultural', Float(53)),
    Column('geom_centroid', Geometry('POINT', 3035)),
    Column('geom_surfacepoint', Geometry('POINT', 3035)),
    Column('geom_centre', Geometry('POINT', 3035)),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='demand'
)


# Materialized view of load areas for dataset version 0.4.5
# (same column layout as the 0.4.3 view above).
t_ego_dp_loadarea_v0_4_5_mview = Table(
    'ego_dp_loadarea_v0_4_5_mview', metadata,
    Column('version', Text),
    Column('id', Integer, unique=True),
    Column('subst_id', Integer),
    Column('area_ha', Numeric),
    Column('nuts', String(5)),
    Column('rs_0', String(12)),
    Column('ags_0', String(12)),
    Column('otg_id', Integer),
    Column('un_id', Integer),
    Column('zensus_sum', Integer),
    Column('zensus_count', Integer),
    Column('zensus_density', Numeric),
    Column('ioer_sum', Numeric),
    Column('ioer_count', Integer),
    Column('ioer_density', Numeric),
    Column('sector_area_residential', Numeric),
    Column('sector_area_retail', Numeric),
    Column('sector_area_industrial', Numeric),
    Column('sector_area_agricultural', Numeric),
    Column('sector_area_sum', Numeric),
    Column('sector_share_residential', Numeric),
    Column('sector_share_retail', Numeric),
    Column('sector_share_industrial', Numeric),
    Column('sector_share_agricultural', Numeric),
    Column('sector_share_sum', Numeric),
    Column('sector_count_residential', Integer),
    Column('sector_count_retail', Integer),
    Column('sector_count_industrial', Integer),
    Column('sector_count_agricultural', Integer),
    Column('sector_count_sum', Integer),
    Column('sector_consumption_residential', Float(53)),
    Column('sector_consumption_retail', Float(53)),
    Column('sector_consumption_industrial', Float(53)),
    Column('sector_consumption_agricultural', Float(53)),
    Column('sector_consumption_sum', Float(53)),
    Column('sector_peakload_retail', Float(53)),
    Column('sector_peakload_residential', Float(53)),
    Column('sector_peakload_industrial', Float(53)),
    Column('sector_peakload_agricultural', Float(53)),
    Column('geom_centroid', Geometry('POINT', 3035)),
    Column('geom_surfacepoint', Geometry('POINT', 3035)),
    Column('geom_centre', Geometry('POINT', 3035)),
    Column('geom', Geometry('POLYGON', 3035), index=True),
    schema='demand'
)
#! /usr/bin/env python

# This file is part of the dvbobjects library.
#
# Copyright © 2005-2013 Lorenzo Pallara l.pallara@avalpa.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA

from math import floor


def MJD_convert(year, month, day):
    """Convert a calendar date to a Modified Julian Date day number.

    Implements the DVB-SI formula from ETSI EN 300 468 Annex C, where
    January and February are treated as months 13/14 of the previous
    year -- that is what the leap shift below encodes.

    Args:
        year: Calendar year; per the DVB convention this is presumably
            the two-digit year (year - 1900) -- confirm against callers.
        month: Month number, 1-12.
        day: Day of the month.

    Returns:
        The MJD day number (integral-valued; a float under Python 2,
        where math.floor returns float).
    """
    # January and February belong to the tail of the previous year.
    leap_shift = 1 if month in (1, 2) else 0
    year_days = floor((year - leap_shift) * 365.25)
    month_days = floor((month + 1 + leap_shift * 12) * 30.6001)
    return 14956 + day + year_days + month_days
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import binascii
import os

import pytest

from cryptography.hazmat.backends.interfaces import CipherBackend
from cryptography.hazmat.primitives.ciphers import algorithms, modes

from .utils import generate_encrypt_test
from ...utils import load_nist_vectors


# Each class below drives the shared generate_encrypt_test() harness against
# the NIST-style IDEA vectors for one mode of operation; the class is skipped
# when the active backend does not support IDEA in that mode.

@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.ECB()
    ),
    skip_message="Does not support IDEA ECB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeECB(object):
    """IDEA in ECB mode (no IV)."""
    test_ECB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-ecb.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda **kwargs: modes.ECB(),
    )


@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.CBC(b"\x00" * 8)
    ),
    skip_message="Does not support IDEA CBC",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeCBC(object):
    """IDEA in CBC mode (8-byte IV from the vector file)."""
    test_CBC = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-cbc.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv))
    )


@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.OFB(b"\x00" * 8)
    ),
    skip_message="Does not support IDEA OFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeOFB(object):
    """IDEA in OFB mode (8-byte IV from the vector file)."""
    test_OFB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-ofb.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv))
    )


@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.CFB(b"\x00" * 8)
    ),
    skip_message="Does not support IDEA CFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeCFB(object):
    """IDEA in CFB mode (8-byte IV from the vector file)."""
    test_CFB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-cfb.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv))
    )
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _


class RegistrationForm(UserCreationForm):
    """User sign-up form extending Django's UserCreationForm.

    Adds a required unique email address plus free-form address and
    website fields.
    """

    email = forms.EmailField(help_text='Enter a valid email address')
    address = forms.CharField()
    website = forms.URLField()

    def clean_email(self):
        """Reject email addresses that already belong to an existing user.

        Returns:
            The cleaned email when no user has it yet.

        Raises:
            forms.ValidationError: If the email is already registered.
        """
        email = self.cleaned_data['email']
        # Use filter().exists() instead of get(): User.email is not unique
        # at the database level, so get() would raise an uncaught
        # MultipleObjectsReturned (a server error) if duplicates already
        # exist, instead of reporting a normal validation failure.
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(_("Email already exists"))
        return email
# -*- coding:utf-8 -*-
"""Small NLP helpers: lemma conversion, stemming, lowercasing and n-grams."""
import operator
import string
import itertools
# functools.reduce exists on Python 2.6+ and is required on Python 3,
# where the builtin `reduce` was removed.
from functools import reduce

import snowballstemmer
from textblob import TextBlob, Word

# Locale-specific lowercase overrides (Turkish dotted/dotless I).
LOWER_MAP = {
    'tr': {
        ord('I'): u'ı'
    }
}

STEMMERS = {
    'en': snowballstemmer.stemmer('english'),
    'tr': snowballstemmer.stemmer('turkish'),
}


def noun_phrases(text):
    """Tokenize ``text`` with TextBlob."""
    blob = TextBlob(text)
    return blob.tokenize()


def get_synsets(text):
    """Return WordNet synsets for ``text`` (spaces become underscores)."""
    return Word(to_lemma(text)).synsets


def get_lemmas(text):
    """Return all lemma names across the synsets of ``text``, de-lemmatized."""
    word = Word(to_lemma(text))
    sets = map(set, [synset.lemma_names() for synset in word.synsets])
    return map(from_lemma, reduce(operator.or_, sets))


def to_lemma(text):
    """Convert a phrase to WordNet lemma form ('ice cream' -> 'ice_cream')."""
    return text.replace(' ', '_')


def from_lemma(text):
    """Convert a WordNet lemma back to a phrase ('ice_cream' -> 'ice cream')."""
    return text.replace('_', ' ')


def stem_word(word, language):
    """Stem ``word`` for ``language``; pass through if no stemmer is known."""
    stemmer = STEMMERS.get(language)
    if stemmer is None:
        return word
    return (stemmer
            .stemWord(word)
            .strip(string.punctuation))


def tokenize(wordlist, language, stem=True):
    """Join ``wordlist`` into one string, optionally stemming each word."""
    return ' '.join((stem_word(word, language) if stem else word)
                    for word in wordlist)


def lower(text, language):
    """Lowercase ``text`` honouring locale overrides in LOWER_MAP."""
    if language in LOWER_MAP:
        text = text.translate(LOWER_MAP[language])
    return text.lower()


def build_ngrams(text, language='en'):
    """Yield stemmed then unstemmed 3-, 2- and 1-gram strings of ``text``."""
    blob = TextBlob(lower(text, language))
    ngrams = [blob.ngrams(n=n) for n in (3, 2, 1)]
    wordlists = reduce(operator.add, ngrams)
    tokenized = (
        tokenize(wordlist, language, stem=True)
        for wordlist in wordlists)
    pure = (
        tokenize(wordlist, language, stem=False)
        for wordlist in wordlists)
    return itertools.chain(tokenized, pure)


def is_subsequence(sequence, parent):
    """Return True if ``sequence`` appears as a contiguous run in ``parent``."""
    # `range` (not the Py2-only `xrange`) keeps this portable; sizes are small.
    for i in range(1 + len(parent) - len(sequence)):
        if sequence == parent[i:i + len(sequence)]:
            return True
    return False
#!/usr/bin/python
"""Print the character codes of prog.txt as a comma-separated list."""

# Use a context manager so the handle is closed even on error (the original
# leaked it and shadowed the builtin `file`), and build the output with one
# join instead of quadratic `+=` string concatenation.
with open('prog.txt', 'r') as source:
    print(','.join(str(ord(ch)) for ch in source.read()))
"""Minimal Flask app rendering a quik template."""
from flask import Flask
from flask.ext.quik import FlaskQuik
from flask.ext.quik import render_template

app = Flask(__name__)
quik = FlaskQuik(app)


@app.route('/', methods=['GET', 'POST'])
def hello_quik():
    """Render the hello page."""
    return render_template('hello.html', name='quik')


if __name__ == '__main__':
    # Guarded so importing this module no longer starts the dev server.
    # NOTE(review): debug=True on 0.0.0.0 exposes the debugger to the
    # network — disable for anything non-local.
    app.run(host='0.0.0.0', debug=True, port=5000)
# -*- coding=utf-8 -*-
"""Tests for the LinkedList implementation in linked_list.py."""
import pytest


def test_linkedlist_tail_default():
    """Test that the class-level tail attribute defaults to None."""
    from linked_list import LinkedList
    assert LinkedList.tail is None


def test_linkedlist_construct_empty_list():
    """Test that constructing from an empty list leaves tail as None."""
    from linked_list import LinkedList
    input_ = []
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.tail is None


def test_linkedlist_construct_integer():
    """Test that constructing from a bare integer stores it as the tail value."""
    from linked_list import LinkedList
    input_ = 5
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.tail.value == 5


def test_linkedlist_constructor_list_isnode():
    """Test that constructing from a list produces Node objects."""
    from linked_list import LinkedList, Node
    input_ = [1, 2]
    linked_list_instance = LinkedList(input_)
    assert isinstance(linked_list_instance.tail, Node)


def test_linkedlist_constructor_nodeval():
    """Test that constructed node values match a hand-built Node chain."""
    from linked_list import LinkedList, Node
    input_ = [1, 2]
    ll_inst = LinkedList(input_)
    assert ll_inst.tail.pointer.value == Node(2, Node(1, None)).pointer.value


def test_linkedlist_constructor_nodeterm():
    """Test that the constructed chain terminates with a None pointer."""
    from linked_list import LinkedList
    input_ = [1, 2]
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.tail.pointer.pointer is None


def test_linkedlist_insert_integer():
    """Test that insert pushes an integer onto the list correctly."""
    from linked_list import LinkedList, Node
    input_ = [1, 2]
    ll_inst = LinkedList(input_)
    ll_inst.insert(3)
    assert ll_inst.tail.pointer.pointer.value == (Node(2, Node(1, Node(3, None))).pointer.pointer.value
                                                  )


def test_linkedlist_insert_string():
    """Test LinkedList.insert for tail addition to the Node list."""
    from linked_list import LinkedList
    input_ = [1, 2, 3]
    linked_list_instance = LinkedList(input_)
    linked_list_instance.insert("Nadia")
    assert linked_list_instance.tail.pointer.pointer.pointer.value == "Nadia"


def test_linkedlist_insert_empty():
    """Test LinkedList.insert starting from an empty list."""
    from linked_list import LinkedList
    input_ = []
    linked_list_instance = LinkedList(input_)
    linked_list_instance.insert('a')
    assert linked_list_instance.size() == 1


def test_linkedlist_pop():
    """Test LinkedList.pop for head removal."""
    from linked_list import LinkedList
    input_ = [1]
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.pop() == 1


def test_linkedlist_pop_empty():
    """Test that LinkedList.pop on an empty list raises IndexError."""
    from linked_list import LinkedList
    input_ = []
    linked_list_instance = LinkedList(input_)
    with pytest.raises(IndexError):
        linked_list_instance.pop()


def test_linkedlist_size_long():
    """Test LinkedList.size against a 75-element input."""
    from linked_list import LinkedList
    input2_ = list(range(75))
    linked_list_instance2 = LinkedList(input2_)
    assert linked_list_instance2.size() == len(input2_)


def test_linkedlist_size_empty():
    """Test LinkedList.size on an empty list."""
    from linked_list import LinkedList
    input3_ = []
    linked_list_instance3 = LinkedList(input3_)
    assert linked_list_instance3.size() == len(input3_)


@pytest.fixture(scope='function')
def linked_list_instance():
    """Fixture providing a LinkedList of the 26 lowercase letters."""
    from linked_list import LinkedList
    input_ = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split()
    return LinkedList(input_)


def test_linkedlist_search_mid(linked_list_instance):
    """Test LinkedList.search for a mid-list value match."""
    assert linked_list_instance.search("d").value == "d"


def test_linkedlist_search_head(linked_list_instance):
    """Test LinkedList.search for the first-inserted value."""
    assert linked_list_instance.search("a").value == "a"


def test_linkedlist_search_missing(linked_list_instance):
    """Test that LinkedList.search returns None for an absent value."""
    assert linked_list_instance.search("norton is amazing") is None


def test_linkedlist_remove(linked_list_instance):
    """Test LinkedList.remove for proper mid-list Node removal."""
    from linked_list import Node
    linked_list_instance.remove(Node('y'))
    assert linked_list_instance.tail.pointer.value == 'x'


def test_linkedlist_remove_tail(linked_list_instance):
    """Test LinkedList.remove of the tail (most recently inserted) Node."""
    from linked_list import Node
    linked_list_instance.remove(Node('z'))
    assert linked_list_instance.tail.pointer.value == 'x'


def test_linkedlist_remove_head():
    """Test LinkedList.remove of the head (first-inserted) Node."""
    from linked_list import LinkedList, Node
    input_ = "a b c".split()
    linked_list_instance = LinkedList(input_)
    linked_list_instance.remove(Node('a'))
    assert linked_list_instance.tail.pointer.pointer is None


def test_linkedlist_display():
    """Test LinkedList.display for proper string formatting."""
    from linked_list import LinkedList
    input_ = "a b c".split()
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.display() == "('c', 'b', 'a')"
# Live extension instances, kept in registration order.
active_extensions = []


class Extension(object):
    """Base class for extensions; subclasses may override ``register``."""

    def register(self):
        """Hook run once, right after the extension is instantiated."""
        pass


def dispatch(event, *args, **kwargs):
    """Call the ``event`` method on every registered extension that has one."""
    for ext in active_extensions:
        if hasattr(ext, event):
            getattr(ext, event)(*args, **kwargs)


def register(extension):
    """Instantiate ``extension`` (a class), record it, and run its hook."""
    ext = extension()
    active_extensions.append(ext)
    ext.register()
"""Telegram bot plugin: search SoundCloud and download tracks."""
import asyncio

import demjson

from bot import user_steps, sender, get, downloader
from message import Message

client_id = ''#YOUR CLIENT ID


async def search(query):
    """Return up to 30 [title, permalink_url] pairs matching ``query``."""
    search_url = 'https://api.soundcloud.com/search?q=%s&facet=model&limit=30&offset=0&linked_partitioning=1&client_id='+client_id
    url = search_url % query
    response = await get(url)
    r = demjson.decode(response)
    res = []
    for entity in r['collection']:
        if entity['kind'] == 'track':
            res.append([entity['title'], entity['permalink_url']])
    return res


async def getfile(url):
    """Resolve a permalink URL to a streamable URL with the client id."""
    # BUG FIX: the original applied .format(url) to client_id instead of the
    # URL template, so the {} placeholder was never substituted.
    response = await get(
        "https://api.soundcloud.com/resolve?url={}&client_id={}".format(url, client_id))
    r = demjson.decode(response)
    return r['stream_url'] + "?client_id="+client_id


# @asyncio.coroutine removed: it is redundant (and deprecated) on a native
# `async def`; the function is already awaitable.
async def run(message, matches, chat_id, step):
    """Two-step conversation: search (step 0), then download pick (step 1)."""
    from_id = message['from']['id']
    if step == 0:
        await sender(
            Message(chat_id).set_text("*Please Wait*\nI'm Searching all Music with this name",
                                      parse_mode="markdown"))
        user_steps[from_id] = {"name": "Soundcloud", "step": 1, "data": {}}
        i = 0
        show_keyboard = {'keyboard': [], "selective": True}
        matches = matches.replace(" ", "+")
        # Offer at most 20 of the returned tracks as keyboard choices.
        for song in await search(matches):
            title, link = song[0], song[1]
            user_steps[from_id]['data'][title] = link
            show_keyboard['keyboard'].append([title])
            i += 1
            if i == 20:
                break
        if len(show_keyboard['keyboard']) in [0, 1]:
            hide_keyboard = {'hide_keyboard': True, 'selective': True}
            del user_steps[from_id]
            return [Message(chat_id).set_text("*Not Found*",
                                              reply_to_message_id=message['message_id'],
                                              reply_markup=hide_keyboard, parse_mode="markdown")]
        return [Message(chat_id).set_text("Select One Of these :",
                                          reply_to_message_id=message['message_id'],
                                          reply_markup=show_keyboard)]
    elif step == 1:
        try:
            hide_keyboard = {'hide_keyboard': True, "selective": True}
            await sender(Message(chat_id).set_text("*Please Wait*\nLet me Save this Music For You",
                                                   reply_to_message_id=message['message_id'],
                                                   reply_markup=hide_keyboard,
                                                   parse_mode="markdown"))
            await downloader(await getfile(user_steps[from_id]['data'][message['text']]),
                             "tmp/{}.mp3".format(message['text']))
            del user_steps[from_id]
            return [Message(chat_id).set_audio("tmp/{}.mp3".format(message['text']),
                                               title=message['text'],
                                               performer="@Siarobot")]
        except Exception:
            # Any failure (unknown title, download error) resets the dialog.
            del user_steps[from_id]
            return [Message(chat_id).set_text("*Wrong Input*\n_Try Again_", parse_mode="markdown")]


plugin = {
    "name": "Soundcloud",
    "desc": "Download a Music From Sound Cloud\n\n"
            "*For Start :*\n`/sc michael jackson billie jean`",
    "usage": ["/sc \\[`Search`]"],
    "run": run,
    "sudo": False,
    "patterns": ["^[/!#]sc (.*)$"]
}
from __future__ import absolute_import import autograd.numpy as np import autograd.numpy.random as npr from autograd.util import * from autograd import grad npr.seed(1) def test_real_type(): fun = lambda x: np.sum(np.real(x)) df = grad(fun) assert type(df(1.0)) == float assert type(df(1.0j)) == complex def test_real_if_close_type(): fun = lambda x: np.sum(np.real(x)) df = grad(fun) assert type(df(1.0)) == float assert type(df(1.0j)) == complex def test_imag_type(): f
un = lambda x: np.sum(np.imag(x)) df = grad(fun) assert base_class(type(df(1.0 ))) == float assert base_class(type(df(1.0j))) == complex # TODO: real times imag def test_angle_real(): fun = lambda x : to_scalar(np.angle(x)) d_fun = lambda x: to_scalar(grad(fun
)(x)) check_grads(fun, npr.rand()) check_grads(d_fun, npr.rand()) def test_angle_complex(): fun = lambda x : to_scalar(np.angle(x)) d_fun = lambda x: to_scalar(grad(fun)(x)) check_grads(fun, npr.rand() + 1j*npr.rand()) check_grads(d_fun, npr.rand() + 1j*npr.rand()) def test_abs_real(): fun = lambda x : to_scalar(np.abs(x)) d_fun = lambda x: to_scalar(grad(fun)(x)) check_grads(fun, 1.1) check_grads(d_fun, 2.1) def test_abs_complex(): fun = lambda x : to_scalar(np.abs(x)) d_fun = lambda x: to_scalar(grad(fun)(x)) check_grads(fun, 1.1 + 1.2j) check_grads(d_fun, 1.1 + 1.3j)
= api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
        parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
        if ttype == TEMPLATE_TYPE and parent == parent2:
            result = True
        else:
            module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
    # NOTE(review): Python 2-only except syntax is used throughout this module.
    except bigsuds.OperationFailed, e:
        if "was not found" in str(e):
            result = False
        else:
            # genuine exception
            raise
    return result


def create_monitor(api, monitor, template_attributes):
    # Create the monitor template; returns False if it already exists,
    # True on creation, and re-raises any other API failure.
    try:
        api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
    except bigsuds.OperationFailed, e:
        if "already exists" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True


def delete_monitor(api, monitor):
    # Delete the monitor template; False if it was already gone.
    try:
        api.LocalLB.Monitor.delete_template(template_names=[monitor])
    except bigsuds.OperationFailed, e:
        # maybe it was deleted since we checked
        if "was not found" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True


def check_string_property(api, monitor, str_property):
    # True when the monitor's current string property equals str_property.
    try:
        return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
    except bigsuds.OperationFailed, e:
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
    return True


def set_string_property(api, monitor, str_property):
    api.LocalLB.Monitor.set_template_string_property(template_names=[monitor],
                                                     values=[str_property])


def check_integer_property(api, monitor, int_property):
    # True when the monitor's current integer property equals int_property.
    try:
        return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
    except bigsuds.OperationFailed, e:
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
    return True


def set_integer_property(api, monitor, int_property):
    api.LocalLB.Monitor.set_template_int_property(template_names=[monitor],
                                                  values=[int_property])


def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
    # Push any differing string/integer properties; honours check mode.
    # Returns True when anything would change.
    changed = False

    for str_property in template_string_properties:
        if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
            if not module.check_mode:
                set_string_property(api, monitor, str_property)
            changed = True

    for int_property in template_integer_properties:
        if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
            if not module.check_mode:
                set_integer_property(api, monitor, int_property)
            changed = True

    return changed


def get_ipport(api, monitor):
    # Current destination (address_type + ipport dict) of the monitor.
    return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]


def set_ipport(api, monitor, ipport):
    # Returns (ok, message); the address type cannot be changed once the
    # monitor is assigned to a pool, which is reported rather than raised.
    try:
        api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
        return True, ""

    except bigsuds.OperationFailed, e:
        if "Cannot modify the address type of monitor" in str(e):
            return False, "Cannot modify the address type of monitor if already assigned to a pool."
        else:
            # genuine exception
            raise

# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)

def main():

    # begin monitor specific stuff

    argument_spec=f5_argument_spec();
    argument_spec.update(dict(
            name = dict(required=True),
            type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
            parent = dict(default=DEFAULT_PARENT),
            parent_partition = dict(default='Common'),
            send = dict(required=False),
            receive = dict(required=False),
            ip = dict(required=False),
            port = dict(required=False, type='int'),
            interval = dict(required=False, type='int'),
            timeout = dict(required=False, type='int'),
            time_until_up = dict(required=False, type='int', default=0)
        )
    )

    module = AnsibleModule(
        argument_spec = argument_spec,
        supports_check_mode=True
    )

    (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)

    parent_partition = module.params['parent_partition']
    name = module.params['name']
    type = 'TTYPE_' + module.params['type'].upper()
    parent = fq_name(parent_partition, module.params['parent'])
    monitor = fq_name(partition, name)
    send = module.params['send']
    receive = module.params['receive']
    ip = module.params['ip']
    port = module.params['port']
    interval = module.params['interval']
    timeout = module.params['timeout']
    time_until_up = module.params['time_until_up']

    # tcp monitor has multiple types, so overrule global TEMPLATE_TYPE
    TEMPLATE_TYPE = type

    # end monitor specific stuff

    api = bigip_api(server, user, password)
    monitor_exists = check_monitor_exists(module, api, monitor, parent)

    # ipport is a special setting
    if monitor_exists: # make sure to not update current settings if not asked
        cur_ipport = get_ipport(api, monitor)
        if ip is None:
            ip = cur_ipport['ipport']['address']
        if port is None:
            port = cur_ipport['ipport']['port']
    else: # use API defaults if not defined to create it
        if interval is None:
            interval = 5
        if timeout is None:
            timeout = 16
        if ip is None:
            ip = '0.0.0.0'
        if port is None:
            port = 0
        if send is None:
            send = ''
        if receive is None:
            receive = ''

    # define and set address type
    if ip == '0.0.0.0' and port == 0:
        address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
    elif ip == '0.0.0.0' and port != 0:
        address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
    elif ip != '0.0.0.0' and port != 0:
        address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
    else:
        address_type = 'ATYPE_UNSET'

    ipport = {'address_type': address_type,
              'ipport': {'address': ip,
                         'port': port}}

    template_attributes = {'parent_template': parent,
                           'interval': interval,
                           'timeout': timeout,
                           'dest_ipport': ipport,
                           'is_read_only': False,
                           'is_directly_usable': True}

    # monitor specific stuff
    if type == 'TTYPE_TCP':
        template_string_properties = [{'type': 'STYPE_SEND',
                                       'value': send},
                                      {'type': 'STYPE_RECEIVE',
                                       'value': receive}]
    else:
        template_string_properties = []

    template_integer_properties = [{'type': 'ITYPE_INTERVAL',
                                    'value': interval},
                                   {'type': 'ITYPE_TIMEOUT',
                                    'value': timeout},
                                   # NOTE(review): 'value' here is `interval`;
                                   # `time_until_up` looks intended — confirm.
                                   {'type': 'ITYPE_TIME_UNTIL_UP',
                                    'value': interval}]

    # main logic, monitor generic

    try:
        result = {'changed': False}  # default

        if state == 'absent':
            if monitor_exists:
                if not module.check_mode:
                    # possible race condition if same task #
from __future__ import print_function import os.path import re import imp import sys from shutil import copyfile import PythonAPI as api class ValidateFilename(api.PythonAPIRule): def __init__(self, config): super(ValidateFilename, self).__init__(config) def run(self, inputFile, outputFile, encoding): # NOTE: dot syntax doesn't work for dereferencing fields on self.config because the properties are defined using UTF-8 strings. if not "regex" in self.config: self.error("No regex specified.") elif not "importConfig" in self.config: self.error("No importConfig specified in the rule config.") elif not "file" in self.config["importConfig"]: self.error("No file specified in the rule config.importConfig.") else: filename
= os.path.basename(self.config["importConfig"]["file"]) prog = re.compile(self.config["regex"], re.UNICODE) if prog.match(filename) is None: self.error(filename + " does not match the regular expression " + self.config["regex"]) # Copy the file to the output for
the next rule copyfile(inputFile, outputFile) api.process(ValidateFilename)
ion values have a default; values that are commented out
# serve to show the default.
# (Sphinx conf.py for the Pinax Symposion documentation build.)

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Pinax Symposion'
copyright = u'2012, Eldarion Team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5dev'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'PinaxSymposiondoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'PinaxSymposion.tex', u'Pinax Symposion Documentation',
     u'Eldarion Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pinaxsymposion', u'Pinax Symposion Documentation',
     [u'Eldarion Team'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'PinaxSymposion', u'Pinax Symposion Documentation',
     u'Eldarion Team', 'PinaxSymposion', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


# -- Additional config for Django ----------------------------------------------

# Arrange for importing pycon modules to work okay given that they'll
# try to pull in Django
# See http://techblog.ironfroggy.com/2012/06/how-to-use-sphinx-autodoc-on.html

#sys.path.append(os.path.dirname(os.path.dirname(__file__)))

import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "symposion.settings")

# -- Locale configurations -----------------------------------------------------
#
# http://sphinx-doc.org/intl.html#translating-with-sphinx-intl

locale_dirs = ['locale/']   # path is example but recommende
    'lo1_address': 'TCPIP0::10.20.61.59::inst0::INSTR',
    'lo1_timeout': 5000,
    'rf_switch_address': '10.20.61.224',
    'use_rf_switch': True,
    'pxi_chassis_id': 0,
    'hdawg_address': 'hdawg-dev8108',
    'awg_tek_address': 'TCPIP0::10.20.61.186::inst0::INSTR',
    'use_awg_tek': True,
    'sa_address': 'TCPIP0::10.20.61.56::inst0::INSTR',
    'adc_timeout': 10,
    'adc_trig_rep_period': 50 * 125,  # 10 kHz rate period
    'adc_trig_width': 2,  # 80 ns trigger length
}

# Continuous-wave measurement settings (none needed yet).
cw_settings = {}

# Settings applied when switching the setup into pulsed mode.
pulsed_settings = {'lo1_power': 18,
                   'vna_power': 16,
                   'ex_clock': 1000e6,  # 1 GHz - clocks of some devices
                   'rep_rate': 20e3,  # 10 kHz - pulse sequence repetition rate
                   # 500 ex_clocks - all waves is shorten by this amount of clock cycles
                   # to verify that M3202 will not miss next trigger
                   # (awgs are always missing trigger while they are still outputting waveform)
                   'global_num_points_delta': 400,
                   'hdawg_ch0_amplitude': 0.3,
                   'hdawg_ch1_amplitude': 0.3,
                   'hdawg_ch2_amplitude': 0.8,
                   'hdawg_ch3_amplitude': 0.8,
                   'hdawg_ch4_amplitude': 0.8,
                   'hdawg_ch5_amplitude': 0.8,
                   'hdawg_ch6_amplitude': 0.8,
                   'hdawg_ch7_amplitude': 0.8,
                   'awg_tek_ch1_amplitude': 1.0,
                   'awg_tek_ch2_amplitude': 1.0,
                   'awg_tek_ch3_amplitude': 1.0,
                   'awg_tek_ch4_amplitude': 1.0,
                   'awg_tek_ch1_offset': 0.0,
                   'awg_tek_ch2_offset': 0.0,
                   'awg_tek_ch3_offset': 0.0,
                   'awg_tek_ch4_offset': 0.0,
                   'lo1_freq': 3.41e9,
                   'pna_freq': 6.07e9,
                   #'calibrate_delay_nop': 65536,
                   'calibrate_delay_nums': 200,
                   'trigger_readout_channel_name': 'ro_trg',
                   'trigger_readout_length': 200e-9,
                   'modem_dc_calibration_amplitude': 1.0,
                   'adc_nop': 1024,
                   # TODO: expose this as a parameter if finer control over the
                   # number of averages is ever needed.
                   'adc_nums': 50000,
                   }


class hardware_setup():
    """Owns all instrument handles and configures them for CW/pulsed runs."""

    def __init__(self, device_settings, pulsed_settings):
        # Connection/config dictionaries; instruments are opened lazily
        # in open_devices().
        self.device_settings = device_settings
        self.pulsed_settings = pulsed_settings
        self.cw_settings = cw_settings
        self.hardware_state = 'undefined'

        self.pna = None
        self.lo1 = None
        self.rf_switch = None
        self.awg_tek = None
        self.sa = None
        self.coil_device = None

        self.hdawg = None
        self.adc_device = None
        self.adc = None

        self.ro_trg = None
        self.coil = None
        self.iq_devices = None

    def open_devices(self):
        """Connect to every configured instrument and apply basic ADC setup."""
        # RF switch for making sure we know what sample we are measuring
        self.pna = Agilent_N5242A('pna', address=self.device_settings['vna_address'])
        self.lo1 = Agilent_E8257D('lo1', address=self.device_settings['lo1_address'])

        self.lo1._visainstrument.timeout = self.device_settings['lo1_timeout']
        if self.device_settings['use_rf_switch']:
            self.rf_switch = nn_rf_switch('rf_switch', address=self.device_settings['rf_switch_address'])

        if self.device_settings['use_awg_tek']:
            self.awg_tek = Tektronix_AWG5014('awg_tek', address=self.device_settings['awg_tek_address'])

        self.sa = Agilent_N9030A('pxa', address=self.device_settings['sa_address'])

        self.coil_device = self.awg_tek

        self.hdawg = Zurich_HDAWG1808(self.device_settings['hdawg_address'])

        self.adc_device = TSW14J56_evm()
        self.adc_device.timeout = self.device_settings['adc_timeout']
        self.adc = TSW14J56_evm_reducer(self.adc_device)
        self.adc.output_raw = True
        self.adc.last_cov = False
        self.adc.avg_cov = False
        self.adc.resultnumber = False

        self.adc_device.set_trig_src_period(self.device_settings['adc_trig_rep_period'])  # 10 kHz period rate
        self.adc_device.set_trig_src_width(self.device_settings['adc_trig_width'])  # 80 ns trigger length

        # self.hardware_state = 'undefined'

    def set_pulsed_mode(self):
        """Configure LO, VNA and both AWGs for triggered pulsed operation."""
        self.lo1.set_status(1)  # turn on lo1 output
        self.lo1.set_power(self.pulsed_settings['lo1_power'])
        self.lo1.set_frequency(self.pulsed_settings['lo1_freq'])

        self.pna.set_power(self.pulsed_settings['vna_power'])
        self.pna.write("OUTP ON")
        self.pna.write("SOUR1:POW1:MODE ON")
        self.pna.write("SOUR1:POW2:MODE OFF")
        self.pna.set_sweep_mode("CW")
        self.pna.set_frequency(self.pulsed_settings['pna_freq'])

        self.hdawg.stop()
        self.awg_tek.stop()

        self.awg_tek.set_clock(self.pulsed_settings['ex_clock'])  # clock of all the AWGs
        self.hdawg.set_clock(self.pulsed_settings['ex_clock'])
        self.hdawg.set_clock_source(1)

        # setting repetition period for slave devices
        # 'global_num_points_delay' is needed to verify that M3202A and other slave devices will be free
        # when next trigger arrives.
        global_num_points = int(np.round(
            self.pulsed_settings['ex_clock'] / self.pulsed_settings['rep_rate'] - self.pulsed_settings[
                'global_num_points_delta']))
        # global_num_points = 20000

        self.hdawg.set_nop(global_num_points)
        self.hdawg.clear()

        # The sample length (effectively the same quantity) has to be set on
        # every AWG; fortunately there is currently only one.
        # HACK: this duplicated setting is ugly — consolidate when possible.
        self.hdawg.set_trigger_impedance_1e3()
        self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
        self.hdawg.set_dig_trig1_slope([1, 1, 1, 1])  # 0 - Level sensitive trigger, 1 - Rising edge trigger,
        # 2 - Falling edge trigger, 3 - Rising or falling edge trigger
        # NOTE(review): trig1 source is set again here while the slope call
        # targets trig2 — looks like a copy-paste; confirm trig2 source.
        self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
        self.hdawg.set_dig_trig2_slope([1, 1, 1, 1])
        self.hdawg.set_trig_level(0.6)

        for sequencer in range(4):
            self.hdawg.send_cur_prog(sequencer=sequencer)
            # NOTE(review): np.int is removed in NumPy >= 1.24 — plain int()
            # would be the forward-compatible spelling.
            self.hdawg.set_marker_out(channel=np.int(2 * sequencer),
                                      source=4)  # set marker 1 to awg mark out 1 for sequencer
            self.hdawg.set_marker_out(channel=np.int(2 * sequencer + 1),
                                      source=7)  # set marker 2 to awg mark out 2 for sequencer

        for channel in range(8):
            self.hdawg.set_amplitude(channel=channel, amplitude=self.pulsed_settings['hdawg_ch%d_amplitude'%channel])
            self.hdawg.set_offset(channel=channel, offset=0 * 1.0)
            self.hdawg.set_digital(channel=channel, marker=[0]*(global_num_points))

        self.hdawg.set_all_outs()
        self.hdawg.run()

        self.awg_tek._visainstrument.write('AWGC:RMOD TRIG')
        self.awg_tek._visainstrument.write('TRIG:WVAL LAST')
        self.awg_tek._visainstrument.write('TRIG:IMP 1kohm')
        self.awg_tek._visainstrument.write('TRIG:SLOP POS')
        self.awg_tek._visainstrument.write('TRIG:LEV 0.5')
        self.awg_tek._visainstrument.write('SOUR1:ROSC:FREQ 10MHz')
        self.awg_tek._visainstrument.write('SOUR1:ROSC:SOUR EXT')
        # awg_tek.set_trigger_mode('CONT')

        self.awg_tek.set_nop(global_num_points)  # ideally the rep rate should only be set on the master device
        self.awg_tek.check_cached = True
        for channel in range(1, 5):
            self.awg_tek.set_amplitude(self.pulsed_settings['awg_tek_ch{}_amplitude'.format(channel)], channel=channel)
            self.awg_tek.set_offset(self.pulsed_settings['awg_tek_ch{}_offset'.format(channel)], channel=channel)
            self.awg_tek.set_output(1, channel=channel)
            self.awg_tek.set_waveform([0] * global_num_poin
import sys


def check_args(argv):
    """Validate the command line and exit with usage help on error.

    :param argv: the raw ``sys.argv`` list; ``argv[0]`` is the program
                 name and ``argv[1]`` must be the logfile to process.
    :raises SystemExit: with status 1 when the argument count is wrong.
    """
    if len(argv) != 2:
        # Apply the %-formatting BEFORE printing.  The original Python 2
        # form ``print (...) % argv[0]`` relied on the print *statement*
        # and would crash on Python 3 (``None % argv[0]``).
        print("Help:\n"
              "%s filename.log\n"
              "filename.log = name of logfile" % argv[0])
        sys.exit(1)
# -*- coding: UTF-8 -*-
import haystack
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction

from conference import models
from conference.templatetags.conference import fare_blob

from collections import defaultdict
from datetime import datetime
from xml.sax.saxutils import escape


class Command(BaseCommand):
    """
    Create or update one schedule Event for every "partner programme"
    fare of a conference, placing it on the schedule day/time encoded in
    the fare's blob data.
    """
    @transaction.commit_on_success
    def handle(self, *args, **options):
        # The conference code is the single positional argument.
        try:
            conference = args[0]
        except IndexError:
            raise CommandError('conference missing')

        # Map: event date -> list of (fare, departure time) pairs.
        partner_events = defaultdict(list)
        for f in models.Fare.objects.available(conference=conference).filter(ticket_type='partner'):
            # The fare blob stores e.g. "April 12th, ..." (day) and "HH:MM"
            # (departure); fares whose blobs don't parse are skipped.
            # NOTE(review): the year "2011" is hard-coded here.
            try:
                date = datetime.strptime(fare_blob(f, 'data').split(',')[0][:-2] + ' 2011', '%B %d %Y').date()
                time = datetime.strptime(fare_blob(f, 'departure'), '%H:%M').time()
            except ValueError:
                continue
            partner_events[date].append((f, time))

        for sch in models.Schedule.objects.filter(conference=conference):
            events = list(models.Event.objects.filter(schedule=sch))
            for fare, time in partner_events[sch.date]:
                track_id = 'f%s' % fare.id
                # Reuse the existing event carrying this fare's track,
                # otherwise create a fresh one (for/else: no break hit).
                for e in events:
                    if track_id in e.get_all_tracks_names():
                        event = e
                        break
                else:
                    event = models.Event(schedule=sch, talk=None)
                    event.track = 'partner-program ' + track_id
                # NOTE(review): original indentation was lost; these
                # updates are assumed to apply to both found and newly
                # created events -- confirm against project history.
                event.custom = escape(fare.name)
                event.start_time = time
                # Morning events run until 13:00, afternoon ones until
                # 19:00; duration is stored in minutes.
                if time.hour < 13:
                    d = (13 - time.hour) * 60
                else:
                    d = (19 - time.hour) * 60
                event.duration = d
                event.save()
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Queue the squeezemail step runner as a background task."""

    def handle(self, *args, **options):
        # Imported lazily so Django can discover the command without
        # loading the task module (and its queue backend) at startup.
        from squeezemail.tasks import run_steps
        run_steps.delay()
import json
import logging

try:  # Python 2
    from urllib2 import urlopen, HTTPError
    from urllib import urlencode
except ImportError:  # Python 3
    from urllib.request import urlopen
    from urllib.error import HTTPError
    from urllib.parse import urlencode


class HTTPClient(object):
    """Small JSON-over-HTTP client.

    Calls ``http://host:port/<handler>/<arg1>/.../?k=v`` and decodes the
    response body as JSON when possible.
    """

    def __init__(self, host='localhost', port=90):
        self.host = host
        self.port = port

    def get_serv_addr(self):
        """Return the base server URL, e.g. ``http://localhost:90/``."""
        return 'http://%s:%s/' % (
            self.host,
            self.port,
        )

    def call_handler(self, handler, *args, **kwargs):
        """Call *handler* with positional path args and query kwargs.

        ``postdata`` may be supplied as a keyword argument to issue a
        POST instead of a GET.  Returns the decoded JSON payload, the
        raw body when it is not JSON, or ``None`` when the connection or
        read fails.  HTTP-level errors (4xx/5xx) are re-raised.
        """
        url = '%s%s/' % (self.get_serv_addr(), handler)
        # 'postdata' rides in kwargs but must not leak into the query
        # string; pop with a default replaces the old bare ``except:``.
        postdata = kwargs.pop('postdata', None)
        for arg in args:
            url += '%s/' % arg
        params = urlencode(kwargs)
        url = '%s?%s' % (url, params)
        # Lazy %-args: the string is only built if debug logging is on.
        logging.debug("Request url: %s", url)
        try:
            response = urlopen(url, postdata)
        except HTTPError:
            # Propagate HTTP errors unchanged (original re-raised too).
            raise
        except Exception:
            # Connection-level failure: keep the original best-effort
            # contract of returning None rather than raising.
            return None
        # Reading data:
        try:
            body = response.read()
        except Exception:
            return None
        # Decoding to JSON:
        try:
            return json.loads(body)
        except ValueError:
            # Not JSON -- hand back the raw body (original behaviour).
            return body
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`."""

import warnings

from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook

# Warn once at import time: merely importing this shim is deprecated.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`",
    DeprecationWarning,
    stacklevel=2,
)


class GKEClusterHook(GKEHook):
    """This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook`."""

    # NOTE(review): the class-level messages point at
    # `hooks.container.GKEHook` while the module docstring and import use
    # `hooks.kubernetes_engine` -- confirm which replacement path is the
    # intended one.
    def __init__(self, *args, **kwargs):
        # A second warning fires when the deprecated class is actually
        # instantiated (not just imported).
        warnings.warn(
            "This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook`.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
t Image
# NOTE(review): the line above is the tail of an import statement
# truncated by this chunk boundary (presumably ``... import Image``).
from io import BytesIO

from werkzeug.utils import secure_filename

from ..tf2.models import all_classes, TF2BodyGroup, TF2EquipRegion
from ..mods.models import ModClassModel, ModImage
from ..models import get_or_create
from app import db, sentry


def list_from_vdf_dict(dictionary):
    """Return the keys of *dictionary* whose value is a positive number.

    VDF blobs encode "enabled" flags as key -> 1 mappings; keys that are
    absent, None, or <= 0 are dropped.
    """
    return_list = []
    for dict_item, number in dictionary.items():
        if number is not None and number > 0:
            return_list.append(dict_item)
    return return_list


def extract_and_image(zip_in, db_record):
    """
    Extracts the uploaded zip files and generates an image and thumbnail
    from the given files, then fills in *db_record* from the manifest.

    :param zip_in: filename of the uploaded Workshop zip (relative to the
        configured upload folder).
    :param db_record: the mod database row to populate.
    :return: the updated *db_record*, or None on any validation failure
        (an error is flashed to the user in that case).
    """
    input_folder = current_app.config['UPLOADED_WORKSHOPZIPS_DEST']
    output_folder = current_app.config['OUTPUT_FOLDER_LOCATION']
    mod_id = db_record.id
    print "Starting conversion: {}".format(zip_in)
    zip_filename = os.path.join(input_folder, zip_in)
    # If we have a zip file, grab the manifest
    if zipfile.is_zipfile(zip_filename):
        with zipfile.ZipFile(zip_filename, "r") as zip_file:
            # Reject archives whose *uncompressed* size exceeds ~105 MB.
            if sum(f.file_size for f in zip_file.infolist()) < 105000000:
                try:
                    print "Opening manifest"
                    manifest_stream = zip_file.open('manifest.txt')
                    manifest_str = BytesIO(manifest_stream.read())
                    manifest = steam.vdf.load(manifest_str).get('asset')
                except KeyError:
                    flash("No manifest, please upload a Workshop zip.", "danger")
                    return
                except zipfile.BadZipfile:
                    flash("Archive is corrupt, please try repackaging your item before trying again.", "danger")
                    return
                print "Converting manifest. vdf -> dict"
            else:
                # NOTE(review): message says "min size" but the check is a
                # maximum -- wording looks wrong (runtime string left as-is).
                flash("Zip is too large when extracted, min size is ~100MB", "danger")
                return
    else:
        flash('Not a zip: {}'.format(zip_filename), "danger")
        return
    name = manifest['name']
    try:
        icon = manifest['ImportSession']['icon']
    except KeyError:
        icon = None
    if icon:
        # 'icon' can contain a lot of backslashes for reasons unknown to man, we'll get rid of them here.
        icon = ntpath.normpath(icon.replace('\\', ntpath.sep))
        iconUnix = os.path.normpath(icon.replace('\\', os.path.sep))
    # List of files we want to extract and later pack into a VPK
    to_extract = []
    # Start extracting
    print "Start extracting"
    with zipfile.ZipFile(zip_filename) as zip_open:
        for infile in zip_open.namelist():
            # Only extract the contents of the game, materials or models folder
            allowed_extracts = ['game', 'materials', 'models']
            # Path-traversal guard: no parent references or absolute paths.
            if '..' in infile or infile.startswith('/'):
                flash("Error", "danger")
                return
            if ntpath.dirname(infile).split(ntpath.sep)[0] in allowed_extracts:
                to_extract.append(infile)
        # How many to extract
        print "{} files to extract.".format(len(to_extract))
        # Do extractings
        print "Extracting."
        safe_name = secure_filename(name)
        folder_name = "{mod_id}".format(mod_id=mod_id)
        os.path.altsep = '\\'
        zip_open.extractall(os.path.join(output_folder, folder_name), to_extract)
        if icon:
            # Load the icon into a byte stream
            print "Reading TGA image."
            # The zip may store the icon path with either separator style.
            try:
                tga_f = BytesIO(zip_open.read(iconUnix))
            except KeyError:
                tga_f = BytesIO(zip_open.read(icon))
            img = Image.open(tga_f)
            # Save the image as a PNG
            print "Saving large PNG image"
            filename = "backpack_icon_large.png"
            img.save(os.path.join(output_folder, folder_name, filename))
            backpack_icon_large = ModImage(filename, db_record.id, 0)
            db.session.add(backpack_icon_large)
            # Resize the image to make a thumbnail
            print "Resizing image"
            img.thumbnail((128, 128), Image.ANTIALIAS)
            # Save the thumbnail
            print "Saving small PNG image"
            filename = "backpack_icon.png"
            img.save(os.path.join(output_folder, folder_name, filename))
            backpack_icon = ModImage(filename, db_record.id, 1)
            db.session.add(backpack_icon)
    # Fetch desired item info from manifest
    items_game_info = manifest['ImportSession']['ItemSchema']
    equip_regions = []
    equip_region = items_game_info.get('equip_region')
    if equip_region:
        equip_regions.append(equip_region)
    else:
        equip_region_dict = items_game_info.get('equip_regions')
        if equip_region_dict:
            equip_regions += list_from_vdf_dict(equip_region_dict)
    visuals = items_game_info.get('visuals')
    bodygroups = []
    if visuals:
        bodygroups_dict = visuals.get('player_bodygroups')
        if bodygroups_dict:
            bodygroups += list_from_vdf_dict(bodygroups_dict)
    used_by_classes = items_game_info.get('used_by_classes')
    used_by_classes = list_from_vdf_dict(used_by_classes)
    used_by_classes = [i.lower() for i in used_by_classes]
    model_player = items_game_info.get('model_player')
    class_models = {}
    # NOTE(review): ``len(...) is 1`` is an identity comparison that only
    # works thanks to CPython small-int caching; should be ``== 1``.
    if used_by_classes and len(used_by_classes) is 1:
        if model_player:
            class_models.update({used_by_classes[0].lower(): model_player})
        else:
            return
    elif not used_by_classes or len(used_by_classes) > 1:
        if not used_by_classes:
            used_by_classes = all_classes
        model_player_per_class = items_game_info.get('model_player_per_class')
        model_player_per_class = dict((k.lower(), v) for k, v in model_player_per_class.iteritems())
        for tf2_class in used_by_classes:
            if tf2_class.title() in all_classes:
                if model_player_per_class:
                    class_model = model_player_per_class.get(tf2_class)
                elif model_player:
                    class_model = model_player
                else:
                    continue
                class_and_model = {tf2_class: class_model}
                class_models.update(class_and_model)
    # Update database record
    db_record.name = safe_name
    db_record.pretty_name = manifest['ImportSession']['name']
    db_record.manifest_steamid = int(manifest['steamid'], 16)
    db_record.item_slot = "misc"  # Only miscs have Workshop zips to date
    db_record.image_inventory = items_game_info.get('image_inventory')
    if bodygroups:
        for bodygroup in bodygroups:
            bg_db = TF2BodyGroup.query.get(bodygroup)
            if bg_db:
                db_record.bodygroups.append(bg_db)
    if equip_regions:
        for er in equip_regions:
            er_db = TF2EquipRegion.query.get(er)
            if er_db:
                db_record.equip_regions.append(er_db)
    if class_models:
        for class_name, model in class_models.items():
            db_record.class_model[class_name] = (get_or_create(db.session,
                                                               ModClassModel,
                                                               mod_id=mod_id,
                                                               class_name=class_name,
                                                               model_path=model))
    # And we're fin
    print "Done: {}".format(db_record.zip_file)
    db_record.completed = True
    return db_record


def vpk_package(folder):
    """Pack *folder* into a VPK with the external vpk binary, then delete
    the folder.  Aborts the request with HTTP 500 (after reporting to
    Sentry) if the packer fails."""
    try:
        check_call([os.path.abspath(current_app.config['VPK_BINARY_PATH']), folder])
    except CalledProcessError:
        sentry.captureException()
        abort(500)
    shutil.rmtree(folder)


def rename_copy(ext_list, dest_format):
    """Copy files to their destination paths for every extension.

    :param ext_list: extensions substituted into each path template.
    :param dest_format: mapping of source template -> destination template,
        each containing an ``{ext}`` placeholder.  Destination directories
        are created on demand.
    """
    for extension in ext_list:
        for mod_path, replacement_path in dest_format.items():
            to_rename = mod_path.format(ext=extension)
            rename_dest = replacement_path.format(ext=extension)
            dest_directory = os.path.dirname(rename_dest)
            if not os.path.exists(dest_directory):
                os.makedirs(dest_directory)
            shutil.copyfile(to_rename, rename_dest)


# NOTE(review): definition truncated by the chunk boundary.
def backpack_icon(output_folder, input_folder, backpack_extensions, image_inventor
# =============================================================================
# 2013+ Copyright (c) Kirill Smorodinnikov <shaitkir@gmail.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# =============================================================================

import sys
sys.path.insert(0, "")  # for running from cmake

import pytest
from conftest import set_property, raises, make_session

import elliptics

# Expected enum values; test_flags checks the bindings expose exactly these.
io_flags = set((elliptics.io_flags.default,
                elliptics.io_flags.append,
                elliptics.io_flags.prepare,
                elliptics.io_flags.commit,
                elliptics.io_flags.overwrite,
                elliptics.io_flags.nocsum,
                elliptics.io_flags.plain_write,
                elliptics.io_flags.nodata,
                elliptics.io_flags.cache,
                elliptics.io_flags.cache_only,
                elliptics.io_flags.cache_remove_from_disk))

command_flags = set((elliptics.command_flags.default,
                     elliptics.command_flags.direct,
                     elliptics.command_flags.nolock))

exceptions_policy = set((elliptics.exceptions_policy.no_exceptions,
                         elliptics.exceptions_policy.throw_at_start,
                         elliptics.exceptions_policy.throw_at_wait,
                         elliptics.exceptions_policy.throw_at_get,
                         elliptics.exceptions_policy.throw_at_iterator_end,
                         elliptics.exceptions_policy.default_exceptions))

# NOTE(review): ``elliptics.filters.positive`` is listed twice; harmless in
# a set but probably one entry was meant to be something else.
filters = set((elliptics.filters.positive,
               elliptics.filters.positive,
               elliptics.filters.positive_with_ack,
               elliptics.filters.positive_final,
               elliptics.filters.negative,
               elliptics.filters.negative_with_ack,
               elliptics.filters.negative_final,
               elliptics.filters.all,
               elliptics.filters.all_with_ack,
               elliptics.filters.all_final))

checkers = set((elliptics.checkers.no_check,
                elliptics.checkers.at_least_one,
                elliptics.checkers.all,
                elliptics.checkers.quorum))


class TestSession:
    """Tests for elliptics.Session properties and flag enumerations."""

    def test_flags(self):
        # The bindings must expose exactly the enum members listed above.
        assert set(elliptics.io_flags.values.values()) == io_flags
        assert set(elliptics.command_flags.values.values()) == command_flags
        assert set(elliptics.exceptions_policy.values.values()) == exceptions_policy
        assert set(elliptics.filters.values.values()) == filters
        assert set(elliptics.checkers.values.values()) == checkers

    @pytest.mark.parametrize("prop, value", [
        ('timeout', 5),
        ('groups', []),
        ('exceptions_policy', elliptics.exceptions_policy.default_exceptions),
        ('cflags', 0),
        ('ioflags', 0),
        ('timestamp', elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1)),
        ('trace_id', 0),
        ('user_flags', 0)])
    def test_properties_default(self, server, simple_node, prop, value):
        # A fresh session must come up with these defaults.
        session = elliptics.Session(node=simple_node)
        assert getattr(session, prop) == value

    @pytest.mark.parametrize('prop, setter, getter, values', [
        ('groups', 'set_groups', 'get_groups', (
            [], range(1, 100), range(1, 100000), range(10, 10000))),
        ('cflags', 'set_cflags', 'get_cflags', command_flags),
        ('ioflags', 'set_ioflags', 'get_ioflags', io_flags),
        ('exceptions_policy', 'set_exceptions_policy',
         'get_exceptions_policy', tuple(exceptions_policy) + (
             elliptics.exceptions_policy.throw_at_start |
             elliptics.exceptions_policy.throw_at_wait,
             elliptics.exceptions_policy.throw_at_start |
             elliptics.exceptions_policy.throw_at_wait |
             elliptics.exceptions_policy.throw_at_get |
             elliptics.exceptions_policy.throw_at_iterator_end)),
        ('timeout', 'set_timeout', 'get_timeout', (
            28376487, 2 ** 63 - 1)),
        ('timestamp', 'set_timestamp', 'get_timestamp', (
            elliptics.Time(0, 0),
            elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1),
            elliptics.Time(238689126897, 1723861827))),
        ('trace_id', None, None, (
            0, 32423946, 2 ** 32 - 1)),
        ('user_flags', 'set_user_flags', 'get_user_flags', (
            0, 438975345, 2 ** 64 - 1))])
    def test_properties(self, server, simple_node,
                        prop, setter, getter, values):
        # Each property must round-trip through both attribute access and
        # the explicit setter/getter pair (where one exists).
        session = elliptics.Session(node=simple_node)
        assert type(session) == elliptics.Session
        for value in values:
            set_property(session, prop, value,
                         setter=setter,
                         getter=getter)

    def test_resetting_timeout(self, server, simple_node):
        session = make_session(node=simple_node,
                               test_name='TestSession.test_resetting_timeout')
        assert session.timeout == 5  # check default timeout value
        session.timeout = 1  # set different value
        assert session.timeout == 1  # check that the value has been set
        session.timeout = 0  # set timeout to 0 which should reset to default
        assert session.timeout == 5  # check default timeout value

    @pytest.mark.parametrize("prop, value", [
        ('cflags', 2 ** 64),
        ('ioflags', 2 ** 32),
        ('exceptions_policy', 2 ** 32),
        ('timeout', 2 ** 63),
        ('trace_id', 2 ** 64),
        ('user_flags', 2 ** 64)])
    def test_properties_out_of_limits(self, server, simple_node, prop, value):
        # Values one past each property's width must raise OverflowError.
        # NOTE(review): the string form of pytest.raises is deprecated in
        # modern pytest; kept as-is here.
        session = elliptics.Session(simple_node)
        pytest.raises(OverflowError,
                      "set_property(session, '{0}', {1})"
                      .format(prop, value))

    def test_clone(self, server, simple_node):
        # clone() must copy every configurable property of the session.
        orig_s = make_session(node=simple_node,
                              test_name='TestSession.test_clone')

        orig_s.groups = [1, 2, 3]
        orig_s.timeout = 13
        orig_s.exceptions_policy = elliptics.exceptions_policy.throw_at_wait
        orig_s.cflags = elliptics.command_flags.direct
        orig_s.ioflags = elliptics.io_flags.overwrite
        orig_s.timestamp = elliptics.Time(213, 415)
        orig_s.trace_id = 731
        orig_s.user_flags = 19731

        clone_s = orig_s.clone()

        assert clone_s.groups == orig_s.groups == [1, 2, 3]
        assert clone_s.timeout == orig_s.timeout == 13
        assert clone_s.exceptions_policy == orig_s.exceptions_policy == \
            elliptics.exceptions_policy.throw_at_wait
        assert clone_s.cflags == orig_s.cflags == elliptics.command_flags.direct
        assert clone_s.ioflags == orig_s.ioflags == elliptics.io_flags.overwrite
        assert clone_s.timestamp == orig_s.timestamp == elliptics.Time(213, 415)
        assert clone_s.trace_id == orig_s.trace_id == 731
        assert clone_s.user_flags == orig_s.user_flags == 19731
" datestxt = np.loadtxt(filename, dtype=str) dates = [] for i in datestxt: dates.append(dt.datetime.strptime(i, "%m/%d/%Y")) return pd.TimeSeries(index=dates, data=dates) GTS_DATES = _cache_dates() def getMonthNames(): return(['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC']) def getYears(funds): years=[] for date in funds.index: if(not(date.year in years)): years.append(date.year) return(years) def getMonths(funds,year): months=[] for date in funds.index: if((date.year==year) and not(date.month in months)): months.append(date.month) return(months) def getDays(funds,year,month): days=[] for date in funds.index: if((date.year==year) and (date.month==month)): days.append(date) return(days) def getDaysBetween(ts_start, ts_end): days=[] for i in range(0,(ts_end-ts_start).days): days.append(ts_start+timedelta(days=1)*i) return(days) def getFirstDay(funds,year,month): for date in funds.index: if((date.year==year) and (date.month==month)): return(date) return('ERROR') def getLastDay(funds,year,month): return_date = 'ERROR' for date in funds.index: if((date.year==year) and (date.month==month)): return_date = date return(return_date) def getNextOptionClose(day, trade_days, offset=0): #get third friday in month of day #get first of month year_off=0 if day.month+offset > 12: year_off = 1 offset = offset - 12 first = dt.datetime(day.year+year_off, day.month+offset, 1, hour=16) #get weekday day_num = first.weekday() #get first friday (friday - weekday) add 7 if less than 1 dif = 5 - day_num if dif < 1: dif = dif+7 #move to third friday dif = dif + 14 friday = first+dt.timedelta(days=(dif-1)) #if friday is a holiday, op
tions expire then if friday in tr
ade_days: month_close = first + dt.timedelta(days=dif) else: month_close = friday #if day is past the day after that if month_close < day: return_date = getNextOptionClose(day, trade_days, offset=1) else: return_date = month_close return(return_date) def getLastOptionClose(day, trade_days): start = day while getNextOptionClose(day, trade_days)>=start: day= day - dt.timedelta(days=1) return(getNextOptionClose(day, trade_days)) def getNYSEoffset(mark, offset): ''' Returns NYSE date offset by number of days ''' mark = mark.replace(hour=0, minute=0, second=0, microsecond=0) i = GTS_DATES.index.searchsorted(mark, side='right') # If there is no exact match, take first date in past if GTS_DATES[i] != mark: i -= 1 ret = GTS_DATES[i + offset] ret = ret.replace(hour=16) return ret def getNYSEdays(startday = dt.datetime(1964,7,5), endday = dt.datetime(2020,12,31), timeofday = dt.timedelta(0)): """ @summary: Create a list of timestamps between startday and endday (inclusive) that correspond to the days there was trading at the NYSE. This function depends on a separately created a file that lists all days since July 4, 1962 that the NYSE has been open, going forward to 2020 (based on the holidays that NYSE recognizes). @param startday: First timestamp to consider (inclusive) @param endday: Last day to consider (inclusive) @return list: of timestamps between startday and endday on which NYSE traded @rtype datetime """ start = startday - timeofday end = endday - timeofday dates = GTS_DATES[start:end] ret = [x + timeofday for x in dates] return(ret) def getNextNNYSEdays(startday, days, timeofday): """ @summary: Create a list of timestamps from startday that is days days long that correspond to the days there was trading at NYSE. This function depends on the file used in getNYSEdays and assumes the dates within are in order. 
@param startday: First timestamp to consider (inclusive) @param days: Number of timestamps to return @return list: List of timestamps starting at startday on which NYSE traded @rtype datetime """ try: # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt" filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt') except KeyError: print "Please be sure to set the value for QS in config.sh or\n" print "in local.sh and then \'source local.sh\'.\n" datestxt = np.loadtxt(filename,dtype=str) dates=[] for i in datestxt: if(len(dates)<days): if((dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)>=startday): dates.append(dt.datetime.strptime(i,"%m/%d/%Y")+timeofday) return(dates) def getPrevNNYSEday(startday, timeofday): """ @summary: This function returns the last valid trading day before the start day, or returns the start day if it is a valid trading day. This function depends on the file used in getNYSEdays and assumes the dates within are in order. @param startday: First timestamp to consider (inclusive) @param days: Number of timestamps to return @return list: List of timestamps starting at startday on which NYSE traded @rtype datetime """ try: # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt" filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt') except KeyError: print "Please be sure to set the value for QS in config.sh or\n" print "in local.sh and then \'source local.sh\'.\n" datestxt = np.loadtxt(filename,dtype=str) #''' Set return to first day ''' dtReturn = dt.datetime.strptime( datestxt[0],"%m/%d/%Y")+timeofday #''' Loop through all but first ''' for i in datestxt[1:]: dtNext = dt.datetime.strptime(i,"%m/%d/%Y") #''' If we are > startday, then use previous valid day ''' if( dtNext > startday ): break dtReturn = dtNext + timeofday return(dtReturn) def ymd2epoch(year, month, day): """ @summary: Convert YMD info into a unix epoch value. 
@param year: The year @param month: The month @param day: The day @return epoch: number of seconds since epoch """ return(t.mktime(dt.date(year,month,day).timetuple())) def epoch2date(ts): """ @summary Convert seconds since epoch into date @param ts: Seconds since epoch @return thedate: A date object """ tm = t.gmtime(ts) return(dt.date(tm.tm_year,tm.tm_mon,tm.tm_mday)) def _trade_dates(dt_start, dt_end, s_period): ''' @summary: Generate dates on which we need to trade @param c_strat: Strategy config class @param dt_start: Start date @param dt_end: End date ''' ldt_timestamps = getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16) ) # Use pandas reindex method instead # Note, dates are index as well as values, we select based on index # but return values since it is a numpy array of datetimes instead of # pandas specific. ts_dates = pd.TimeSeries(index=ldt_timestamps, data=ldt_timestamps) # These are the dates we want if s_period[:2] == 'BW': # special case for biweekly dr_range = pd.DateRange(dt_start, dt_end, timeRule=s_period[1:]) dr_range = np.asarray(dr_range) li_even = np.array(range(len(dr_range))) dr_range = dr_range[li_even[li_even % 2 == 0]] else: dr_range = pd.DateRange(dt_start, dt_end, timeRule=s_period) dr_range = np.asarray(dr_range) # Warning, we MUST copy the date range, if we modify it it will be returned # in it's modified form the next time we use it. dr_range = np.copy(dr_range) dr_range += pd.DateOffset(hours=16) ts_dates = ts_dates.reindex( dr_range, method='bfill' ) ldt_dates = ts_dates[ts_dates.notnull()].values #Make unique sdt_unique = set() ldt_dates = [x for x in ldt_dates if x
'''Custom models for the block_comment app.'''
import difflib

from django.contrib.comments.models import Comment
from django.db import models
from django.utils.translation import ugettext as _

from block_comment.diff_match_patch import diff_match_patch


class BlockComment(Comment):
    '''
    ``BlockComment`` extends Django's comments framework to store
    information about the block of text the comment relates to.
    '''
    # Position in the full text that the block the comment relates to begins at
    index = models.PositiveIntegerField(null=True, blank=True)
    # The text of the block, used for determining diffs/orphans
    regarding = models.TextField(blank=True)

    def get_match_index(self, haystack):
        '''
        Returns the character index of the closest match to this comment's
        ``regarding`` block within *haystack*, or -1 when no sufficiently
        close match exists.
        '''
        def get_block_index(i):
            '''
            Character offset of block *i*; ``haystack`` and ``blocks``
            are accessible by closure.
            '''
            return haystack.index(blocks[i])

        needle = self.regarding.strip()
        matches = []
        blocks = haystack.split("\n")
        block_index = None

        # Check for an exact match first
        if needle in blocks:
            return get_block_index(blocks.index(needle))

        # If that didn't work, do a basic diff comparison block-by-block
        for p in blocks:
            comp = difflib.SequenceMatcher(None, needle, p)
            if comp.ratio() > .85:
                matches.append(blocks.index(comp.b))

        if len(matches) == 1:
            block_index = matches.pop()
        elif len(matches) == 0:
            # No matches, can we find a potential match with a smarter
            # matching algorithm?
            matcher = diff_match_patch()
            index = matcher.match_main(haystack, needle, 0)
            if index > -1:
                return index
        else:
            # We've got multiple options, let's narrow them down with
            # a smarter matching algorithm.
            matcher = diff_match_patch()
            for i in tuple(matches):
                if matcher.match_main(blocks[i], needle, self.index) < 0:
                    # No match, discard this option
                    matches.remove(i)
            # Unless we've only got one match left, we'll fall through to -1
            if len(matches) == 1:
                block_index = matches[0]

        # BUG FIX: block_index may legitimately be 0 (a match in the very
        # first block); the old truthiness test (``if block_index:``)
        # treated that as "no match" and wrongly returned -1.
        if block_index is not None:
            return get_block_index(block_index)

        # If we can't find anything, return -1
        return -1

    def relink_comment(self, haystack, save=True):
        '''
        Re-anchor this comment against a new version of the full text:
        update ``index`` to the fresh match position, or set it to None
        when the block can no longer be found (orphaned comment).
        '''
        index = self.get_match_index(haystack)

        if index == self.index:
            # Unchanged -- nothing to persist.
            return None
        elif index > -1:
            self.index = index
        else:
            self.index = None

        if save:
            self.save()
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 22 07:54:05 2014

@author: charleslelosq
Carnegie Institution for Science
"""
# NOTE(review): this script references several names that are not defined
# in this chunk: ``residual_melt`` (only ``residual`` is defined here),
# ``pars`` (outside ``residual``), ``pathbeg``, ``ratioOH_H2O``,
# ``eseratioOH_H2O`` and ``os`` -- confirm they come from ``spectratools``
# / ``pylab`` star-imports or are missing.

import sys
sys.path.append("/Users/charleslelosq/Documents/RamPy/lib-charles/")

import csv
import numpy as np
import scipy
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
from StringIO import StringIO
from scipy import interpolate

# to fit spectra we use the lmfit software of Matt Newville, CARS,
# university of Chicago, available on the web
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit, fit_report

from spectratools import *  # Charles' libraries and functions

from Tkinter import *
import tkMessageBox
from tkFileDialog import askopenfilename

#### We define a set of functions that will be used for fitting data
#### unfortunatly, as we use lmfit (which is convenient because it can fix or release
#### easily the parameters) we are not able to use arrays for parameters...
#### so it is a little bit long to write all the things, but in a way quite robust also...
#### gaussian and pseudovoigt functions are available in spectratools
#### if you need a voigt, fix the gaussian-to-lorentzian ratio to 1 in the
#### parameter definition before doing the data fit


def residual(pars, x, data=None, eps=None):
    """Two-Gaussian model residual for lmfit.

    Returns (model, peak1, peak2) when *data* is None, the raw residual
    when *eps* is None, and the eps-weighted residual otherwise.
    """
    # unpack parameters:
    #  extract .value attribute for each parameter
    a1 = pars['a1'].value
    a2 = pars['a2'].value
    f1 = pars['f1'].value
    f2 = pars['f2'].value
    l1 = pars['l1'].value
    l2 = pars['l2'].value

    # Gaussian model
    peak1 = gaussian(x,a1,f1,l1)
    peak2 = gaussian(x,a2,f2,l2)
    model = peak1 + peak2

    if data is None:
        return model, peak1, peak2
    if eps is None:
        return (model - data)
    return (model - data)/eps

##### CORE OF THE CALCULATION BELOW

#### CALLING THE DATA NAMES
tkMessageBox.showinfo(
            "Open file",
            "Please open the list of spectra")

Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
with open(filename) as inputfile:
    results = list(csv.reader(inputfile)) # we read the data list

#### LOOP FOR BEING ABLE TO TREAT MULTIPLE DATA
#### WARNING: OUTPUT ARE AUTOMATICALLY GENERATED IN A DIRECTORY CALLED "DECONV"
#### (see end) THAT SHOULD BE PRESENT !!!!!!!!!!
for lg in range(len(results)):
    name = str(results[lg]).strip('[]')
    name = name[1:-1] # to remove unwanted ""
    sample = np.genfromtxt(name) # get the sample to deconvolute

    # we set here the lower and higher bonds for the interest region
    lb = 4700 ### MAY NEED TO AJUST THAT
    hb = 6000

    interestspectra = sample[np.where((sample[:,0] > lb)&(sample[:,0] < hb))]
    # take ese as a percentage, we assume that the treatment was made
    # correctly for error determination... if not, please put sigma = None
    ese0 = interestspectra[:,2]/abs(interestspectra[:,1])
    # normalise spectra to maximum, easier to handle after
    interestspectra[:,1] = interestspectra[:,1]/np.amax(interestspectra[:,1])*100
    sigma = abs(ese0*interestspectra[:,1]) #calculate good ese
    #sigma = None # you can activate that if you are not sure about the errors

    xfit = interestspectra[:,0] # region to be fitted
    data = interestspectra[:,1] # region to be fitted

    params = Parameters()
    ####################### FOR MELT:
    ####################### COMMENT IF NOT WANTED
    #               (Name,  Value,  Vary,   Min,  Max,  Expr)
    params.add_many(('a1',   1,   True,  0,      None,  None),
                    ('f1',   5200,  True, 750,    None,  None),
                    ('l1',   1,   True,  0,      None,  None),
                    ('a2',   1,  True,  0,      None,  None),
                    ('f2',   5400,  True, None,   None,  None),
                    ('l2',   1,   True,  None,   None,  None))

    # NOTE(review): ``residual_melt`` is undefined here; probably should
    # be ``residual`` (defined above) -- confirm.
    result = minimize(residual_melt, params, args=(xfit, data)) # fit data with leastsq model from scipy

    model = fit_report(params) # the report
    yout, peak1,peak2,= residual_melt(params,xfit) # the different peaks

    #### We just calculate the different areas up to 4700 cmm-1 and those of the gaussians
    # Select interest areas for calculating the areas of OH and H2Omol peaks
    intarea45 = sample[np.where((sample[:,0]> 4100) & (sample[:,0]<4700))]
    area4500 = np.trapz(intarea45[:,1],intarea45[:,0])
    # We assume that RELATIVE errors on areas are globally equal to 1/sqrt(Area)
    esearea4500 = 1/sqrt(area4500)

    # now for the gaussians
    # unpack parameters:
    #  extract .value attribute for each parameter
    # NOTE(review): ``pars`` is undefined at this scope; probably
    # ``result.params`` or ``params`` was intended -- confirm.
    a1 = pars['a1'].value
    a2 = pars['a2'].value
    l1 = pars['l1'].value
    l2 = pars['l2'].value

    AireG1 = gaussianarea(a1,l1)
    AireG2 = gaussianarea(a2,l2)

    ##### WE DO A NICE FIGURE THAT CAN BE IMPROVED FOR PUBLICATION
    fig = figure()
    plot(sample[:,0],sample[:,1],'k-')
    plot(xfit,yout,'r-')
    plot(xfit,peak1,'b-')
    plot(xfit,peak2,'b-')

    xlim(lb,hb)
    ylim(0,np.max(sample[:,1]))
    xlabel("Wavenumber, cm$^{-1}$", fontsize = 18, fontweight = "bold")
    ylabel("Absorption, a. u.", fontsize = 18, fontweight = "bold")

    text(4000,np.max(intarea45[:,1])+0.03*np.max(intarea45[:,1]),('Area OH: \n'+'%.1f' % area4500),color='b',fontsize = 16)
    text(4650,a1 + 0.05*a1,('Area pic 1$: \n'+ '%.1f' % AireG1),color='b',fontsize = 16)
    # NOTE(review): ``ratioOH_H2O`` / ``eseratioOH_H2O`` are never computed
    # in this chunk.
    text(5000,a2 + 0.05*a2,('OH/H$_2$O$_{mol}$: \n'+'%.3f' % ratioOH_H2O+'\n+/-'+'%.3f' % eseratioOH_H2O),color='r',fontsize = 16)

    ##### output of data, fitted peaks, parameters, and the figure in pdf
    ##### all goes into the ./deconv/ folder
    name.rfind('/')
    nameout = name[name.rfind('/')+1::]
    namesample = nameout[0:nameout.find('.')]
    # NOTE(review): ``pathbeg`` is undefined in this chunk.
    pathint = str('/deconv/') # the output folder
    ext1 = '_ydec.txt'
    ext2 = '_params.txt'
    ext3 = '.pdf'
    pathout1 = pathbeg+pathint+namesample+ext1
    pathout2 = pathbeg+pathint+namesample+ext2
    pathout3 = pathbeg+pathint+namesample+ext3
    matout = np.vstack((xfit,data,yout,peak1,peak2))
    matout = np.transpose(matout)
    np.savetxt(pathout1,matout) # saving the arrays of spectra
    fd = os.open( pathout2, os.O_RDWR|os.O_CREAT ) # Open a file and create it if it do not exist
    fo = os.fdopen(fd, "w+") # Now get a file object for the above file.
    fo.write(model) # write the parameters in it
    fo.close()
    savefig(pathout3) # save the figure
s nn
# NOTE(review): the line above is the tail of an import truncated by the
# chunk boundary (presumably ``... as nn`` / ``from torch import nn``).
# The ``math`` module used below is assumed to be imported in the hidden
# head -- confirm.

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import SelectiveKernel, ConvBnAct, create_attn
from .registry import register_model
from .resnet import ResNet


def _cfg(url='', **kwargs):
    """Build a default pretrained-model config dict, overridable via kwargs."""
    return {
        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv1', 'classifier': 'fc',
        **kwargs
    }


# Per-variant pretrained weight URLs / input configs.
default_cfgs = {
    'skresnet18': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'),
    'skresnet34': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'),
    'skresnet50': _cfg(),
    'skresnet50d': _cfg(
        first_conv='conv1.0'),
    'skresnext50_32x4d': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'),
}


class SelectiveKernelBasic(nn.Module):
    """ResNet BasicBlock with a Selective Kernel conv as the first conv."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
                 sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
        super(SelectiveKernelBasic, self).__init__()

        sk_kwargs = sk_kwargs or {}
        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
        assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
        assert base_width == 64, 'BasicBlock doest not support changing base width'
        first_planes = planes // reduce_first
        outplanes = planes * self.expansion
        first_dilation = first_dilation or dilation

        self.conv1 = SelectiveKernel(
            inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs)
        # Second conv has no activation; the block activates after the
        # residual add (standard pre-sum BN pattern).
        conv_kwargs['act_layer'] = None
        self.conv2 = ConvBnAct(
            first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs)
        self.se = create_attn(attn_layer, outplanes)
        self.act = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_block = drop_block
        self.drop_path = drop_path

    def zero_init_last_bn(self):
        # Zero the last BN gamma so the block starts as identity.
        nn.init.zeros_(self.conv2.bn.weight)

    def forward(self, x):
        shortcut = x
        x = self.conv1(x)
        x = self.conv2(x)
        if self.se is not None:
            x = self.se(x)
        if self.drop_path is not None:
            x = self.drop_path(x)
        if self.downsample is not None:
            shortcut = self.downsample(shortcut)
        x += shortcut
        x = self.act(x)
        return x


class SelectiveKernelBottleneck(nn.Module):
    """ResNet Bottleneck with a Selective Kernel conv as the 3x3 conv."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1,
                 first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None,
                 aa_layer=None, drop_block=None, drop_path=None):
        super(SelectiveKernelBottleneck, self).__init__()

        sk_kwargs = sk_kwargs or {}
        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
        width = int(math.floor(planes * (base_width / 64)) * cardinality)
        first_planes = width // reduce_first
        outplanes = planes * self.expansion
        first_dilation = first_dilation or dilation

        self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
        self.conv2 = SelectiveKernel(
            first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality,
            **conv_kwargs, **sk_kwargs)
        # Final 1x1 conv has no activation; activation happens after the
        # residual add.
        conv_kwargs['act_layer'] = None
        self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs)
        self.se = create_attn(attn_layer, outplanes)
        self.act = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_block = drop_block
        self.drop_path = drop_path

    def zero_init_last_bn(self):
        # Zero the last BN gamma so the block starts as identity.
        nn.init.zeros_(self.conv3.bn.weight)

    def forward(self, x):
        shortcut = x
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        if self.se is not None:
            x = self.se(x)
        if self.drop_path is not None:
            x = self.drop_path(x)
        if self.downsample is not None:
            shortcut = self.downsample(shortcut)
        x += shortcut
        x = self.act(x)
        return x


def _create_skresnet(variant, pretrained=False, **kwargs):
    """Instantiate a ResNet with the given variant's default config."""
    return build_model_with_cfg(
        ResNet, variant, pretrained,
        default_cfg=default_cfgs[variant],
        **kwargs)


@register_model
def skresnet18(pretrained=False, **kwargs):
    """Constructs a Selective Kernel ResNet-18 model.

    Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
    variation splits the input channels to the selective convolutions to keep param count down.
    """
    sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
    model_args = dict(
        block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs),
        zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnet18', pretrained, **model_args)


@register_model
def skresnet34(pretrained=False, **kwargs):
    """Constructs a Selective Kernel ResNet-34 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict( block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) return _create_skresnet('skresnet34', pretrained, **model_args) @register_model def skresnet50(pretrained=False, **kwargs): """Constructs a Select Kernel ResNet-50 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) return _create_skresnet('skresnet50', pretrained, **model_args) @register_model def skresnet50d(pretrained=False, **kwargs): """Constructs a Select Kernel ResNet-50-D model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) return _create_skresnet('skresnet50d', pretrained, **model_args) @register_model def skresnext50_32x4d(pretrained=False, **kwargs): """Constructs a Select Kernel ResNeXt50-32x4d model. 
This should be equivalent to the SKNet-50 model in the Select Kernel Paper """ sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) return _create_skresnet('skresn
"""Registry mapping a user-supplied reason name to the module that handles it."""
from gitbarry.reasons import start, finish, switch  # , publish

# Dispatch table consumed by the CLI: reason-name -> handler module.
# 'publish' is not wired up yet (see commented import above).
REASONS = {
    'start': start,
    'finish': finish,
    'switch': switch,
    # 'publish': publish,
}
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/lazylibrarian/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.

import lazylibrarian

from lazylibrarian import logger, common, formatter

# parse_qsl moved to urlparse module in v2.6
try:
    from urlparse import parse_qsl #@UnusedImport
except:
    from cgi import parse_qsl #@Reimport

import lib.oauth2 as oauth
import lib.pythontwitter as twitter


class TwitterNotifier:
    """Sends LazyLibrarian event notifications as tweets.

    OAuth state (token key/secret) is persisted in the module-level
    lazylibrarian.TWITTER_USERNAME / TWITTER_PASSWORD settings, so the
    two-step authorization (_get_authorization then _get_credentials)
    mutates global config as a side effect.
    """

    # Application (consumer) credentials registered with Twitter.
    consumer_key = "208JPTMMnZjtKWA4obcH8g"
    consumer_secret = "BKaHzaQRd5PK6EH8EqPZ1w8mz6NSk9KErArarinHutk"

    # Standard Twitter OAuth 1.0a endpoints.
    REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
    ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
    AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
    SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'

    def notify_snatch(self, title):
        """Tweet a 'snatch' notification for *title* if enabled in config."""
        if lazylibrarian.TWITTER_NOTIFY_ONSNATCH:
            self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH]+': '+title)

    def notify_download(self, title):
        """Tweet a 'download' notification for *title* if enabled in config."""
        if lazylibrarian.TWITTER_NOTIFY_ONDOWNLOAD:
            self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD]+': '+title)

    def test_notify(self):
        """Send a test tweet, bypassing the USE_TWITTER on/off switch (force=True)."""
        return self._notifyTwitter("This is a test notification from LazyLibrarian / " + formatter.now(), force=True)

    def _get_authorization(self):
        """Step 1 of OAuth: fetch a temporary request token.

        On success, stores the temp token/secret in global config and
        returns the URL the user must visit to authorize the app.
        Returns None implicitly on failure (only logs).
        """
        signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
        oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
        oauth_client = oauth.Client(oauth_consumer)
        logger.info('Requesting temp token from Twitter')
        resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
        if resp['status'] != '200':
            logger.info('Invalid respond from Twitter requesting temp token: %s' % resp['status'])
        else:
            request_token = dict(parse_qsl(content))
            # Temp token is stashed in the username/password settings until
            # _get_credentials swaps it for the real access token.
            lazylibrarian.TWITTER_USERNAME = request_token['oauth_token']
            lazylibrarian.TWITTER_PASSWORD = request_token['oauth_token_secret']
            return self.AUTHORIZATION_URL+"?oauth_token="+ request_token['oauth_token']

    def _get_credentials(self, key):
        """Step 2 of OAuth: exchange temp token + PIN *key* for an access token.

        Reads the temp token saved by _get_authorization from global config,
        overwrites it with the permanent access token on success.
        Returns True on success, False otherwise.
        """
        request_token = {}
        request_token['oauth_token'] = lazylibrarian.TWITTER_USERNAME
        request_token['oauth_token_secret'] = lazylibrarian.TWITTER_PASSWORD
        request_token['oauth_callback_confirmed'] = 'true'
        token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
        token.set_verifier(key)
        logger.info('Generating and signing request for an access token using key '+key)
        signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
        oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
        logger.info('oauth_consumer: '+str(oauth_consumer))
        oauth_client = oauth.Client(oauth_consumer, token)
        logger.info('oauth_client: '+str(oauth_client))
        resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
        logger.info('resp, content: '+str(resp)+','+str(content))
        access_token = dict(parse_qsl(content))
        logger.info('access_token: '+str(access_token))
        logger.info('resp[status] = '+str(resp['status']))
        if resp['status'] != '200':
            logger.error('The request for a token with did not succeed: '+str(resp['status']))
            return False
        else:
            logger.info('Your Twitter Access Token key: %s' % access_token['oauth_token'])
            logger.info('Access Token secret: %s' % access_token['oauth_token_secret'])
            lazylibrarian.TWITTER_USERNAME = access_token['oauth_token']
            lazylibrarian.TWITTER_PASSWORD = access_token['oauth_token_secret']
            return True

    def _send_tweet(self, message=None):
        """Post *message* via the twitter API; returns False on any exception."""
        username=self.consumer_key
        password=self.consumer_secret
        access_token_key=lazylibrarian.TWITTER_USERNAME
        access_token_secret=lazylibrarian.TWITTER_PASSWORD
        logger.info(u"Sending tweet: "+message)
        api = twitter.Api(username, password, access_token_key, access_token_secret)
        try:
            api.PostUpdate(message)
        except Exception, e:  # py2 except syntax; broad catch keeps notifications best-effort
            logger.error(u"Error Sending Tweet: %s" %e)
            return False
        return True

    def _notifyTwitter(self, message='', force=False):
        """Prefix and send *message* unless Twitter notifications are disabled.

        force=True bypasses the USE_TWITTER setting (used by test_notify).
        """
        prefix = lazylibrarian.TWITTER_PREFIX
        if not lazylibrarian.USE_TWITTER and not force:
            return False
        return self._send_tweet(prefix+": "+message)

# NOTE(review): exports the class itself, not an instance — callers appear
# expected to instantiate it (Sick Beard notifier convention); confirm
# against the notifiers package before "fixing" this.
notifier = TwitterNotifier
import urllib import urllib2 from bs4 import BeautifulSoup textToSearch = 'g
orillaz' query = urllib.quote(textToSearch) url = "https://www.youtube.com/results?search_query=" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html) for vid in soup.findAll(attrs={'class':'yt-uix
-tile-link'}): print 'https://www.youtube.com' + vid['href']
itch_fault(self, *args): """ Set switch down (Deletes the OVS switch bridge) Args: index: Index of the switch dpid to take out """ index = args[0] dpid = self.dpids[index] switch_name = self.topo.switches_by_id[index] switch = next((switch for switch in self.net.switches if switch.name == switch_name), None) if switch is None: return self.dump_switch_flows(switch) name = '%s:%s DOWN' % (self.topo.switches_by_id[index], self.dpids[index]) self.topo_watcher.add_switch_fault(index, name) switch.stop() switch.cmd(self.VSCTL, 'del-controller', switch.name, '|| true') self.assertTrue( self.wait_for_prometheus_var( 'of_dp_disconnections_total', 1, dpid=dpid), 'DP %s not detected as DOWN' % dpid) self.net.switches.remove(switch) def random_switch_fault(self, *args): """Randomly take out an available switch""" dpid_list = self.topo_watcher.get_eligable_switch_events() if len(self.stack_roots.keys()) <= 1: # Prevent only root from being destroyed sorted_roots = dict(sorted(self.stack_roots.items(), key=lambda item: item[1])) for root_index in sorted_roots.keys(): root_dpid = self.dpids[root_index] if root_dpid in dpid_list: dpid_list.remove(root_dpid) break if not dpid_list: return dpid_item_index = self.rng.randrange(len(dpid_list)) dpid_item = dpid_list[dpid_item_index] dpid_index = self.dpids.index(dpid_item) self.create_switch_fault(dpid_index) def dp_link_fault(self, *args): """ Create a fault/tear down the stack link between two switches Args: src_dp_index: Index of the source DP of the stack link dst_dp_index: Index of the destination DP of the stack """ src_i = args[0] dst_i = args[1] src_dpid = self.dpids[src_i] dst_dpid = self.dpids[dst_i] s1_name = self.topo.switches_by_id[src_i] s2_name = self.topo.switches_by_id[dst_i] for port, link in self.topo.ports[s1_name].items(): status = self.stack_port_status(src_dpid, s1_name, port) if link[0] == s2_name and status == 3: peer_port = link[1] self.set_port_down(port, src_dpid) self.set_port_down(peer_port, dst_dpid) 
self.wait_for_stack_port_status(src_dpid, s1_name, port, 4) self.wait_for_stack_port_status(dst_dpid, s2_name, peer_port, 4) name = 'Link %s[%s]:%s-%s[%s]:%s DOWN' % ( s1_name, src_dpid, port, s2_name, dst_dpid, peer_port) self.topo_watcher.add_link_fault(src_i, dst_i, name) return def random_dp_link_fault(self, *args): """Randomly create a fault for a DP link""" link_list = self.topo_watcher.get_eligable_link_events() if not link_list: return index = self.rng.randrange(len(link_list)) dplink = link_list[index] srcdp = self.dpids.index(dplink[0]) dstdp = self.dpids.index(dplink[1]) self.dp_link_fault(srcdp, dstdp) def create_proportional_random_fault_event(self): """Create a fault-event randomly based on the numbe
r of link and switch events available""" f
uncs = [] for _ in self.topo_watcher.get_eligable_link_events(): funcs.append(self.random_dp_link_fault) for _ in self.topo_watcher.get_eligable_switch_events(): funcs.append(self.random_switch_fault) i = self.rng.randrange(len(funcs)) funcs[i]() def create_random_fault_event(self): """Randomly choose an event type to fault on""" funcs = [] if self.topo_watcher.get_eligable_link_events(): funcs.append(self.random_dp_link_fault) if self.topo_watcher.get_eligable_switch_events(): funcs.append(self.random_switch_fault) if not funcs: return i = self.rng.randrange(len(funcs)) funcs[i]() def network_function(self, fault_events=None, num_faults=1): """ Test the network by slowly tearing it down different ways Args: fault_events: (optional) list of tuples of fault event functions and the parameters to use in the given order; instead of randomly choosing parts of the network to break num_faults: (optional) number of faults to cause before each evaluation is made """ self.verify_stack_up() self.fault_events = fault_events self.num_faults = num_faults self.rng = random.Random(self.seed) self.topo_watcher = TopologyWatcher( self.dpids, self.switch_links, self.host_links, self.NUM_VLANS, self.host_information, self.routers) # Calculate stats (before any tear downs) self.calculate_connectivity() self.assertTrue(self.topo_watcher.is_connected(), ( 'Host connectivity does not match predicted')) # Start tearing down the network if self.fault_events: # Do Specified list of faults (in order) until failure or fault list completed fault_index = 0 while fault_index < len(self.fault_events): for _ in range(self.num_faults): event_func, params = self.fault_events[fault_index] fault_index += 1 event_func(*params) self.calculate_connectivity() self.assertTrue(self.topo_watcher.is_connected(), ( 'Host connectivity does not match predicted')) else: # Continue creating fault until none are available or expected connectivity does not # match real connectivity while 
self.topo_watcher.continue_faults(): for _ in range(self.num_faults): self.create_proportional_random_fault_event() self.calculate_connectivity() self.assertTrue(self.topo_watcher.is_connected(), ( 'Host connectivity does not match predicted')) def tearDown(self, ignore_oferrors=False): """Make sure to dump the watcher information too""" if self.topo_watcher: self.topo_watcher.dump_info(self.tmpdir) super(FaucetFaultToleranceBaseTest, self).tearDown(ignore_oferrors=ignore_oferrors) class FaucetSingleFaultTolerance2DPTest(FaucetFaultToleranceBaseTest): """Run a range of fault-tolerance tests for topologies on 2 DPs""" NUM_DPS = 2 NUM_HOSTS = 4 NUM_VLANS = 2 N_DP_LINKS = 1 STACK_ROOTS = {0: 1} ASSUME_SYMMETRIC_PING = False class FaucetSingleFaultTolerance3DPTest(FaucetFaultToleranceBaseTest): """Run a range of fault-tolerance tests for topologies on 3 DPs""" NUM_DPS = 3 NUM_HOSTS = 6 NUM_VLANS = 2 N_DP_LINKS = 1 STACK_ROOTS = {0: 1} class FaucetSingleFaultTolerance4DPTest(FaucetFaultToleranceBaseTest): """Run a range of fault-tolerance tests for topologies on 4 DPs""" NUM_DPS = 4 NUM_HOSTS = 4 NUM_VLANS = 1 N_DP_LINKS = 1 STACK_ROOTS = {0: 1} def test_ftp2_all_random_switch_failures(self): """Test fat-tree-pod-2 randomly tearing down only switches""" fault_events = [(self.random_switch_fault, (None,)) for _ in range(self.NUM_DPS)] stack_roots = {2*i: 1 for i in range(self.NUM_DPS//2)} self.set_up(networkx.cycle_graph(self.NUM_DPS), stack_roots) self.network_function(fault_events=fault_events) def test_ftp2_all_random_link_failures(self): """Test fat-tree-pod-2 randomly tearing down only switch-switch links""" network_graph = networkx.cycle_graph(self.NUM_DPS) fault_events = [(self.random_dp_link_fault, (None,)) for _ in range(len(network_graph.edges()))] stack_roots = {2*i: 1 for i in range
: self._loadPage("formPage") inputElement = self.driver.find_element_by_xpath("//input[@id='notWorking']") self.assertFalse(inputElement.is_enabled()) inputElement = self.driver.find_element_by_xpath("//input[@id='working']") self.assertTrue(inputElement.is_enabled()) def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(self): self._loadPage("formPage") disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1") self.assertFalse(disabledTextElement1.is_enabled()) disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2") self.assertFalse(disabledTextElement2.is_enabled()) disabledSubmitElement = self.driver.find_element_by_id("disabledSubmitElement") self.assertFalse(disabledSubmitElement.is_enabled()) def testShouldIndicateWhenATextAreaIsDisabled(self): self._loadPage("formPage") textArea = self.driver.find_element_by_xpath("//textarea[@id='notWorkingArea']") self.assertFalse(textArea.is_enabled()) def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(self): self._loadPage("formPage") disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1") try: disabledTextElement1.send_keys("foo") self.fail("Should have thrown exception") except: pass self.assertEqual("", disabledTextElement1.text) disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2") try: disabledTextElement2.send_keys("bar") self.fail("Should have thrown exception") except: pass self.assertEqual("", disabledTextElement2.text) def testShouldIndicateWhenASelectIsDisabled(self): self._loadPage("formPage") enabled = self.driver.find_element_by_name("selectomatic") disabled = self.driver.find_element_by_name("no-select") self.assertTrue(enabled.is_enabled()) self.assertFalse(disabled.is_enabled()) def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(self): self._loadPage("formPage") checkbox = self.driver.find_element_by_xpath("//input[@id='checky']") 
self.assertTrue(checkbox.get_attribute("checked") is None) checkbox.click() self.assertEqual("true", checkbox.get_attribute("checked")) def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(self): self._loadPage("formPage") neverSelected = self.driver.find_element_by_id("cheese") initiallyNotSelected = self.driver.find_element_by_id("peas") initiallySelected = self.driver.find_element_by_id("cheese_and_peas") self.assertTrue(neverSelected.get_attribute("selected") is None, "false") self.assertTrue(initiallyNotSelected.get_attribute("selected") is None, "false") self.assertEqual("true", initiallySelected.get_attribute("selected"), "true") initiallyNotSelected.click() self.assertTrue(neverSelected.get_attribute("selected") is None) self.assertEqual("true", initiallyNotSelected.get_attribute("selected")) self.assertTrue(initiallySelected.get_attribute("selected") is None) def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(self): self._loadPage("formPage") selectBox = self.driver.find_element_by_xpath("//select[@name='selectomatic']") options = selectBox.find_elements_by_tag_name("option") one = options[0] two = options[1] self.assertTrue(one.is_selected()) self.assertFalse(two.is_selected()) self.assertEqual("true",
one.get_attribute("selected")) self.assertTrue(two.get_attribute("selected") is None) def testShouldReturnValueOfClassAttributeOfAnElement(self): self._loadPage("xhtmlTest") heading = self.driver.find_element_by_xpath("//h1") classname = heading.get_attribute("class") self.assertEqual("header", classname) # Disabled du
e to issues with Frames #def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(self): # self._loadPage("iframes") # self.driver.switch_to.frame("iframe1") # # wallace = self.driver.find_element_by_xpath("//div[@id='wallace']") # classname = wallace.get_attribute("class") # self.assertEqual("gromit", classname) def testShouldReturnTheContentsOfATextAreaAsItsValue(self): self._loadPage("formPage") value = self.driver.find_element_by_id("withText").get_attribute("value") self.assertEqual("Example text", value) def testShouldReturnTheContentsOfATextAreaAsItsValueWhenSetToNonNorminalTrue(self): self._loadPage("formPage") e = self.driver.find_element_by_id("withText") self.driver.execute_script("arguments[0].value = 'tRuE'", e) value = e.get_attribute("value") self.assertEqual("tRuE", value) def testShouldTreatReadonlyAsAValue(self): self._loadPage("formPage") element = self.driver.find_element_by_name("readonly") readOnlyAttribute = element.get_attribute("readonly") textInput = self.driver.find_element_by_name("x") notReadOnly = textInput.get_attribute("readonly") self.assertNotEqual(readOnlyAttribute, notReadOnly) def testShouldGetNumericAtribute(self): self._loadPage("formPage") element = self.driver.find_element_by_id("withText") self.assertEqual("5", element.get_attribute("rows")) def testCanReturnATextApproximationOfTheStyleAttribute(self): self._loadPage("javascriptPage") style = self.driver.find_element_by_id("red-item").get_attribute("style") self.assertTrue("background-color" in style.lower()) def testShouldCorrectlyReportValueOfColspan(self): self._loadPage("tables") th1 = self.driver.find_element_by_id("th1") td2 = self.driver.find_element_by_id("td2") self.assertEqual("th1", th1.get_attribute("id")) self.assertEqual("3", th1.get_attribute("colspan")) self.assertEqual("td2", td2.get_attribute("id")); self.assertEquals("2", td2.get_attribute("colspan")); def testCanRetrieveTheCurrentValueOfATextFormField_textInput(self): 
self._loadPage("formPage") element = self.driver.find_element_by_id("working") self.assertEqual("", element.get_attribute("value")) element.send_keys("hello world") self.assertEqual("hello world", element.get_attribute("value")) def testCanRetrieveTheCurrentValueOfATextFormField_emailInput(self): self._loadPage("formPage") element = self.driver.find_element_by_id("email") self.assertEqual("", element.get_attribute("value")) element.send_keys("hello@example.com") self.assertEqual("hello@example.com", element.get_attribute("value")) def testCanRetrieveTheCurrentValueOfATextFormField_textArea(self): self._loadPage("formPage") element = self.driver.find_element_by_id("emptyTextArea") self.assertEqual("", element.get_attribute("value")) element.send_keys("hello world") self.assertEqual("hello world", element.get_attribute("value")) @pytest.mark.ignore_chrome def testShouldReturnNullForNonPresentBooleanAttributes(self): self._loadPage("booleanAttributes") element1 = self.driver.find_element_by_id("working") self.assertEqual(None, element1.get_attribute("required")) element2 = self.driver.find_element_by_id("wallace") self.assertEqual(None, element2.get_attribute("nowrap")) @pytest.mark.ignore_ie def testShouldReturnTrueForPresentBooleanAttributes(self): self._loadPage("booleanAttributes") element1 = self.driver.find_element_by_id("emailRequired") self.assertEqual("true", element1.get_attribute("required")) element2 = self.driver.find_element_by_id("emptyTextAreaRequi
"""Lists VPC offerings""" from baseCmd import * from baseResponse import * class listVPCOfferingsCmd (baseCmd): typeInfo = {} def __init__(self): self.isAsync = "false" """list VPC offerings by display text""" self.displaytext = None self.typeInfo['displaytext'] = 'string' """list VPC offerings by id""" self.id = None self.typeInfo['id'] = 'uuid' """true if need to list only default VPC offerings. Default value is false""" self.isdefault = None self.typeInfo['isdefault'] = 'boolean' """List by keyword""" self.keyword = None self.typeInfo['keyword'] = 'string' """list VPC offerings by name""" self.name = None self.typeInfo['name'] = 'string' """""" self.page = None self.typeInfo['page'] = 'integer' """""" self.pagesize = None self.typeInfo['pag
esize'] = 'integer' """list VPC offerings by state""" se
lf.state = None self.typeInfo['state'] = 'string' """list VPC offerings supporting certain services""" self.supportedservices = [] self.typeInfo['supportedservices'] = 'list' self.required = [] class listVPCOfferingsResponse (baseResponse): typeInfo = {} def __init__(self): """the id of the vpc offering""" self.id = None self.typeInfo['id'] = 'string' """the date this vpc offering was created""" self.created = None self.typeInfo['created'] = 'date' """an alternate display text of the vpc offering.""" self.displaytext = None self.typeInfo['displaytext'] = 'string' """indicates if the vpc offering supports distributed router for one-hop forwarding""" self.distributedvpcrouter = None self.typeInfo['distributedvpcrouter'] = 'boolean' """true if vpc offering is default, false otherwise""" self.isdefault = None self.typeInfo['isdefault'] = 'boolean' """the name of the vpc offering""" self.name = None self.typeInfo['name'] = 'string' """The secondary system compute offering id used for the virtual router""" self.secondaryserviceofferingid = None self.typeInfo['secondaryserviceofferingid'] = 'string' """The secondary system compute offering name used for the virtual router""" self.secondaryserviceofferingname = None self.typeInfo['secondaryserviceofferingname'] = 'string' """The primary system compute offering id used for the virtual router""" self.serviceofferingid = None self.typeInfo['serviceofferingid'] = 'string' """The primary system compute offering name used for the virtual router""" self.serviceofferingname = None self.typeInfo['serviceofferingname'] = 'string' """state of the vpc offering. 
Can be Disabled/Enabled""" self.state = None self.typeInfo['state'] = 'string' """indicated if the offering can support region level vpc""" self.supportsregionLevelvpc = None self.typeInfo['supportsregionLevelvpc'] = 'boolean' """the list of supported services""" self.service = [] class capability: def __init__(self): """"can this service capability value can be choosable while creatine network offerings""" self.canchooseservicecapability = None """"the capability name""" self.name = None """"the capability value""" self.value = None class provider: def __init__(self): """"uuid of the network provider""" self.id = None """"true if individual services can be enabled/disabled""" self.canenableindividualservice = None """"the destination physical network""" self.destinationphysicalnetworkid = None """"the provider name""" self.name = None """"the physical network this belongs to""" self.physicalnetworkid = None """"services for this provider""" self.servicelist = None """"state of the network provider""" self.state = None class service: def __init__(self): """"the service name""" self.name = None """"the list of capabilities""" self.capability = [] """"can this service capability value can be choosable while creatine network offerings""" self.canchooseservicecapability = None """"the capability name""" self.name = None """"the capability value""" self.value = None """"the service provider name""" self.provider = [] """"uuid of the network provider""" self.id = None """"true if individual services can be enabled/disabled""" self.canenableindividualservice = None """"the destination physical network""" self.destinationphysicalnetworkid = None """"the provider name""" self.name = None """"the physical network this belongs to""" self.physicalnetworkid = None """"services for this provider""" self.servicelist = None """"state of the network provider""" self.state = None
from managers import sl2gen
from utils.ssh import SSH
from paramiko import SSHException
import sys
import logging

log = logging.getLogger("sl2.ion")


def launch_ion(tsuite):
    """Launch ION daemons.

    Args:
      tsuite: tsuite runtime."""
    gdbcmd_path = tsuite.conf["slash2"]["ion_gdb"]
    sl2gen.launch_gdb_sl(tsuite, "ion", tsuite.sl2objects["ion"], "sliod",
                         gdbcmd_path)


def create_ion(tsuite):
    """Create ION file systems.

    Runs slmkfs on each configured ION host over SSH inside a screen
    session, then waits for it to finish.

    Args:
      tsuite: tsuite runtime."""

    for ion in tsuite.sl2objects["ion"]:

        # Create monolithic reference/replace dict for the command template.
        repl_dict = dict(tsuite.src_dirs, **tsuite.build_dirs)
        repl_dict = dict(repl_dict, **ion)

        # Create remote connection to server
        try:
            user, host = tsuite.user, ion["host"]
            log.debug("Connecting to {0}@{1}".format(user, host))
            ssh = SSH(user, host, '')
            cmd = """
            mkdir -p {datadir}
            mkdir -p {fsroot}
            {slmkfs} -Wi -u {fsuuid} -I {site_id} {fsroot}"""\
            .format(**repl_dict)

            sock_name = "ts.ion." + ion["id"]
            sl2gen.sl_screen_and_wait(tsuite, ssh, cmd, sock_name)
            log.info("Finished creating {0}!".format(ion["name"]))
            ssh.close()
        # Fixed Python-2-only `except SSHException, e:` syntax; the bound
        # exception object was never used, so it is no longer captured.
        except SSHException:
            log.fatal("Error with remote connection to {0} with res {1}!"\
                .format(ion["host"], ion["name"]))
            tsuite.shutdown()


def kill_ion(tsuite):
    """Kill ION daemons.

    Args:
      tsuite: runtime tsuite."""
    sl2gen.stop_slash2_socks(tsuite, "ion", tsuite.sl2objects["ion"],
                             "slictl", "sliod")
correct name and colour. self.assertEqual(class_details(CLASS_DEATH_KNIGHT), {'colour': 0xC41F3B, 'name': 'Death Knight'}) def test_for_shaman_class(self): # Makes sure that when the id for the Shaman class is passed we get the # correct name and colour. self.assertEqual(class_details(CLASS_SHAMAN), {'colour': 0x0070DE, 'name': 'Shaman'}) def test_for_mage_class(self): # Makes sure that when the id for the Mage class is passed
we get the # correct name and colour. self.assertEqual(class_details(CLASS_MAGE), {'colour': 0x69CCF0, 'name': 'Mage'}) def test_for_warlock_class(self): # Makes sure that when the id for the Warlock class is passed we get the # correct name and colour. self.assertEqual(class_details(CLASS_WARLOCK), {'colour': 0x9482C9, 'name': 'Warlock'}) def test_for_monk_class(self): # Makes sure that when
the id for the Monk class is passed we get the # correct name and colour. self.assertEqual(class_details(CLASS_MONK), {'colour': 0x00FF96, 'name': 'Monk'}) def test_for_druid_class(self): # Makes sure that when the id for the Druid class is passed we get the # correct name and colour. self.assertEqual(class_details(CLASS_DRUID), {'colour': 0xFF7D0A, 'name': 'Druid'}) def test_for_demon_hunter_class(self): # Makes sure that when the id for the Demon Hunter class is passed we get the # correct name and colour. self.assertEqual(class_details(CLASS_DEMON_HUNTER), {'colour': 0xA330C9, 'name': 'Demon Hunter'}) def test_for_faction_name(self): # Makes sure that when the id for either the Horde or Alliance faction is # passsed we get the correct name in return. self.assertEqual(faction_details(FACTION_ALLIANCE), 'Alliance') self.assertEqual(faction_details(FACTION_HORDE), 'Horde') def test_for_achievement_progress(self): # Passes in some mock API data and expects it to return as completed. # Tests for accuracy on each id check, not API data. 
self.maxDiff = None input_data_horde_sample = { "achievements": { "achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093, 2092, 2091, 11194, 11581, 11195, 11874, 5356, 5353, 5349, 11191, 11192, 11874] } } input_data_alliance_sample = { "achievements": { "achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093, 2092, 2091, 11194, 11581, 11195, 11874, 5343, 5339, 5334, 11192, 11874, 11875] } } expected_horde_data = { 'challenging_look': 'Completed', 'keystone_master': 'Completed', 'keystone_conqueror': 'Completed', 'keystone_challenger': 'Completed', 'arena_challenger': 'Completed', 'arena_rival': 'Completed', 'arena_duelist': 'Completed', 'arena_gladiator': 'Completed', 'rbg_2400_name': AC_HIGH_WARLORD_NAME, 'rbg_2000_name': AC_CHAMPION_NAME, 'rbg_1500_name': AC_FIRST_SERGEANT_NAME, 'rbg_2400': 'Completed', 'rbg_2000': 'Completed', 'rbg_1500': 'Completed', 'en_feat': 'Cutting Edge', 'tov_feat': 'Ahead of the Curve', 'nh_feat': 'Cutting Edge', 'tos_feat': 'Ahead of the Curve' } expected_alliance_data = { 'challenging_look': 'Completed', 'keystone_master': 'Completed', 'keystone_conqueror': 'Completed', 'keystone_challenger': 'Completed', 'arena_challenger': 'Completed', 'arena_rival': 'Completed', 'arena_duelist': 'Completed', 'arena_gladiator': 'Completed', 'rbg_2400_name': AC_GRAND_MARSHALL_NAME, 'rbg_2000_name': AC_LIEAUTENANT_COMMANDER_NAME, 'rbg_1500_name': AC_SERGEANT_MAJOR_NAME, 'rbg_2400': 'Completed', 'rbg_2000': 'Completed', 'rbg_1500': 'Completed', 'en_feat': 'Ahead of the Curve', 'tov_feat': 'Ahead of the Curve', 'nh_feat': 'Cutting Edge', 'tos_feat': 'Cutting Edge' } self.assertEqual(character_achievements(input_data_horde_sample, 'Horde'), expected_horde_data) self.assertEqual(character_achievements(input_data_alliance_sample, 'Alliance'), expected_alliance_data) def test_pvp_progression(self): # Passes in some mock API data and expects it to return an object with the correct data. # Tests for accuracy on each data check, not API data. 
self.maxDiff = None sample_data = { "pvp": { "brackets": { "ARENA_BRACKET_2v2": { "rating": 5928, }, "ARENA_BRACKET_3v3": { "rating": 1858, }, "ARENA_BRACKET_RBG": { "rating": 5999, }, "ARENA_BRACKET_2v2_SKIRMISH": { "rating": 2985, } } }, "totalHonorableKills": 888399 } expected_data = { '2v2': 5928, '2v2s': 2985, '3v3': 1858, 'rbg': 5999, 'kills': 888399 } self.assertEqual(character_arena_progress(sample_data), expected_data) def test_pve_progression(self): # Passes in some mock API data and expects it to return an object with the correct data. # Tests for accuracy on each data check, not API data. self.maxDiff = None sample_data = { "progression": { "raids": [ { "id": 8026, "bosses": [{ "lfrKills": 19, "normalKills": 8, "heroicKills": 5, "mythicKills": 3, }, { "lfrKills": 3, "normalKills": 7, "heroicKills": 3, "mythicKills": 2, }] }, { "id": 8440, "bosses": [{ "lfrKills": 7, "normalKills": 1, "heroicKills": 1, "mythicKills": 0, }] }, { "id": 8524, "bosses": [{ "lfrKills": 3, "normalKills": 2, "heroicKills": 4, "mythicKills": 1, }] }, { "id": 8025, "bosses": [{ "lfrKills": 3, "normalKills": 2, "heroicKills": 1, "mythicKills": 0, }, { "lfrKills": 5, "normalKills": 2, "heroicKills": 2, "mythicKills": 0, }] }] } } expected_data = { 'emerald_nightmare':{ 'lfr':2, 'normal':2, 'heroic':2, 'mythic':2, 'bosses':2 }, 'trial_of_valor':{ 'lfr':1, 'normal':1, 'heroic':1, 'mythic':0, 'bosses':1 }, 'the_nighthold':{ 'lfr':2,
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys

from nova import flags

import sqlalchemy
from migrate.versioning import api as versioning_api

try:
    from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
    try:
        # python-migration changed location of exceptions after 1.6.3
        # See LP Bug #717467
        from migrate import exceptions as versioning_exceptions
    except ImportError:
        sys.exit(_("python-migrate is not installed. Exiting."))

FLAGS = flags.FLAGS


def db_sync(version=None):
    """Upgrade the database schema to ``version`` (latest when None).

    Calling db_version() first ensures the database is placed under
    version control before the upgrade is attempted.
    """
    db_version()
    repo_path = _find_migrate_repo()
    return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version)


def db_version():
    """Return the database's current migration version.

    If the database is not yet under version control, probe for the
    known tables: if they all exist the schema predates migrations and
    is stamped as version 1, otherwise it is stamped as version 0.
    """
    repo_path = _find_migrate_repo()
    try:
        return versioning_api.db_version(FLAGS.sql_connection, repo_path)
    except versioning_exceptions.DatabaseNotControlledError:
        # If we aren't version controlled we may already have the database
        # in the state from before we started version control, check for that
        # and set up version_control appropriately
        meta = sqlalchemy.MetaData()
        engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
        meta.reflect(bind=engine)
        try:
            # assert is used deliberately here as control flow: the first
            # missing table raises AssertionError and selects version 0.
            for table in ('auth_tokens', 'zones', 'export_devices',
                          'fixed_ips', 'floating_ips',
                          'instances', 'key_pairs', 'networks',
                          'projects', 'quotas',
                          'security_group_instance_association',
                          'security_group_rules', 'security_groups',
                          'services', 'migrations',
                          'users', 'user_project_association',
                          'user_project_role_association',
                          'user_role_association',
                          'volumes'):
                assert table in meta.tables
            return db_version_control(1)
        except AssertionError:
            return db_version_control(0)


def db_version_control(version=None):
    """Place the database under version control at ``version``.

    Returns the version that was stamped.
    """
    repo_path = _find_migrate_repo()
    versioning_api.version_control(FLAGS.sql_connection, repo_path, version)
    return version


def _find_migrate_repo():
    """Get the path for the migrate repository."""
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'migrate_repo')
    assert os.path.exists(path)
    return path
ClosePane(self, pane_info): """ Destroys or hides the pane depending on its flags. :param `pane_info`: a L{AuiPaneInfo} instance. """ # if we were maximized, restore if pane_info.IsMaximized(): self.RestorePane(pane_info) if pane_info.frame: if self._agwFlags & AUI_MGR_ANIMATE_FRAMES: pane_info.frame.FadeOut() # first, hide the window if pane_info.window and pane_info.window.IsShown(): pane_info.window.Show(False) # make sure that we are the parent of this window if pane_info.window and pane_info.window.GetParent() != self._frame: pane_info.window.Reparent(self._frame) # if we have a frame, destroy it if pane_info.frame: pane_info.frame.Destroy() pane_info.frame = None elif pane_info.IsNotebookPage(): # if we are a notebook page, remove ourselves... notebook = self._notebooks[pane_info.notebook_id] id = notebook.GetPageIndex(pane_info.window) notebook.RemovePage(id) # now we need to either destroy or hide the pane to_destroy = 0 if pane_info.IsDestroyOnClose(): to_destroy = pane_info.window self.DetachPane(to_destroy) else: if isinstance(pane_info.window, auibar.AuiToolBar) and pane_info.IsFloating(): tb = pane_info.window if pane_info.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT]: tb.SetAGWWindowStyleFlag(tb.GetAGWWindowStyleFlag() | AUI_TB_VERTICAL) pane_info.Dock().Hide() if pane_info.IsNotebookControl(): notebook = self._notebooks[pane_info.notebook_id] while notebook.GetPageCount(): window = notebook.GetPage(0) notebook.RemovePage(0) info = self.GetPane(window) if info.IsOk(): info.notebook_id = -1 info.dock_direction = AUI_DOCK_NONE # Note: this could change our paneInfo reference ... self.ClosePane(info) if to_destroy: to_destroy.Destroy() def MaximizePane(self, pane_info, savesizes=True): """ Maximizes the input pane. :param `pane_info`: a L{AuiPaneInfo} instance. :param `savesizes`: whether to save previous dock sizes. 
""" if savesizes: self.SavePreviousDockSizes(pane_info) for p in self._panes: # save hidden state p.SetFlag(p.savedHiddenState, p.HasFlag(p.optionHidden)) if not p.IsToolbar() and not p.IsFloating(): p.Restore() # hide the pane, because only the newly # maximized pane should show p.Hide() pane_info.previousDockPos = pane_info.dock_pos # mark ourselves maximized pane_info.Maximize() pane_info.Show() self._has_maximized = True # last, show the window if pane_info.window and not pane_info.window.IsShown(): pane_info.window.Show(True) def SavePreviousDockSizes(self, pane_info): """ Stores the previous dock sizes, to be used in a "restore" action later. :param `pane_info`: a L{AuiPaneInfo} instance. """ for d in self._docks: if not d.toolbar: for p in d.panes: p.previousDockSize = d.size if pane_info is not p: p.SetFlag(p.needsRestore, True) def RestorePane(self, pane_info): """ Restores the input pane from a previous maximized or minimized state. :param `pane_info`: a L{AuiPaneInfo} instance. """ # restore all the panes for p in self._panes: if not p.IsToolbar(): p.SetFlag(p.optionHidden, p.HasFlag(p.savedHiddenState)) pane_info.SetFlag(pane_info.needsRestore, True) # mark ourselves non-maximized pane_info.Restore() self._has_maximized = False self._has_minimized = False # last, show the window if pane_info.window and not pane_info.window.IsShown(): pane_info.window.Show(True) def RestoreMaximizedPane(self): """ Restores the current maximized pane (if any). """ # restore all the panes for p in self._panes: if p.IsMaximized(): self.RestorePane(p) break def ActivatePane(self, window): """ Activates the pane to which `window` is associated. :param `window`: a `wx.Window` derived window. 
""" if self.GetAGWFlags() & AUI_MGR_ALLOW_ACTIVE_PANE: while window: ret, self._panes = SetActivePane(self._panes, window) if ret: break window = window.GetParent() self.RefreshCaptions() def CreateNotebook(self): """ Creates an automatic L{AuiNotebook} when a pane is docked on top of another pane. """ notebook = auibook.AuiNotebook(self._frame, -1, wx.Point(0, 0), wx.Size(0, 0), agwStyle=self._autoNBStyle) # This is so we can get the tab-drag event. notebook.GetAuiManager().SetMasterManager(self) notebook.SetArtProvider(self._autoNBTabArt.Clone()) self._notebooks.append(notebook) return notebook def SetAutoNotebookTabArt(self, art): """ Sets the default tab art provider for automatic notebooks. :param `art`: a tab art provider. """ for nb in self._notebooks: nb.SetArtProvider(art.Clone()) nb.Refresh() nb.Update() self._autoNBTabArt = art def GetAutoNotebookTabArt(self): """ Returns the default tab art provider for automatic notebooks. """ return self._autoNBTabArt def SetAutoNotebookStyle(self, agwStyle): """ Sets the default AGW-specific window style for automatic notebooks. :param `agwStyle`: the underlying L{AuiNotebook} window style. This can be a combination of the following bits: ==================================== ================================== Flag name Description ==================================== ================================== ``AUI_NB_TOP`` With this style, tabs are drawn along the top of the notebook ``AUI_NB_LEFT`` With this style, tabs are drawn along the left of the notebook. Not implemented yet. ``AUI_NB_RIGHT`` With this style, tabs are drawn along the right of the notebook. Not implemented yet. ``AUI_NB_BOTTOM`` With this style, tabs are drawn along the bottom of the notebook ``AUI_NB_TAB_SPLIT`` Allows the tab control to be split by dragging a tab ``AUI_NB_TAB_MOVE`` Allows a tab to be moved horizontally by dragging ``AUI_NB_TAB_EXTERNAL_MOVE`` Allows a tab to be moved to another tab control ``AUI_NB_TAB_FIXED_WIDTH`` With
this style, all tabs have the same width ``AUI_NB_SCROLL_BUTTONS`` With this style, left and right scroll buttons are displayed ``AUI_NB_WINDOWLIST_BUTTON`` With this style, a drop-down list of windows is available ``AUI_NB_CLOSE_BUTTON`` With this style, a close button is available on the tab bar ``AUI_NB_CLOSE_ON_ACTIVE_TAB`` With this style, a c
lose button is available on the active tab ``AUI_NB_CLOSE_ON_ALL_TABS`` With this style, a close button is available on all tabs ``AUI_NB_MIDDLE_CLICK_CLOSE`` Al
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 29 15:52:33 2014

@author: raffaelerainone

Project Euler 87: count the numbers below a limit expressible as the sum
of a prime square, a prime cube and a prime fourth power.
"""
try:
    # time.clock was removed in Python 3.8; prefer perf_counter.
    from time import perf_counter as _timer
except ImportError:  # Python < 3.3
    from time import clock as _timer
from math import sqrt


def is_prime(n):
    """Return True when ``n`` (>= 2) has no divisor in [2, sqrt(n)]."""
    i = 2
    while i <= sqrt(n):
        if n % i == 0:
            return False
        i += 1
    return True


def count_prime_power_triples(lim=50 * (10 ** 6)):
    """Return the count of distinct x < lim with x = p**2 + q**3 + r**4.

    p, q, r are primes.  The candidate lists use the same exclusive
    bounds as the original script (lim**0.5, lim**0.34, lim**0.25).
    """
    # Primes small enough that p**2 can fit below lim.
    prime_2 = [i for i in range(2, int(lim ** 0.5)) if is_prime(i)]
    # Further restrict for the cube and fourth-power terms.
    prime_3 = [i for i in prime_2 if i < int(lim ** 0.34)]
    prime_4 = [i for i in prime_3 if i < int(lim ** 0.25)]

    # Use a set from the start: duplicates (same x from different
    # triples) must only be counted once.
    found = set()
    for p in prime_2:
        for q in prime_3:
            for r in prime_4:
                x = (p ** 2) + (q ** 3) + (r ** 4)
                if x < lim:
                    found.add(x)
    return len(found)


if __name__ == "__main__":
    # Guarded so importing this module does not run the full search.
    start = _timer()
    print(count_prime_power_triples())
    print(_timer() - start)
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker

from ..type_checked import type_checked

from .int64 import Int64
from .transaction_result_ext import TransactionResultExt
from .transaction_result_result import TransactionResultResult

__all__ = ["TransactionResult"]


@type_checked
class TransactionResult:
    """
    XDR Source Code::

        struct TransactionResult
        {
            int64 feeCharged; // actual fee charged for the transaction

            union switch (TransactionResultCode code)
            {
            case txFEE_BUMP_INNER_SUCCESS:
            case txFEE_BUMP_INNER_FAILED:
                InnerTransactionResultPair innerResultPair;
            case txSUCCESS:
            case txFAILED:
                OperationResult results<>;
            default:
                void;
            }
            result;

            // reserved for future use
            union switch (int v)
            {
            case 0:
                void;
            }
            ext;
        };
    """

    def __init__(
        self,
        fee_charged: Int64,
        result: TransactionResultResult,
        ext: TransactionResultExt,
    ) -> None:
        self.fee_charged = fee_charged
        self.result = result
        self.ext = ext

    def pack(self, packer: Packer) -> None:
        # Fields are packed in XDR declaration order.
        self.fee_charged.pack(packer)
        self.result.pack(packer)
        self.ext.pack(packer)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "TransactionResult":
        fee_charged = Int64.unpack(unpacker)
        result = TransactionResultResult.unpack(unpacker)
        ext = TransactionResultExt.unpack(unpacker)
        return cls(
            fee_charged=fee_charged,
            result=result,
            ext=ext,
        )

    def to_xdr_bytes(self) -> bytes:
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "TransactionResult":
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "TransactionResult":
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.fee_charged == other.fee_charged
            and self.result == other.result
            and self.ext == other.ext
        )

    def __str__(self):
        out = [
            f"fee_charged={self.fee_charged}",
            f"result={self.result}",
            f"ext={self.ext}",
        ]
        # Fix: the joined fields were previously wrapped in a list literal
        # inside the f-string ({[', '.join(out)]}), which rendered Python
        # list brackets AND quotes around the text.  Interpolate the join
        # directly inside literal brackets instead.
        return f"<TransactionResult [{', '.join(out)}]>"
import os

from .ruby import RubyExecutor


class Executor(RubyExecutor):
    """Executor for the Ruby 1.9 runtime."""

    name = 'RUBY19'

    def get_nproc(self):
        # 1 process on Windows ('nt'), -1 everywhere else.
        if os.name == 'nt':
            return 1
        return -1

    def get_security(self):
        from cptbox.syscalls import sys_write
        policy = super(Executor, self).get_security()
        # Permit write() only on file descriptors 1, 2 and 4.
        policy[sys_write] = lambda debugger: debugger.arg0 in (1, 2, 4)
        return policy


initialize = Executor.initialize
import pytest
from gosa.common import Environment
from gosa.common.components import PluginRegistry, ObjectRegistry
# NOTE(review): Environment and os are imported but not used in this
# chunk — confirm whether they are needed before removing.
import os


def pytest_unconfigure(config):
    """Session teardown hook: stop the HTTP service, then shut down plugins."""
    PluginRegistry.getInstance('HTTPService').srv.stop()
    PluginRegistry.shutdown()


@pytest.fixture(scope="session", autouse=True)
def use_test_config():
    """Instantiate the registries once for the whole test session.

    The objects are created purely for their side effects; the local
    names exist only to silence IDE warnings.
    """
    oreg = ObjectRegistry.getInstance()  # @UnusedVariable
    pr = PluginRegistry()  # @UnusedVariable
    cr = PluginRegistry.getInstance("CommandRegistry")  # @UnusedVariable
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Configuration parameter management - xbmc
#------------------------------------------------------------
# tvalacarta
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
# Created by: Jesús (tvalacarta@gmail.com)
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------

import sys
import os

import xbmcplugin
import xbmc

PLATFORM_NAME = "xbmc-plugin"
PLUGIN_NAME = "pelisalacarta"

def get_platform():
    """Return the platform identifier string for this backend."""
    return PLATFORM_NAME

def is_xbmc():
    """Return True: this backend always runs inside XBMC."""
    return True

def get_library_support():
    """Return True: the XBMC backend supports the local library."""
    return True

def get_system_platform():
    """Return the OS platform XBMC is running on
    ("linux", "xbox", "windows", "osx" or "unknown")."""
    import xbmc
    platform = "unknown"
    if xbmc.getCondVisibility( "system.platform.linux" ):
        platform = "linux"
    elif xbmc.getCondVisibility( "system.platform.xbox" ):
        platform = "xbox"
    elif xbmc.getCondVisibility( "system.platform.windows" ):
        platform = "windows"
    elif xbmc.getCondVisibility( "system.platform.osx" ):
        platform = "osx"
    return platform

def open_settings():
    """Open the plugin settings dialog."""
    xbmcplugin.openSettings( sys.argv[ 0 ] )

def get_setting(name):
    """Read a plugin setting by name."""
    return xbmcplugin.getSetting(name)

def set_setting(name,value):
    """Write a plugin setting; failures are silently ignored (best effort)."""
    try:
        xbmcplugin.setSetting(name,value)
    except:
        pass

def get_localized_string(code):
    """Return the localized string for ``code``, UTF-8 encoded when possible."""
    dev = xbmc.getLocalizedString( code )

    try:
        dev = dev.encode ("utf-8") #This only aplies to unicode strings. The rest stay as they are.
    except:
        pass

    return dev

def get_library_path():
    """Return the library path setting, falling back to <data_path>/library."""
    #return os.path.join( get_data_path(), 'library' )
    default = os.path.join( get_data_path(), 'library' )
    value = get_setting("librarypath")
    if value=="":
        value=default
    return value

def get_temp_file(filename):
    """Return an absolute path for ``filename`` inside XBMC's temp dir."""
    return xbmc.translatePath( os.path.join( "special://temp/", filename ))

def get_runtime_path():
    """Return the plugin's runtime (working) directory."""
    return os.getcwd()

def get_data_path():
    """Return the plugin data directory, trying several XBMC locations.

    Falls back through special://home, special://xbmc (portable mode)
    and finally the current working directory (Plex 8)."""
    devuelve = xbmc.translatePath( os.path.join("special://home/","userdata","plugin_data","video",PLUGIN_NAME) )

    # XBMC in portable mode
    if devuelve.startswith("special:"):
        devuelve = xbmc.translatePath( os.path.join("special://xbmc/","userdata","plugin_data","video",PLUGIN_NAME) )

    # Plex 8
    if devuelve.startswith("special:"):
        devuelve = os.getcwd()

    return devuelve

def get_cookie_data():
    """Read and return the raw contents of the cookies.dat file."""
    import os
    ficherocookies = os.path.join( get_data_path(), 'cookies.dat' )

    cookiedatafile = open(ficherocookies,'r')
    cookiedata = cookiedatafile.read()
    cookiedatafile.close();

    return cookiedata

# Test if all the required directories are created
def verify_directories_created():
    """Ensure all data/download/bookmark/library directories exist.

    Empty path settings are populated with defaults first; every mkdir
    is best effort (errors ignored).  Paths on SMB shares are skipped."""
    import logger
    import os

    logger.info("pelisalacarta.core.config.verify_directories_created")

    # Force download path if empty
    download_path = get_setting("downloadpath")
    if download_path=="":
        download_path = os.path.join( get_data_path() , "downloads")
        set_setting("downloadpath" , download_path)

    # Force download list path if empty
    download_list_path = get_setting("downloadlistpath")
    if download_list_path=="":
        download_list_path = os.path.join( get_data_path() , "downloads" , "list")
        set_setting("downloadlistpath" , download_list_path)

    # Force bookmark path if empty
    bookmark_path = get_setting("bookmarkpath")
    if bookmark_path=="":
        bookmark_path = os.path.join( get_data_path() , "bookmarks")
        set_setting("bookmarkpath" , bookmark_path)

    # Create data_path if not exists
    if not os.path.exists(get_data_path()):
        logger.debug("Creating data_path "+get_data_path())
        try:
            os.mkdir(get_data_path())
        except:
            pass

    # Create download_path if not exists
    if not download_path.lower().startswith("smb") and not os.path.exists(download_path):
        logger.debug("Creating download_path "+download_path)
        try:
            os.mkdir(download_path)
        except:
            pass

    # Create download_list_path if not exists
    if not download_list_path.lower().startswith("smb") and not os.path.exists(download_list_path):
        logger.debug("Creating download_list_path "+download_list_path)
        try:
            os.mkdir(download_list_path)
        except:
            pass

    # Create bookmark_path if not exists
    if not bookmark_path.lower().startswith("smb") and not os.path.exists(bookmark_path):
        logger.debug("Creating bookmark_path "+bookmark_path)
        try:
            os.mkdir(bookmark_path)
        except:
            pass

    # Create library_path if not exists
    if not get_library_path().lower().startswith("smb") and not os.path.exists(get_library_path()):
        logger.debug("Creating library_path "+get_library_path())
        try:
            os.mkdir(get_library_path())
        except:
            pass

    # Checks that a directory "xbmc" is not present on platformcode
    old_xbmc_directory = os.path.join( get_runtime_path() , "platformcode" , "xbmc" )
    if os.path.exists( old_xbmc_directory ):
        logger.debug("Removing old platformcode.xbmc directory")
        try:
            import shutil
            shutil.rmtree(old_xbmc_directory)
        except:
            pass
import plog.plog as plg

# Re-export the core plog API under short module-level names.
PLOG = plg.PLOG
plog_color = plg.plog_color
plog = plg.plog


def perr(*msg, delim=" "):
    """Log *msg* at error level (forwards to plog with type=PLOG.err)."""
    plog(*msg, type=PLOG.err, delim=delim)


def pwrn(*msg, delim=" "):
    """Log *msg* at warning level (forwards to plog with type=PLOG.warn)."""
    plog(*msg, type=PLOG.warn, delim=delim)


__all__ = ["PLOG", "plog_color", "plog", "perr", "pwrn"]
lass RegressionTests(unittest.TestCase): def setUp(self): self.con = sqlite.connect(":memory:") def tearDown(self): self.con.close() def CheckPragmaUserVersion(self): # This used to crash pysqlite because this pragma command returns NULL for the column name cur = self.con.cursor() cur.execute("pragma user_version") def CheckPragmaSchemaVersion(self): # This still crashed pysqlite <= 2.2.1 con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES) try: cur = self.con.cursor() cur.execute("pragma schema_version") finally: cur.close() con.close() def CheckStatementReset(self): # pysqlite 2.1.0 to 2.2.0 have the problem that not all statements are # reset before a rollback, but only those that are still in the # statement cache. The others are not accessible from the connection object. con = sqlite.connect(":memory:", cached_statements=5) cursors = [con.cursor() for x in xrange(5)] cursors[0].execute("create table test(x)") for i in range(10): cursors[0].executemany("insert into test(x) values (?)", [(x,) for x in xrange(10)]) for i in range(5): cursors[i].execute(" " * i + "select x from test") con.rollback() def CheckColumnNameWithSpaces(self): cur = self.con.cursor() cur.execute('select 1 as "foo bar [datetime]"') self.assertEqual(cur.description[0][0], "foo bar") cur.execute('select 1 as "foo baz"') self.assertEqual(cur.description[0][0], "foo baz") def CheckStatementFinalizationOnCloseDb(self): # pysqlite versions <= 2.3.3 only finalized statements in the statement # cache when closing the database. statements that were still # referenced in cursors weren't closed an could provoke " # "OperationalError: Unable to close due to unfinalised statements". 
con = sqlite.connect(":memory:") cursors = [] # default statement cache size is 100 for i in range(105): cur = con.cursor() cursors.append(cur) cur.execute("select 1 x union select " + str(i)) con.close() def CheckOnConflictRollback(self): if sqlite.sqlite_version_info < (3, 2, 2): return con = sqlite.connect(":memory:") con.execute("create table foo(x, unique(x) on conflict rollback)") con.execute("insert into foo(x) values (1)") try: con.execute("insert into foo(x) values (1)") except sqlite.DatabaseError: pass con.execute("insert into foo(x) values (2)") try: con.commit() except sqlite.OperationalError: self.fail("pysqlite knew nothing about the implicit ROLLBACK") def CheckWorkaroundForBuggySqliteTransferBindings(self): """ pysqlite would crash with older SQLite versions unless a workaround is implemented. """ self.con.execute("create table foo(bar)") self.con.execute("drop table foo") self.con.execute("create table foo(bar)") def CheckEmptyStatement(self): """ pysqlite used to segfault with SQLite versions 3.5.x. These return NULL for "no-operation" statements """ self.con.execute("") def CheckUnicodeConnect(self): """ With pysqlite 2.4.0 you needed to use a string or a APSW connection object for opening database connections. Formerly, both bytestrings and unicode strings used to work. Let's make sure unicode strings work in the future. """ con = sqlite.connect(u":memory:") con.close() def CheckTypeMapUsage(self): """ pysqlite until 2.4.1 did not rebuild the row_cast_map when recompiling a statement. This test exhibits the problem. 
""" SELECT = "select * from foo" con = sqlite.connect(":memory:",detect_types=sqlite.PARSE_DECLTYPES) con.execute("create table foo(bar timestamp)") con.execute("insert into foo(bar) values (?)", (datetime.datetime.now(),)) con.execute(SELECT) con.execute("drop table foo") con.execute("create table foo(bar integer)") con.execute("insert into foo(bar) values (5)") con.execute(SELECT) def CheckRegisterAdapter(self): """ See issue 3312. """ self.assertRaises(TypeError, sqlite.register_adapter, {}, None) def CheckSetIsolationLevel(self): """ See issue 3312. """ con = sqlite.connect(":memory:") self.assertRaises(UnicodeEncodeError, setattr, con, "isolation_level", u"\xe9") def CheckCursorConstructorCallCheck(self): """ Verifies that cursor methods check whether base class __init__ was called. """ class Cursor(sqlite.Cursor): def __init__(self, con): pass con = sqlite.connect(":memory:") cur = Cursor(con) try: cur.execute("select 4+5").fetchall() self.fail("should have raised ProgrammingError") except sqlite.ProgrammingError: pass except: self.fail("should have raised ProgrammingError") def CheckConnectionConstructorCal
lCheck(self): """ Verifies
that connection methods check whether base class __init__ was called. """ class Connection(sqlite.Connection): def __init__(self, name): pass con = Connection(":memory:") try: cur = con.cursor() self.fail("should have raised ProgrammingError") except sqlite.ProgrammingError: pass except: self.fail("should have raised ProgrammingError") def CheckCursorRegistration(self): """ Verifies that subclassed cursor classes are correctly registered with the connection object, too. (fetch-across-rollback problem) """ class Connection(sqlite.Connection): def cursor(self): return Cursor(self) class Cursor(sqlite.Cursor): def __init__(self, con): sqlite.Cursor.__init__(self, con) con = Connection(":memory:") cur = con.cursor() cur.execute("create table foo(x)") cur.executemany("insert into foo(x) values (?)", [(3,), (4,), (5,)]) cur.execute("select x from foo") con.rollback() try: cur.fetchall() self.fail("should have raised InterfaceError") except sqlite.InterfaceError: pass except: self.fail("should have raised InterfaceError") def CheckAutoCommit(self): """ Verifies that creating a connection in autocommit mode works. 2.5.3 introduced a regression so that these could no longer be created. """ con = sqlite.connect(":memory:", isolation_level=None) def CheckPragmaAutocommit(self): """ Verifies that running a PRAGMA statement that does an autocommit does work. This did not work in 2.5.3/2.5.4. """ cur = self.con.cursor() cur.execute("create table foo(bar)") cur.execute("insert into foo(bar) values (5)") cur.execute("pragma page_size") row = cur.fetchone() def CheckSetDict(self): """ See http://bugs.python.org/issue7478 It was possible to successfully register callbacks that could not be hashed. Return codes of PyDict_SetItem were not checked properly. """ class NotHashable: def __call__(self, *args, **kw): pass def __hash
.ext.viewcode', 'sphinx.ext.intersphinx', 'sphinx.ext.autosummary'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Kurt' copyright = u'2013, Tim Radvan' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2.0.0' # The full version, including alpha/beta/rc tags. release = '2.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', '_themes'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' #'armstrong' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_themes"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. 
#html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Kurtdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Kurt.tex', u'Kurt Documentation', u'Tim Radvan', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'kurt', u'Kurt Documentation', [u'Tim Radvan'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu e
ntry, description, category) texinfo_documents = [ ('index', 'Kurt', u'Kurt Documentation', u'Tim Radvan', 'Kurt', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output ------
--------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Kurt' epub_author = u'Tim Radvan' epub_publisher = u'Tim Radvan' epub_copyright = u'2013, Tim Radvan' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True class SimpleDocumenter(autodoc.MethodDocumenter): objtype = "simple" #do not indent the content content_indent = "" #do not add a header to the docstring def add_directive_hea
# coding: utf-8

# Compatibility import: these URL helpers moved out of
# `django.conf.urls.defaults` in newer Django versions, so fall back to the
# old location on ImportError.
try:
    from django.conf.urls import patterns, url, include
except ImportError:
    from django.conf.urls.defaults import patterns, url, include

from django.http import HttpResponse


def dummy(request):
    """Return an empty HttpResponse (placeholder view for the URL patterns
    below — presumably a test fixture; TODO confirm against callers)."""
    return HttpResponse()


# URL configuration: one catch-all API route served by the placeholder view,
# plus Django's bundled auth URLs included under the 'auth' namespace.
urlpatterns = patterns('',
    url('^api/.+/$', dummy, name='dummy'),
    url('', include('django.contrib.auth.urls', app_name='auth', namespace='auth'))
)
#
# usage: python k44.py {filename} {number}
#
import sys
import pydot
from k41 import *
from k42 import get_relation_pairs

if __name__ == '__main__':
    # Input file name and a sentence number; the `nos-1` index below shows
    # the number is 1-based on the command line.
    fn, nos = sys.argv[1], int(sys.argv[2])
    # load_cabocha comes from the star import of k41 (presumably parses
    # CaboCha output into a list of sentences — TODO confirm in k41).
    sl = load_cabocha(fn)
    # Relation pairs for the selected sentence only (list of edges).
    pl = get_relation_pairs([sl[nos-1]])
    # Render the edge list as a graph image via Graphviz 'dot'.
    g = pydot.graph_from_edges(pl)
    g.write_png('result.png', prog='dot')
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import logging from pants.option.option_types import DictOption from pants.option.subsystem import Subsystem DEFAULT_SCALA_VERSION = "2.13.6" _logger = logging.getLogger(__name__) class ScalaSubsystem(Subsystem): options_scope = "scala" help = "Scala programming language" _version_fo
r_resolve = DictOption[str]( "--version-for-resolve", help=( "A dictionary mapping the name of a resolve to the Scala version to use for all Scala " "targets c
onsuming that resolve.\n\n" 'All Scala-compiled jars on a resolve\'s classpath must be "compatible" with one another and ' "with all Scala-compiled first-party sources from `scala_sources` (and other Scala target types) " "using that resolve. The option sets the Scala version that will be used to compile all " "first-party sources using the resolve. This ensures that the compatibility property is " "maintained for a resolve. To support multiple Scala versions, use multiple resolves." ), ) def version_for_resolve(self, resolve: str) -> str: version = self._version_for_resolve.get(resolve) if version: return version return DEFAULT_SCALA_VERSION
"
"" File Allocation Table (FAT) / 12 bit version Used primarily for diskettes """
0x02: ('CALC', 1), 0x03: ('FIFO_I', 1), 0x04: ('FIFO_II', 1), 0x05: ('FIFO_DATA', 1), 0x06: ('ID_DATA', 1), 0x07: ('RC_OSC_I', 1), 0x08: ('RC_OSC_II', 1), 0x09: ('RC_OSC_III', 1), 0x0a: ('CKO_PIN', 1), 0x0b: ('GPIO1_PIN_I', 1), 0x0c: ('GPIO2_PIN_II', 1), 0x0d: ('CLOCK', 1), 0x0e: ('DATA_RATE', 1), 0x0f: ('PLL_I', 1), 0x10: ('PLL_II', 1), 0x11: ('PLL_III', 1), 0x12: ('PLL_IV'
, 1), 0x13: ('PLL_V', 1), 0x14: ('TX_I', 1), 0x15: ('TX_II', 1), 0x16: ('DELAY_I', 1), 0x17: ('DELAY_II', 1), 0x
18: ('RX', 1), 0x19: ('RX_GAIN_I', 1), 0x1a: ('RX_GAIN_II', 1), 0x1b: ('RX_GAIN_III', 1), 0x1c: ('RX_GAIN_IV', 1), 0x1d: ('RSSI_THRES', 1), 0x1e: ('ADC', 1), 0x1f: ('CODE_I', 1), 0x20: ('CODE_II', 1), 0x21: ('CODE_III', 1), 0x22: ('IF_CAL_I', 1), 0x23: ('IF_CAL_II', 1), 0x24: ('VCO_CURR_CAL', 1), 0x25: ('VCO_SB_CALC_I', 1), 0x26: ('VCO_SB_CALC_II', 1), 0x27: ('BATT_DETECT', 1), 0x28: ('TX_TEST', 1), 0x29: ('RX_DEM_TEST_I', 1), 0x2a: ('RX_DEM_TEST_II', 1), 0x2b: ('CPC', 1), 0x2c: ('CRYSTAL_TEST', 1), 0x2d: ('PLL_TEST', 1), 0x2e: ('VCO_TEST_I', 1), 0x2f: ('VCO_TEST_II', 1), 0x30: ('IFAT', 1), 0x31: ('RSCALE', 1), 0x32: ('FILTER_TEST', 1), 0x33: ('UNKNOWN', 1), } class Decoder(srd.Decoder): api_version = 3 id = 'a7105' name = 'A7105' longname = 'AMICCOM A7105' desc = '2.4GHz FSK/GFSK Transceiver with 2K ~ 500Kbps data rate.' license = 'gplv2+' inputs = ['spi'] outputs = [] tags = ['IC', 'Wireless/RF'] options = ( {'id': 'hex_display', 'desc': 'Display payload in Hex', 'default': 'yes', 'values': ('yes', 'no')}, ) annotations = ( # Sent from the host to the chip. ('cmd', 'Commands sent to the device'), ('tx-data', 'Payload sent to the device'), # Returned by the chip. ('rx-data', 'Payload read from the device'), ('warning', 'Warnings'), ) ann_cmd = 0 ann_tx = 1 ann_rx = 2 ann_warn = 3 annotation_rows = ( ('commands', 'Commands', (ann_cmd, ann_tx, ann_rx)), ('warnings', 'Warnings', (ann_warn,)), ) def __init__(self): self.reset() def reset(self): self.next() self.requirements_met = True self.cs_was_released = False def start(self): self.out_ann = self.register(srd.OUTPUT_ANN) def warn(self, pos, msg): '''Put a warning message 'msg' at 'pos'.''' self.put(pos[0], pos[1], self.out_ann, [self.ann_warn, [msg]]) def putp(self, pos, ann, msg): '''Put an annotation message 'msg' at 'pos'.''' self.put(pos[0], pos[1], self.out_ann, [ann, [msg]]) def next(self): '''Resets the decoder after a complete command was decoded.''' # 'True' for the first byte after CS went low. 
self.first = True # The current command, and the minimum and maximum number # of data bytes to follow. self.cmd = None self.min = 0 self.max = 0 # Used to collect the bytes after the command byte # (and the start/end sample number). self.mb = [] self.mb_s = -1 self.mb_e = -1 def mosi_bytes(self): '''Returns the collected MOSI bytes of a multi byte command.''' return [b[0] for b in self.mb] def miso_bytes(self): '''Returns the collected MISO bytes of a multi byte command.''' return [b[1] for b in self.mb] def decode_command(self, pos, b): '''Decodes the command byte 'b' at position 'pos' and prepares the decoding of the following data bytes.''' c = self.parse_command(b) if c is None: self.warn(pos, 'unknown command') return self.cmd, self.dat, self.min, self.max = c if self.cmd in ('W_REGISTER', 'R_REGISTER'): # Don't output anything now, the command is merged with # the data bytes following it. self.mb_s = pos[0] else: self.putp(pos, self.ann_cmd, self.format_command()) def format_command(self): '''Returns the label for the current command.''' return 'Cmd {}'.format(self.cmd) def parse_command(self, b): '''Parses the command byte. 
Returns a tuple consisting of: - the name of the command - additional data needed to dissect the following bytes - minimum number of following bytes - maximum number of following bytes ''' if b == 0x05: return ('W_TX_FIFO', None, 1, 32) elif b == 0x45: return ('R_RX_FIFO', None, 1, 32) if b == 0x06: return ('W_ID', None, 1, 4) elif b == 0x46: return ('R_ID', None, 1, 4) elif (b & 0b10000000) == 0: if (b & 0b01000000) == 0: c = 'W_REGISTER' else: c = 'R_REGISTER' d = b & 0b00111111 return (c, d, 1, 1) else: cmd = b & 0b11110000 if cmd == 0b10000000: return ('SLEEP_MODE', None, 0, 0) if cmd == 0b10010000: return ('IDLE_MODE', None, 0, 0) if cmd == 0b10100000: return ('STANDBY_MODE', None, 0, 0) if cmd == 0b10110000: return ('PLL_MODE', None, 0, 0) if cmd == 0b11000000: return ('RX_MODE', None, 0, 0) if cmd == 0b11010000: return ('TX_MODE', None, 0, 0) if cmd == 0b11100000: return ('FIFO_WRITE_PTR_RESET', None, 0, 0) if cmd == 0b11110000: return ('FIFO_READ_PTR_RESET', None, 0, 0) def decode_register(self, pos, ann, regid, data): '''Decodes a register. pos -- start and end sample numbers of the register ann -- is the annotation number that is used to output the register. regid -- may be either an integer used as a key for the 'regs' dictionary, or a string directly containing a register name.' data -- is the register content. ''' if type(regid) == int: # Get the name of the register. if regid not in regs: self.warn(pos, 'unknown register') return name = regs[regid][0] else: name = regid # Multi byte register come LSByte first. data = reversed(data) label = '{}: {}'.format(self.format_command(), name) self.decode_mb_data(pos, ann, data, label, True) def decode_mb_data(self, pos, ann, data, label, always_hex): '''Decodes the data bytes 'data' of a multibyte command at position 'pos'. The decoded data is prefixed with 'label'. 
If 'always_hex' is True, all bytes are decoded as hex codes, otherwise only non printable characters are escaped.''' if always_hex: def escape(b): return '{:02X}'.format(b) else: def escape(b): c = chr(b) if not str.isprintable(c): return '\\x{:02X}'.format(b) return c data = ''.join([escape(b) for b in data]) text = '{} = "{}"'.format(label, data.strip()) self.putp(pos, ann, text) def finish_command(self, pos): '''Decodes the remaining data bytes at position 'pos'.''' always_hex = self.options['hex_display'] == 'yes' if self.cmd == 'R_REGISTER': self.decode_register(pos, self.ann_cmd, self.dat, self.miso_bytes()) elif self.cmd == 'W_REGISTER':
# This script produces a new list of sites extracted from the Alexa
# top-sites list: the rank digits and the comma separating rank from domain
# are stripped from every line, then each domain is written out with an
# 'http://' prefix.
import re

prefix = 'http://'
#suffix = '</td><td></td></tr><tr><td>waitForPageToLoad</td><td></td><td>3000</td></tr>'

with open('top100_alexa.txt', 'r') as f:
    newlines = []
    # Iterate the file object directly instead of materializing readlines();
    # the original also assigned the cleaned line through a redundant
    # intermediate variable, folded into one expression here.
    for line in f:
        newlines.append(re.sub(r'\d+', '', line).replace(',', ''))

with open('urls.txt', 'w') as f:
    for line in newlines:
        #f.write('%s%s%s\n' % (prefix, line.rstrip('\n'), suffix))
        f.write('%s%s\n' % (prefix, line.rstrip('\n')))
__author__ = "Konstantin Osipov <kostja.osipov@gmail.com>" # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. import socket import yaml import sys import re from tarantool_connection import TarantoolConnection ADMIN_SEPARATOR
= '\n' class AdminConnection(TarantoolConnection): def execute_no_reconnect(self, command, silent): if not command: return if not silent: sys.stdout.write(command + ADMIN_SEPARATOR)
cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR self.socket.sendall(cmd) bufsiz = 4096 res = "" while True: buf = self.socket.recv(bufsiz) if not buf: break res = res + buf if (res.rfind("\n...\n") >= 0 or res.rfind("\r\n...\r\n") >= 0): break # validate yaml by parsing it try: yaml.load(res) finally: if not silent: sys.stdout.write(res.replace("\r\n", "\n")) return res def connect(self): super(AdminConnection, self).connect() handshake = self.socket.recv(128) if not re.search(r'^Tarantool.*console.*', str(handshake)): raise RuntimeError('Broken tarantool console handshake')
import lms_code.lib.rep2 as rep2
from lms_code.analysis.run_bem import bemify, boundary_conditions,\
    assemble, constrain, solve, evaluate_surface_disp
from lms_code.analysis.simplified_bem import create_surface_mesh, \
    set_params
from codim1.core import simple_line_mesh, combine_meshes, ray_mesh


def create_fault_mesh(d):
    """Build the detachment-fault mesh and store it under d['fault_mesh'].

    Only the detachment segment (bottom -> joint) is meshed. The original
    also built an unused ``top_fault_vert`` literal and read
    ``d['intersection_pt']`` into an unused local; both were dead code and
    are removed here.
    """
    # Segment endpoints in model coordinates; the small offsets presumably
    # nudge the points off exact intersections -- TODO confirm.
    joint = [4.20012e5 + 1.6, -2.006e4 - 5]
    bottom = [3.09134e5 + 1.1, -2.3376e4 - 3]
    d['fault_mesh'] = simple_line_mesh(d['fault_elements'], bottom, joint)


if __name__ == "__main__":
    # Full pipeline: parameters -> meshes -> BEM assembly -> solve -> save.
    d = dict()
    set_params(d)
    create_fault_mesh(d)
    create_surface_mesh(d)
    bemify(d)
    boundary_conditions(d)
    assemble(d)
    # constrain(d)
    solve(d)
    evaluate_surface_disp(d)
    rep2.save("bem_just_detach", d)
y of the corresponding losses. ``returns`` is built up in a similar way, containing just the unprocessed results of one ``session.run`` call (effectively of one batch). Labels and decodings are converted to text before splicing them into their corresponding results_tuple lists. In the case of decodings, for now we just pick the first available path. ''' # Each of the arrays within results_tuple will get extended by a batch of each available device for i in range(len(available_devices)): # Collect the labels results_tuple[0].extend(sparse_tensor_value_to_texts(returns[0][i])) # Collect the decodings - at the moment we default to the first one results_tuple[1].extend(sparse_tensor_value_to_texts(returns[1][i][0])) # Collect the distances results_tuple[2].extend(returns[2][i]) # Collect the losses results_tuple[3].extend(returns[3][i]) # For reporting we also need a standard way to do time measurements. def stopwatch(start_duration=0): r''' This function will toggle a stopwatch. The first call starts it, second call stops it, third call continues it etc. So if you want to measure the accumulated time spent in a certain area of the code, you can surround that code by stopwatch-calls like this: .. code:: python fun_time = 0 # initializes a stopwatch [...] for i in range(10): [...] # Starts/continues the stopwatch - fun_time is now a point in time (again) fun_time = stopwatch(fun_time) fun() # Pauses the stopwatch - fun_time is now a duration fun_time = stopwatch(fun_time) [...] # The following line only makes sense after an even call of :code:`fun_time = stopwatch(fun_time)`. 
print 'Time spent in fun():', format_duration(fun_time) ''' if start_duration == 0: return datetime.datetime.utcnow() else: return datetime.datetime.utcnow() - start_duration def format_duration(duration): '''Formats the result of an even stopwatch call as hours:minutes:seconds''' duration = duration if isinstance(duration, int) else duration.seconds m, s = divmod(duration, 60) h, m = divmod(m, 60) return '%d:%02d:%02d' % (h, m, s) # Execution # ========= # String constants for different services of the web handler PREFIX_NEXT_INDEX = '/next_index_' PREFIX_GET_JOB = '/get_job_' # Global ID counter for all objects requiring an ID id_counter = 0 def new_id(): '''Returns a new ID that is unique on process level. Not thread-safe. Returns: int. The new ID ''' global id_counter id_counter += 1 return id_counter class Sample(object): def __init__(self, src, res, loss, mean_edit_distance, sample_wer): '''Represents one item of a WER report. Args: src (str): source text res (str): resulting text loss (float): computed loss of this item mean_edit_distance (float): computed mean edit distance of this item ''' self.src = src self.res = res self.loss = loss self.mean_edit_distance = mean_edit_distance self.wer = sample_wer def __str__(self): return 'WER: %f, loss: %f, mean edit distance: %f\n - src: "%s"\n - res: "%s"' % (self.wer, self.loss, self.mean_edit_distance, self.src, self.res) class WorkerJob(object): def __init__(self, epoch_id, index, set_name, steps, report): '''Represents a job that should be executed by a worker. 
Args: epoch_id (int): the ID of the 'parent' epoch index (int): the epoch index of the 'parent' epoch set_name (str): the name of the data-set - one of 'train', 'dev', 'test' steps (int): the number of `session.run` calls report (bool): if this job should produce a WER report ''' self.id = new_id() self.epoch_id = epoch_id self.index = index self.worker = -1 self.set_name = set_name self.steps = steps self.report = report self.loss = -1 self.mean_edit_distance = -1 self.wer = -1 self.samples = [] def __str__(self): return 'Job (id: %d, worker: %d, epoch: %d, set_name: %s)' % (self.id, self.worker, self.index, self.set_name) class Epoch(object): '''Represents an epoch that should be executed by the Training Coordinator. Creates `num_jobs` `WorkerJob` instances in state 'open'. Args: index (int): the epoch index of the 'parent' epoch num_jobs (int): the number of jobs in this epoch Kwargs: set_name (str): the name of the data-set - one of 'train', 'dev', 'test' report (bool): if this job should produce a WER report ''' def __init__(self, index, num_jobs, set_name='train', report=False):
self.id = new_id() self.index = index self.num_jobs = num_jobs self.set_name = set_name self.report = report self.wer = -1 self.loss = -1 self.mean_edit_distance = -1 self.jobs_open = [] self.jobs_running = []
self.jobs_done = [] self.samples = [] for i in range(self.num_jobs): self.jobs_open.append(WorkerJob(self.id, self.index, self.set_name, FLAGS.iters_per_worker, self.report)) def name(self): '''Gets a printable name for this epoch. Returns: str. printable name for this epoch ''' if self.index >= 0: ename = ' of Epoch %d' % self.index else: ename = '' if self.set_name == 'train': return 'Training%s' % ename elif self.set_name == 'dev': return 'Validation%s' % ename else: return 'Test%s' % ename def get_job(self, worker): '''Gets the next open job from this epoch. The job will be marked as 'running'. Args: worker (int): index of the worker that takes the job Returns: WorkerJob. job that has been marked as running for this worker ''' if len(self.jobs_open) > 0: job = self.jobs_open.pop(0) self.jobs_running.append(job) job.worker = worker return job else: return None def finish_job(self, job): '''Finishes a running job. Removes it from the running jobs list and adds it to the done jobs list. Args: job (WorkerJob): the job to put into state 'done' ''' index = next((i for i in range(len(self.jobs_running)) if self.jobs_running[i].id == job.id), -1) if index >= 0: self.jobs_running.pop(index) self.jobs_done.append(job) log_traffic('%s - Moved %s from running to done.' % (self.name(), str(job))) else: log_warn('%s - There is no job with ID %d registered as running.' % (self.name(), job.id)) def done(self): '''Checks, if all jobs of the epoch are in state 'done'. It also lazy-prepares a WER report from the result data of all jobs. Returns: bool. if all jobs of the epoch are 'done' ''' if len(self.jobs_open) == 0 and len(self.jobs_running) == 0: num_jobs = len(self.jobs_done) if num_jobs > 0: jobs = self.jobs_done self.jobs_done = [] if not self.num_jobs == num_jobs: log_warn('%s - Number of steps not equal to number of jobs done.' 
% (self.name())) agg_loss = 0.0 agg_wer = 0.0 agg_mean_edit_distance = 0.0 for i in range(num_jobs): job = jobs.pop(0) agg_loss += job.loss if self.report: agg_wer += job.wer agg_mean_edit_distance += job.mean_edit_distance self.samples.extend(job.samples) self.loss = agg_loss / num_jobs
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006  Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/Place/_HasNoLatOrLon.py

#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext

#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule

#-------------------------------------------------------------------------
#
# HasNoLatOrLon
#
#-------------------------------------------------------------------------
class HasNoLatOrLon(Rule):
    """Rule that checks if Latitude or Longitude are not given"""

    labels = []
    name = _('Places with no latitude or longitude given')
    description = _("Matches places with empty latitude or longitude")
    category = _('Position filters')

    def apply(self, db, place):
        """Return True (match) when either coordinate is empty/blank.

        BUG FIX: the original tested ``place.get_latitude().strip`` without
        calling it — a bound method object is always truthy, so the latitude
        check was a no-op and only longitude was ever tested. Both strip()
        calls are now actually invoked.
        """
        if place.get_latitude().strip() and place.get_longitude().strip():
            return False
        return True
import os
import sys
import numpy as np
import nibabel as nb
import nighresjava
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving, \
    _check_available_memory


def levelset_curvature(levelset_image, distance=1.0,
                       save_data=False, overwrite=False, output_dir=None,
                       file_name=None):
    """Levelset curvature

    Estimates surface curvature of a levelset using a quadric approximation
    scheme.

    Parameters
    ----------
    levelset_image: niimg
        Levelset image to be turned into probabilities
    distance: float, optional
        Distance from the boundary in voxels where to estimate the curvature
    save_data: bool, optional
        Save output data to file (default is False)
    overwrite: bool, optional
        Overwrite existing results (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * mcurv (niimg): Mean curvature (output file suffix _curv-mean)
        * gcurv (niimg): Gaussian curvature (output file suffix _curv-gauss)

    Notes
    ----------
    Ported from original Java module by Pierre-Louis Bazin

    Fixes over the original: ``sys`` and ``_check_available_memory`` were
    used without being imported (NameError on those paths), and the input
    image was loaded twice; it is now loaded once and reused.
    """

    print("\nLevelset Curvature")

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, levelset_image)

        mcurv_file = os.path.join(output_dir,
                        _fname_4saving(module=__name__, file_name=file_name,
                                       rootfile=levelset_image,
                                       suffix='curv-mean'))

        gcurv_file = os.path.join(output_dir,
                        _fname_4saving(module=__name__, file_name=file_name,
                                       rootfile=levelset_image,
                                       suffix='curv-gauss'))

        if overwrite is False \
            and os.path.isfile(mcurv_file) \
            and os.path.isfile(gcurv_file) :

            print("skip computation (use existing results)")
            output = {'mcurv': mcurv_file, 'gcurv': gcurv_file}
            return output

    # load the data once (the original re-loaded the same image a second
    # time further down with deprecated accessors)
    lvl_img = load_volume(levelset_image)
    lvl_data = lvl_img.get_data()
    hdr = lvl_img.header
    aff = lvl_img.affine
    resolution = [x.item() for x in hdr.get_zooms()]
    dimensions = lvl_data.shape

    # algorithm
    # start virtual machine, if not already running
    try:
        mem = _check_available_memory()
        nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])
    except ValueError:
        pass

    # create algorithm instance
    algorithm = nighresjava.LevelsetCurvature()

    # set parameters
    algorithm.setMaxDistance(distance)

    # set dimensions and resolution, then push the levelset data
    algorithm.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    algorithm.setResolutions(resolution[0], resolution[1], resolution[2])

    algorithm.setLevelsetImage(nighresjava.JArray('float')(
                                    (lvl_data.flatten('F')).astype(float)))

    # execute
    try:
        algorithm.execute()
    except:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise
        return

    # Collect output
    mcurv_data = np.reshape(np.array(
                                algorithm.getMeanCurvatureImage(),
                                dtype=np.float32), dimensions, 'F')

    gcurv_data = np.reshape(np.array(
                                algorithm.getGaussCurvatureImage(),
                                dtype=np.float32), dimensions, 'F')

    hdr['cal_min'] = np.nanmin(mcurv_data)
    hdr['cal_max'] = np.nanmax(mcurv_data)
    mcurv = nb.Nifti1Image(mcurv_data, aff, hdr)

    hdr['cal_min'] = np.nanmin(gcurv_data)
    hdr['cal_max'] = np.nanmax(gcurv_data)
    gcurv = nb.Nifti1Image(gcurv_data, aff, hdr)

    if save_data:
        save_volume(mcurv_file, mcurv)
        save_volume(gcurv_file, gcurv)
        return {'mcurv': mcurv_file, 'gcurv': gcurv_file}
    else:
        return {'mcurv': mcurv, 'gcurv': gcurv}
# based on killer algo found here:
# http://codereview.stackexchange.com/questions/12922/inversion-count-using-merge-sort
import sys, bisect


def count_inversions(values):
    """Return the number of inversions in `values`.

    An inversion is a pair of indices i < j with values[i] > values[j].

    For each element in original order, the number of *remaining* (not yet
    processed) elements strictly smaller than it — found with bisect_left
    against a shrinking sorted copy — is exactly the number of inversions it
    heads; the element is then removed from the sorted copy.

    BUG FIX: the original popped from the unsorted input list (the very list
    it was iterating) instead of from the sorted list, corrupting both the
    iteration and the count.
    """
    remaining = sorted(values)
    inversions = 0
    for d in values:
        # locate d in the sorted remainder: p = count of smaller survivors
        p = bisect.bisect_left(remaining, d)
        inversions += p
        remaining.pop(p)  # remove d itself (bisect_left lands on it)
    return inversions


if __name__ == '__main__':
    input_list = [int(line) for line in open(sys.argv[1])]
    print(count_inversions(input_list))
#!/usr/bin/env python
# -*- coding: utf-8 -*-


class UnitDialogue:
    """
    Unit dialogue model: fetches unit dialogue rows from the database and
    groups their IDs per unit.
    """
    def __init__(self, **kwargs):
        # Database connection (must provide .cursor()); required kwarg.
        self.db = kwargs["db"]
        self.dialogue = {}

    def _get_unit_dialogue_map(self, dialogue):
        """Group dialogue IDs by unit ID.

        dialogue -- iterable of dicts with at least 'unit_id' and
        'dialogue_id' keys.
        Returns {unit_id: [dialogue_id, ...]} preserving input order.
        """
        unit_dialogue_map = {}
        for unit_dialogue in dialogue:
            # setdefault replaces the original explicit "key missing" branch
            unit_dialogue_map.setdefault(unit_dialogue["unit_id"], []) \
                             .append(unit_dialogue["dialogue_id"])
        return unit_dialogue_map

    def get_unit_dialogue(self):
        """
        Get unit dialogue IDs. Those will be queried
        against the dialogue collection to get the rest
        of the dialogue information
        """
        cursor = self.db.cursor()
        cursor.execute("""SELECT ud.id,
                          ud.id AS dialogue_id,
                          ud.unit_id,
                          ud.dialogue,
                          ud.context
                          FROM spiffyrpg_unit_dialogue ud
                          LEFT JOIN spiffyrpg_units u ON u.id = ud.unit_id""")
        tmp_dialogue = cursor.fetchall()
        cursor.close()
        # Convert row objects to plain dicts; a falsy fetch result yields [].
        return [dict(e) for e in (tmp_dialogue or [])]
from django.conf import settings
from django.contrib import messages
from django.forms import Form
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _

import json

import dateutil.parser
from itsdangerous import BadSignature

from appointments.apps.timeslots.models import Action, Constraint
from appointments.apps.timeslots.utils import strfdate, strftime, strptime, is_available

from .forms import ReminderForm
from .models import Appointment, User
from .utils import get_logger, get_serializer, send_confirmation, send_receipt, send_reminder

# Create your views here.

def book(request):
    """Create an Appointment from an AJAX-submitted JSON payload.

    GET renders the booking page.  POST must be an XMLHttpRequest whose
    body is a JSON object containing at least ``email``, ``action``,
    ``constraint``, ``date`` and ``time``; every malformed submission is
    logged and answered with a 400.  Unknown e-mail addresses get a new
    inactive ``User`` that is verified later via the confirmation link.
    """
    logger = get_logger(__name__, request)

    if 'POST' == request.method and request.is_ajax():
        fields = json.loads(request.body)

        try:
            user = User.objects.get(email__iexact=fields['email'])
        except KeyError:
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (email)")
            return HttpResponseBadRequest()
        except User.DoesNotExist:
            # First booking for this address: create an inactive user.
            user = User(email=fields['email'], is_active=False)
            user.save()
            logger.info("New user %s" % (str(user)))

        try:
            action = Action.objects.get(slug=fields['action'])
        except (KeyError, Action.DoesNotExist):
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (action) or Action.DoesNotExist")
            return HttpResponseBadRequest()

        try:
            constraint = Constraint.objects.get(slug=fields['constraint'])
        except (KeyError, Constraint.DoesNotExist):
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (constraint) or Constraint.DoesNotExist")
            return HttpResponseBadRequest()

        if action not in constraint.actions.all():
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: bad constraint/action combination")
            return HttpResponseBadRequest()

        # Ignore timezone to prevent one-off problems
        try:
            date = dateutil.parser.parse(fields['date'], ignoretz=True).date()
            time = strptime(fields['time'])
        except KeyError:
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (date and/or time)")
            return HttpResponseBadRequest()

        # Check if timeslot is available
        if not is_available(constraint, date, time):
            logger.warning("Bad form submission: timeslot not available")
            return HttpResponseBadRequest()

        # Preprocess sex to ensure it's a valid value; anything other
        # than M/F (or a missing field) is stored as an empty string.
        sex = fields['sex'][0].upper() if fields.get('sex', None) else None
        if sex not in ['M', 'F']:
            sex = ''

        appointment = Appointment(
            user=user,
            action=action,
            constraint=constraint,
            date=date,
            time=time,
            # Optional fields...
            first_name=fields.get('first_name', ''),
            last_name=fields.get('last_name', ''),
            nationality=fields.get('nationality', ''),
            sex=sex,
            identity_number=fields.get('identity_number', ''),
            document_number=fields.get('document_number', ''),
            phone_number=fields.get('phone_number', ''),
            mobile_number=fields.get('mobile_number', ''),
            comment=fields.get('comment', ''),
        )

        # Save the appointment; then log it
        appointment.save()
        logger.info("New appointment by %s in %s/%s on %s at %s" % (
            str(appointment.user),
            appointment.constraint.key.slug,
            appointment.constraint.slug,
            strfdate(appointment.date),
            strftime(appointment.time),
            )
        )

        send_receipt(appointment)
        # BUG FIX: user-facing message read "We've send you"; corrected
        # the grammar ("sent").
        messages.success(request, _("We've sent you an e-mail receipt. Please confirm your appointment by following the instructions."))

        # Return some JSON...
        return HttpResponse("Ok")
    elif 'POST' == request.method:
        logger.warning("XMLHttpRequest header not set on POST request")
        return HttpResponseBadRequest("XMLHttpRequest (AJAX) form submissions only please!")

    return render(request, 'book.html')

def cancel(request, payload):
    """Cancel the appointment referenced by a signed *payload*.

    Raises ``Http404`` for a bad signature or an invalid POST.
    BUG FIX: the original did ``return Http404`` (returning the
    exception *class* as if it were a response), so Django never
    actually produced a 404 — exceptions must be raised.  The
    redundant function-local ``from itsdangerous import BadSignature``
    (already imported at module level) was removed as well.
    """
    s = get_serializer()
    try:
        appointment_id = s.loads(payload)
    except BadSignature:
        raise Http404

    appointment = get_object_or_404(Appointment, pk=appointment_id)
    if appointment.is_cancelled():
        messages.warning(request, _("You've already cancelled this appointment."))
        return redirect('finish')

    if 'POST' == request.method:
        form = Form(request.POST)
        if form.is_valid():
            appointment.cancel()
            messages.info(request, _("You successfully cancelled your appointment."))
            return redirect('finish')
        # An invalid POST is treated as not found.
        raise Http404

    form = Form()
    return render(request, 'cancel.html', {'form': form})

def confirm(request, payload):
    """Confirm the appointment referenced by a signed *payload*.

    Also verifies the owning user and sends the confirmation e-mail.
    Raises ``Http404`` on a bad signature (BUG FIX: was ``return
    Http404``, which returned the exception class instead of raising).
    """
    s = get_serializer()
    try:
        appointment_id = s.loads(payload)
    except BadSignature:
        raise Http404

    appointment = get_object_or_404(Appointment, pk=appointment_id)
    if appointment.is_cancelled():
        messages.error(request, _("You cannot reconfirm a cancelled appointment. Please book again."))
    elif appointment.is_confirmed():
        messages.warning(request, _("Thank you, no need to reconfirm."))
    else:
        appointment.confirm()
        appointment.user.verify()
        send_confirmation(appointment)
        messages.success(request, _("Thank you for confirming your appointment."))

    return redirect('finish')

def reminder(request):
    """E-mail a user an overview of their upcoming appointments.

    Always reports success to avoid leaking which addresses exist
    (User.DoesNotExist is deliberately swallowed).
    """
    if 'POST' == request.method:
        form = ReminderForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            try:
                user = User.objects.get(email=email)
                # Only appointments from today onward are included.
                date = timezone.now().date()
                appointments = user.appointments.filter(date__gte=date)
                send_reminder(user, appointments)
            except User.DoesNotExist:
                # Intentionally silent: do not reveal unknown addresses.
                pass

            messages.success(request, _("We'll send you an e-mail with all your appointments."))
            return redirect('finish')
    else:
        form = ReminderForm()

    return render(request, 'reminder.html', {'form': form})

# Custom error views

def handler404(request):
    """Render the project's custom 404 page."""
    return render(request, '404.html')
except ValueError: return False #end def is_float def is_float(var): try: float(var) return True except ValueError: return False #end def is_float def is_array(var,type,delim=None): try: if isinstance(var,str): array(var.split(delim),type) else: array(var,type) #end if return True except ValueError: return False #end def is_float_array def string2val(s,delim=None): if is_bool(s): val = sbool(s) elif is_int(s): val = int(s) elif is_float(s): val = float(s) elif is_array(s,int,delim): val = array(s.split(delim),int) elif is_array(s,float,delim): val = array(s.split(delim),float) else: val = s #end if return val #end def string2val def string2array(string): ilst = string.strip().split(' ') lst = [] for l in ilst: if(l.strip()!=''): lst.append(float(l)) #end if #end for return array(lst) #end def string2array def is_string(var): return type(var)==type("s") #end def is_string def stringmap(s): smap=[] quotes=set(['"',"'"]) altquote={'"':"'","'":'"'} instr=False depth=0 for i in range(len(s)): c=s[i] if not instr and c in quotes: instr=True lastquote=c depth=1 direction=1 elif instr and c in quotes: if c!=altquote[lastquote]: direction=-1 #end if lastquote=c depth+=direction #end if smap+=[instr] if depth==0: instr=False #end if #end for return smap #end def stringmap def stringbreak(s,delimiter): strings=[] blocks='' strstart=s.startswith('"') or s.startswith("'") nblocks=0 smap=[] quotes=set(['"',"'"]) altquote={'"':"'","'":'"'} instr=False bstart=0 depth=0 for i in range(len(s)): c=s[i] if not instr and c in quotes: instr=True lastquote=c depth=1 direction=1 sstart=i bend=i if bend>0: blocks+=s[bstart:bend]+delimiter #end if elif instr and c in quotes: if c!=altquote[lastquote]: direction=-1 #end if lastquote=c depth+=direction #end if #smap+=[instr] if depth==0 and instr: send=i+1 strings+=[s[sstart:send]] instr=False bstart=send #end if #end for if not instr: bend=len(s) blocks+=s[bstart:bend]+delimiter #end if return strings,blocks,strstart #end def stringbreak def 
find_matching_brace(string,start,end): brace_dict = dict( [ ('(',')'), ('[',']'), ('{','}'), ('<','>') ] ) left_brace = string[start] right_brace = brace_dict[left_brace] found_match = False i = start + 1 left_scope = 0 right_scope = 0 while not found_match and i<end: if string[i]==left_brace: right_scope+=1 elif string[i]==right_brace: found_match = right_scope==left_scope right_scope-=1 #end if i+=1 #end while if found_match: brace_loc = i-1 else: brace_loc = -1 #end if return brace_loc #end def find_matching_brace def find_matching_pair(s,pair,start=0,end=-1): if end==-1: end=len(s) #end if left = pair[0] right = pair[1] llen=len(left) rlen=len(right) ileft = s.find(left,start,end) iright = -1 if ileft==-1: return ileft,iright else: i=ileft+llen left_scope = 0 right_scope = 0 found_match = False failed = False while not found_match and i<end: nleft = s.find(left,i,end) nright = s.find(right,i,end) if nleft!=-1 and nleft<nright: right_scope+=1 i=nleft+llen elif nright!=-1: found_match = right_scope==left_scope right_scope-=1 i=nright+rlen elif nright==-1: failed=True break #end if #end while if found_match: iright = i #end if if failed: ileft,iright=-1,-1 #end if #end if return ileft,iright #end def find_matching_pair def remove_pair_sections(s,pair): sc=s ir=0 n=0 while ir!=-1 and n<10: il,ir = find_matching_pair(sc,pair) sc=sc.replace(sc[il:ir],'') #end while return sc #end def def remove_comment_lines(comment_char,s_in): lines = s_in.splitlines() s_out='' for l in lines: if not l.strip().startswith(comment_char): s_out=s_out+l+'\n' #end if #end if return s_out #def remove_comment_lines def remove_empty_lines(s): sr='' lines = s.splitlines() for l in lines
: if l.strip()!='': sr+=l + '\n' #end if #end for return sr #end def remove_empty_lines def contains_any(str, set): for c in set: if c in str: return 1; return 0; #end def contains_any def contains_all(str, set): for c in set: if c not in str: return 0; retu
rn 1; #end def contains_all invalid_variable_name_chars=set('!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}-\n\t ') def valid_variable_name(s): return not contains_any(s,invalid_variable_name_chars) #end def valid_variable_name def split_delims(s,delims=['.','-','_']): sp = s for d in delims: sp = sp.replace(d,' ') #end for return sp.split() #end def split_delims #///////////////////////////////////////////////// #/////// C/C++ /////// #///////////////////////////////////////////////// def find_preprocessor_end(string,start,end): newline_loc = string.find('\n',start,end) prep_end = newline_loc line_continues = string[start:prep_end+1].rstrip(' \t\n').endswith('\\') continued_preprocessor = line_continues while line_continues: newline_loc = string.find('\n',prep_end+1,end) prep_end = newline_loc line_continues = string[start:prep_end+1].rstrip(' \t\n').endswith('\\') #end while return prep_end #end def find_preprocessor_end def find_comment_block_end(string,start,end): loc = string.find('*/',start,end) if loc!=-1: loc +=1 #print 'fcbe',string[loc-1],string[loc] #end if return loc #end def find_comment_block_end def find_matching_cbrace(string,start,end,verbose=True): brace_dict = dict( [ ('(',')'), ('[',']'), ('{','}'), ('<','>') ] ) left_brace = string[start] right_brace = brace_dict[left_brace] found_match = False i = start + 1 left_scope = 0 right_scope = 0 in_comment_line = False in_comment_block = False in_preprocessor = False comment_block = False while not found_match and i<end: ## if comment_block: ## print 'fmb2',string[i],string[i+1] ## #end if comment_block = False if string[i]=='#': preprocessor_end = find_preprocessor_end(string,i,end) if preprocessor_end!=-1: i = preprocessor_end else: if verbose: print 'ERROR: in find_matching_brace' print ' end of preprocessor statement not found' #end if brace_loc = -1 #end if elif string[i]=='/': comment_end = -1 if string[i+1]=='/': comment_end = find_line_end(string,i,end) elif string[i+1]=='*': comment_block = True 
comment_end = find_comment_block_end(string,i,end) else: comment_end = i #this is in the case of regular d
if out_of_date(code_path, img.filename(format)): all_exists = False break img.formats.append(format) # assume that if we have one, we have them all if not all_exists: all_exists = (j > 0) break images.append(img) if not all_exists: break results.append((code_piece, images)) if all_exists: return results # We didn't find the files, so build them results = [] if context: ns = plot_context else: ns = {} for i, code_piece in enumerate(code_pieces): if not context or config.plot_apply_rcparams: clear_state(config.plot_rcparams) run_code(code_piece, code_path, ns, function_name) images = [] fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() for j, figman in enumerate(fig_managers): if len(fig_managers) == 1 and len(code_pieces) == 1: img = ImageFile(output_base, output_dir) elif len(code_pieces) == 1: img = ImageFile("%s_%02d" % (output_base, j), output_dir) else: img = ImageFile("%s_%02d_%02d" % (output_base, i, j), output_dir) images.append(img) for format, dpi in formats: try: figman.canvas.figure.savefig(img.filename(format), dpi=dpi) except Exception,err: raise PlotError(traceback.format_exc()) img.formats.append(format) results.append((code_piece, images)) if not context or config.plot_apply_rcparams: clear_state(config.plot_rcparams) return results def run(arguments, content, options, state_machine, state, lineno): # The user may provide a filename *or* Python code content, but not both if arguments and content: raise RuntimeError("plot:: directive can't have both args and content") document = state_machine.document config = document.se
ttings.env.config nofigs = options.has_key('nofigs') options.setdefault('include-source', config.plot_include_source) context = options.has_key('context') rst_file = document.attributes['source'] rst_dir = os.path.dirname(rst_file) if len(arguments): if not config.plot_basedir: source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0])) else: source_file_name = os.path.join(setup.confdir, config.plot_basedir, directives.uri(arguments[0])) # If there is content, it will be passed as a caption. caption = '\n'.join(content) # If the optional function name is provided, use it if len(arguments) == 2: function_name = arguments[1] else: function_name = None with open(source_file_name, 'r') as fd: code = fd.read() output_base = os.path.basename(source_file_name) else: source_file_name = rst_file code = textwrap.dedent("\n".join(map(str, content))) counter = document.attributes.get('_plot_counter', 0) + 1 document.attributes['_plot_counter'] = counter base, ext = os.path.splitext(os.path.basename(source_file_name)) output_base = '%s-%d.py' % (base, counter) function_name = None caption = '' base, source_ext = os.path.splitext(output_base) if source_ext in ('.py', '.rst', '.txt'): output_base = base else: source_ext = '' # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames output_base = output_base.replace('.', '-') # is it in doctest format? is_doctest = contains_doctest(code) if options.has_key('format'): if options['format'] == 'python': is_doctest = False else: is_doctest = True # determine output directory name fragment source_rel_name = relpath(source_file_name, setup.confdir) source_rel_dir = os.path.dirname(source_rel_name) while source_rel_dir.startswith(os.path.sep): source_rel_dir = source_rel_dir[1:] # build_dir: where to place output files (temporarily) build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), 'plot_directive', source_rel_dir) # get rid of .. in paths, also changes pathsep # see note in Python docs for warning about symbolic links on Windows. 
# need to compare source and dest paths at end build_dir = os.path.normpath(build_dir) if not os.path.exists(build_dir): os.makedirs(build_dir) # output_dir: final location in the builder's directory dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, source_rel_dir)) if not os.path.exists(dest_dir): os.makedirs(dest_dir) # no problem here for me, but just use built-ins # how to link to files from the RST file dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), source_rel_dir).replace(os.path.sep, '/') build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') source_link = dest_dir_link + '/' + output_base + source_ext # make figures try: results = render_figures(code, source_file_name, build_dir, output_base, context, function_name, config) errors = [] except PlotError, err: reporter = state.memo.reporter sm = reporter.system_message( 2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base, source_file_name, err), line=lineno) results = [(code, [])] errors = [sm] # Properly indent the caption caption = '\n'.join(' ' + line.strip() for line in caption.split('\n')) # generate output restructuredtext total_lines = [] for j, (code_piece, images) in enumerate(results): if options['include-source']: if is_doctest: lines = [''] lines += [row.rstrip() for row in code_piece.split('\n')] else: lines = ['.. code-block:: python', ''] lines += [' %s' % row.rstrip() for row in code_piece.split('\n')] source_code = "\n".join(lines) else: source_code = "" if nofigs: images = [] opts = [':%s: %s' % (key, val) for key, val in options.items() if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] only_html = ".. only:: html" only_latex = ".. only:: latex" only_texinfo = ".. 
only:: texinfo" if j == 0: src_link = source_link else: src_link = None result = format_template( config.plot_template or TEMPLATE, dest_dir=dest_dir_link, build_dir=build_dir_link, source_link=src_link, multi_image=len(images) > 1, only_html=only_html, only_latex=only_latex, only_texinfo=only_texinfo, options=opts, images=images, source_code=source_code, html_show_formats=config.plot_html_show_formats, caption=caption) total_lines.extend(result.split("\n")) total_lines.extend("\n") if total_lines: state_machine.insert_input(total_lines, source=source_file_name) # copy image files to builder's output directory, if necessary if not os.path.exists(dest_dir): cbook.mkdirs(dest_dir) for code_piece, images in results: for img in images: for fn in img.filenames(): destimg = os.path.join(dest_dir, os.path.basename(fn)) if fn != destimg:
FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import inspect import six from collections import defaultdict, deque import logging logger = logging.getLogger(__name__) def first_non_none_response(responses, default=None): """Find first non None response in a list of tuples. This function can be used to find the first non None response from handlers connected to an event. This is useful if you are interested in the returned responses from event handlers. Example usage:: print(first_non_none_response([(func1, None), (func2, 'foo'), (func3, 'bar')])) # This will print 'foo' :type responses: list of tuples :param responses: The responses from the ``EventHooks.emit`` method. This is a list of tuples, and each tuple is (handler, handler_response). :param default: If no non-None responses are found, then this default value will be returned. :return: The first non-None response in the list of tuples. """ for response in responses: if response[1] is not None: return response[1] return default class BaseEventHooks(object): def emit(self, event_name, **kwargs): return [] def register(self, event_name, handler): self._verify_is_callable(handler) self._verify_accept_kwargs(handler) self._register(event_name, handler) def unregister(self, event_name, handler): pass def _verify_is_callable(self, func): if not six.callable(func): raise ValueError("Event handler %s must be callable." % func) def _verify_accept_kwargs(self, func): """Verifies a callable accepts kwargs :type func: callable :param func: A callable object. :returns: True, if ``func`` accepts kwargs, otherwise False. """ try: argspec = inspect.getargspec(func) except TypeError: return False else: if argspec[2] is None: raise ValueError("Event handler %s must accept keyword " "arguments (**kwargs)" % func) class EventHooks(BaseEventHooks): def __init__(self): # event_name -> [handler, ...] 
self._handlers = defaultdict(list) def emit(self, event_name, **kwargs): """Call all handlers subscribed to an event. :type event_name: str :param event_name: The name of the event to emit. :type **kwargs: dict :param **kwargs: Arbitrary kwargs to pass through to the subscribed handlers. The ``event_name`` will be injected into the kwargs so it's not necesary to add this to **kwargs. :rtype: list of tuples :return: A list of ``(handler_func, handler_func_return_value)`` """ kwargs['event_name'] = event_name responses = [] for handler in self._handlers[event_name]: response = handler(**kwargs) responses.append((handler, response)) return responses def _register(self, event_name, handler): self._handlers[event_name].append(handler) def unregister(self, event_name, handler): try: self._handlers[event_name].remove(handler) except ValueError: pass class HierarchicalEmitter(BaseEventHooks): def __init__(self): # We keep a reference to the handlers for quick # read only access (we never modify self._handlers). # A cache of event name to handler list. self._lookup_cache = {} self._han
dlers = _PrefixTrie() def emit(self, event_name, **kwargs): responses = [] # Invoke the event handlers from most specific # to least specific, each time stripping off a dot. logger.debug('emit: %s' % event_name) if event_name in self._lookup_cache: handlers_to_call = self._lookup_cache[event_name] else: handlers_to_call = self._handlers_for_event(event_name)
self._lookup_cache[event_name] = handlers_to_call kwargs['event_name'] = event_name responses = [] for handler in handlers_to_call: logger.debug('emit: calling %s' % handler) response = handler(**kwargs) responses.append((handler, response)) return responses def _handlers_for_event(self, event): return self._handlers.prefix_search(event) def _register(self, event_name, handler): # Super simple caching strategy for now, if we change the registrations # clear the cache. This has the opportunity for smarter invalidations. self._handlers.append_item(event_name, handler) self._lookup_cache = {} def unregister(self, event_name, handler): try: self._handlers.remove_item(event_name, handler) self._lookup_cache = {} except ValueError: pass class _PrefixTrie(object): """Specialized prefix trie that handles wildcards. The prefixes in this case are based on dot separated names so 'foo.bar.baz' is:: foo -> bar -> baz Wildcard support just means that having a key such as 'foo.bar.*.baz' will be matched with a call to ``get_items(key='foo.bar.ANYTHING.baz')``. You can think of this prefix trie as the equivalent as defaultdict(list), except that it can do prefix searches: foo.bar.baz -> A foo.bar -> B foo -> C Calling ``get_items('foo.bar.baz')`` will return [A + B + C], from most specific to least specific. """ def __init__(self): self._root = _Node(None, None) def append_item(self, key, value): """Add an item to a key. If a value is already associated with that key, the new value is appended to the list for the key. """ key_parts = key.split('.') current = self._root for part in key_parts: if part not in current.children: new_child = _Node(part) current.children[part] = new_child current = new_child else: current = current.children[part] if current.values is None: current.values = [value] else: current.values.append(value) def prefix_search(self, key): """Collect all items that are prefixes of key. Prefix in this case are delineated by '.' 
characters so 'foo.bar.baz' is a 3 chunk sequence of 3 "prefixes" ( "foo", "bar", and "baz"). """ collected = deque() key_parts = key.split('.') current = self._root self._get_items(current, key_parts, collected, index=0) return collected def remove_item(self, key, value): """Remove an item associated with a key. If the value is not associated with the key a ``ValueError`` will be raised. If the key does not exist in the trie, a ``ValueError`` will be raised. """ key_parts = key.split('.') previous = None current = self._root self._remove_item(current, key_parts, value, index=0) def _remove_item(self, current_node, key_parts, value, index): if current_node is None: return elif index < len(key_parts): next_node = current_node.children.get(key_parts[index]) if next_node is not None: self._remove_item(next_node, key_parts, value, index + 1) if index == len(key_parts) - 1: next_node.values.remove(value) if not next_node.children and not next_node.values: # Then this is a leaf node with no values so # we can just delete this link from the parent node. # This makes subsequent search faster in the case # where a key does not exist. del current_node.children[key_parts[index]] else: raise ValueErr
# -*- coding: utf-8 -*-
from __future__ import print_function

from nltk import download

TOKENIZER_MODEL = "punkt"
POS_TAGGER = "maxent_treebank_pos_tagger"


def downloadDependencies():
    """Fetch every NLTK resource this project relies on."""
    # Same two resources, same order, as individual download() calls.
    for resource in (TOKENIZER_MODEL, POS_TAGGER):
        download(resource)


if __name__ == '__main__':
    downloadDependencies()
# Copyright 2019 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Pull in helpers that 'charms_openstack.plugins' will export from charms_openstack.plugins.ada
pters import ( CephRelationAdapter, ) from charms_openstack.plugins.classes import ( BaseOpenStackCephCharm, CephCharm, PolicydOverridePlugin, ) from charms_openstack.plugins.trilio import ( TrilioVaultCharm, TrilioVaultSubordinateCharm, TrilioVaultCharmGhostAction, ) __all__ = ( "BaseOpenStackCephCharm", "CephCharm", "CephRelationAdapter", "PolicydOverridePlugin",
"TrilioVaultCharm", "TrilioVaultSubordinateCharm", "TrilioVaultCharmGhostAction", )
import numpy as np
import warnings
from scipy._lib._util import check_random_state


def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
    """
    Generate random samples from a probability density function using the
    ratio-of-uniforms method.

    Parameters
    ----------
    pdf : callable
        A function with signature ``pdf(x)`` that is the probability density
        function of the distribution.
    umax : float
        The upper bound of the bounding rectangle in the u-direction.
    vmin : float
        The lower bound of the bounding rectangle in the v-direction.
    vmax : float
        The upper bound of the bounding rectangle in the v-direction.
    size : int or tuple of ints, optional
        Defining number of random variates (default is 1).
    c : float, optional
        Shift parameter of the ratio-of-uniforms method, see Notes.
        Default is 0.
    random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
        Seed or generator used to draw the uniform candidates. ``None`` uses
        the global ``RandomState`` singleton; an int seeds a fresh
        ``RandomState``; an existing ``RandomState``/``Generator`` is used
        as-is.

    Returns
    -------
    rvs : ndarray
        The random variates distributed according to the probability
        distribution defined by the pdf.

    Notes
    -----
    The method relies on the fact that if ``(U, V)`` is uniformly
    distributed over ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``, then
    ``V/U + c`` follows a distribution according to `pdf` (Devroye 1986;
    Hoermann & Leydold 2014).  ``A`` is contained in the rectangle
    ``R = [0, umax] x [vmin, vmax]`` when

    - ``umax = sup sqrt(pdf(x))``
    - ``vmin = inf (x - c) sqrt(pdf(x))``
    - ``vmax = sup (x - c) sqrt(pdf(x))``

    so candidates drawn uniformly from ``R`` and accepted when they fall in
    ``A`` yield exact samples.  The expected number of candidate draws per
    accepted variate equals ``2 * umax * (vmax - vmin)``; a
    ``RuntimeWarning`` is emitted if this exceeds 20, and a ``RuntimeError``
    is raised if not a single draw is accepted after roughly 50000 attempts
    (almost certainly an incorrectly specified bounding rectangle).  Note
    that a wrong-but-nonempty rectangle silently produces samples from a
    *different* distribution, so a goodness-of-fit check such as
    `~scipy.stats.kstest` is recommended.
    """
    # Validate the bounding rectangle up front.
    if vmin >= vmax:
        raise ValueError("vmin must be smaller than vmax.")

    if umax <= 0:
        raise ValueError("umax must be positive.")

    # Expected candidates per accepted sample (rejection constant).
    exp_iter = 2 * (vmax - vmin) * umax
    if exp_iter > 20:
        msg = ("The expected number of iterations to generate a single random "
               "number from the desired distribution is larger than {}, "
               "potentially causing bad performance.".format(int(exp_iter)))
        warnings.warn(msg, RuntimeWarning)

    out_shape = tuple(np.atleast_1d(size))
    total = np.prod(out_shape)  # number of rvs needed; reshaped on return

    rng = check_random_state(random_state)
    samples = np.zeros(total)
    accepted_so_far = 0
    attempt = 1

    # Keep drawing candidate batches until `total` variates are accepted.
    # Expected runtime is finite; if *nothing* has been accepted after
    # ~50000 draws, the bounds are almost certainly wrong, so bail out
    # rather than loop forever (even with exp_iter = 1000 the probability
    # of this happening by chance is of order 1e-22).
    while accepted_so_far < total:
        remaining = total - accepted_so_far
        # Uniform candidates on [0, umax] x [vmin, vmax].
        u1 = umax * rng.uniform(size=remaining)
        v1 = rng.uniform(vmin, vmax, size=remaining)
        # Accept (u, v) exactly when it lies inside the region A.
        rvs = v1 / u1 + c
        accept = (u1**2 <= pdf(rvs))
        num_accept = np.sum(accept)
        if num_accept > 0:
            samples[accepted_so_far:(accepted_so_far + num_accept)] = rvs[accept]
            accepted_so_far += num_accept
        if (accepted_so_far == 0) and (attempt * total >= 50000):
            msg = ("Not a single random variate could be generated in {} "
                   "attempts. The ratio of uniforms method does not appear "
                   "to work for the provided parameters. Please check the "
                   "pdf and the bounds.".format(attempt * total))
            raise RuntimeError(msg)
        attempt += 1

    return np.reshape(samples, out_shape)
#!/usr/bin/env python # # GrovePi Example for using the Grove Thumb Joystick (http://www.seeedstudio.com/wiki/Grove_-_Thumb_Joystick) # # The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi # # Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi # ''' ## License The MIT License (MIT) GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi. Copyright (C) 2015 Dexter Industries Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import time import grovepi # Connect the Grove Thumb Joystick to analog port A0 # GrovePi Port A0 uses Arduino pins 0 and 1 # GrovePi Port A1 uses Arduino pins 1 and 2 # Don't plug anything into port A1 that uses pin 1 # Most Grove sensors only use 3 of their 4 pins,
which is why the GrovePi shares Arduino pins between adjacent ports # If the sensor has a pin definition SIG,NC,VCC,GND, the second (white) pin is not connected to anything # If you wish to connect two joysticks, use ports A0 and A2 (skip A1) # Uses two pins - one for the X axis and one for the Y axis # This configuration means you are using port A0 xPin = 0 yPin = 1 grovepi.pinMode(xPin,"INPUT") grovepi.pin
Mode(yPin,"INPUT") # The Grove Thumb Joystick is an analog device that outputs analog signal ranging from 0 to 1023 # The X and Y axes are two ~10k potentiometers and a momentary push button which shorts the x axis # My joystick produces slightly different results to the specifications found on the url above # I've listed both here: # Specifications # Min Typ Max Click # X 206 516 798 1023 # Y 203 507 797 # My Joystick # Min Typ Max Click # X 253 513 766 1020-1023 # Y 250 505 769 while True: try: # Get X/Y coordinates x = grovepi.analogRead(xPin) y = grovepi.analogRead(yPin) # Calculate X/Y resistance Rx = (float)(1023 - x) * 10 / x Ry = (float)(1023 - y) * 10 / y # Was a click detected on the X axis? click = 1 if x >= 1020 else 0 print("x =", x, " y =", y, " Rx =", Rx, " Ry =", Ry, " click =", click) time.sleep(.5) except IOError: print ("Error")
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Invokes html2js and appends goog.provide.

https://www.npmjs.com/package/html2js
"""

import subprocess
import sys


def main(argv):
    """Run html2js over HTML templates and emit a goog.provide'd JS module.

    Args:
      argv: command-line arguments:
        argv[1]: path to the html2js binary.
        argv[2]: a string that will be stripped out of every filename in the
            template id.
        argv[3]: a string to prepend to template paths.
        argv[4]: name of the AngularJS module that needs to be created.
        argv[5]: goog module name.
        argv[6:]: remaining args, interpreted as html locations.
    """
    html2js = argv[1]
    strip_prefix = argv[2]
    prepend_prefix = argv[3]
    module_name = argv[4]
    goog_provide = argv[5]
    html_paths = argv[6:]

    result = ["goog.provide('{}');".format(goog_provide)]
    for src in html_paths:
        # Every input must live under the prefix being stripped.
        assert src.startswith(strip_prefix)
        js = subprocess.check_output([html2js, src, '--module', module_name],
                                     env={})
        # check_output returns bytes under Python 3 (str under Python 2);
        # decode so the str-based replace/join below works on either.
        if isinstance(js, bytes):
            js = js.decode('utf-8')
        # Rewrite the template id from the on-disk path to the served path.
        template_name = prepend_prefix + src[len(strip_prefix):]
        js = js.replace(src, template_name)
        result.append(js)
    result.append("{} = angular.module('{}');".format(goog_provide,
                                                      module_name))
    # print() as a function: the original `print '\n'.join(result)` statement
    # is a syntax error under Python 3; this form is valid on both 2 and 3.
    print('\n'.join(result))


if __name__ == '__main__':
    main(sys.argv)
from amitools.vamos.astructs import LibraryStruct
from amitools.vamos.atypes import Library, NodeType
from amitools.fd import read_lib_fd, generate_fd
from .vlib import VLib
from .stub import LibStubGen
from .patch import LibPatcherMultiTrap
from .impl import LibImplScanner


class LibCreator(object):
    """create a vamos internal libs

    Factory that assembles a :class:`VLib` from an info record: it reads (or
    fakes) the function descriptor, optionally scans a Python implementation,
    generates call stubs, allocates the Amiga library structure, and patches
    the jump table with traps.
    """

    def __init__(self, alloc, traps,
                 fd_dir=None,
                 log_missing=None, log_valid=None,
                 lib_profiler=None):
        # alloc: memory allocator used for the library structure
        # traps: trap manager handed to the jump-table patcher
        self.alloc = alloc
        self.traps = traps
        # options
        # fd_dir: directory searched by read_lib_fd() for .fd files
        self.fd_dir = fd_dir
        # profiler (optional): creates per-library call profiles
        self.profiler = lib_profiler
        # stub generator; logging of missing/valid functions is delegated to it
        self.stub_gen = LibStubGen(log_missing=log_missing, log_valid=log_valid)

    def _create_library(self, info, is_dev, fd):
        """Allocate and set up the Amiga ``Library`` structure for ``info``.

        Chooses the node type from ``is_dev`` (device vs. library), allocates
        the structure with the sizes recorded in ``info``, and stamps
        version/revision/type via ``setup()``.
        """
        if is_dev:
            ltype = NodeType.NT_DEVICE
        else:
            ltype = NodeType.NT_LIBRARY
        name = info.get_name()
        id_str = info.get_id_string()
        neg_size = info.get_neg_size()
        pos_size = info.get_pos_size()
        library = Library.alloc(self.alloc, name, id_str, neg_size, pos_size, fd)
        version = info.get_version()
        revision = info.get_revision()
        library.setup(version=version, revision=revision, type=ltype)
        return library

    def _generate_fake_fd(self, name, lib_cfg):
        """Build a synthetic function descriptor when no .fd file exists.

        The number of fake functions comes from ``lib_cfg.num_fake_funcs``
        if a config is given, otherwise zero.
        """
        if lib_cfg:
            num_calls = lib_cfg.num_fake_funcs
        else:
            num_calls = 0
        return generate_fd(name, num_calls)

    def get_profiler(self):
        """Return the library profiler passed at construction (or None)."""
        return self.profiler

    def create_lib(self, info, ctx, impl=None, lib_cfg=None, check=False):
        """Create a vamos library/device and return the assembled ``VLib``.

        ``info`` must name either a ``.library`` or a ``.device``; anything
        else raises ``ValueError``.  If ``impl`` is given, it is scanned
        against the fd (``check=True`` uses the checked scanner) and real
        stubs are generated; otherwise a fake stub covers the whole fd.
        """
        name = info.get_name()
        if name.endswith('.device'):
            is_dev = True
        elif name.endswith('.library'):
            is_dev = False
        else:
            raise ValueError("create_lib: %s is neither lib nor dev!" % name)
        # get fd: either read from fd or fake one
        fd = read_lib_fd(name, self.fd_dir)
        if fd is None:
            fd = self._generate_fake_fd(name, lib_cfg)
        # if impl is available scan it
        scan = None
        if impl:
            scanner = LibImplScanner()
            if check:
                scan = scanner.scan_checked(name, impl, fd, True)
            else:
                scan = scanner.scan(name, impl, fd, True)
        # add profile?
        if self.profiler:
            # get some impl information
            if scan:
                func_tags = scan.get_func_tags()
            else:
                func_tags = None
            profile = self.profiler.create_profile(name, fd, func_tags)
        else:
            profile = None
        # create stub: fake (no impl scanned) or real, and pick the struct
        # definition accordingly
        if scan is None:
            stub = self.stub_gen.gen_fake_stub(name, fd, ctx, profile)
            struct = LibraryStruct
        else:
            stub = self.stub_gen.gen_stub(scan, ctx, profile)
            struct = impl.get_struct_def()
        # adjust info pos/neg size (only when the info left them unset)
        if info.pos_size == 0:
            info.pos_size = struct.get_size()
        if info.neg_size == 0:
            info.neg_size = fd.get_neg_size()
        # allocate and init lib
        library = self._create_library(info, is_dev, fd)
        addr = library.get_addr()
        # patcher: install traps into the library's jump table
        patcher = LibPatcherMultiTrap(self.alloc, self.traps, stub)
        patcher.patch_jump_table(addr)
        # fix lib sum (checksum must be updated after patching)
        library.update_sum()
        # create vamos lib and combine all pieces
        vlib = VLib(library, info, struct, fd, impl, stub, ctx,
                    patcher, profile, is_dev)
        return vlib
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

# NOTE: this script is Python 2 only (print statements, integer division in
# RATE/CHUNK below).

import matplotlib.pyplot as plt
import numpy as np
import pyaudio
import wave

from sound_encoder import SoundEncoder


# Audio capture parameters: 16-bit stereo at 44.1 kHz, read in 1024-frame
# chunks, for a 10-second recording.
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10


def getAudioStream():
  """Open and return a PyAudio input stream with the module's parameters."""
  p = pyaudio.PyAudio()
  return p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True,
                frames_per_buffer=CHUNK)


def transformData(data, window):
  """Unpack raw audio bytes to an int array and apply the window function.

  "%dh" unpacks signed 16-bit samples; len(data)/CHANNELS is the sample
  count (Python 2 integer division).  NOTE(review): the count presumably
  assumes 2 bytes per sample with 2 channels -- confirm against FORMAT.
  """
  return np.array(wave.struct.unpack("%dh"%(len(data)/CHANNELS),\
      data))*window


def visualizeSDRs(sdrs):
  """Render a list of SDRs as a black-and-white bitmap with matplotlib."""
  sdrsToVisualize = []
  for sdr in sdrs:
    # Active bits become 255 (white in 'Greys' before rotation), others 0.
    sdrsToVisualize.append([255 if x else 0 for x in sdr])
  imageArray = np.rot90(np.array(sdrsToVisualize))
  plt.imshow(imageArray, cmap='Greys', interpolation='nearest')
  plt.show()


def recordAndEncode(stream, soundEncoder):
  """Record RECORD_SECONDS of audio and encode each chunk into an SDR.

  Reads CHUNK frames at a time, windows the samples with a Blackman window,
  and feeds them to ``soundEncoder.encode``.  Closes the stream when done
  and returns the list of SDRs.
  """
  window = np.blackman(CHANNELS*CHUNK)
  sdrs = []
  print "---recording---"
  for _ in range(0, (RATE/CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    transformedData = transformData(data, window)
    sdr = soundEncoder.encode(transformedData)
    sdrs.append(sdr)
  stream.stop_stream()
  stream.close()
  print "---done---"
  return sdrs


if __name__ == "__main__":
  # Encoder parameters: n = SDR width, w = active bits, frequency range
  # [minval, maxval] Hz.
  n = 300
  w = 31
  minval = 20
  maxval = 10000
  soundEncoder = SoundEncoder(n, w, RATE, CHUNK, minval, maxval)

  stream = getAudioStream()
  sdrs = recordAndEncode(stream, soundEncoder)
  visualizeSDRs(sdrs)
"""Integration tests for client-server interaction.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from multiprocessing import Process import os import shutil import tempfile import time import unittest from filetracker.client import Client, FiletrackerError from filetracker.servers.run import main as server_main _TEST_PORT_NUMBER = 45735 class InteractionTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.cache_dir = tempfile.mkdtemp() cls.server_dir = tempfile.mkdtemp() cls.temp_dir = tempfile.mkdtemp() cls.server_process = Process(target=_start_server, args=(cls.server_dir,)) cls.server_process.start() time.sleep(2) # give server some time to start cls.client = Client( cache_dir=cls.cache_dir, remote_url='http://127.0.0.1:{}'.format(_TEST_PORT_NUMBER), ) @classmethod def tearDownClass(cls): cls.server_process.terminate() shutil.rmtree(cls.cache_dir) shutil.rmtree(cls.server_dir) shutil.rmtree(cls.temp_dir) def setUp(self): # Shortcuts for convenience self.cache_dir = InteractionTest.cache_dir self.server_dir = InteractionTest.server_dir self.temp_dir = InteractionTest.temp_dir self.client = InteractionTest.client def test_put_file_should_save_file_both_locally_and_remotely(self): temp_file = os.path.join(self.temp_dir, 'put.txt') with open(temp_file, 'w') as tf: tf.write('hello') self.client.put_file('/put.txt', temp_file) cache_path = os.path.join(self.cache_dir, 'files', 'put.txt') remote_path = os.path.join(self.server_dir, 'links', 'put.txt') self.assertTrue(os.path.exists(cache_path)) self.assertTrue(os.path.exists(remote_path)) with open(cache_path, 'r') as cf: self.assertEqual(cf.read(), 'hello') rf, _ = self.client.get_stream('/put.txt') self.assertEqual(rf.read(), b'hello') def test_get_file_should_raise_error_if_file_doesnt_exist(self): temp_file = os.path.join(self.temp_dir, 'get_doesnt_exist.txt') with self.assertRaises(FiletrackerError): self.client.get_file('/doesnt_exist', 
temp_file) def test_get_file_should_save_file_contents_to_destination(self): src_file = os.path.join(self.temp_dir, 'get_src.txt') dest_file = os.path.join(self.temp_dir, 'get_dest.txt') with open(src_file, 'w') as sf: sf.write('hello') self.client.put_file('/get.txt', src_file) self.client.get_file('/get.txt', dest_file) with open(dest_file, 'r') as df: self.assertEqual(df.read(), 'hello') def test_get_stream_should_return_readable_stream(self): src_file = os.path.join(self.temp_dir, 'streams.txt') with open(src_file, 'wb') as sf: sf.write(b'hello streams') self.client.put_file('/streams.txt', src_file) f, _ = self.client.get_stream('/streams.txt') self.assertEqual(f.read(), b'hello streams') def test_big_files_should_be_handled_correctly(self): # To be more precise, Content-Length header should be # set to the actual size of the file. src_file = os.path.join(self.temp_dir, 'big.txt') with open(src_file, 'wb') as sf: sf.write(b'r') for _ in range(1024 * 1024): sf.write(b'ee') self.client.put_file('/big.txt', src_file) f, _ = self.client.get_stream('/big.txt') with open(src_file, 'rb') as sf: self.assertEqual(sf.read(), f.read()) def test_file_version_should_be_set_to_current_time_on_upload(self): src_file = os.path.join(self.temp_dir, 'version.txt') with open(src_file, 'wb') as sf: sf.write(b'hello version') os.utime(src_file, (1, 1)) pre_upload = int(time.time()) self.client.put_file('/version.txt', src_file) post_upload = int(time.time()) version = self.client.file_version('/version.txt') self.assertNotEqual(version, 1) self.assertTrue(pre_upload <= version <= post_upload) def test_file_size_should_return_decompressed_size_without_cache(self): src_file = os.path.join(self.temp_dir, 'size.txt') with open(src_file, 'wb') as sf: sf.write(b'hello size') # size = 10 self.client.put_file('/size.txt', src_file, to_local_store=False) self.assertEqual(self.client.file_size('/size.txt'), len(b'hello size')) def test_every_link_should_have_independent_version(self): 
src_file = os.path.join(self.temp_dir, 'foo.txt') with open(src_file, 'wb') as sf: sf.write(b'hello foo') self.client.put_file('/foo_a.txt', src_file) time.sleep(1) self.client.put_file('/foo_b.txt', src_file) version_a = self.client.file_version('/foo_a.txt') version_b = self.client.file_version('/foo_b.txt') self.assertNotEqual(version_a, version_b) def test_put_older_should_fail(self): """This test assumes file version is stored in mtime.""" src_file = os.path.join(self.temp_dir, 'older.txt') with open(src_file, 'wb') as sf: sf.write(b'version 1') self.client.put_file('/older.txt@1', src_file) with open(src_file, 'wb') as sf: sf.write(b'version 2') self.client.put_file('/older.txt@2', src_file)
with open(src_file, 'wb') as sf: sf.write(b'version 3 (1)') self.client.put_file('/older.txt@1', src_file) f, _ = self.client.get_stream('/older.txt') self.assertEqual(f.read(), b'version 2') with self.assertRaises(FiletrackerError): self.client.get_stream('/older.txt@1') def test_get_nonexistent_should_404(self): with self.assertRaisesRegexp(FiletrackerError, "404"): self.client.get_stream('/n
onexistent.txt') def test_delete_nonexistent_should_404(self): with self.assertRaisesRegexp(FiletrackerError, "404"): self.client.delete_file('/nonexistent.txt') def test_delete_should_remove_file_and_dir(self): src_file = os.path.join(self.temp_dir, 'del.txt') with open(src_file, 'wb') as sf: sf.write(b'test') self.client.put_file('/dir/del.txt', src_file) self.client.delete_file('/dir/del.txt') for d in (self.cache_dir, self.server_dir): for f in ('files', 'locks'): self.assertFalse( os.path.exists(os.path.join(d, f, 'dir')), "{}/{}/dir not deleted ({})".format( d, f, d == self.cache_dir and "cache" or "server" ), ) with self.assertRaisesRegexp(FiletrackerError, "404"): self.client.get_stream('/dir/del.txt') def _start_server(server_dir): server_main( ['-p', str(_TEST_PORT_NUMBER), '-d', server_dir, '-D', '--workers', '4'] )
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

import sqlalchemy as sa

from buildbot.test.util import migration
from twisted.trial import unittest


class Migration(migration.MigrateTestMixin, unittest.TestCase):
    """Test the schema migration from version 20 to 21.

    Verifies that after the migration, auto-generated primary keys continue
    from the highest existing id in each listed table (sequences repaired).
    """

    def setUp(self):
        return self.setUpMigrateTest()

    def tearDown(self):
        return self.tearDownMigrateTest()

    # primary-key columns whose sequences the migration must repair,
    # as 'table.column' strings
    cols = [
        'buildrequests.id',
        'builds.id',
        'buildsets.id',
        'changes.changeid',
        'patches.id',
        'sourcestampsets.id',
        'sourcestamps.id',
        'objects.id',
        'users.uid',
    ]

    # tests

    def test_update(self):
        def setup_thd(conn):
            metadata = sa.MetaData()
            metadata.bind = conn

            # insert a row into each table, giving an explicit id column so
            # that the sequence is not advanced correctly, but leave no rows in
            # one table to test that corner case
            # NOTE(review): the `i > 1` guard actually leaves the first *two*
            # tables empty, not one -- confirm which was intended.
            for i, col in enumerate(self.cols):
                tbl_name, col_name = col.split('.')
                tbl = sa.Table(tbl_name, metadata,
                               sa.Column(col_name, sa.Integer,
                                         primary_key=True))
                tbl.create()
                if i > 1:
                    conn.execute(tbl.insert(), {col_name: i})

        def verify_thd(conn):
            metadata = sa.MetaData()
            metadata.bind = conn

            # try inserting *without* an ID, and verify that the resulting ID
            # is as expected
            for i, col in enumerate(self.cols):
                tbl_name, col_name = col.split('.')
                tbl = sa.Table(tbl_name, metadata,
                               sa.Column(col_name, sa.Integer,
                                         primary_key=True))
                r = conn.execute(tbl.insert(), {})
                # tables seeded with id == i must continue at i + 1;
                # empty tables start at 1
                if i > 1:
                    exp = i + 1
                else:
                    exp = 1
                self.assertEqual(r.inserted_primary_key[0], exp)

        return self.do_test_migration(20, 21, setup_thd, verify_thd)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for QNN operators."""
import numpy as np
import tvm
from tvm import topi, relay, te
from tvm.contrib import graph_executor
import tvm.topi.testing


def verify_simulated_quantize(data_shape, out_dtype, channels, axis):
    """Check topi.nn.simulated_quantize against the real qnn.quantize op.

    Builds both graphs with identical random inputs and compares outputs on
    every enabled target.
    """
    # Create placeholder variables for all qnn inputs.
    A = te.placeholder(data_shape, name="value", dtype="float32")
    D = te.placeholder([], name="dtype", dtype="int32")
    S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
    Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
    SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis)

    # Create random numpy values to assign to inputs.
    a_np = np.random.uniform(size=data_shape).astype("float32")
    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype])
    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
    z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
    q_np = np.zeros(shape=data_shape, dtype="float32")

    def check_target(target, dev):
        # Wrap the numpy arrays in nd arrays.
        a = tvm.nd.array(a_np, dev)
        d = tvm.nd.array(d_np, dev)
        s = tvm.nd.array(s_np, dev)
        z = tvm.nd.array(z_np, dev)
        q = tvm.nd.array(q_np, dev)

        # Construct equivalent relay graph.
        per_channel = channels[0] != 1
        a_var = relay.var("a", shape=data_shape, dtype="float32")
        if per_channel:
            s_var = relay.const(s_np)
            z_var = relay.const(z_np)
        else:
            s_var = relay.const(s_np[0])
            z_var = relay.const(z_np[0])
        real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)

        # Get real qnn quantize output.
        m = graph_executor.GraphModule(lib["default"](dev))
        m.set_input("a", a_np)
        m.run()
        real_q_out = m.get_output(0)

        # Compile the simulated quantize function.
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q)
        func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name="sim_quantize")
        func(a, d, s, z, q)

        # Check correctness against the true qnn output.
        mismatch = q.numpy() != real_q_out.numpy().astype("float32")
        # Allow some rounding errors due to GPU fp32 arithmetic.
        assert np.sum(mismatch) <= 3

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)


def test_simulated_quantize():
    """Exercise simulated quantize over shapes, dtypes, and axes."""
    verify_simulated_quantize([1], "int8", [1], -1)
    verify_simulated_quantize([2, 5], "int8", [5], 1)
    verify_simulated_quantize([1, 32, 32, 32], "int8", [32], -1)
    verify_simulated_quantize([1, 32, 32, 32], "uint8", [32], -2)
    verify_simulated_quantize([2, 5], "int32", [5], 1)


def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):
    """Check topi.nn.simulated_dequantize against the real qnn.dequantize op.

    Mirrors verify_simulated_quantize but feeds integer-valued inputs and
    compares with a float tolerance.
    """
    # Create placeholder variables for all qnn inputs.
    A = te.placeholder(data_shape, name="value", dtype="float32")
    D = te.placeholder([], name="dtype", dtype="int32")
    S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
    Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
    SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis)

    # Create random numpy values to assign to inputs.
    a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype)
    a_np_f = a_np.astype("float32")
    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype])
    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
    z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
    dq_np = np.zeros(shape=data_shape, dtype="float32")

    def check_target(target, dev):
        # Wrap the numpy arrays in nd arrays.
        a = tvm.nd.array(a_np_f, dev)
        d = tvm.nd.array(d_np, dev)
        s = tvm.nd.array(s_np, dev)
        z = tvm.nd.array(z_np, dev)
        dq = tvm.nd.array(dq_np, dev)

        # Construct equivalent relay graph.
        per_channel = channels[0] != 1
        a_var = relay.var("a", shape=data_shape, dtype=in_dtype)
        if per_channel:
            s_var = relay.const(s_np)
            z_var = relay.const(z_np)
        else:
            s_var = relay.const(s_np[0])
            z_var = relay.const(z_np[0])
        real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)

        # Get real qnn quantize output.
        m = graph_executor.GraphModule(lib["default"](dev))
        m.set_input("a", a_np)
        m.run()
        real_dq_out = m.get_output(0)

        # Compile the simulated quantize function.
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ)
        func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name="sim_quantize")
        func(a, d, s, z, dq)

        # Check correctness against the true qnn output.
        tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)


def test_simulated_dequantize():
    """Exercise simulated dequantize over shapes, dtypes, and axes."""
    verify_simulated_dequantize([1], "int8", [1], -1)
    verify_simulated_dequantize([2, 5], "int8", [5], 1)
    verify_simulated_dequantize([2, 5], "int8", [2], 0)
    verify_simulated_dequantize([1, 32, 32, 32], "int8", [32], -1)
    verify_simulated_dequantize([1, 32, 32, 32], "uint8", [32], -2)
    verify_simulated_dequantize([2, 5], "int32", [5], 1)


if __name__ == "__main__":
    test_simulated_quantize()
    test_simulated_dequantize()
"""Tests for registry module - datasets method""" import vcr from pygbif import registry @vcr.use_cassette("test/vcr_cassettes/test_datasets.yaml") def test_datasets(): "registry.datasets - basic test" res = registry.datasets() assert dict == res.__class__ @vcr.use_cassette("t
est/vcr_cassettes/test_datasets_limit.yaml") def test_datasets_limit(): "registry.datasets - limit param" res = registry.datasets(limit=1) assert dict == res.__class__ assert 1 == len(res["results"]) res = registry.datasets(limit=3) assert dict == res.__class__ assert 3 == len(res["results"]) @vcr.use_cassette("test/vcr_cassettes/test_datasets_type.yaml") def test_datasets_type(): "registry.datasets - type param" res = registry.datasets(type="OCCURRENCE")
vv = [x["type"] for x in res["results"]] assert dict == res.__class__ assert 100 == len(res["results"]) assert "OCCURRENCE" == list(set(vv))[0]
See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

from __future__ import absolute_import
from __future__ import print_function

import os

from twisted.python import util
from twisted.trial import unittest

from buildbot import config
from buildbot.scripts import runner
from buildbot.test.util import dirs
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.test.util.warnings import assertProducesWarnings
from buildbot.worker_transition import DeprecatedWorkerAPIWarning
from buildbot.worker_transition import DeprecatedWorkerNameWarning


class RealConfigs(dirs.DirsMixin, unittest.TestCase):

    """Load real sample master configurations (current sample.cfg plus the
    historical templates defined below) through ``config.FileLoader`` and
    check exactly which deprecation warnings each one produces."""

    def setUp(self):
        # Scratch master basedir; historical configs are written to test.cfg.
        self.setUpDirs('basedir')
        self.basedir = os.path.abspath('basedir')
        self.filename = os.path.abspath("test.cfg")

    def tearDown(self):
        self.tearDownDirs()

    def test_sample_config(self):
        # The sample.cfg shipped next to buildbot.scripts.runner must load
        # without any deprecated-worker-API warning.
        filename = util.sibpath(runner.__file__, 'sample.cfg')
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            config.FileLoader(self.basedir, filename).loadConfig()

    def test_0_9_0b5_api_renamed_config(self):
        # Post-rename config (worker.* API): must be completely warning-free.
        with open(self.filename, "w") as f:
            f.write(sample_0_9_0b5_api_renamed)
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            config.FileLoader(self.basedir, self.filename).loadConfig()

    def test_0_9_0b5_config(self):
        # Pre-rename 0.9.0b5 config: each old slave-flavored name must emit a
        # DeprecatedWorkerNameWarning matching one of these patterns.
        with open(self.filename, "w") as f:
            f.write(sample_0_9_0b5)
        with assertProducesWarnings(
                DeprecatedWorkerNameWarning,
                messages_patterns=[
                    r"'buildbot\.plugins\.buildslave' plugins namespace is deprecated",
                    r"'slavenames' keyword argument is deprecated",
                    r"c\['slaves'\] key is deprecated"]):
            config.FileLoader(self.basedir, self.filename).loadConfig()

    def test_0_7_12_config(self):
        # 0.7.12-era config: BuildSlave class and slave-flavored keys all
        # deprecated by the worker renaming.
        with open(self.filename, "w") as f:
            f.write(sample_0_7_12)
        with assertProducesWarnings(
                DeprecatedWorkerNameWarning,
                messages_patterns=[
                    r"BuildSlave was deprecated",
                    r"c\['slavePortnum'\] key is deprecated",
                    r"'slavename' keyword argument is deprecated",
                    r"c\['slaves'\] key is deprecated"]):
            config.FileLoader(self.basedir, self.filename).loadConfig()

    def test_0_7_6_config(self):
        # 0.7.6-era config: same set of renaming deprecation warnings as the
        # 0.7.12 template above.
        with open(self.filename, "w") as f:
            f.write(sample_0_7_6)
        with assertProducesWarnings(
                DeprecatedWorkerNameWarning,
                messages_patterns=[
                    r"BuildSlave was deprecated",
                    r"c\['slavePortnum'\] key is deprecated",
                    r"'slavename' keyword argument is deprecated",
                    r"c\['slaves'\] key is deprecated"]):
            config.FileLoader(self.basedir, self.filename).loadConfig()


# sample.cfg from various versions, with comments stripped. Adjustments made
# for compatibility are marked with comments

sample_0_7_6 = """\
c = BuildmasterConfig = {}
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
c['slavePortnum'] = 9989
from buildbot.changes.pb import PBChangeSource
c['change_source'] = PBChangeSource()
from buildbot.scheduler import Scheduler
c['schedulers'] = []
c['schedulers'].append(Scheduler(name="all", branch=None,
                                 treeStableTimer=2*60,
                                 builderNames=["buildbot-full"]))
cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
cvsmodule = "buildbot"
from buildbot.process import factory
from buildbot.steps.python_twisted import Trial
from buildbot.steps.shell import Compile
from buildbot.steps.source.cvs import CVS
f1 = factory.BuildFactory()
f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", method="copy"))
f1.addStep(Compile(command=["python", "./setup.py", "build"]))
# original lacked testChanges=True; this failed at the time
f1.addStep(Trial(testChanges=True, testpath="."))
b1 = {'name': "buildbot-full",
      'slavename': "bot1name",
      'builddir': "full",
      'factory': f1,
      }
c['builders'] = [b1]
c['projectName'] = "Buildbot"
c['projectURL'] = "http://buildbot.sourceforge.net/"
c['buildbotURL'] = "http://localhost:8010/"
"""

sample_0_7_12 = """\
c = BuildmasterConfig = {}
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
c['slavePortnum'] = 9989
from buildbot.changes.pb import PBChangeSource
c['change_source'] = PBChangeSource()
from buildbot.scheduler import Scheduler
c['schedulers'] = []
c['schedulers'].append(Scheduler(name="all", branch=None,
                                 treeStableTimer=2*60,
                                 builderNames=["buildbot-full"]))
cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
cvsmodule = "buildbot"
from buildbot.process import factory
# old source is deprecated, so we use the new source
from buildbot.steps.python_twisted import Trial
from buildbot.steps.shell import Compile
from buildbot.steps.source.cvs import CVS
f1 = factory.BuildFactory()
f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", method="copy"))
f1.addStep(Compile(command=["python", "./setup.py", "build"]))
f1.addStep(Trial(testChanges=True, testpath="."))
b1 = {'name': "buildbot-full",
      'slavename': "bot1name",
      'builddir': "full",
      'factory': f1,
      }
c['builders'] = [b1]
c['projectName'] = "Buildbot"
c['projectURL'] = "http://buildbot.sourceforge.net/"
c['buildbotURL'] = "http://localhost:8010/"
"""

# Template for master configuration just before worker renaming.
sample_0_9_0b5 = """\
from buildbot.plugins import *
c = BuildmasterConfig = {}
c['slaves'] = [buildslave.BuildSlave("example-slave", "pass")]
c['protocols'] = {'pb': {'port': 9989}}
c['change_source'] = []
c['change_source'].append(changes.GitPoller(
    'git://github.com/buildbot/hello-world.git',
    workdir='gitpoller-workdir', branch='master',
    pollinterval=300))
c['schedulers'] = []
c['schedulers'].append(schedulers.SingleBranchScheduler(
    name="all",
    change_filter=util.ChangeFilter(branch='master'),
    treeStableTimer=None,
    builderNames=["runtests"]))
c['schedulers'].append(schedulers.ForceScheduler(
    name="force",
    builderNames=["runtests"]))
factory = util.BuildFactory()
factory.addStep(steps.Git(repourl='git://github.com/buildbot/hello-world.git',
                          mode='incremental'))
factory.addStep(steps.ShellCommand(command=["trial", "hello"],
                                   env={"PYTHONPATH": "."}))
c['builders'] = []
c['builders'].append(
    util.BuilderConfig(name="runtests",
                       slavenames=["example-slave"],
                       factory=factory))
c['title'] = "Pyflakes"
c['titleURL'] = "https://launchpad.net/pyflakes"
c['buildbotURL'] = "http://localhost:8020/"
c['www'] = dict(port=8010,
                plugins=dict(waterfall_view={}, console_view={}))
c['db'] = {
    'db_url' : "sqlite:///state.sqlite",
}
"""

# Template for master configuration just after worker renaming.
sample_0_9_0b5_api_renamed = """\
from buildbot.plugins import *
c = BuildmasterConfig = {}
c['workers'] = [worker.Worker("example-worker", "pass")]
c['protocols'] = {'pb': {'port': 9989}}
c['change_source'] = []
c['change_source'].append(changes.GitPoller(
    'git://github.com/buildbot/hello-world.git',
    workdir='gitpoller-workdir', branch='master',
    pollinterval=300))
c['schedulers'] = []
c['schedulers'].append(schedulers.SingleBranchScheduler(
    name="all",
    change_filter=util.ChangeFilter(branch='master'),