prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for L{twisted.internet._sigchld}, an alternate, superior SIGCHLD
monitoring API.
"""

import os, signal, errno

from twisted.python.log import msg
from twisted.trial.unittest import TestCase
from twisted.internet.fdesc import setNonBlocking
from twisted.internet._signals import installHandler, isDefaultHandler
from twisted.internet._signals import _extInstallHandler, _extIsDefaultHandler
from twisted.internet._signals import _installHandlerUsingSetWakeup, \
    _installHandlerUsingSignal, _isDefaultHandler


class SIGCHLDTestsMixin:
    """
    Mixin for L{TestCase} subclasses which defines several tests for
    I{installHandler} and I{isDefaultHandler}.  Subclasses are expected to
    define C{self.installHandler} and C{self.isDefaultHandler} to invoke the
    implementation to be tested.
    """

    # SIGCHLD only exists on POSIX-ish platforms; without it none of these
    # tests can run.
    if getattr(signal, 'SIGCHLD', None) is None:
        skip = "Platform does not have SIGCHLD"

    def installHandler(self, fd):
        """
        Override in a subclass to install a SIGCHLD handler which writes a
        byte to the given file descriptor.  Return the previously registered
        file descriptor.
        """
        raise NotImplementedError()

    def isDefaultHandler(self):
        """
        Override in a subclass to determine if the current SIGCHLD handler is
        SIG_DFL or not.  Return True if it is SIG_DFL, False otherwise.
        """
        raise NotImplementedError()

    def pipe(self):
        """
        Create a non-blocking pipe which will be closed after the currently
        running test.
        """
        read, write = os.pipe()
        # Register cleanups so the descriptors never outlive the test.
        self.addCleanup(os.close, read)
        self.addCleanup(os.close, write)
        setNonBlocking(read)
        setNonBlocking(write)
        return read, write

    def setUp(self):
        """
        Save the current SIGCHLD handler as reported by L{signal.signal} and
        the current file descriptor registered with L{installHandler}.
        """
        handler = signal.getsignal(signal.SIGCHLD)
        if handler != signal.SIG_DFL:
            # Remember the foreign handler so tearDown can restore it, and
            # reset to the default so each test starts from a known state.
            self.signalModuleHandler = handler
            signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        else:
            self.signalModuleHandler = None

        # Passing -1 uninstalls any fd-based handler and returns the fd that
        # was previously registered (or -1 if none was).
        self.oldFD = self.installHandler(-1)

        # Having both a signal-module handler and a registered fd at the same
        # time indicates some earlier code mixed the two APIs incorrectly.
        if self.signalModuleHandler is not None and self.oldFD != -1:
            msg("SIGCHLD setup issue: %r %r" % (self.signalModuleHandler, self.oldFD))
            raise RuntimeError("You used some signal APIs wrong! Try again.")

    def tearDown(self):
        """
        Restore whatever signal handler was present when setUp ran.
        """
        # If tests set up any kind of handlers, clear them out.
        self.installHandler(-1)
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)

        # Now restore whatever the setup was before the test ran.
        if self.signalModuleHandler is not None:
            signal.signal(signal.SIGCHLD, self.signalModuleHandler)
        elif self.oldFD != -1:
            self.installHandler(self.oldFD)

    def test_isDefaultHandler(self):
        """
        L{isDefaultHandler} returns true if the SIGCHLD handler is SIG_DFL,
        false otherwise.
        """
        self.assertTrue(self.isDefaultHandler())
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        self.assertFalse(self.isDefaultHandler())
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        self.assertTrue(self.isDefaultHandler())
        signal.signal(signal.SIGCHLD, lambda *args: None)
        self.assertFalse(self.isDefaultHandler())

    def test_returnOldFD(self):
        """
        L{installHandler} returns the previously registered file descriptor.
        """
        read, write = self.pipe()
        oldFD = self.installHandler(write)
        self.assertEqual(self.installHandler(oldFD), write)

    def test_uninstallHandler(self):
        """
        C{installHandler(-1)} removes the SIGCHLD handler completely.
        """
        read, write = self.pipe()
        self.assertTrue(self.isDefaultHandler())
        self.installHandler(write)
        self.assertFalse(self.isDefaultHandler())
        self.installHandler(-1)
        self.assertTrue(self.isDefaultHandler())

    def test_installHandler(self):
        """
        The file descriptor passed to L{installHandler} has a byte written to
        it when SIGCHLD is delivered to the process.
        """
        read, write = self.pipe()
        self.installHandler(write)

        # The pipe is non-blocking, so reading before any SIGCHLD arrives
        # must fail with EAGAIN rather than block.
        exc = self.assertRaises(OSError, os.read, read, 1)
        self.assertEqual(exc.errno, errno.EAGAIN)

        os.kill(os.getpid(), signal.SIGCHLD)

        # Exactly one wake-up byte should have been written by the handler.
        self.assertEqual(len(os.read(read, 5)), 1)


class DefaultSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for whatever implementation is selected for the L{installHandler}
    and L{isDefaultHandler} APIs.
    """
    installHandler = staticmethod(installHandler)
    isDefaultHandler = staticmethod(isDefaultHandler)


class ExtensionSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for the L{twisted.internet._sigchld} implementation of the
    L{installHandler} and L{isDefaultHandler} APIs.
    """
    try:
        import twisted.internet._sigchld
    except ImportError:
        skip = "twisted.internet._sigchld is not available"

    installHandler = _extInstallHandler
    isDefaultHandler = _extIsDefaultHandler


class SetWakeupSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for the L{signal.set_wakeup_fd} implementation of the
    L{installHandler} and L{isDefaultHandler} APIs.
    """
    # Check both of these.  On Ubuntu 9.10 (to take an example completely at
    # random), Python 2.5 has set_wakeup_fd but not siginterrupt.
    if (getattr(signal, 'set_wakeup_fd', None) is None
        or getattr(signal, 'siginterrupt', None) is None):
        skip = "signal.set_wakeup_fd is not available"

    installHandler = staticmethod(_installHandlerUsingSetWakeup)
    isDefaultHandler = staticmethod(_isDefaultHandler)


class PlainSignalModuleSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
    """
    Tests for the L{signal.signal} implementation of the L{installHandler}
    and L{isDefaultHandler} APIs.
    """
    installHandler = staticmethod(_installHandlerUsingSignal)
    isDefaultHandler = staticmethod(_isDefaultHandler)
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject

class FossilFuel(IdentifiedObject):
    """The fossil fuel consumed by the non-nuclear thermal generating units, e.g., coal, oil, gasThe fossil fuel consumed by the non-nuclear thermal generating units, e.g., coal, oil, gas
    """

    def __init__(self, fuelSulfur=0.0, fuelCost=0.0, fossilFuelType="oil", lowBreakpointP=0.0, fuelDispatchCost=0.0, fuelHandlingCost=0.0, fuelHeatContent=0.0, fuelEffFactor=0.0, fuelMixture=0.0, highBreakpointP=0.0, ThermalGeneratingUnit=None, FuelAllocationSchedules=None, *args, **kw_args):
        """Initialises a new 'FossilFuel' instance.

        @param fuelSulfur: The fuel's fraction of pollution credit per unit of heat content
        @param fuelCost: The cost in terms of heat value for the given type of fuel
        @param fossilFuelType: The type of fossil fuel, such as coal, oil, or gas. Values are: "oil", "coal", "lignite", "gas"
        @param lowBreakpointP: The active power output level of the unit at which the given type of fuel is switched off. This fuel (e.g., oil) is sometimes used to stabilize the base fuel (e.g., coal) at low active power output levels.
        @param fuelDispatchCost: The cost of fuel used for economic dispatching which includes: fuel cost, transportation cost,  and incremental maintenance cost
        @param fuelHandlingCost: Handling and processing cost associated with this fuel
        @param fuelHeatContent: The amount of heat per weight (or volume) of the given type of fuel
        @param fuelEffFactor: The efficiency factor for the fuel (per unit) in terms of the effective energy absorbed
        @param fuelMixture: Relative amount of the given type of fuel, when multiple fuels are being consumed.
        @param highBreakpointP: The active power output level of the unit at which the given type of fuel is switched on. This fuel (e.g., oil) is sometimes used to supplement the base fuel (e.g., coal) at high active power output levels.
        @param ThermalGeneratingUnit: A thermal generating unit may have one or more fossil fuels
        @param FuelAllocationSchedules: A fuel allocation schedule must have a fossil fuel
        """
        #: The fuel's fraction of pollution credit per unit of heat content
        self.fuelSulfur = fuelSulfur

        #: The cost in terms of heat value for the given type of fuel
        self.fuelCost = fuelCost

        #: The type of fossil fuel, such as coal, oil, or gas. Values are: "oil", "coal", "lignite", "gas"
        self.fossilFuelType = fossilFuelType

        #: The active power output level of the unit at which the given type of fuel is switched off. This fuel (e.g., oil) is sometimes used to stabilize the base fuel (e.g., coal) at low active power output levels.
        self.lowBreakpointP = lowBreakpointP

        #: The cost of fuel used for economic dispatching which includes: fuel cost, transportation cost,  and incremental maintenance cost
        self.fuelDispatchCost = fuelDispatchCost

        #: Handling and processing cost associated with this fuel
        self.fuelHandlingCost = fuelHandlingCost

        #: The amount of heat per weight (or volume) of the given type of fuel
        self.fuelHeatContent = fuelHeatContent

        #: The efficiency factor for the fuel (per unit) in terms of the effective energy absorbed
        self.fuelEffFactor = fuelEffFactor

        #: Relative amount of the given type of fuel, when multiple fuels are being consumed.
        self.fuelMixture = fuelMixture

        #: The active power output level of the unit at which the given type of fuel is switched on. This fuel (e.g., oil) is sometimes used to supplement the base fuel (e.g., coal) at high active power output levels.
        self.highBreakpointP = highBreakpointP

        # One-to-many back-reference; assignment goes through the property
        # setter below so both sides of the association stay consistent.
        self._ThermalGeneratingUnit = None
        self.ThermalGeneratingUnit = ThermalGeneratingUnit

        self._FuelAllocationSchedules = []
        self.FuelAllocationSchedules = [] if FuelAllocationSchedules is None else FuelAllocationSchedules

        super(FossilFuel, self).__init__(*args, **kw_args)

    # CIM metadata used by generic (de)serialisation code.
    _attrs = ["fuelSulfur", "fuelCost", "fossilFuelType", "lowBreakpointP", "fuelDispatchCost", "fuelHandlingCost", "fuelHeatContent", "fuelEffFactor", "fuelMixture", "highBreakpointP"]
    _attr_types = {"fuelSulfur": float, "fuelCost": float, "fossilFuelType": str, "lowBreakpointP": float, "fuelDispatchCost": float, "fuelHandlingCost": float, "fuelHeatContent": float, "fuelEffFactor": float, "fuelMixture": float, "highBreakpointP": float}
    _defaults = {"fuelSulfur": 0.0, "fuelCost": 0.0, "fossilFuelType": "oil", "lowBreakpointP": 0.0, "fuelDispatchCost": 0.0, "fuelHandlingCost": 0.0, "fuelHeatContent": 0.0, "fuelEffFactor": 0.0, "fuelMixture": 0.0, "highBreakpointP": 0.0}
    _enums = {"fossilFuelType": "FuelType"}
    _refs = ["ThermalGeneratingUnit", "FuelAllocationSchedules"]
    _many_refs = ["FuelAllocationSchedules"]

    def getThermalGeneratingUnit(self):
        """A thermal generating unit may have one or more fossil fuels
        """
        return self._ThermalGeneratingUnit

    def setThermalGeneratingUnit(self, value):
        # Detach from the previous unit first so its FossilFuels list no
        # longer references this instance.
        if self._ThermalGeneratingUnit is not None:
            filtered = [x for x in self.ThermalGeneratingUnit.FossilFuels if x != self]
            self._ThermalGeneratingUnit._FossilFuels = filtered

        self._ThermalGeneratingUnit = value
        if self._ThermalGeneratingUnit is not None:
            if self not in self._ThermalGeneratingUnit._FossilFuels:
                self._ThermalGeneratingUnit._FossilFuels.append(self)

    ThermalGeneratingUnit = property(getThermalGeneratingUnit, setThermalGeneratingUnit)

    def getFuelAllocationSchedules(self):
        """A fuel allocation schedule must have a fossil fuel
        """
        return self._FuelAllocationSchedules

    def setFuelAllocationSchedules(self, value):
        # Unlink every schedule currently pointing at us, then claim the new
        # ones (writing the private attribute avoids recursive property calls).
        for x in self._FuelAllocationSchedules:
            x.FossilFuel = None
        for y in value:
            y._FossilFuel = self
        self._FuelAllocationSchedules = value

    FuelAllocationSchedules = property(getFuelAllocationSchedules, setFuelAllocationSchedules)

    def addFuelAllocationSchedules(self, *FuelAllocationSchedules):
        for obj in FuelAllocationSchedules:
            obj.FossilFuel = self

    def removeFuelAllocationSchedules(self, *FuelAllocationSchedules):
        for obj in FuelAllocationSchedules:
            obj.FossilFuel = None
# A Test Program for pipeTestService.py
#
# Install and start the Pipe Test service, then run this test
# either from the same machine, or from another using the "-s" param.
#
# Eg: pipeTestServiceClient.py -s server_name Hi There
# Should work.

from win32pipe import *
from win32file import *
from win32event import *
import pywintypes
import win32api
import winerror
import sys, os, traceback

verbose = 0

#def ReadFromPipe(pipeName):
# Could (Should?) use CallNamedPipe, but this technique allows variable size
# messages (whereas you must supply a buffer size for CallNamedPipe!
#    hPipe = CreateFile(pipeName, GENERIC_WRITE, 0, None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)
#    more = 1
#    while more:
#        hr = ReadFile(hPipe, 256)
#        if hr==0:
#            more = 0
#        except win32api.error (hr, fn, desc):
#            if hr==winerror.ERROR_MORE_DATA:
#                data = dat
#

def CallPipe(fn, args):
    """Call fn(*args), retrying while the named pipe reports it is busy.

    Retries up to 8 times, sleeping 5 seconds between attempts; any error
    other than ERROR_PIPE_BUSY is re-raised immediately.
    """
    ret = None
    retryCount = 0
    while retryCount < 8:  # Keep looping until user cancels.
        retryCount = retryCount + 1
        try:
            return fn(*args)
        except win32api.error as exc:
            if exc.winerror==winerror.ERROR_PIPE_BUSY:
                win32api.Sleep(5000)
                continue
            else:
                raise

    raise RuntimeError("Could not make a connection to the server")

def testClient(server,msg):
    """Send one message to the PyPipeTest pipe on `server` and read the reply."""
    if verbose:
        print("Sending", msg)
    data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 256, NMPWAIT_WAIT_FOREVER))
    if verbose:
        print("Server sent back '%s'" % data)
    print("Sent and received a message!")

def testLargeMessage(server, size = 4096):
    """Send a `size`-byte message and check the reply round-trips at full size."""
    if verbose:
        print("Sending message of size %d" % (size))
    msg = "*" * size
    data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 512, NMPWAIT_WAIT_FOREVER))
    # Non-zero difference means the echo was truncated or padded.
    if len(data)-size:
        print("Sizes are all wrong - send %d, got back %d" % (size, len(data)))

def stressThread(server, numMessages, wait):
    """Worker: fire numMessages pipe calls, then signal `wait` no matter what."""
    try:
        try:
            for i in range(numMessages):
                r = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, "#" * 512, 1024, NMPWAIT_WAIT_FOREVER))
        except:
            traceback.print_exc()
            # NOTE(review): `i` is unbound if the very first call raises
            # before the loop body runs -- TODO confirm intended.
            print("Failed after %d messages" % i)
    finally:
        # Always release the event so the spawner's wait does not hang.
        SetEvent(wait)

def stressTestClient(server, numThreads, numMessages):
    """Spawn numThreads workers, each sending numMessages, and wait for all."""
    import _thread
    thread_waits = []
    for t_num in range(numThreads):
        # Note I could just wait on thread handles (after calling DuplicateHandle)
        # See the service itself for an example of waiting for the clients...
        wait = CreateEvent(None, 0, 0, None)
        thread_waits.append(wait)
        _thread.start_new_thread(stressThread, (server,numMessages, wait))
    # Wait for all threads to finish.
    WaitForMultipleObjects(thread_waits, 1, INFINITE)

def main():
    """Parse command-line options and run the selected pipe tests."""
    import sys, getopt
    server = "."
    thread_count = 0
    msg_count = 500
    try:
        opts, args = getopt.getopt(sys.argv[1:], 's:t:m:vl')
        for o,a in opts:
            if o=='-s':
                server = a
            if o=='-m':
                msg_count = int(a)
            if o=='-t':
                thread_count = int(a)
            if o=='-v':
                global verbose
                verbose = 1
            if o=='-l':
                testLargeMessage(server)
        # Remaining args form the message; the service expects bytes.
        msg = " ".join(args).encode("mbcs")
    except getopt.error as msg:
        # NOTE: `msg` here shadows the message variable above (original
        # sample behavior) -- it only holds the getopt error for printing.
        print(msg)
        my_name = os.path.split(sys.argv[0])[1]
        print("Usage: %s [-v] [-s server] [-t thread_count=0] [-m msg_count=500] msg ..." % my_name)
        print(" -v = verbose")
        print(" Specifying a value for -t will stress test using that many threads.")
        return
    testClient(server, msg)
    if thread_count > 0:
        print("Spawning %d threads each sending %d messages..." % (thread_count, msg_count))
        stressTestClient(server, thread_count, msg_count)

if __name__=='__main__':
    main()
class Config:
    """Development configuration for an Abilian Core demo instance.

    Plain class attributes consumed by Flask / Flask extensions / Celery;
    values here are development defaults (debug toolbars on, local redis,
    sqlite database) and are not suitable for production.
    """

    # specific (for this development instance)
    # SERVER_NAME = 'localhost:5000'
    SQLALCHEMY_DATABASE_URI = "sqlite:///data.db"
    ANTIVIRUS_CHECK_REQUIRED = False
    SECRET_KEY = "toto"  # dev-only secret; must be overridden in production

    # develop settings
    DEBUG = True
    ASSETS_DEBUG = True
    DEBUG_TB_ENABLED = True
    # TEMPLATE_DEBUG = False
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    DEBUG_TB_PROFILER_ENABLED = False

    # Generic for this project
    SITE_NAME = "Abilian Core Demo"
    MAIL_SENDER = "sender@example.com"
    SESSION_COOKIE_NAME = "abilian-core-session"
    PRIVATE_SITE = True
    MAIL_ASCII_ATTACHMENTS = True
    BABEL_ACCEPT_LANGUAGES = ("fr", "en", "es", "tr", "zh")

    # celery settings -- broker and result backend share one local redis db
    REDIS_URI = "redis://localhost/0"
    BROKER_URL = REDIS_URI
    CELERY_RESULT_BACKEND = REDIS_URI
    CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
    CELERYD_PREFETCH_MULTIPLIER = 1

    CELERY_ALWAYS_EAGER = False  # True: run tasks locally, no async
    CELERY_EAGER_PROPAGATES_EXCEPTIONS = True

    # uncomment if you don't want to use system timezone
    # CELERY_TIMEZONE = 'Europe/Paris'
olic booleans as variadic positional arguments. """ if len(args) > 0 and isinstance(args[0], (list, tuple)): raise Exception("Tuple or list passed to add_constraints!") if o.TRACK_CONSTRAINTS in self.options and len(args) > 0: if o.SIMPLIFY_CONSTRAINTS in self.options: constraints = [ self.simplify(a) for a in args ] else: constraints = args self._inspect('constraints', BP_BEFORE, added_constraints=constraints) constraints = self._inspect_getattr("added_constraints", constraints) added = self.se.add(*constraints) self._inspect('constraints', BP_AFTER) # add actions for the added constraints if o.TRACK_CONSTRAINT_ACTIONS in self.options: for c in added: sac = SimActionConstraint(self, c) self.history.add_action(sac) else: # preserve the old action logic for when we don't track constraints (why?) if ( 'action' in kwargs and kwargs['action'] and o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0 ): for arg in args: if self.se.symbolic(arg): sac = SimActionConstraint(self, arg) self.history.add_action(sac) if o.ABSTRACT_SOLVER in self.options and len(args) > 0: for arg in args: if self.se.is_false(arg): self._satisfiable = False return if self.se.is_true(arg): continue # `is_true` and `is_false` does not use VSABackend currently (see commits 97a75366 and 2dfba73e in # claripy). There is a chance that VSA backend can in fact handle it. # Therefore we try to resolve it with VSABackend again if claripy.backends.vsa.is_false(arg): self._satisfiable = False return if claripy.backends.vsa.is_true(arg): continue # It's neither True or False. Let's try to apply the condition # We take the argument, extract a list of constrained SIs out of it (if we could, of course), and # then replace each original SI the intersection of original SI and the constrained one. _, converted = self.se.constraint_to_si(arg) for original_expr, constrained_si in converted: if not original_expr.variables: l.error('Incorrect original_expression to replace in add_constraints(). 
' + 'This is due to defects in VSA logics inside claripy. Please report ' + 'to Fish and he will fix it if he\'s free.') continue new_expr = constrained_si self.registers.replace_all(original_expr, new_expr) for _, region in self.memory.regions.items(): region.memory.replace_all(original_expr, new_expr) l.debug("SimState.add_constraints: Applied to final state.") elif o.SYMBOLIC not in self.options and len(args) > 0: for arg in args: if self.se.is_false(arg): self._satisfiable = False return def satisfiable(self, **kwargs): """ Whether the state's constraints are satisfiable """ if o.ABSTRACT_SOLVER in self.options or o.SYMBOLIC not in self.options: extra_constraints = kwargs.pop('extra_constraints', ()) for e in extra_constraints: if self.se.is_false(e): return False return self._satisfiable else: return self.se.satisfiable(**kwargs) def downsize(self): """ Clean up after the solver engine. Calling this when a state no longer needs to be solved on will reduce memory usage. """ if 'solver_engine' in self.pl
ugins: self.se.downsize() # # State branching operations # def step(self, **kwargs): """ Perform a step of symbolic execution using this state. Any arguments to `AngrObjectFactory.successors` can be passed to this. :return: A SimSuccessors object categorizing the results of the step. """ return self.project.factory.successors(self, **kwargs) def block(self, *args, **kwargs): """ Represent t
he basic block at this state's instruction pointer. Any arguments to `AngrObjectFactory.block` can ba passed to this. :return: A Block object describing the basic block of code at this point. """ if not args and 'addr' not in kwargs: kwargs['addr'] = self.addr return self.project.factory.block(*args, backup_state=self, **kwargs) # Returns a dict that is a copy of all the state's plugins def _copy_plugins(self): memo = {} out = {} for n, p in self.plugins.iteritems(): if id(p) in memo: out[n] = memo[id(p)] else: out[n] = p.copy() memo[id(p)] = out[n] return out def copy(self): """ Returns a copy of the state. """ if self._global_condition is not None: raise SimStateError("global condition was not cleared before state.copy().") c_plugins = self._copy_plugins() state = SimState(project=self.project, arch=self.arch, plugins=c_plugins, options=self.options, mode=self.mode, os_name=self.os_name) state.uninitialized_access_handler = self.uninitialized_access_handler state._special_memory_filler = self._special_memory_filler state.ip_constraints = self.ip_constraints return state def merge(self, *others, **kwargs): """ Merges this state with the other states. Returns the merging result, merged state, and the merge flag. :param states: the states to merge :param merge_conditions: a tuple of the conditions under which each state holds :param common_ancestor: a state that represents the common history between the states being merged. Usually it is only available when EFFICIENT_STATE_MERGING is enabled, otherwise weak-refed states might be dropped from state history instances. :param plugin_whitelist: a list of plugin names that will be merged. If this option is given and is not None, any plugin that is not inside this list will not be merged, and will be created as a fresh instance in the new state. :param common_ancestor_history: a SimStateHistory instance that represents the common history between the states being merged. 
This is to allow optimal state merging when EFFICIENT_STATE_MERGING is disabled. :return: (merged state, merge flag, a bool indicating if any merging occured) """ merge_conditions = kwargs.pop('merge_conditions', None) common_ancestor = kwargs.pop('common_ancestor', None) plugin_whitelist = kwargs.pop('plugin_whitelist', None) common_ancestor_history = kwargs.pop('common_ancestor_history', None) if len(kwargs) != 0: raise ValueError("invalid arguments: %s" % kwargs.keys()) if merge_conditions is None: # TODO: maybe make the length of this smaller? Maybe: math.ceil(math.log(len(others)+1, 2)) merge_flag = self.se.BVS("state_merge_%d" % merge_counter.next(), 16) merge_values = range(len(others)+1) merge_conditions = [ merge_flag == b for b in merge_values ] else: merge_conditions = [ (sel
########## recombination.py parameters

class Recombination_Parameters(object):
    """Configuration for the recombination.py tool: I/O folders, context
    window sizes, and the lox seed sequences to search for."""

    # Change these two values to the folders you prefer - use an absolute path e.g. /Users/Harry/fastq-data and
    # /Users/Harry/csv-data or a path relative to the tools directory.
    # You may use the same folder for input and output.
    input_folder = "data"
    output_folder = "data"

    # The number of bases to retrieve before the seed sequence
    HEAD = 10
    # The number of bases to retrieve after the seed sequences
    TAIL = 10

    # Seed sequences keyed by site name.
    seed_sequences = {
        "loxP": "ATAACTTCGTATAGCATACATTATACGAAGTTAT",
        "lox2272": "ATAACTTCGTATAGGATACTTTATACGAAGTTAT",
    }


########## serotypes.py parameters

class Serotypes_Parameters(object):
    """Configuration for the serotypes.py tool: I/O folders and the AAV
    serotype signature sequences to match."""

    # Change these two values to the folders you prefer - use an absolute path e.g. /Users/Harry/fastq-data and
    # /Users/Harry/csv-data or a path relative to the tools directory.
    # You may use the same folder for input and output.
    input_folder = "data"
    output_folder = "data"

    # These are the signatures that will be matched. The first part is the name, the part in brackets contains the
    # actual signatures, separated by a comma (each serotype can have multiple signatures)
    signatures = {
        "AAV1": [
            "AGTGCTTCAACGGGGGCCAG",
            "GGGCGTGAATCCATCATCAACCCTGG",
            "CCGGAGCTTCAAACACTGCATTGGACAAT"
        ],
        "AAV2": [
            "AGGCAACAGACAAGCAGCTACC",
            "AACAGACAAGCAGCTACCGCA"
        ],
        "AAV5": [
            "TCCAAGCCTTCCACCTCGTCAGACGCCGAA",
            "CACCAACAACCAGAGCTCCACCACTG",
            "GCCCGTCAGCAGCTTCATC"
        ],
        "AAV7": [
            "AGTGAAACTGCAGGTAGTACC"
        ],
        "AAV8": [
            "GCAAAACACGGCTCCTCAAAT",
            "CAGCAAGCGCTGGAACCCCGAGATCCAGTA",
            "AAATACCATCTGAATGGAAGAAATTCATTG",
            "CGTGGCAGATAACTTGCAGC",
            "ATCCTCCGACCACCTTCAACC"
        ],
        "AAV9": [
            "AGTGCCCAAGCACAGGCGCA",
            "ATCTCTCAAAGACTATTAAC",
            "GGCGAGCAGTCTTCCAGGCA"
        ],
        "AAVrh10": [
            "CTACAAATCTACAAATGTGGACTTTG"
        ],
        "PHPeB": [
            "CTTTGGCGGTGCCTTTTAAGGCACAGGCGCAGA"
        ],
        "PHPs": [
            "AGGCGGTTAGGACGTCTTTGGCACAGGCGCAGA"
        ],
        "AAVrg": [
            "TAGCAGACCAAGACTACACAAAAACTGCT"
        ],
    }
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else (2, 1, 1, 2048)) self.assertEqual(output_shape, output.shape) if data_format == 'channels_first': block_shapes = { 'block0': (2, 64, 112, 112), 'block0mp': (2, 64, 55, 55), 'block1': (2, 256, 55, 55), 'block2': (2, 512, 28, 28), 'block3': (2, 1024, 7, 7), 'block4': (2, 2048, 1, 1), } else: block_shapes = { 'block0': (2, 112, 112, 64), 'block0mp': (2, 55, 55, 64), 'block1': (2, 55, 55, 256), 'block2': (2, 28, 28, 512), 'block3': (2, 7, 7, 1024), 'block4': (2, 1, 1, 2048), } for (block_name, block) in intermediates_dict.items(): self.assertEqual(block_shapes[block_name], block.shape) def _test_train(self, execution_mode=None): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50(data_format) tf.compat.v2.summary.experimental.set_step( tf.compat.v1.train.get_or_create_global_step()) logdir = tempfile.mkdtemp() with tf.compat.v2.summary.create_file_writer( logdir, max_queue=0, name='t0').as_default(), tf.compat.v2.summary.record_if(True): with tf.device(device), context.execution_mode(execution_mode): optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) images, labels = resnet50_test_util.random_batch(2, d
ata_format) apply_gradients(model, optimizer, compute_gradients(model, images, labels)) self.assertEqual(320, len(model.variables)) context.async_wait() events = events_fr
om_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'loss') @test_util.disable_tfrt('Flaky test. b/157103729') def test_train(self): self._test_train() @test_util.disable_tfrt('TFE_ContextGetExecutorForThread missing b/156188669') def test_train_async(self): self._test_train(execution_mode=context.ASYNC) @test_util.disable_tfrt('Flaky test. b/157103729') def test_no_garbage(self): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50(data_format) optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) with tf.device(device): images, labels = resnet50_test_util.random_batch(2, data_format) gc.disable() # Warm up. Note that this first run does create significant amounts of # garbage to be collected. The hope is that this is a build-only effect, # and a subsequent training loop will create nothing which needs to be # collected. apply_gradients(model, optimizer, compute_gradients(model, images, labels)) gc.collect() previous_gc_debug_flags = gc.get_debug() gc.set_debug(gc.DEBUG_SAVEALL) for _ in range(2): # Run twice to ensure that garbage that is created on the first # iteration is no longer accessible. apply_gradients(model, optimizer, compute_gradients(model, images, labels)) gc.collect() # There should be no garbage requiring collection. 
self.assertEqual(0, len(gc.garbage)) gc.set_debug(previous_gc_debug_flags) gc.enable() class MockIterator(object): def __init__(self, tensors): self._tensors = [tf.identity(x) for x in tensors] def next(self): return self._tensors class ResNet50Benchmarks(tf.test.Benchmark): def _report(self, label, start, num_iters, device, batch_size, data_format, num_replicas=1): resnet50_test_util.report(self, label, start, num_iters, device, batch_size, data_format, num_replicas) def _train_batch_sizes(self): """Choose batch sizes based on GPU capability.""" for device in device_lib.list_local_devices(): # TODO(b/141475121): We need some way to check which batch sizes would # work using a public API. if tf.DeviceSpec.from_string(device.name).device_type == 'GPU': # Avoid OOM errors with larger batch sizes, which seem to cause errors # later on even if caught. # # TODO(allenl): Base this on device memory; memory limit information # during the test seems to exclude the amount TensorFlow has allocated, # which isn't useful. if 'K20' in device.physical_device_desc: return (16,) # Quardro P1000. if 'P1000' in device.physical_device_desc: return (16,) if 'P100' in device.physical_device_desc: return (16, 32, 64) if tf.DeviceSpec.from_string(device.name).device_type == 'TPU': return (32,) return (16, 32) def _force_device_sync(self): # If this function is called in the context of a non-CPU device # (e.g., inside a 'with tf.device("/gpu:0")' block) # then this will force a copy from CPU->NON_CPU_DEVICE->CPU, # which forces a sync. This is a roundabout way, yes. 
tf.constant(1.).cpu() def _benchmark_eager_apply(self, label, device_and_format, defun=False, execution_mode=None): with context.execution_mode(execution_mode): device, data_format = device_and_format model = resnet50.ResNet50(data_format) if defun: model.call = tf.function(model.call) batch_size = 64 num_burn = 5 num_iters = 30 with tf.device(device): images, _ = resnet50_test_util.random_batch(batch_size, data_format) for _ in xrange(num_burn): model(images, training=False).cpu() if execution_mode: context.async_wait() gc.collect() start = time.time() for _ in xrange(num_iters): model(images, training=False).cpu() if execution_mode: context.async_wait() self._report(label, start, num_iters, device, batch_size, data_format) def benchmark_eager_apply_sync(self): self._benchmark_eager_apply( 'eager_apply', resnet50_test_util.device_and_data_format(), defun=False) def benchmark_eager_apply_async(self): self._benchmark_eager_apply( 'eager_apply_async', resnet50_test_util.device_and_data_format(), defun=False, execution_mode=context.ASYNC) def benchmark_eager_apply_with_defun(self): self._benchmark_eager_apply( 'eager_apply_with_defun', resnet50_test_util.device_and_data_format(), defun=True) def _benchmark_eager_train(self, label, make_iterator, device_and_format, defun=False, execution_mode=None): with context.execution_mode(execution_mode): device, data_format = device_and_format for batch_size in self._train_batch_sizes(): (images, labels) = resnet50_test_util.random_batch( batch_size, data_format) model = resnet50.ResNet50(data_format) # TODO(b/161911585): tf_to_corert MLIR lowering pipeline should handle # case when momentum is not set. 
optimizer = tf.keras.optimizers.SGD(0.1, 0.1) apply_grads = apply_gradients if defun: model.call = tf.function(model.call) apply_grads = tf.function(apply_gradients) num_burn = 3 num_iters = 10 with tf.device(device): iterator = make_iterator((images, labels)) for _ in xrange(num_burn): (images, labels) = iterator.next() apply_grads(model, optimizer, compute_gradients(model, images, labels)) if execution_mode: context.async_wait() self._force_device_sync() gc.collect() start = time.time() for _ in xrange(num_iters): (images, labels) = iterator.next() apply_grads(model, optimizer, compute_gradients(model, images, labels)) if execution_mode: context.async
#!/usr/bin/env python3
# 574A_bear.py - Codeforces.com/problemset/problem/574/A Bear program by Sergey 2015

import heapq
import unittest
import sys

###############################################################################
# Bear Class
###############################################################################


class Bear:
    """Codeforces 574A: minimum number of citizens Limak must bribe to win."""

    def __init__(self, test_inputs=None):
        """Read the problem input.

        test_inputs -- newline-separated input string for testing; when
                       None the input is read from stdin instead.

        Sets:
            self.n    -- number of candidates
            self.nums -- votes per candidate; nums[0] belongs to Limak
        """

        it = iter(test_inputs.split("\n")) if test_inputs else None

        def uinput():
            return next(it) if it else sys.stdin.readline().rstrip()

        # Reading single elements
        self.n = int(uinput())

        # Reading a single line of multiple elements
        self.nums = list(map(int, uinput().split()))

    def calculate(self):
        """Return (as a string) the minimum number of citizens to bribe.

        Greedy strategy: repeatedly steal one vote from the currently
        strongest opponent until Limak strictly leads.  A max-heap keeps
        the strongest opponent on top in O(log n) per step, instead of
        re-sorting the whole list on every iteration as before.
        """

        limak = self.nums[0]

        # heapq is a min-heap, so rival vote counts are stored negated
        # to make heap[0] the current maximum.
        rivals = [-votes for votes in self.nums[1:]]
        if not rivals:
            # No opponents at all: Limak already wins without bribing.
            return "0"
        heapq.heapify(rivals)

        result = 0
        while limak <= -rivals[0]:
            strongest = -heapq.heappop(rivals)
            heapq.heappush(rivals, -(strongest - 1))
            limak += 1
            result += 1

        return str(result)

###############################################################################
# Unit Tests
###############################################################################


class unitTests(unittest.TestCase):

    def test_single_test(self):
        """ Bear class testing """

        # Constructor test
        test = "5\n5 1 11 2 8"
        d = Bear(test)
        self.assertEqual(d.n, 5)
        self.assertEqual(d.nums, [5, 1, 11, 2, 8])

        # Sample test
        self.assertEqual(Bear(test).calculate(), "4")

        # Sample test
        test = "4\n1 8 8 8"
        self.assertEqual(Bear(test).calculate(), "6")

        # Sample test
        test = "2\n7 6"
        self.assertEqual(Bear(test).calculate(), "0")

        # My tests
        test = "4\n0 1 1 1"
        self.assertEqual(Bear(test).calculate(), "2")

        # Time limit test
        self.time_limit_test(100)

    def time_limit_test(self, nmax):
        """ Timelimit testing """
        import random
        import timeit

        # Random inputs
        test = str(nmax) + "\n"
        test += "0 "
        nums = [1000 for i in range(nmax-1)]
        test += " ".join(map(str, nums)) + "\n"

        # Run the test
        start = timeit.default_timer()
        d = Bear(test)
        calc = timeit.default_timer()
        d.calculate()
        stop = timeit.default_timer()
        print("\nTimelimit Test: " +
              "{0:.3f}s (init {1:.3f}s calc {2:.3f}s)".
              format(stop-start, calc-start, stop-calc))


if __name__ == "__main__":

    # Avoiding recursion limitations
    sys.setrecursionlimit(100000)

    if sys.argv[-1] == "-ut":
        unittest.main(argv=[" "])

    # Print the result string
    sys.stdout.write(Bear().calculate())
#!/usr/bin/python -O
#
# /usr/sbin/webapp-config
#       Python script for managing the deployment of web-based
#       applications
#
#       Originally written for the Gentoo Linux distribution
#
# Copyright (c) 1999-2007 Authors
#       Released under v2 of the GNU GPL
#
# Author(s)     Stuart Herbert
#               Renat Lumpau   <rl03@gentoo.org>
#               Gunnar Wrobel  <wrobel@gentoo.org>
#
# ========================================================================
''' Provides a class that handles ebuild related tasks.  '''

# ========================================================================
# Dependencies
# ------------------------------------------------------------------------

import os.path, re, pwd, grp

from WebappConfig.debug import OUT
import WebappConfig.wrapper as wrapper

from WebappConfig.sandbox import Sandbox

# ========================================================================
# Handler for ebuild related tasks
# ------------------------------------------------------------------------

class Ebuild:
    '''
    This class handles all ebuild related tasks.  Currently this includes
    displaying the post install instructions as well as running hooks
    provided by the ebuild.
    '''

    def __init__(self, config):
        '''
        Store the configuration and precompute the two directories this
        class works with.

        config -- webapp-config configuration object; option values are
                  read from its 'USER' section via get_config().
        '''
        self.config = config
        self.__root = wrapper.get_root(self.config)
        # Collapses runs of '/' so that root + config-path concatenation
        # always yields a canonical path.
        self.__re = re.compile('/+')
        # Directory holding the installed application files (source of
        # the post-install instruction files).
        self.__sourced = self.__re.sub('/', self.__root
                                       + self.get_config('my_appdir'))
        # Directory holding the ebuild-provided hook scripts.
        self.__hooksd = self.__re.sub('/', self.__root
                                      + self.get_config('my_hookscriptsdir'))

    def get_config(self, option):
        ''' Return a config option from the 'USER' section.'''
        return self.config.config.get('USER', option)

    def run_hooks(self, type, server):
        '''
        Run the hook scripts - if there are any.

        type   -- hook phase string passed as the script's first argument
        server -- server object used to resolve the server uid/gid
                  environment variables (may be None)

        Each executable file in the hook directory is spawned inside the
        sandbox with the exported variables from run_vars().  Does nothing
        in pretend mode.
        '''
        if self.config.pretend():
            return

        sandbox = Sandbox(self.config)

        # save list of environment variables to set
        env_map = self.run_vars(server)

        if os.path.isdir(self.__hooksd):
            for x in os.listdir(self.__hooksd):

                if (os.path.isfile(self.__hooksd + '/' + x) and
                    os.access(self.__hooksd + '/' + x, os.X_OK)):

                    OUT.debug('Running hook script', 7)

                    sandbox.spawn(self.__hooksd + '/' + x + ' ' + type, env_map)

    def show_post(self, filename, ptype, server = None):
        '''
        Display one of the post files.

        filename -- name of the instruction file inside the app directory
        ptype    -- label used in the banner (e.g. 'install', 'upgrade')
        server   -- server object forwarded to run_vars() (may be None)

        Silently returns if the instruction file does not exist.
        '''

        post_file = self.__sourced + '/' + filename

        OUT.debug('Check for instruction file', 7)

        if not os.path.isfile(post_file):
            return

        # Export the variables so the shell can substitute them below.
        self.run_vars(server)

        post_instructions = open(post_file).readlines()

        OUT.debug('Read post instructions', 7)

        post = [
            '',
            '=================================================================',
            'POST-' + ptype.upper() + ' INSTRUCTIONS',
            '=================================================================',
            '']

        # Each line is piped through the shell's printf so that exported
        # variables (e.g. ${VHOST_ROOT}) get expanded.
        # NOTE(review): only '"' is escaped before the line is embedded in
        # a shell command — backticks/$() in the instruction file would be
        # executed by the shell.  The files come from the installed ebuild,
        # so this is trusted input, but worth confirming.
        for i in post_instructions:
            i = i.replace('"', '\\"')
            post.append(os.popen('printf "' + i + '"\n').read()[:-1])

        post = post + [
            '',
            '=================================================================',
            '']

        for i in post:
            OUT.notice(i)

    def show_postinst(self, server = None):
        '''
        Display any post-installation instructions, if there are any.
        '''

        OUT.debug('Running show_postinst', 6)

        self.show_post(filename = 'postinst-en.txt', ptype = 'install',
                       server = server)

    def show_postupgrade(self, server = None):
        '''
        Display any post-upgrade instructions, if there are any.
        '''

        OUT.debug('Running show_postupgrade', 6)

        self.show_post(filename = 'postupgrade-en.txt', ptype = 'upgrade',
                       server = server)

    def run_vars(self, server = None):
        '''
        This function exports the necessary variables to the shell
        environment so that they are accessible within the shell scripts
        and/or files provided by the ebuild.

        server -- server object; when given, VHOST_SERVER_UID/GID are
                  resolved to user/group *names* via pwd/grp.

        Returns a dict mapping each exported variable name to the string
        value that was exported.
        '''

        v_root = self.get_config('vhost_root')
        v_cgi  = self.get_config('g_cgibindir')
        v_conf = self.get_config('vhost_config_dir')
        v_err  = v_root + '/' + self.get_config('my_errorsbase')
        v_icon = v_root + '/' + self.get_config('my_iconsbase')
        g_inst = self.get_config('g_installdir')
        g_htd  = self.get_config('g_htdocsdir')
        g_orig = self.get_config('g_orig_installdir')

        vsu = None
        vsg = None
        if server:
            vsu = pwd.getpwuid(server.vhost_server_uid)[0]
            vsg = grp.getgrgid(server.vhost_server_gid)[0]

        OUT.debug('Exporting variables', 7)

        # Entries mapped to None below are filled from the configuration:
        # the value of config option key.lower() is used (see loop below).
        export_map = {'MY_HOSTROOTDIR'      : None,
                      'MY_HTDOCSDIR'        : None,
                      'MY_CGIBINDIR'        : None,
                      'MY_INSTALLDIR'       : g_inst,
                      'MY_ICONSDIR'         : None,
                      'MY_SERVERCONFIGDIR'  : None,
                      'MY_ERRORSDIR'        : None,
                      'MY_SQLSCRIPTSDIR'    : None,
                      'VHOST_ROOT'          : None,
                      'VHOST_HTDOCSDIR'     : g_htd,
                      'VHOST_CGIBINDIR'     : v_cgi,
                      'VHOST_CONFDIR'       : v_conf,
                      'VHOST_ERRORSDIR'     : v_err,
                      'VHOST_ICONSDIR'      : v_icon,
                      'VHOST_HOSTNAME'      : None,
                      'VHOST_SERVER'        : None,
                      'VHOST_APPDIR'        : g_orig,
                      'VHOST_CONFIG_UID'    : None,
                      'VHOST_CONFIG_GID'    : None,
                      'VHOST_SERVER_UID'    : vsu,
                      'VHOST_SERVER_GID'    : vsg,
                      'VHOST_DEFAULT_UID'   : None,
                      'VHOST_DEFAULT_GID'   : None,
                      'VHOST_PERMS_SERVEROWNED_DIR'  : None,
                      'VHOST_PERMS_SERVEROWNED_FILE' : None,
                      'VHOST_PERMS_CONFIGOWNED_DIR'  : None,
                      'VHOST_PERMS_CONFIGOWNED_FILE' : None,
                      'VHOST_PERMS_DEFAULTOWNED_DIR' : None,
                      'VHOST_PERMS_VIRTUALOWNED_FILE': None,
                      'VHOST_PERMS_INSTALLDIR'       : None,
                      'ROOT'                : self.__root,
                      'PN'                  : None,
                      'PVR'                 : None}

        result = {}

        for i in list(export_map.keys()):
            value = export_map[i]
            if not value:
                value = self.get_config(i.lower())
            # NOTE(review): os.putenv() changes the environment of spawned
            # children but does NOT update os.environ — presumably only the
            # sandboxed hook scripts need these; confirm nothing reads them
            # back through os.environ.
            os.putenv(i, str(value))
            result[i] = str(value)

        return result
kld = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10) vfe = np.mean(nll) + np.mean(kld) str1 = " va_nll_bound : {}".format(vfe) str2 = " va_nll_term : {}".format(np.mean(nll)) str3 = " va_kld_q2p : {}".format(np.mean(kld)) joint_str = "\n".join([str1, str2, str3]) print(joint_str) out_file.write(joint_str+"\n") out_file.flush() if ((i % 2000) == 0): GPSI.save_to_file("{}_PARAMS.pkl".format(result_tag)) # Get some validation samples for evaluating model performance xb = to_fX( Xva[0:100] ) xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \ occ_dim=occ_dim, data_mean=data_mean) xi = np.repeat(xi, 2, axis=0) xo = np.repeat(xo, 2, axis=0) xm = np.repeat(xm, 2, axis=0) # draw some sample imputations from the model samp_count = xi.shape[0] _, model_samps = GPSI.sample_imputer(xi, xo, xm, use_guide_policy=False) seq_len = len(model_samps) seq_samps = np.zeros((seq_len*samp_count, model_samps[0].shape[1])) idx = 0 for s1 in range(samp_count): for s2 in range(seq_len): seq_samps[idx] = model_samps[s2][s1] idx += 1 file_name = "{0:s}_samples_ng_b{1:d}.png".format(result_tag, i) utils.visualize_samples(seq_samps, file_name, num_rows=20) # get visualizations of policy parameters # file_name = "{0:s}_gen_step_weights_b{1:d}.png".format(result_tag, i) # W = GPSI.gen_step_weights.get_value(borrow=False) # utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20) # file_name = "{0:s}_gen_write_gate_weights_b{1:d}.png".format(result_tag, i) # W = GPSI.gen_write_gate_weights.get_value(borrow=False) # utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20) # file_name = "{0:s}_gen_erase_gate_weights_b{1:d}.png".format(result_tag, i) # W = GPSI.gen_erase_gate_weights.get_value(borrow=False) # utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20) # file_name = "{0:s}_gen_inf_weights_b{1:d}.png".format(result_tag, i) # W = GPSI.gen_inf_weights.get_value(borrow=False).T # utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20) 
#################################
#################################
## CHECK MNIST IMPUTER RESULTS ##
#################################
#################################

def test_mnist_results(step_type='add', imp_steps=6, occ_dim=15, drop_prob=0.0):
    """Evaluate a previously trained GPSI imputer on binarized MNIST.

    Loads the parameters saved by the matching training run (same tag
    scheme), then records guided and unguided free-energy bound estimates
    on the test set into "<tag>_FINAL_RESULTS_NEW.txt".

    step_type -- imputation step type used in the result tag ('add'/'jump')
    imp_steps -- number of imputation steps (tag component)
    occ_dim   -- side length of the square occlusion mask (0 = none)
    drop_prob -- probability of dropping each pixel independently
    """
    #########################################
    # Format the result tag more thoroughly #
    #########################################
    # Tag must match the one used at training time so the pickle is found.
    dp_int = int(100.0 * drop_prob)
    result_tag = "{}GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type)

    ##########################
    # Get some training data #
    ##########################
    rng = np.random.RandomState(1234)
    Xtr, Xva, Xte = load_binarized_mnist(data_path='./data/')
    # Train+validation merged for training; evaluation happens on the test set.
    Xtr = np.vstack((Xtr, Xva))
    Xva = Xte
    #del Xte
    tr_samples = Xtr.shape[0]
    va_samples = Xva.shape[0]

    ##########################
    # Get some training data #
    ##########################
    # rng = np.random.RandomState(1234)
    # dataset = 'data/mnist.pkl.gz'
    # datasets = load_udm(dataset, as_shared=False, zero_mean=False)
    # Xtr = datasets[0][0]
    # Xva = datasets[1][0]
    # Xte = datasets[2][0]
    # # Merge validation set and training set, and test on test set.
    # #Xtr = np.concatenate((Xtr, Xva), axis=0)
    # #Xva = Xte
    # Xtr = to_fX(shift_and_scale_into_01(Xtr))
    # Xva = to_fX(shift_and_scale_into_01(Xva))
    # tr_samples = Xtr.shape[0]
    # va_samples = Xva.shape[0]

    batch_size = 250
    batch_reps = 1
    # Mean pixel intensity over the training set, broadcast to a full
    # image vector; used to fill masked-out pixels.
    all_pix_mean = np.mean(np.mean(Xtr, axis=1))
    data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )

    # Load parameters from a previously trained model
    print("Testing model load from file...")
    GPSI = load_gpsimputer_from_file(f_name="{}_PARAMS.pkl".format(result_tag), \
                                     rng=rng)

    ################################################################
    # Apply some updates, to check that they aren't totally broken #
    ################################################################
    log_name = "{}_FINAL_RESULTS_NEW.txt".format(result_tag)
    # NOTE(review): opened in binary mode but written with str below —
    # looks like Python 2 code; under Python 3 this write would fail.
    out_file = open(log_name, 'wb')
    Xva = row_shuffle(Xva)
    # record an estimate of performance on the test set
    # Guided pass: the inference (guide) policy sees the masked-out pixels.
    str0 = "GUIDED SAMPLE BOUND:"
    print(str0)
    # Evaluated in two halves of 5000, presumably to bound memory use.
    xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \
                                       occ_dim=occ_dim, data_mean=data_mean)
    nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
                                         use_guide_policy=True)
    xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \
                                       occ_dim=occ_dim, data_mean=data_mean)
    nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
                                         use_guide_policy=True)
    nll = np.concatenate((nll_0, nll_1))
    kld = np.concatenate((kld_0, kld_1))
    # Variational free energy = reconstruction NLL + KL(q||p).
    vfe = np.mean(nll) + np.mean(kld)
    str1 = "    va_nll_bound : {}".format(vfe)
    str2 = "    va_nll_term  : {}".format(np.mean(nll))
    str3 = "    va_kld_q2p   : {}".format(np.mean(kld))
    joint_str = "\n".join([str0, str1, str2, str3])
    print(joint_str)
    out_file.write(joint_str+"\n")
    out_file.flush()
    # record an estimate of performance on the test set
    # Unguided pass: samples come from the generator policy alone.
    str0 = "UNGUIDED SAMPLE BOUND:"
    print(str0)
    xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \
                                       occ_dim=occ_dim, data_mean=data_mean)
    nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
                                         use_guide_policy=False)
    xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \
                                       occ_dim=occ_dim, data_mean=data_mean)
    nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
                                         use_guide_policy=False)
    nll = np.concatenate((nll_0, nll_1))
    kld = np.concatenate((kld_0, kld_1))
    # NOTE(review): here both "bound" and "term" report np.mean(nll) and
    # the KLD is excluded from the bound — presumably intentional for the
    # unguided case (KL against the guide is not part of this bound), but
    # worth confirming against the guided branch above which uses vfe.
    str1 = "    va_nll_bound : {}".format(np.mean(nll))
    str2 = "    va_nll_term  : {}".format(np.mean(nll))
    str3 = "    va_kld_q2p   : {}".format(np.mean(kld))
    joint_str = "\n".join([str0, str1, str2, str3])
    print(joint_str)
    out_file.write(joint_str+"\n")
    out_file.flush()


if __name__=="__main__":
######### # MNIST # ######### # TRAINING #test_mnist(step_type='add', occ_dim=14, drop_prob=0.0) #test_mnist(step_type='add', occ_dim=16, drop_prob=0.0) #test_mnist(step_type='add', occ_dim=0, drop_prob=0.6) #test_mnist(step_type='add', occ_dim=0, drop_prob=0.8) #test_mnist(step_type='jump', occ_dim=14, drop_prob=0.0) #test_mnist(step_type='jump', occ_dim=16, drop_prob=0.0) #test_mnist(step_type='jump', occ_dim=0, drop_prob=0.6) #test_mnist(step_type='jump', occ_dim=0, drop_prob=0.8) #test_mnist(step_type='add', imp_steps=1, occ_dim=0, drop_prob=0.9) #test_mnist(step_type='add', imp_steps=2, occ_dim=0, drop_prob=0.9) test_mnist(step_type='add', imp_steps=5, occ_dim=0, drop_prob=0.9) #test_mnist(step_type='add', imp_steps=10, occ_dim=0, drop_prob=0.9) #test_mnist(step_type='add', imp_steps=15, occ_dim=0, drop_prob=0.9) # RESULTS # test_mnist_results(step_type='add', occ_dim=14, drop_prob=0.0) # test_mnist_results(step_type='add', occ_dim=16, drop_prob=0.0) # test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.6) # test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.7) # test_mnist_results(step_type='add', oc
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-09 01:29
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    """Rename the ``has_statements`` field on ``Account`` to
    ``is_bank_account`` (column rename only; no data is transformed).
    """

    dependencies = [("hordak", "0007_auto_20161209_0111")]

    operations = [
        # RenameField(model_name, old_name, new_name)
        migrations.RenameField("Account", "has_statements", "is_bank_account"
                               )
    ]
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' __version__ = "0.2" __author__ = "nagev" __license__ = "GPLv3" import re import sys import time import json import urllib import urllib2 from urlparse import parse_qs def _decrypt_signature(s): if len(s) == 92: return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + \ s[80:83] elif len(s) == 90: return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + \ s[78:81] elif len(s) == 88: return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1]\ + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12] elif len(s) == 87: return s[4:23] + s[86] + s[24:85] elif len(s) == 86: return s[83:85] + s[26] + s[79:46:-1] + s[85] + s[45:36:-1] + s[30] + \ s[35:30:-1] + s[46] + s[29:26:-1] + s[82] + s[25:1:-1] elif len(s) == 85: return s[2:8] + s[0] + s[9:21] + s[65] + s[22:65] + s[84] + s[66:82] +\ s[21] elif len(s) == 84: return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26] elif len(s) == 83: return s[:15] + s[80] + s[16:80] + s[15] elif len(s) == 82: return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1]\ + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34] elif len(s) == 81: return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1]\ + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9] elif len(s) == 79: return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1]\ + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9] else: raise NameError("Unable to decode video url - sig len %s" % len(s)) class Stream(): resolutions = { '5': ('240x400', 'flv'), '17': ('144x176', '3gp'), '18': ('360x640', 'mp4'), '22': ('720x1280', 'mp4'), '34': ('360x640', 'flv'), '35': ('480x854', 'flv'), '36': 
('320x240', '3gp'), '37': ('1080x1920', 'mp4'), '38': ('3072x4096', 'superHD')
, '43': ('360x640', 'webm'), '44': ('480x854', 'webm'), '45': ('720x1280', 'webm'), '46': ('1080x1920', 'webm'),
'82': ('640x360-3D', 'mp4'), '84': ('1280x720-3D', 'mp4'), '100': ('640x360-3D', 'webm'), '102': ('1280x720-3D', 'webm')} def __init__(self, streammap, opener, title="ytvid"): if not streammap.get("sig", ""): streammap['sig'] = [_decrypt_signature(streammap['s'][0])] self.url = streammap['url'][0] + '&signature=' + streammap['sig'][0] self.vidformat = streammap['type'][0].split(';')[0] self.resolution = self.resolutions[streammap['itag'][0]][0] self.extension = self.resolutions[streammap['itag'][0]][1] self.itag = streammap['itag'][0] self.title = title self.filename = self.title + "." + self.extension self._opener = opener def get_filesize(self): opener = self._opener return int(opener.open(self.url).headers['content-length']) def download(self, progress=True, filepath=""): response = self._opener.open(self.url) total = int(response.info().getheader('Content-Length').strip()) print u"-Downloading '{}' [{:,} Bytes]".format(self.filename, total) status_string = (' {:,} Bytes [{:.2%}] received. Rate: [{:4.0f} ' 'kbps]. 
ETA: [{:.0f} secs]') chunksize, bytesdone, t0 = 16834, 0, time.time() outfh = open(filepath or self.filename, 'wb') while 1: chunk = response.read(chunksize) elapsed = time.time() - t0 outfh.write(chunk) bytesdone += len(chunk) if not chunk: outfh.close() break if progress: rate = (bytesdone / 1024) / elapsed eta = (total - bytesdone) / (rate * 1024) display = (bytesdone, bytesdone * 1.0 / total, rate, eta) status = status_string.format(*display) sys.stdout.write("\r" + status + ' ' * 4 + "\r") sys.stdout.flush print "\nDone" class Pafy(): def __len__(self): return self.length def __repr__(self): out = "" keys = "Title Author ID Duration Rating Views Thumbnail Keywords" keys = keys.split(" ") keywords = ", ".join(self.keywords).decode("utf8") length = time.strftime('%H:%M:%S', time.gmtime(self.length)) info = dict(Title=self.title, Author=self.author, Views=self.viewcount, Rating=self.rating, Duration=length, ID=self.videoid, Thumbnail=self.thumb, Keywords=keywords) for k in keys: try: out += "%s: %s\n" % (k, info[k]) except KeyError: pass return out.encode("utf8", "ignore") def __init__(self, video_url): infoUrl = 'https://www.youtube.com/get_video_info?video_id=' vidid = re.search(r'v=([a-zA-Z0-9-_]*)', video_url).group(1) infoUrl += vidid + "&asv=3&el=detailpage&hl=en_US" self.urls = [] opener = urllib2.build_opener() ua = ("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64;" "Trident/5.0)") opener.addheaders = [('User-Agent', ua)] self.keywords = "" self.rawinfo = opener.open(infoUrl).read() self.allinfo = parse_qs(self.rawinfo) self.title = self.allinfo['title'][0].decode('utf-8') self.author = self.allinfo['author'][0] self.videoid = self.allinfo['video_id'][0] if 'keywords' in self.allinfo: self.keywords = self.allinfo['keywords'][0].split(',') self.rating = float(self.allinfo['avg_rating'][0]) self.length = int(self.allinfo['length_seconds'][0]) self.duration = time.strftime('%H:%M:%S', time.gmtime(self.length)) self.viewcount = 
int(self.allinfo['view_count'][0]) self.thumb = urllib.unquote_plus(self.allinfo['thumbnail_url'][0]) self.formats = self.allinfo['fmt_list'][0].split(",") self.formats = [x.split("/") for x in self.formats] if self.allinfo.get('iurlsd'): self.bigthumb = self.allinfo['iurlsd'][0] if self.allinfo.get('iurlmaxres'): self.bigthumbhd = self.allinfo['iurlmaxres'][0] streamMap = self.allinfo['url_encoded_fmt_stream_map'][0].split(',') smap = [parse_qs(sm) for sm in streamMap] if not smap[0].get("sig", ""): # vevo! watchurl = "https://www.youtube.com/watch?v=" + vidid watchinfo = opener.open(watchurl).read() match = re.search(r';ytplayer.config = ({.*?});', watchinfo) try: myjson = json.loads(match.group(1)) except: raise NameError('Problem handling this video') args = myjson['args'] streamMap = args['url_encoded_fmt_stream_map'].split(",") smap = [parse_qs(sm) for sm in streamMap] self.streams = [Stream(sm, opener, self.title) for sm in smap] def getbest(self, preftype="any", ftypestrict=True): # set ftypestrict to False to use a non preferred format if that # has a higher resolution def _sortkey(x, key3d=0, keyres=0, keyftype=0): key3d = "3D" not in x.resolution keyres = int(x.resolution.split("x")[0]) keyftype = preftype == x.extension if ftypestrict: return (key3d, keyftype, keyres) else: return (key3d, keyres, keyftype)
""" Markup class allows the use of easy-to-write characters to style the text instead of using escape codes. ==text== --> reverse video '''text''' --> bold ~~text~~ --> strikethrough Copyright (c) 2015 makos <https://github.com/makos>, chibi <http://neetco.de/chibi> under GNU GPL v3, see LICENSE for details """ import re class Marker(): def esc(self, input_text):
input_text = input_text.replace('\033', '\\033') return input_text def demarkify(self, input_text): """Prints out a marked-up piece of text.""" output_text = self.esc(input_text) # strikethrough output_text = re.sub( '~~(?P<substring>.*?)~~', '\033[0;9m\g<substring>\033[0m', ou
tput_text) # bold output_text = re.sub( '\'\'\'(?P<substring>.*?)\'\'\'', '\033[0;1m\g<substring>\033[0m', output_text) # rv output_text = re.sub( '==(?P<substring>.*?)==', '\033[0;7m\g<substring>\033[0m', output_text) return output_text
turn attr() return attr def get_feed(self, extra_params=None): if extra_params: try: obj = self.get_object(extra_params.split('/')) except (AttributeError, LookupError): raise LookupError('Feed does not exist') else: obj = None feed = AtomFeed( atom_id=self.__get_dynamic_attr('feed_id', obj), title=self.__get_dynamic_attr('feed_title', obj), updated=self.__get_dynamic_attr('feed_updated', obj), icon=self.__get_dynamic_attr('feed_icon', obj), logo=self.__get_dynamic_attr('feed_logo', obj), rights=self.__get_dynamic_attr('feed_rights', obj), subtitle=self.__get_dynamic_attr('feed_subtitle', obj), authors=self.__get_dynamic_attr('feed_authors', obj, default=[]), categories=self.__get_dynamic_attr('feed_categories', obj, default=[]), contributors=self.__get_dynamic_attr('feed_contributors', obj, default=[]), links=self.__get_dynamic_attr('feed_links', obj, default=[]), extra_attrs=self.__get_dynamic_attr('feed_extra_attrs', obj), hide_generator=self.__get_dynamic_attr('hide_generator', obj, default=False) ) items = self.__get_dynamic_attr('items', obj) if items is None: raise LookupError('Feed has no items field') for item in items: feed.add_item( atom_id=self.__get_dynamic_attr('item_id', item), title=self.__get_dynamic_attr('item_title', item), updated=self.__get_dynamic_attr('item_updated', item), content=self.__get_dynamic_attr('item_content', item), published=self.__get_dynamic_attr('item_published', item), rights=self.__get_dynamic_attr('item_rights', item), source=self.__get_dynamic_attr('item_source', item), summary=self.__get_dynamic_attr('item_summary', item), authors=self.__get_dynamic_attr('item_authors', item, default=[]), categories=self.__get_dynamic_attr('item_categories', item, default=[]), contributors=self.__get_dynamic_attr('item_contributors', item, default=[]), links=self.__get_dynamic_attr('item_links', item, default=[]), extra_attrs=self.__get_dynamic_attr('item_extra_attrs', None, default={}), ) if self.VALIDATE: feed.validate() return feed 
class ValidationError(Exception): pass # based on django.utils.feedgenerator.SyndicationFeed and django.utils.feedgenerator.Atom1Feed class AtomFeed(object): mime_type = 'application/atom+xml' ns = u'http://www.w3.org/2005/Atom' def __init__(self, atom_id, title, updated=None, icon=None, logo=None, rights=None, subtitle=None, authors=None, categories=None, contributors=None, links=None, extra_attrs={}, hide_generator=False): if atom_id is None: raise LookupError('Feed has no feed_id field') if title is None: raise LookupError('Feed has no feed_title field') # if updated == None, we'll calculate it self.feed = { 'id': atom_id, 'title': title, 'updated': updated, 'icon': icon, 'logo': logo, 'rights': rights, 'subtitle': subtitle, 'authors': authors or [], 'categories': categories or [], 'contributors': contributors or [], 'links': links or [], 'extra_attrs': extra_attrs, 'hide_generator': hide_generator, } self.items = [] def add_item(self, atom_id, title, updated, content=None, published=None, rights=None, source=None, summary=None, authors=None, categories=None, contributors=None, links=None, extra_attrs={}): if atom_id is None: raise LookupError('Feed has no item_id method') if title is None: raise LookupError('Feed has no item_title method') if updated is None: raise LookupError('Feed has no item_updated method') self.items.append({ 'id': atom_id, 'title': title, 'updated': updated, 'content': content, 'published': published, 'rights': rights, 'source': source, 'summary': summary, 'authors': authors or [], 'categories': categories or [], 'contributors': contributors or [], 'links': links or [], 'extra_attrs': extra_attrs, }) def latest_updated(self): """ Returns the latest item's updated or the current time if there are no items. 
""" updates = [item['updated'] for item in self.items] if len(updates) > 0: updates.sort() return updates[-1] else: return timezone.now() # @@@ really we should allow a feed to define its "start" for this case def write_text_construct(self, handler, element_name, data): if isinstance(data, tuple): text_type, text = data if text_type == 'xhtml': handler.startElement(element_name, {'type': text_type}) handler._write(text) # write unescaped -- it had better be well-formed XML handler.endElement(element_name) else: handler.addQuickElement(element_name, text, {'type': text_type}) else: handler.addQuickElement(element_name, data) def write_person_construct(self, handler, element_name, person): handler.startElement(element_name, {}) handler.addQuickElement(u'name', person['name']) if 'uri' in person: handler.addQuickElement(u'uri', person['uri']) if 'email' in person: handler.addQuickElement(u'email', person['email']) handler.endElement(element_name) def write_link_construct(self, handler, link): if 'length' in link: link['length'] = str(link['length']) handler.addQuickElement(u'link', None, link) def write_category_construct(self, handler, category): handler.addQuickElement(u'category', None, category) def write_source(self, hand
ler, data): handler.startElement(u'source', {}) if data.get('id'): handler.addQuickElement(u'id', data['id']) if data.get('title'): self.write_text_construct(handler, u'title', data['title']
) if data.get('subtitle'): self.write_text_construct(handler, u'subtitle', data['subtitle']) if data.get('icon'): handler.addQuickElement(u'icon', data['icon']) if data.get('logo'): handler.addQuickElement(u'logo', data['logo']) if data.get('updated'): handler.addQuickElement(u'updated', rfc3339_date(data['updated'])) for category in data.get('categories', []): self.write_category_construct(handler, category) for link in data.get('links', []): self.write_link_construct(handler, link) for author in data.get('authors', []): self.write_person_construct(handler, u'author', author) for contributor in data.get('contributors', []): self.write_person_construct(handler, u'contributor', contributor) if data.get('rights'): self.write_text_construct(handler, u'rights', data['rights']) handler.endElement(u'source') def write_content(self, handler, data): if isinstance(data, tuple): content_dict, text = data if content_dict.get('type') == 'xhtml': handler.startElement(u'content', content_dict) handler._write(text) # write unescaped -- it had better be well-formed XML handler.endElement(u'content') else: handler.addQuickElement(u'content', text, content_dict) else: handler.addQuickElement(u'content', data) de
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ChemLab
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import unittest
import sys

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import chemlab.gromacs_topology


class TestTopologyReader(unittest.TestCase):
    """Checks that per-molecule interaction lists are replicated correctly."""

    @classmethod
    def setUpClass(cls):
        # Parse the topology once for the whole test case.
        cls.topol_file = 'topol.top'
        cls.gt = chemlab.gromacs_topology.GromacsTopology(
            cls.topol_file, generate_exclusions=True)
        cls.gt.read()

    def test_replicated_molecules(self):
        """Test the molecule replication"""
        # Atoms: every molecule definition must declare an 'atoms' section,
        # so it is looked up directly (a missing key is a parse error).
        expected_atoms = sum(
            nmols * len(self.gt.gt.molecules_data[mol_name]['atoms'])
            for mol_name, nmols in self.gt.gt.molecules)
        self.assertEqual(len(self.gt.atoms), expected_atoms)

        # Bonded-interaction sections are optional per molecule, hence the
        # .get(..., []) default.  Each topology-level list must contain the
        # per-molecule count multiplied by the number of replicas.
        for section in ('bonds', 'angles', 'dihedrals', 'pairs'):
            expected = sum(
                nmols * len(self.gt.gt.molecules_data[mol_name].get(section, []))
                for mol_name, nmols in self.gt.gt.molecules)
            self.assertEqual(len(getattr(self.gt, section)), expected)


if __name__ == '__main__':
    unittest.main()
has a good docstring. # - Add methods for regex find and replace. # - guess_content_type() method? # - Perhaps support arguments to touch(). import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib __version__ = '2.2' __all__ = ['path'] # Platform-specific support for path.owner if os.name == 'nt': try: import win32security except ImportError: win32security = None else: try: import pwd except ImportError: pwd = None # Pre-2.3 support. Are unicode filenames supported? _base = str _getcwd = os.getcwd try: if os.path.supports_unicode_filenames: _base = unicode _getcwd = os.getcwdu except AttributeError: pass # Pre-2.3 workaround for booleans try: True, False except NameError: True, False = 1, 0 # Pre-2.3 workaround for basestring. try: basestring except NameError: basestring = (str, unicode) # Universal newline support _textmode = 'r' if hasattr(file, 'newlines'): _textmode = 'U' class TreeWalkWarning(Warning): pass class path(_base): """ Represents a filesystem path. For documentation on individual methods, consult their counterparts in os.path. """ # --- Special Python methods. def __repr__(self): return 'path(%s)' % _base.__repr__(self) # Adding a path and a string yields a path. def __add__(self, more): try: resultStr = _base.__add__(self, more) except TypeError: #Python bug resultStr = NotImplemented if resultStr is NotImplemented: return resultStr return self.__class__(resultStr) def __radd__(self, other): if isinstance(other, basestring): return self.__class__(other.__add__(self)) else: return NotImplemented # The / operator joins paths. def __div__(self, rel): """ fp.__div__(rel) == fp / rel == fp.joinpath(rel) Join two path components, adding a separator character if needed. """ return self.__class__(os.path.join(self, rel)) # Make the / operator work even when true division is enabled. __truediv__ = __div__ def getcwd(cls): """ Return the current working directory as a path object. 
""" return cls(_getcwd()) getcwd = classmethod(getcwd) # --- Operations on path strings. isabs = os.path.isabs def abspath(self): return self.__class__(os.path.abspath(self)) def normcase(self): return self.__class__(os.path.normcase(self)) def normpath(self): return self.__class__(os.path.normpath(self)) def realpath(self): return self.__class__(os.path.realpath(self)) def expanduser(self): return self.__class__(os.path.expanduser(self)) def expandvars(self): return self.__class__(os.path.expandvars(self)) def dirname(self): return self.__class__(os.path.dirname(self)) basename = os.path.basename def expand(self): """ Clean up a filename by calling expandvars(), expanduser(), and normpath() on it. This is commonly everything needed to clean up a filename read from a configuration file, for example. """ return self.expandvars().expanduser().normpath() def _get_namebase(self): base, ext = os.path.splitext(self.name) return base def _get_ext(self): f, ext = os.path.splitext(_base(self)) return ext def _get_drive(self): drive, r = os.path.splitdrive(self) return self.__class__(drive) parent = property( dirname, None, None, """ This path's parent directory, as a new path object. For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib') """) name = property( basename, None, None, """ The name of this file or directory without the full path. For example, path('/usr/local/lib/libpython.so').name == 'libpython.so' """) namebase = property( _get_namebase, None, None, """ The same as path.name, but with one file extension stripped off. For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz', but path('/home/guido/python.tar.gz').namebase == 'python.tar' """) ext = property( _get_ext, None, None, """ The file extension, for example '.py'. """) drive = property( _get_drive, None, None, """ The drive specifier, for example 'C:'. This is always empty on systems that don't use drive specifiers. 
""") def splitpath(self): """ p.splitpath() -> Return (p.parent, p.name). """ parent, child = os.path.split(self) return self.__class__(parent), child def splitdrive(self): """ p.splitdrive() -> Return (p.drive, <the rest of p>). Split the drive specifier from this path. If there is no drive specifier, p.drive is empty, so the return value is simply (path(''), p). This is always the case on Unix. """ drive, rel = os.path.splitdrive(self) return self.__class__(drive), rel def splitext(self): """ p.splitext() -> Return (p.stripext(), p.ext). Split the filename extension from this path and return the two parts. Either part may be empty. The extension is everything from '.' to the end of the last path segment. This has the property that if (a, b) == p.splitext(), then a + b == p. """ filename, ext = os.path.splitext(self) return self.__class__(filename), ext def stripext(self): """ p.stripext() -> Remove one file extension from the path. For example, path('/home/guido/python.tar.gz').stripext() returns path('/home/guido/python.tar'). """ return self.splitext()[0] if hasattr(os.path, 'splitunc'): def splitunc(self): unc, rest = os.path.splitunc(self) return self.__class__(unc), rest def _get_uncshare(self): unc, r = os.path.splitunc(self) return self.__class__(unc) uncshare = property( _get_uncshare, None, None, """ The UNC mount point for this path. This is empty for paths on local drives. """) def joinpath(self, *args): """ Join two or more path components, adding a separator character (os.sep) if needed. Returns a new path object. """ return self.__class__(os.path.join(self, *args)) def splitall(self
): r""" Return a list of the path components in this path. The first item in the list will be a path. Its value will be either os.curdir
, os.pardir, empty, or the root directory of this path (for example, '/' or 'C:\\'). The other items in the list will be strings. path.path.joinpath(*result) will yield the original path. """ parts = [] loc = self while loc != os.curdir and loc != os.pardir: prev = loc loc, child = prev.splitpath() if loc == prev: break parts.append(child) parts.append(loc) parts.reverse() return parts def relpath(self): """ Return this path as a relative path, based from the current working directory. """ cwd = self.__class__(os.getcwd()) return cwd.relpathto(self) def relpathto(self, dest): """ Return a relative path from self to dest. If there is no relative path from self to dest, for example if they reside on different drives in Windows, then this returns dest.abspath(). """ origin = self.abspath() dest = self.__class__(dest).abspath() orig_list = origin.normcase().splitall() # Don't normcase dest! We want to preserve the case. dest_list = dest.splitall() if orig_list[0] != os.path.normcase(dest_list[0]): # Can't get here from there. return dest # Find the l
# Copyright 2014-2015 Rumma & Ko Ltd
#
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Defines "parency links" between two "persons", and a user
interface to manage them.

This module is probably useful in combination with
:mod:`lino_xl.lib.households`.

.. autosummary::
   :toctree:

    choicelists
    models

"""

from lino.api import ad, _


class Plugin(ad.Plugin):
    "Extends :class:`lino.core.plugin.Plugin`."
    verbose_name = _("Parency links")

    ## settings
    person_model = 'contacts.Person'
    """
    A string referring to the model which represents a human in your
    application.  Default value is ``'contacts.Person'`` (referring to
    :class:`lino_xl.lib.contacts.Person`).
    """

    def on_site_startup(self, site):
        # Resolve the configured model name string into the actual model
        # class; deferred to site startup so that all installed apps (and
        # thus all models) are known by the time we resolve.
        self.person_model = site.models.resolve(self.person_model)
        super(Plugin, self).on_site_startup(site)

    def setup_explorer_menu(self, site, user_type, m):
        # Add our actions to the menu group of whatever app defines the
        # configured person model, rather than hard-coding `contacts`
        # (see the superseded line kept below for reference).
        # mg = site.plugins.contacts
        mg = site.plugins[self.person_model._meta.app_label]
        m = m.add_menu(mg.app_label, mg.verbose_name)
        m.add_action('humanlinks.Links')
        m.add_action('humanlinks.LinkTypes')
from
investor_lifespan_model.investor import Investor from investor_lifespan_model.market import Market from investor_lifespan_model.insurer import Insurer from investor_lifespan_model.lifespan_model import LifespanModel f
rom investor_lifespan_model.mortality_data import π, G, tf
RegObj.dll is an ActiveX server--and, hence, has an automation interface--that is available with documentation in the distribution file kno
wn as RegObji.exe, from the following page: http://msdn.microsoft.com/vbasic/downloads/addins.asp To provide early binding for RegObj use >>> from win32com.client import gencache >>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0) or the MakePy ut
ility within PythonWin, referring to "Regstration Manipulation Classes (1.0)" (Please notice the spelling error.) Sample use, to determine what command is associated with a Python file: >>> from win32com.client import Dispatch, gencache >>> from win32con import HKEY_CLASSES_ROOT >>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0) >>> regobj = Dispatch ( 'RegObj.Registry' ) >>> HKCR = regobj.RegKeyFromHKey ( HKEY_CLASSES_ROOT ) >>> PythonFileKey = HKCR.ParseKeyName('Python.File\Shell\Open\command') >>> PythonFileKey.Value u'J:\\Python22\\pythonw.exe "%1" %*'
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#    Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: declares the module that makes the (normally
# hidden) ``product_id`` field visible on hr analytic timesheet lines.
{
    'sequence': 500,
    'name' : 'Displays product in hr analytic timesheet' ,
    'version' : '0.7' ,
    'category' : 'HR' ,
    'description' : """
This module displays the hidden field product_id
""" ,
    'author' : 'ChriCar Beteiligungs- und Beratungs- GmbH' ,
    'depends' : ['hr_timesheet' ] ,
    'data' : ['hr_timesheet_product.xml'] ,
    'demo_xml' : [] ,
    # marked not installable on current server versions
    'installable': False ,
    'active' : False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
from django.core.management.base import BaseCommand, CommandError

from geography.models import Place


class Command(BaseCommand):
    """Bulk-load places for one map from ``<map_name>.txt``.

    The data file is tab-separated, one place per line: ``code<TAB>name``.
    Lines that do not have exactly two fields are silently skipped.
    """

    help = u"""Add new places"""
    usage_str = 'USAGE: ./manage.py add_places map_name STATE|CITY|RIVER|LAKE|... [difficulty]'

    def handle(self, *args, **options):
        """Parse ``map_name``, place-type slug and optional difficulty,
        then create and save one Place per data line.

        Raises CommandError on missing/invalid arguments.
        """
        if len(args) < 2:
            raise CommandError(self.usage_str)
        if args[1] not in Place.PLACE_TYPE_SLUGS:
            raise CommandError(self.usage_str)
        # BUG FIX: the original read ``self.Place.PLACE_TYPE_SLUGS`` which
        # raises AttributeError -- Command has no ``Place`` attribute; the
        # slug map lives on the imported Place model.
        place_type = Place.PLACE_TYPE_SLUGS[args[1]]
        map_name = args[0]
        # Honor the optional [difficulty] argument advertised in usage_str;
        # fall back to the historical default of 500.
        if len(args) > 2:
            try:
                difficulty = int(args[2])
            except ValueError:
                raise CommandError(self.usage_str)
        else:
            difficulty = 500
        # ``with`` guarantees the data file is closed even if a save fails
        # (the original leaked the file handle).
        with open(map_name.lower() + ".txt") as state_file:
            states = state_file.read()
        for s in states.split("\n"):
            place = s.split("\t")
            if len(place) == 2:
                code, name = place
                p = Place(code=code, name=name, difficulty=difficulty,
                          type=place_type)
                p.save()
                self.stdout.write(name + " added")
from django.db import migrations, models


class Migration(migrations.Migration):
    # Creates the ArticleSubscription model that ties a wiki ArticlePlugin
    # to a django_nyt Subscription, so users can subscribe to article
    # change notifications.

    dependencies = [
        # Requires the django_nyt Subscription model and the base wiki models.
        ('django_nyt', '0006_auto_20141229_1630'),
        ('wiki', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ArticleSubscription',
            fields=[
                # Multi-table-inheritance pointer to wiki.ArticlePlugin
                # (parent_link=True + primary_key=True marks the MTI link).
                ('articleplugin_ptr', models.OneToOneField(auto_created=True, to='wiki.ArticlePlugin', primary_key=True, parent_link=True, serialize=False, on_delete=models.CASCADE)),
                ('subscription', models.OneToOneField(to='django_nyt.Subscription', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=('wiki.articleplugin',),
        ),
        # A given subscription may be attached to a given article plugin
        # at most once.
        migrations.AlterUniqueTogether(
            name='articlesubscription',
            unique_together=set([('subscription', 'articleplugin_ptr')]),
        ),
    ]
""" Support for Vera cover - curtains, rollershutters etc. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/cover.vera/ """ import logging from homeassistant.components.cover import CoverDevice from homeassistant.components.vera import ( VeraDevice, VERA_DEVICES, VERA_CONTROLLER) DEPENDENCIES = ['vera'] _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_devices_callback, discovery_info=None): """Find and return Vera covers.""" add_devices_callback( VeraCover(device, VERA_CONTROLLER) for device in VERA_DEVICES['cover']) # pylint: disable=abstract-method class VeraCover(VeraDevice, CoverDevice): """Represents a Vera Cover in Home Assistant.""" def __init__(self, vera_device, controller): """Initialize the Vera device.""" VeraDevice.__init__(self, vera_device, controller) @property def current_cover_position(self): """ Return current position of cover. 0 is closed, 100 is fully open. """ position = self.vera_device.get_level() if position <= 5: return 0 if position >= 95: return 100 return position def set_cover_position(self, position, **kwargs): """Move the cover to a specific position.""" self.vera_device.set_level(position) @property def is_closed(self): """Return if the cover is closed.""" if self.current_cover_position is not None: if self.current_cover_position > 0: return False
else: return True def open_cover(self, **kwargs): """Open the cover.""" self.vera_device.open(
) def close_cover(self, **kwargs): """Close the cover.""" self.vera_device.close() def stop_cover(self, **kwargs): """Stop the cover.""" self.vera_device.stop()
# -*- coding: utf-8 -*- """ @file costMemory.py @author Jakob Erdmann @author Michael Behrisch @date 2012-03-14 @version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $ Perform smoothing of edge costs across successive iterations of duaIterate SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/ Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors This file is part of SUMO. SUMO is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. """ from __future__ import print_function from __future__ import absolute_import import os import sys from collections import defaultdict from xml.sax import saxutils, make_parser, handler sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) from sumolib.net import readNet class EdgeMemory: def __init__(self, cost): self.cost = cost self.seen = True def update(self, cost, memory_weight, new_weight, pessimism): p = (cost / self.cost) ** pessimism if self.cost > 0 else 1 memory_factor = memory_weight / (memory_weight + new_weight * p) self.cost = self.cost * memory_factor + cost * (1 - memory_factor) self.seen = True class CostMemory(handler.ContentHandler): # memorize the weighted average of edge costs def __init__(self, cost_attribute, pessimism=0, network_file=None): # the cost attribute to parse (i.e. 
'traveltime') self.cost_attribute = cost_attribute.encode('utf8') # the duaIterate iteration index self.iteration = None # the main data store: for every interval and edge id we store costs and # whether data was seen in the last call of load_costs() # start -> (edge_id -> EdgeMemory) self.intervals = defaultdict(dict) # the intervall length (only known for certain if multiple intervals # have been seen) self.interval_length = 214748 # SUMOTIME_MAXSTRING # the intervall currently being parsed self.current_interval = None # the combined weigth of all previously loaded costs self.memory_weight = 0.0 # update is done according to: memory * memory_factor + new * (1 - # memory_factor) self.memory_factor = None # differences between the previously loaded costs and the memorized # costs self.errors = None # some statistics self.num_loaded = 0 self.num_decayed = 0 # travel times without obstructing traffic # XXX could use the minimum known traveltime self.traveltime_free = defaultdict(lambda: 0) if network_file is not None: # build a map of default weights for decaying edges assuming the # attribute is traveltime self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed()) for e in readNet(network_file).getEdges()]) self.pessimism = pessimism def startElement(self, name, attrs): if name == 'interval': self.current_interval = self.intervals[float(attrs['begin'])] if name == 'edge': id = attrs['id'] # may be missing for some if self.cost_attribute.decode('utf-8') in attrs: self.num_loaded += 1 cost = float(attrs[self.cost_attribute.decode('utf-8')]) if id in self.current_interval: edgeMemory = self.current_interval[id] self.errors.append(edgeMemory.cost - cost) edgeMemory.update( cost, self.memory_weight, self.new_weight, self.pessimism) # if id == "4.3to4.4": # with open('debuglog', 'a') as f: # print(self.memory_factor, edgeMemory.cost, file=f) else: self.errors.append(0) self.current_interval[id] = EdgeMemory(cost) def load_costs(self, dumpfile, iteration, 
weight): # load costs from dumpfile and update memory according to weight and # iteration if weight <= 0: sys.stderr.write( "Skipped loading of costs because the weight was %s but should have been > 0\n" % weight) return assert(weight > 0) if self.iteration == None and iteration != 0: print("Warning: continuing with empty memory") # update memory weights. memory is a weighted average across all runs self.new_weight = float(weight) self.iteration = iteration self.errors = [] # mark all edges as unseen for edges in self.intervals.values(): for edgeMemory in edges.values(): edgeMemory.seen = False # parse costs self.num_loaded = 0 parser = make_parser() parser.setContentHandler(self) parser.parse(dumpfile)
# decay costs of unseen edges self.num_decayed = 0 for edges in self.intervals.values(): for id, edgeMemory in edges.items(): if not edgeMemory.seen: edgeMemory.update( self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism) self.num_decayed += 1
# if id == "4.3to4.4": # with open('debuglog', 'a') as f: # print(self.memory_factor, 'decay', edgeMemory.cost, file=f) # figure out the interval length if len(self.intervals.keys()) > 1: sorted_begin_times = sorted(self.intervals.keys()) self.interval_length = sorted_begin_times[ 1] - sorted_begin_times[0] self.memory_weight += self.new_weight def write_costs(self, weight_file): with open(weight_file, 'w') as f: f.write('<netstats>\n') for start, edge_costs in self.intervals.items(): f.write(' <interval begin="%d" end="%d">\n' % (start, start + self.interval_length)) for id, edgeMemory in edge_costs.items(): f.write(' <edge id="%s" %s="%s"/>\n' % (id, self.cost_attribute.decode('utf-8'), edgeMemory.cost)) f.write(' </interval>\n') f.write('</netstats>\n') def avg_error(self, values=None): if not values: values = self.errors l = len(list(values)) if l > 0: return (sum(list(values)) / l) else: return 0 def avg_abs_error(self): return self.avg_error(list(map(abs, self.errors))) def mean_error(self, values=None): if not values: values = self.errors values.sort() if values: return values[len(values) // 2] def mean_abs_error(self): return self.mean_error(list(map(abs, self.errors))) def loaded(self): return self.num_loaded def decayed(self): return self.num_decayed
#<ImportSpecificModules> import ShareYourSystem as SYS #
</ImportSpecificModules> #print(SYS.SumClass().insert('Parameter').hdfview().HdformatedConsoleStr) #print(SYS.SumClass().insert('Result').hdfview().HdformatedConsoleStr) #print(SYS.Sum.attest_insert()) #
print(SYS.Sum.attest_retrieve()) #print(SYS.Sum.attest_find()) #print(SYS.Sum.attest_recover()) #print(SYS.Sum.attest_scan())
dtv_test2, \t.platdata_size\t= sizeof(dtv_test2), \t.parent_idx\t= -1, }; /* Node /test3 index 2 */ static struct dtd_test3 dtv_test3 = { \t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654, 0x2, 0x3}, }; U_BOOT_DEVICE(test3) = { \t.name\t\t= "test3", \t.platdata\t= &dtv_test3, \t.platdata_size\t= sizeof(dtv_test3), \t.parent_idx\t= -1, }; ''' + C_EMPTY_POPULATE_PHANDLE_DATA, data) def test_addresses32(self): """Test output from a node with a 'reg' property with na=1, ns=1""" dtb_file = get_dtb_file('dtoc_test_addr32.dts') output = tools.GetOutputFilename('output') self.run_test(['struct'], dtb_file, output) with open(output) as infile: data = infile.read() self._CheckStrings(HEADER + ''' struct dtd_test1 { \tfdt32_t\t\treg[2]; }; struct dtd_test2 { \tfdt32_t\t\treg[4]; }; ''', data) self.run_test(['platdata'], dtb_file, output) with open(output) as infile: data = infile.read() self._CheckStrings(C_HEADER + ''' /* Node /test1 index 0 */ static struct dtd_test1 dtv_test1 = { \t.reg\t\t\t= {0x1234, 0x5678}, }; U_BOOT_DEVICE(test1) = { \t.name\t\t= "test1", \t.platdata\t= &dtv_test1, \t.platdata_size\t= sizeof(dtv_test1), \t.parent_idx\t= -1, }; /* Node /test2 index 1 */ static struct dtd_test2 dtv_test2 = { \t.reg\t\t\t= {0x12345678, 0x98765432, 0x2, 0x3}, }; U_BOOT_DEVICE(test2) = { \t.name\t\t= "test2", \t.platdata\t= &dtv_test2, \t.platdata_size\t= sizeof(dtv_test2), \t.parent_idx\t= -1, }; ''' + C_EMPTY_POPULATE_PHANDLE_DATA, data) def test_addresses64_32(self): """Test output from a node with a 'reg' property with na=2, ns=1""" dtb_file = get_dtb_file('dtoc_test_addr64_32.dts') output = tools.GetOutputFilename('output') self.run_test(['struct'], dtb_file, output) with open(output) as infile: data = infile.read() self._CheckStrings(HEADER + ''' struct dtd_test1 { \tfdt64_t\t\treg[2]; }; struct dtd_test2 { \tfdt64_t\t\treg[2]; }; struct dtd_test3 { \tfdt64_t\t\treg[4]; }; ''', data) self.run_test(['platdata'], dtb_file, output) with open(output) as infile: data 
= infile.read() self._CheckStrings(C_HEADER + ''' /* Node /test1 index 0 */ static struct dtd_test1 dtv_test1 = { \t.reg\t\t\t= {0x123400000000, 0x5678}, }; U_BOOT_DEVICE(test1) = { \t.name\t\t= "test1", \t.platdata\t= &dtv_test1, \t.platdata_size\t= sizeof(dtv_test1), \t.parent_idx\t= -1, }; /* Node /test2 index 1 */ static struct dtd_test2 dtv_test2 = { \t.reg\t\t\t= {0x1234567890123456, 0x98765432}, }; U_BOOT_DEVICE(test2) = { \t.name\t\t= "test2", \t.platdata\t= &dtv_test2, \t.platdata_size\t= sizeof(dtv_test2), \t.parent_idx\t= -1, }; /* Node /test3 index 2 */ static struct dtd_test3 dtv_test3 = { \t.reg\t\t\t= {0x1234567890123456, 0x98765432, 0x2, 0x3}, }; U_BOOT_DEVICE(test3) = { \t.name\t\t= "test3", \t.platdata\t= &dtv_test3, \t.platdata_size\t= sizeof(dtv_test3), \t.parent_idx\t= -1, }; ''' + C_EMPTY_POPULATE_PHANDLE_DATA, data) def test_addresses32_64(self): """Test output from a node with a 'reg' property with na=1, ns=2""" dtb_file = get_dtb_file('dtoc_test_addr32_64.dts') output = tools.GetOutputFilename('output') self.run_test(['struct'], dtb_file, output) with open(output) as infile: data = infile.read() self._CheckStrings(HEADER + ''' struct dtd_test1 { \tfdt64_t\t\treg[2]; }; struct dtd_test2 { \tfdt64_t\t\treg[2]; }; struct dtd_test3 { \tfdt64_t\t\treg[4]; }; ''', data) self.run_test(['platdata'], dtb_file, output) with open(output) as infile: data = infile.read() self._CheckStrings(C_HEADER + ''' /* Node /test1 index 0 */ static struct dtd_test1 dtv_test1 = { \t.reg\t\t\t= {0x1234, 0x567800000000}, }; U_BOOT_DEVICE(test1) = { \t.name\t\t= "test1", \t.platdata\t= &dtv_test1, \t.platdata_size\t= sizeof(dtv_test1), \t.parent_idx\t= -1, }; /* Node /test2 index 1 */ static struct dtd_test2 dtv_test2 = { \t.reg\t\t\t= {0x12345678, 0x9876543210987654}, }; U_BOOT_DEVICE(test2) = { \t.name\t\t= "test2", \t.platdata\t= &dtv_test2, \t.platdata_size\t= sizeof(dtv_test2), \t.parent_idx\t= -1, }; /* Node /test3 index 2 */ static struct dtd_test3 dtv_test3 = { 
\t.reg\t\t\t= {0x12345678, 0x9876543210987654, 0x2, 0x3}, }; U_BOOT_DEVICE(test3) = { \t.name\t\t= "test3", \t.platdata\t= &dtv_test3, \t.platdata_size\t= sizeof(dtv_test3), \t.parent_idx\t= -1, }; ''' + C_EMPTY_POPULATE_PHANDLE_DATA, data) def test_bad_reg(self): """Test that a reg property with an invalid type generates an error""" # Capture stderr since dtc will emit warnings for this file dtb_file = get_dtb_file('dtoc_test_bad_reg.dts', capture_stderr=True) output = tools.GetOutputFilename('output') with self.assertRaises(ValueError) as e: self.run_test(['struct'], dtb_file, output) self.assertIn("Node 'spl-test' reg property is not an int", str(e.exception)) def test_bad_reg2(self): """Test that a reg property with an invalid cell count is detected""" # Capture stderr since dtc will emit warnings for this file dtb_file = get_dtb_file('dtoc_test_bad_reg2.dts', capture_stderr=True) output = tools.GetOutputFilename('output') with self.assertRaises(ValueError) as e: self.run_test(['struct'], dtb_file, output) self.assertIn("Node 'spl-test' reg property has 3 cells which is not a multiple
of na + ns = 1 + 1)", str(e.exception)) def test_add_prop(self): """Test that a subequent node can
add a new property to a struct""" dtb_file = get_dtb_file('dtoc_test_add_prop.dts') output = tools.GetOutputFilename('output') self.run_test(['struct'], dtb_file, output) with open(output) as infile: data = infile.read() self._CheckStrings(HEADER + ''' struct dtd_sandbox_spl_test { \tfdt32_t\t\tintarray; \tfdt32_t\t\tintval; }; ''', data) self.run_test(['platdata'], dtb_file, output) with open(output) as infile: data = infile.read() self._CheckStrings(C_HEADER + ''' /* Node /spl-test index 0 */ static struct dtd_sandbox_spl_test dtv_spl_test = { \t.intval\t\t\t= 0x1, }; U_BOOT_DEVICE(spl_test) = { \t.name\t\t= "sandbox_spl_test", \t.platdata\t= &dtv_spl_test, \t.platdata_size\t= sizeof(dtv_spl_test), \t.parent_idx\t= -1, }; /* Node /spl-test2 index 1 */ static struct dtd_sandbox_spl_test dtv_spl_test2 = { \t.intarray\t\t= 0x5, }; U_BOOT_DEVICE(spl_test2) = { \t.name\t\t= "sandbox_spl_test", \t.platdata\t= &dtv_spl_test2, \t.platdata_size\t= sizeof(dtv_spl_test2), \t.parent_idx\t= -1, }; ''' + C_EMPTY_POPULATE_PHANDLE_DATA, data) def testStdout(self): """Test output to stdout""" dtb_file = get_dtb_file('dtoc_test_simple.dts') with test_util.capture_sys_output() as (stdout, stderr): self.run_test(['struct'], dtb_file, '-') def testNoCommand(self): """Test running dtoc without a command""" with self.assertRaises(ValueError) as e: self.run_test([], '', '') self.assertIn("Please specify a command: struct, platdata", str(e.exception)) def testBadCommand(self): """Test running dtoc with an invalid command""" dtb_file = get_dtb_file('dtoc_test_simple.dts') output = tools.GetOutputFilename('output') with self.assertRaises(ValueError) as e: self.run_test(['invalid-cmd'], dtb_file, output) self.assertIn("Unknown command 'invalid-cmd': (use: struct, platdata)", str(e.exception)) def testScanDrivers(self): """Test running dtoc with additional drivers to scan""" dtb_file = get_dtb_file('dtoc_test_simple.dts') output = tools.GetOutputFilename('output') with 
test_util.capture_sys_output() as (stdout, stderr): dtb_platdata.run_steps(['struct'], dtb_file, False, output, True, [None, '', 'tools/dtoc/d
import codecs
import mock
import os
import tempfile
import unittest
from time import strftime

import six

from kinto import config
from kinto import __version__


class ConfigTest(unittest.TestCase):
    """Tests for kinto.config template rendering and ``config.init``
    backend defaults."""

    def test_transpose_parameters_into_template(self):
        self.maxDiff = None
        template = "kinto.tpl"
        # BUG FIX: tempfile.mktemp() is deprecated and race-prone
        # (the returned name can be claimed by another process before use);
        # build the destination inside a fresh private directory instead.
        dest = os.path.join(tempfile.mkdtemp(), 'kinto.ini')
        config.render_template(template, dest,
                               secret='secret',
                               storage_backend='storage_backend',
                               cache_backend='cache_backend',
                               permission_backend='permission_backend',
                               storage_url='storage_url',
                               cache_url='cache_url',
                               permission_url='permission_url',
                               kinto_version='kinto_version',
                               config_file_timestamp='config_file_timestamp')

        with codecs.open(dest, 'r', encoding='utf-8') as d:
            destination_temp = d.read()

        sample_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                   "test_configuration/test.ini")
        with codecs.open(sample_path, 'r', encoding='utf-8') as c:
            sample = c.read()

        self.assertEqual(destination_temp, sample)

    def test_create_destination_directory(self):
        # render_template must create missing parent directories itself.
        dest = os.path.join(tempfile.mkdtemp(), 'config', 'kinto.ini')

        config.render_template("kinto.tpl", dest,
                               secret='secret',
                               storage_backend='storage_backend',
                               cache_backend='cache_backend',
                               permission_backend='permission_backend',
                               storage_url='storage_url',
                               cache_url='cache_url',
                               permission_url='permission_url',
                               kinto_version='kinto_version',
                               config_file_timestamp='config_file_timestamp')

        self.assertTrue(os.path.exists(dest))

    @mock.patch('kinto.config.render_template')
    def test_hmac_secret_is_text(self, mocked_render_template):
        config.init('kinto.ini', 'postgresql')
        args, kwargs = list(mocked_render_template.call_args)
        # deprecated assertEquals alias replaced by assertEqual throughout
        self.assertEqual(type(kwargs['secret']), six.text_type)

    @mock.patch('kinto.config.render_template')
    def test_init_postgresql_values(self, mocked_render_template):
        config.init('kinto.ini', 'postgresql')

        args, kwargs = list(mocked_render_template.call_args)
        self.assertEqual(args, ('kinto.tpl', 'kinto.ini'))

        postgresql_url = "postgres://postgres:postgres@localhost/postgres"
        self.assertDictEqual(kwargs, {
            'secret': kwargs['secret'],
            'storage_backend': 'kinto.core.storage.postgresql',
            'cache_backend': 'kinto.core.cache.postgresql',
            'permission_backend': 'kinto.core.permission.postgresql',
            'storage_url': postgresql_url,
            'cache_url': postgresql_url,
            'permission_url': postgresql_url,
            'kinto_version': __version__,
            'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
        })

    @mock.patch('kinto.config.render_template')
    def test_init_redis_values(self, mocked_render_template):
        config.init('kinto.ini', 'redis')

        args, kwargs = list(mocked_render_template.call_args)
        self.assertEqual(args, ('kinto.tpl', 'kinto.ini'))

        redis_url = "redis://localhost:6379"

        self.maxDiff = None  # See the full diff in case of error
        self.assertDictEqual(kwargs, {
            'secret': kwargs['secret'],
            'storage_backend': 'kinto_redis.storage',
            'cache_backend': 'kinto_redis.cache',
            'permission_backend': 'kinto_redis.permission',
            'storage_url': redis_url + '/1',
            'cache_url': redis_url + '/2',
            'permission_url': redis_url + '/3',
            'kinto_version': __version__,
            'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
        })

    @mock.patch('kinto.config.render_template')
    def test_init_memory_values(self, mocked_render_template):
        config.init('kinto.ini', 'memory')

        args, kwargs = list(mocked_render_template.call_args)
        self.assertEqual(args, ('kinto.tpl', 'kinto.ini'))

        self.assertDictEqual(kwargs, {
            'secret': kwargs['secret'],
            'storage_backend': 'kinto.core.storage.memory',
            'cache_backend': 'kinto.core.cache.memory',
            'permission_backend': 'kinto.core.permission.memory',
            'storage_url': '',
            'cache_url': '',
            'permission_url': '',
            'kinto_version': __version__,
            'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
        })

    def test_render_template_creates_directory_if_necessary(self):
        temp_path = tempfile.mkdtemp()
        destination = os.path.join(temp_path, 'config/kinto.ini')
        config.render_template('kinto.tpl', destination, **{
            'secret': "abcd-ceci-est-un-secret",
            'storage_backend': 'kinto.core.storage.memory',
            'cache_backend': 'kinto.core.cache.memory',
            'permission_backend': 'kinto.core.permission.memory',
            'storage_url': '',
            'cache_url': '',
            'permission_url': '',
            'kinto_version': '',
            'config_file_timestamp': ''
        })
        self.assertTrue(os.path.exists(destination))

    def test_render_template_works_with_file_in_cwd(self):
        temp_path = tempfile.mkdtemp()
        os.chdir(temp_path)
        config.render_template('kinto.tpl', 'kinto.ini', **{
            'secret': "abcd-ceci-est-un-secret",
            'storage_backend': 'kinto.core.storage.memory',
            'cache_backend': 'kinto.core.cache.memory',
            'permission_backend': 'kinto.core.permission.memory',
            'storage_url': '',
            'cache_url': '',
            'permission_url': '',
            'kinto_version': '',
            'config_file_timestamp': ''
        })
        self.assertTrue(os.path.exists(
            os.path.join(temp_path, 'kinto.ini')
        ))
def max_rectangle(heights):
    """Return the area of the largest rectangle in the histogram *heights*.

    Classic monotonic-stack algorithm, O(n) time and O(n) space.

    BUG FIX: the original appended a 0-height sentinel directly to the
    caller's list, so every call grew ``heights`` by one trailing zero.
    maximalRectangle() calls this once per matrix row on the same list,
    leaking one element per row. The sentinel is now virtual and the
    input is never mutated.
    """
    res = 0
    n = len(heights)
    stack = []  # indices of bars forming a non-decreasing height run
    for i in range(n + 1):
        # Virtual sentinel: height 0 past the end flushes the whole stack.
        cur = heights[i] if i < n else 0
        while stack and cur < heights[stack[-1]]:
            h = heights[stack.pop()]
            # Width spans from the new stack top (exclusive) up to i.
            w = i if not stack else i - stack[-1] - 1
            res = max(res, h * w)
        stack.append(i)
    return res


class Solution:
    def maximalRectangle(self, matrix):
        """Return the area of the largest rectangle of '1's in *matrix*.

        :type matrix: List[List[str]]
        :rtype: int

        Reduces each row to a histogram of consecutive '1's above it and
        applies max_rectangle per row: O(m*n) total.
        """
        if not matrix or not matrix[0]:
            return 0
        m = len(matrix)
        n = len(matrix[0])
        heights = [1 if x == '1' else 0 for x in matrix[0]]
        ans = max_rectangle(heights)
        for i in range(1, m):
            for j in range(n):
                # A '0' resets the column; a '1' extends the run above.
                heights[j] = 0 if matrix[i][j] == '0' else heights[j] + 1
            ans = max(ans, max_rectangle(heights))
        return ans


if __name__ == "__main__":
    sol = Solution()
    M = [['1', '0', '1', '0', '0'],
         ['1', '0', '1', '1', '1'],
         ['1', '1', '1', '1', '1'],
         ['1', '0', '0', '1', '0']]
    print(sol.maximalRectangle(M))
#!/usr/bin/env python # -*- co
ding: utf-8 -*- # Copyright (c) 2015-2017: # Frederic Mohier, frederic.mohier@alignak.net # """ Alignak - C
hecks pack for NRPE monitored Linux hosts/services """
', 'yeux_enfonces', 'soif', 'pli_cutane', 'fievre_presence', 'fievre_presence_duree', 'fievre_presence_longue', 'tdr', 'urines_foncees', 'saignements_anormaux', 'raideur_nuque', 'ictere', 'choc', 'eruption_cutanee', 'ecoulement_nasal', 'yeux_rouge', 'ecoulement_oculaire', 'ulcerations', 'cornee', 'oreille', 'oreille_probleme', 'oreille_douleur', 'oreille_ecoulement', 'oreille_ecoulement_duree', 'oreille_gonflement', 'paleur_palmaire', 'oedemes', 'test_appetit', 'serologie_enfant', 'test_enfant', 'pneumonie_recidivante', 'diarrhee_dernierement', 'candidose_buccale', 'hypertrophie_ganglions_lymphatiques', 'augmentation_glande_parotide', 'test_mere', 'serologie_mere', 'other_comments'] class ChildClassificationExtended(BaseSqlData): table_name = "fluff_TDHChildClassificationFluff" slug = 'child_classification' title = 'Child Classification' @property def columns(self): from custom.tdh.models import TDHChildClassificationFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHChildClassificationFluff return [DataTablesColumn(self.header(k)) for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHChildClassificationFluff return [k for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class EnrollChild(BaseSqlData): table_name = "fluff_TDHEnrollChildFluff" slug = 'enroll_child' title = 'Enroll Child' @property def filters(self): return [] @property def group_by(self): return ['case_id', 'dob', 'sex', 'village'] @property def headers(self): return [DataTablesColumn(self.header(k)) for k in self.group_by] class EnrollChildExtended(BaseSqlData): table_name = "fluff_TDHEnrollChildFluff" slug = 'enroll_child' title = 'Enroll Child' @property def filters(self): return [] 
@property def columns(self): from custom.tdh.models import TDHEnrollChildFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHEnrollChildFluff return [DataTablesColumn(self.header(k)) for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self):
from custom.tdh.models import TDHEnrollChildFluff return [k for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class InfantTreatment(BaseSqlData): table_name = "fluff_TDHInfantTreatmentFluff" slug = 'infant_treatment' title = 'Infant Treatment' @property def filters(self): return
[] @property def group_by(self): return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_treat_2', 'infection_grave_no_ref_treat_0', 'infection_grave_no_ref_treat_1', 'infection_grave_no_ref_treat_2', 'infection_grave_no_ref_treat_5', 'infection_locale_treat_0', 'infection_locale_treat_1', 'maladie_grave_treat_0', 'maladie_grave_treat_1'] class InfantTreatmentExtended(BaseSqlData): table_name = "fluff_TDHInfantTreatmentFluff" slug = 'infant_treatment' title = 'Infant Treatment' @property def filters(self): return [] @property def columns(self): from custom.tdh.models import TDHInfantTreatmentFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHInfantTreatmentFluff return [DataTablesColumn(self.header(k)) for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHInfantTreatmentFluff return [k for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class NewbornTreatment(BaseSqlData): table_name = "fluff_TDHNewbornTreatmentFluff" slug = 'newborn_treatment' title = 'Newborn Treatment' @property def filters(self): return [] @property def group_by(self): return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_no_ref_treat_0', 'infection_grave_no_ref_treat_1', 'infection_locale_treat_0', 'infection_locale_treat_1', 'incapable_nourrir_treat_0', 'incapable_nourrir_treat_1'] class NewbornTreatmentExtended(BaseSqlData): table_name = "fluff_TDHNewbornTreatmentFluff" slug = 'newborn_treatment' title = 'Newborn Treatment' @property def filters(self): return [] @property def columns(self): from custom.tdh.models import TDHNewbornTreatmentFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in 
TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHNewbornTreatmentFluff return [DataTablesColumn(self.header(k)) for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHNewbornTreatmentFluff return [k for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class ChildTreatment(BaseSqlData): table_name = "fluff_TDHChildTreatmentFluff" slug = 'child_treatment' title = 'Child Treatment' @property def filters(self): return [] @property def group_by(self): return ['case_id', 'pneumonie_grave_treat_0', 'pneumonie_grave_treat_1', 'pneumonie_grave_treat_4', 'pneumonie_grave_no_ref_treat_0', 'pneumonie_grave_no_ref_treat_1', 'pneumonie_grave_no_ref_treat_3', 'pneumonie_grave_no_ref_treat_5', 'pneumonie_grave_no_ref_treat_6', 'pneumonie_treat_0', 'pneumonie_treat_1', 'deshydratation_severe_pas_grave_perfusion_treat_3', 'deshydratation_severe_pas_grave_perfusion_treat_4', 'deshydratation_severe_pas_grave_perfusion_treat_5', 'deshydratation_severe_pas_grave_perfusion_treat_6', 'deshydratation_severe_pas_grave_perfusion_treat_8', 'deshydratation_severe_pas_grave_perfusion_treat_9', 'deshydratation_severe_pas_grave_perfusion_treat_10', 'deshydratation_severe_pas_grave_perfusion_treat_11', 'deshydratation_severe_pas_grave_perfusion_treat_15', 'deshydratation_severe_pas_grave_perfusion_treat_16', 'deshydratation_severe_pas_grave_sng_treat_2', 'deshydratation_severe_pas_grave_sng_treat_3', 'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_3', 'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_4', 'signes_deshydratation_treat_0', 'signes_deshydratation_treat_3', 'pas_deshydratation_treat_1', 'dysenterie_treat_1', 'dysenterie_treat_2', 'dysenterie_treat_3', 'diahree_persistante_treat_0', 'diahree_persistante_treat_1', 
'paludisme_grave_treat_0', 'paludisme_grave_treat_1', 'paludisme_grave_treat_2', 'paludisme_grave_treat_4', 'paludisme_grave_treat_5', 'paludisme_grave_trea
import numpy as np
import copy
import datetime as dt

import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkstudy.EventProfiler as ep

from bollinger import Bollinger


def find_events(ls_symbols, d_data):
    """Build an event matrix from Bollinger-band crossings.

    Accepts a list of symbols along with the data dictionary and returns the
    Event Matrix, a pandas DataFrame shaped like the close-price frame:

        |IBM |GOOG|XOM |MSFT| GS | JP |
    (d1)|nan |nan | 1  |nan |nan | 1  |
    (d2)|nan | 1  |nan |nan |nan |nan |

    nan = no event on that day; 1 = the event occurred.

    Event condition for symbol s on day i:
      * Bollinger value of s today      < -2.0
      * Bollinger value of s yesterday >= -2.0
      * Bollinger value of SPY today   >= 1.5
    """
    df_close = d_data['close']

    print("Finding Events")

    # Event matrix: same index/columns as close prices, initialised to NaN.
    df_events = copy.deepcopy(df_close)
    df_events = df_events * np.NAN

    # Time stamps for the event range.
    ldt_timestamps = df_close.index

    # PERF FIX: the Bollinger helper depends only on df_close, so build it
    # once.  The original constructed it inside the inner loop, i.e. once per
    # (symbol, day) pair.
    bollinger_obj = Bollinger(df_close)

    for s_sym in ls_symbols:
        for i in range(1, len(ldt_timestamps)):
            equity_today = bollinger_obj.get_value(ldt_timestamps[i], s_sym)
            equity_yesterday = bollinger_obj.get_value(ldt_timestamps[i - 1], s_sym)
            mkt_today = bollinger_obj.get_value(ldt_timestamps[i], 'SPY')

            if equity_today < -2.0 and equity_yesterday >= -2.0 and mkt_today >= 1.5:
                df_events[s_sym].ix[ldt_timestamps[i]] = 1

    return df_events


if __name__ == '__main__':
    dt_start = dt.datetime(2008, 1, 1)
    dt_end = dt.datetime(2009, 12, 31)
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))

    dataobj = da.DataAccess('Yahoo')
    ls_symbols = dataobj.get_symbols_from_list('sp5002012')
    ls_symbols.append('SPY')

    ls_keys = ['close']
    ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))

    # Fill data gaps: forward-fill, then back-fill, then a constant fallback.
    for s_key in ls_keys:
        d_data[s_key] = d_data[s_key].fillna(method='ffill')
        d_data[s_key] = d_data[s_key].fillna(method='bfill')
        d_data[s_key] = d_data[s_key].fillna(1.0)

    df_events = find_events(ls_symbols, d_data)
    print("Creating Study")
    ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20,
                     s_filename='BollingerStudy.pdf', b_market_neutral=True,
                     b_errorbars=True, s_market_sym='SPY')
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#

''' Generic XML bibliographic style handler '''

import string

from Pyblio.Style import Parser
from Pyblio import Autoload, recode


def author_desc(group, coding, initials=0, reverse=0):
    """Create a nice string describing a group of authors.

    coding   : name of the output coding (as requested for recode)
    initials : if = 1, uses initials instead of complete first names
    reverse  : -1 use First Last format
                0 use Last, First, excepted for the first entry
                1 use Last, First for all the authors, not only the first
    """
    l = len(group)
    fulltext = ""

    for i in range(0, l):
        (honorific, first, last, lineage) = group[i].format(coding)
        if initials:
            first = group[i].initials(coding)

        text = ""
        if reverse == 1 or (i == 0 and reverse == 0):
            # "Last, Lineage, First" ordering.
            if last:
                text = text + last
            if lineage:
                text = text + ", " + lineage
            if first:
                text = text + ", " + first
        else:
            # "First Last, Lineage" ordering.
            if first:
                text = first + " "
            if last:
                text = text + last
            if lineage:
                text = text + ", " + lineage

        if text:
            # Oxford-less list: ", " between entries, " and " before the last.
            if i < l - 2:
                text = text + ", "
            elif i == l - 2:
                text = text + " and "
            fulltext = fulltext + text

    # Avoid a dot at the end of the author list.
    # BUG FIX: guard the empty case -- with an empty author group fulltext is
    # "" and the original fulltext[-1] raised IndexError.
    if fulltext and fulltext[-1] == '.':
        fulltext = fulltext[0:-1]

    return fulltext


def string_key(entry, fmt, table):
    """ Generates an alphabetical key for an entry. fmt is the output coding """
    rc = recode.recode("latin1.." + fmt)

    # Prefer authors, fall back to editors, else derive from the entry key.
    if entry.has_key('author'):
        aut = entry['author']
    elif entry.has_key('editor'):
        aut = entry['editor']
    else:
        aut = ()

    if len(aut) > 0:
        if len(aut) > 1:
            # Multiple people: initials of each last name, capped at 3,
            # with '+' marking that more than three were omitted.
            key = ''
            for a in aut:
                honorific, first, last, lineage = a.format(fmt)
                key = key + string.join(map(lambda x: x[0], string.split(last, ' ')), '')
                if len(key) >= 3:
                    if len(aut) > 3:
                        key = key + '+'
                    break
        else:
            # Single person: first three letters of a one-word last name,
            # otherwise the initials of its words.
            honorific, first, last, lineage = aut[0].format(fmt)
            parts = string.split(last, ' ')
            if len(parts) == 1:
                key = parts[0][0:3]
            else:
                key = string.join(map(lambda x: x[0], parts), '')
    else:
        key = rc(entry.key.key[0:3])

    # Append the two-digit year when a date is present.
    if entry.has_key('date'):
        year = entry['date'].format(fmt)[0]
        if year:
            key = key + year[2:]

    # Disambiguate collisions with 'a', 'b', 'c', ... suffixes.
    if table.has_key(key) or table.has_key(key + 'a'):
        if table.has_key(key):
            # rename the old entry
            new = key + 'a'
            table[new] = table[key]
            del table[key]
        base = key
        suff = ord('b')
        key = base + chr(suff)
        while table.has_key(key):
            suff = suff + 1
            key = base + chr(suff)

    return key


def numeric_key(entry, fmt, table):
    """Return the smallest positive integer (as a string) unused in table."""
    count = 1
    while table.has_key(str(count)):
        count = count + 1
    return str(count)


def create_string_key(database, keys, fmt):
    """Map alphabetic citation keys to entry keys; return (table, sorted keys)."""
    table = {}
    for key in keys:
        s = string_key(database[key], fmt, table)
        table[s] = key
    skeys = table.keys()
    skeys.sort()
    return table, skeys


def create_numeric_key(database, keys, fmt):
    """Map numeric citation keys to entry keys; return (table, keys in order)."""
    table = {}
    skeys = []
    for key in keys:
        s = numeric_key(database[key], fmt, table)
        table[s] = key
        skeys.append(s)
    return table, skeys


def standard_date(entry, coding):
    """Format a date entry as [day/][month/]year."""
    (text, month, day) = entry.format(coding)
    if month:
        text = "%s/%s" % (month, text)
    if day:
        text = "%s/%s" % (day, text)
    return text


def last_first_full_authors(entry, coding):
    return author_desc(entry, coding, 0, 1)


def first_last_full_authors(entry, coding):
    return author_desc(entry, coding, 0, -1)


def full_authors(entry, coding):
    return author_desc(entry, coding, 0, 0)


def initials_authors(entry, coding):
    return author_desc(entry, coding, 1, 0)


def first_last_initials_authors(entry, coding):
    return author_desc(entry, coding, 1, -1)


def last_first_initials_authors(entry, coding):
    return author_desc(entry, coding, 1, 1)


Autoload.register('style', 'Generic', {
    'first_last_full_authors': first_last_full_authors,
    'last_first_full_authors': last_first_full_authors,
    'full_authors': full_authors,
    'first_last_initials_authors': first_last_initials_authors,
    'last_first_initials_authors': last_first_initials_authors,
    'initials_authors': initials_authors,
    'string_keys': create_string_key,
    'numeric_keys': create_numeric_key,
    'european_date': standard_date,
})
] = QtWidgets.QDockWidget('Plotting-' + str(count)) dock['Plotting-' + str(count)].setWidget(self.plottingWidget) self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock['Plotting-' + str(count)]) self.tabifyDockWidget(dock['Welcome'], dock['Plotting-' + str(count)]) dock['Plotting-
' + str(count)].setVisible(True) dock['Plotting-' + str(count)].setFocus() dock['Plotting-' + str(count)].raise_() temp = self.obj_appconfig.current_project['ProjectName'] if temp: self.obj_appconfig.dock_dict[temp].a
ppend( dock['Plotting-' + str(count)] ) count = count + 1 def ngspiceEditor(self, projDir): """ This function creates widget for Ngspice window.""" self.projDir = projDir self.projName = os.path.basename(self.projDir) self.ngspiceNetlist = os.path.join( self.projDir, self.projName + ".cir.out") # Edited by Sumanto Kar 25/08/2021 if os.path.isfile(self.ngspiceNetlist) is False: return False global count self.ngspiceWidget = QtWidgets.QWidget() self.ngspiceLayout = QtWidgets.QVBoxLayout() self.ngspiceLayout.addWidget( NgspiceWidget(self.ngspiceNetlist, self.projDir) ) # Adding to main Layout self.ngspiceWidget.setLayout(self.ngspiceLayout) dock['NgSpice-' + str(count) ] = QtWidgets.QDockWidget('NgSpice-' + str(count)) dock['NgSpice-' + str(count)].setWidget(self.ngspiceWidget) self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock['NgSpice-' + str(count)]) self.tabifyDockWidget(dock['Welcome'], dock['NgSpice-' + str(count)]) # CSS dock['NgSpice-' + str(count)].setStyleSheet(" \ .QWidget { border-radius: 15px; border: 1px solid gray; padding: 0px;\ width: 200px; height: 150px; } \ ") dock['NgSpice-' + str(count)].setVisible(True) dock['NgSpice-' + str(count)].setFocus() dock['NgSpice-' + str(count)].raise_() temp = self.obj_appconfig.current_project['ProjectName'] if temp: self.obj_appconfig.dock_dict[temp].append( dock['NgSpice-' + str(count)] ) count = count + 1 def modelEditor(self): """This function defines UI for model editor.""" print("in model editor") global count self.modelwidget = QtWidgets.QWidget() self.modellayout = QtWidgets.QVBoxLayout() self.modellayout.addWidget(ModelEditorclass()) # Adding to main Layout self.modelwidget.setLayout(self.modellayout) dock['Model Editor-' + str(count)] = QtWidgets.QDockWidget('Model Editor-' + str(count)) dock['Model Editor-' + str(count)].setWidget(self.modelwidget) self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock['Model Editor-' + str(count)]) self.tabifyDockWidget(dock['Welcome'], dock['Model Editor-' + 
str(count)]) # CSS dock['Model Editor-' + str(count)].setStyleSheet(" \ .QWidget { border-radius: 15px; border: 1px solid gray; \ padding: 5px; width: 200px; height: 150px; } \ ") dock['Model Editor-' + str(count)].setVisible(True) dock['Model Editor-' + str(count)].setFocus() dock['Model Editor-' + str(count)].raise_() count = count + 1 def kicadToNgspiceEditor(self, clarg1, clarg2=None): """ This function is creating Editor UI for Kicad to Ngspice conversion. """ global count self.kicadToNgspiceWidget = QtWidgets.QWidget() self.kicadToNgspiceLayout = QtWidgets.QVBoxLayout() self.kicadToNgspiceLayout.addWidget(MainWindow(clarg1, clarg2)) self.kicadToNgspiceWidget.setLayout(self.kicadToNgspiceLayout) dock['kicadToNgspice-' + str(count)] = \ QtWidgets.QDockWidget('kicadToNgspice-' + str(count)) dock['kicadToNgspice-' + str(count)].setWidget(self.kicadToNgspiceWidget) self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock['kicadToNgspice-' + str(count)]) self.tabifyDockWidget(dock['Welcome'], dock['kicadToNgspice-' + str(count)]) # CSS dock['kicadToNgspice-' + str(count)].setStyleSheet(" \ .QWidget { border-radius: 15px; border: 1px solid gray;\ padding: 5px; width: 200px; height: 150px; } \ ") dock['kicadToNgspice-' + str(count)].setVisible(True) dock['kicadToNgspice-' + str(count)].setFocus() dock['kicadToNgspice-' + str(count)].raise_() dock['kicadToNgspice-' + str(count)].activateWindow() temp = self.obj_appconfig.current_project['ProjectName'] if temp: self.obj_appconfig.dock_dict[temp].append( dock['kicadToNgspice-' + str(count)] ) count = count + 1 def subcircuiteditor(self): """This function creates a widget for different subcircuit options.""" global count self.subcktWidget = QtWidgets.QWidget() self.subcktLayout = QtWidgets.QVBoxLayout() self.subcktLayout.addWidget(Subcircuit(self)) self.subcktWidget.setLayout(self.subcktLayout) dock['Subcircuit-' + str(count)] = QtWidgets.QDockWidget('Subcircuit-' + str(count)) dock['Subcircuit-' + 
str(count)].setWidget(self.subcktWidget) self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock['Subcircuit-' + str(count)]) self.tabifyDockWidget(dock['Welcome'], dock['Subcircuit-' + str(count)]) # CSS dock['Subcircuit-' + str(count)].setStyleSheet(" \ .QWidget { border-radius: 15px; border: 1px solid gray;\ padding: 5px; width: 200px; height: 150px; } \ ") dock['Subcircuit-' + str(count)].setVisible(True) dock['Subcircuit-' + str(count)].setFocus() dock['Subcircuit-' + str(count)].raise_() count = count + 1 def makerchip(self): """This function creates a widget for different subcircuit options.""" global count self.makerWidget = QtWidgets.QWidget() self.makerLayout = QtWidgets.QVBoxLayout() self.makerLayout.addWidget(makerchip(self)) self.makerWidget.setLayout(self.makerLayout) dock['Makerchip-' + str(count)] = QtWidgets.QDockWidget('Makerchip-' + str(count)) dock['Makerchip-' + str(count)].setWidget(self.makerWidget) self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock['Makerchip-' + str(count)]) self.tabifyDockWidget(dock['Welcome'], dock['Makerchip-' + str(count)]) # CSS dock['Makerchip-' + str(count)].setStyleSheet(" \ .QWidget { border-radius: 15px; border: 1px solid gray;\ padding: 5px; width: 200px; height: 150px; } \ ") dock['Makerchip-' + str(count)].setVisible(True) dock['Makerchip-' + str(count)].setFocus() dock['Makerchip-' + str(count)].raise_() count = count + 1 def usermanual(self): """This function creates a widget for user manual.""" global count self.usermanualWidget = QtWidgets.QWidget() self.usermanualLayout = QtWidgets.QVBoxLayout() self.usermanualLayout.addWidget(UserManual()) self.usermanualWidget.setLayout(self.usermanualLayout) dock['User Manual-' + str(count)] = QtWidgets.QDockWidget('User Manual-' + str(count)) dock['User Manual-' + str(count)].setWidget(self.usermanualWidget) self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock['User Manual-' + str(count)]) self.tabifyDockWidget(dock['Welcome'], dock['User Manual-' + str(count)])
ry() for in_feat_b in union_b.getFeatures(request): count += 1 at_map_b = in_feat_b.attributes() tmp_geom = geometry_checker(in_feat_b.geometry()) if engine.intersects(tmp_geom.constGet()): int_geom = geometry_checker(geom.intersection(tmp_geom)) list_intersecting_b.append(QgsGeometry(tmp_geom)) if not int_geom: # There was a problem creating the intersection # LOGGER.debug( # tr('GEOS geoprocessing error: One or more input ' # 'features have invalid geometry.')) pass int_geom = QgsGeometry() else: int_geom = QgsGeometry(int_geom) if int_geom.wkbType() == QgsWkbTypes.UnknownGeometry \ or QgsWkbTypes.flatType( int_geom.constGet().wkbType()) == \
QgsWkbTypes.GeometryCollection: # Intersection produced different geometry types temp_list = int_geom.as
GeometryCollection() for i in temp_list: if i.type() == geom.type(): int_geom = QgsGeometry(geometry_checker(i)) try: _write_feature( at_map_a + at_map_b, int_geom, writer, not_null_field_index, ) except BaseException: LOGGER.debug( tr('Feature geometry error: One or ' 'more output features ignored due ' 'to invalid geometry.')) else: # Geometry list: prevents writing error # in geometries of different types # produced by the intersection # fix #3549 if int_geom.wkbType() in wkb_type_groups[ wkb_type_groups[int_geom.wkbType()]]: try: _write_feature( at_map_a + at_map_b, int_geom, writer, not_null_field_index) except BaseException: LOGGER.debug( tr('Feature geometry error: One or more ' 'output features ignored due to ' 'invalid geometry.')) # the remaining bit of inFeatA's geometry # if there is nothing left, this will just silently fail and we # are good diff_geom = QgsGeometry(geom) if len(list_intersecting_b) != 0: int_b = QgsGeometry.unaryUnion(list_intersecting_b) diff_geom = geometry_checker(diff_geom.difference(int_b)) if diff_geom is None or \ diff_geom.isEmpty() or not diff_geom.isGeosValid(): # LOGGER.debug( # tr('GEOS geoprocessing error: One or more input ' # 'features have invalid geometry.')) pass if diff_geom is not None and ( diff_geom.wkbType() == 0 or QgsWkbTypes.flatType( diff_geom.constGet().wkbType()) == QgsWkbTypes.GeometryCollection): temp_list = diff_geom.asGeometryCollection() for i in temp_list: if i.type() == geom.type(): diff_geom = QgsGeometry(geometry_checker(i)) try: _write_feature( at_map_a, diff_geom, writer, not_null_field_index) except BaseException: LOGGER.debug( tr('Feature geometry error: One or more output features ' 'ignored due to invalid geometry.')) length = len(union_a.fields()) # nFeat = len(union_b.getFeatures()) for in_feat_a in union_b.getFeatures(): # progress.setPercentage(nElement / float(nFeat) * 100) geom = geometry_checker(in_feat_a.geometry()) atMap = [None] * length atMap.extend(in_feat_a.attributes()) 
intersects = index_b.intersects(geom.boundingBox()) lstIntersectingA = [] for id in intersects: request = QgsFeatureRequest().setFilterFid(id) inFeatB = next(union_a.getFeatures(request)) tmpGeom = QgsGeometry(geometry_checker(inFeatB.geometry())) if geom.intersects(tmpGeom): lstIntersectingA.append(tmpGeom) if len(lstIntersectingA) == 0: res_geom = geom else: intA = QgsGeometry.unaryUnion(lstIntersectingA) res_geom = geom.difference(intA) if res_geom is None: # LOGGER.debug( # tr('GEOS geoprocessing error: One or more input features ' # 'have null geometry.')) pass continue # maybe it is better to fail like @gustry # does below .... if res_geom.isEmpty() or not res_geom.isGeosValid(): # LOGGER.debug( # tr('GEOS geoprocessing error: One or more input features ' # 'have invalid geometry.')) pass try: _write_feature(atMap, res_geom, writer, not_null_field_index) except BaseException: # LOGGER.debug( # tr('Feature geometry error: One or more output features ' # 'ignored due to invalid geometry.')) pass n_element += 1 # End of copy/paste from processing writer.commitChanges() fill_hazard_class(writer) check_layer(writer) return writer def _write_feature(attributes, geometry, writer, not_null_field_index): """ Internal function to write the feature to the output. :param attributes: Attributes of the feature. :type attributes: list :param geometry: The geometry to write to the output. :type geometry: QgsGeometry :param writer: A vector layer in editing mode. :type: QgsVectorLayer :param not_null_field_index: The index in the attribute table which should not be null. :type not_null_field_index: int """ if writer.geometryType() != geometry.type(): # We don't write the feature if it's not the same geometry type. return compulsary_field = attributes[not_null_field_index] if not compulsary_field: # We don't want feature without a compulsary field. # I think this a bug from the union algorithm. 
return out_feature = QgsFeature() out_feature.setGeometry(geometry) out_feature.setAttributes(attributes) writer.addFeature(out_feature) @profile def fill_hazard_class(layer): """We need to fill hazard class when it's empty. :param layer: The vector layer. :type layer: QgsVectorLayer :return: The updated vector layer. :rtype: QgsVectorLayer .. versionadded:: 4.0 """ hazard_field = layer.keywords['inasafe_fields'][hazard_class_field['key']] expression = '"%s" is NULL OR "%s" = \'\'' % (hazard_field, hazard_field) index = layer.fields().lookupField(hazard_field) request = QgsFeatureRequest().setFilterExpression(expression) layer.startEditing() for feature in layer.getFeatur
import argparse
import glob
import os
import subprocess


def expand_path(path):
    """Return *path* with ~user and $VARS expanded, made absolute."""
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))


def is_file(path):
    """Return True if *path* is non-empty and names an existing regular file."""
    return bool(path) and os.path.isfile(path)


def arg_is_file(path):
    """argparse `type=` callable: validate *path* and return its expanded form.

    Raises argparse.ArgumentTypeError when *path* is not an existing file.
    """
    # BUG FIX: the original used a bare `raise` with no active exception
    # (a runtime error in itself) swallowed by a bare `except:`, and it
    # referenced `argparse` without ever importing it (NameError at runtime).
    if not is_file(path):
        msg = '{0!r} is not a file'.format(path)
        raise argparse.ArgumentTypeError(msg)
    return expand_path(path)


def run_jmodeltest(name):
    """Run jModelTest on *name*, redirecting output to <name>.results.txt.

    Blocks until the subprocess finishes.
    """
    jmodel_proc = subprocess.Popen(
        'java -jar ~/phylo_tools/jmodeltest-2.1.5/jModelTest.jar -d '
        + str(name) + ' -s 3 -f -i -g 4 -BIC -c 0.95 > '
        + str(name) + '.results.txt',
        shell=True, executable='/bin/bash')
    jmodel_proc.wait()


def get_models(f, gene_name, out):
    """Scan a jModelTest results file *f* and write '<gene>\\t<model>' to *out*.

    Picks the model reported on the "the 95% confidence interval" line.
    """
    # BUG FIX: the original used the Python-2-only file() builtin and never
    # closed the handle; open() inside `with` works on Python 2 and 3.
    with open(f) as fl:
        for line in fl:
            line = line.strip()
            if "the 95% confidence interval" in line:
                model = line.split(': ')[1]
                out.write(str(gene_name) + '\t' + str(model) + '\n')


def main():
    """Run jModelTest over every *.nex in cwd, then summarise to models.txt."""
    for f in glob.glob('*.nex'):
        run_jmodeltest(f)
    # BUG FIX: close models.txt deterministically.
    with open('models.txt', 'w') as out:
        for f in glob.glob('*.results.txt'):
            gene_name = f.split('.')[0]
            get_models(f, gene_name, out)


# NOTE(review): a large commented-out argparse CLI (nexus/fasta/phylip format
# conversion) was removed here; it referenced helpers (in_nex, out_fasta, ...)
# that are not defined anywhere in this file. Recover it from version control
# if that feature is ever resurrected.

if __name__ == '__main__':
    main()
# coding=utf-8
# Draw ten random integers in [1, 100], then print them in ascending order
# together with the largest ("maior") and smallest ("menor") values drawn.
import random

# Same ten randint calls, in the same order, as the original loop.
lista = [random.randint(1, 100) for _ in range(10)]

# The running max/min bookkeeping of the original is equivalent to taking
# max/min of the finished list.
maior = max(lista)
menor = min(lista)

lista.sort()
print(lista)
print("Maior: %d" % maior)
print("Menor: %d" % menor)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-26 12:20
from __future__ import unicode_literals

from django.db import migrations, models


# Schema migration for the codenerix_products app: adds an optional
# "caducity" (expiry) date column to the ProductUnique model.
# Auto-generated by `makemigrations`; do not hand-edit operations after this
# migration has been applied anywhere -- create a follow-up migration instead.
class Migration(migrations.Migration):

    dependencies = [
        ('codenerix_products', '0011_auto_20180202_0826'),
    ]

    operations = [
        migrations.AddField(
            model_name='productunique',
            name='caducity',
            # Nullable with default None so existing rows need no backfill.
            field=models.DateField(blank=True, default=None, null=True, verbose_name='Caducity'),
        ),
    ]
be returned with instances where there is a significant performance difference """ regression_table_data = list() improvement_table_data = list() full_comparison_str = str() for workload_scale_key, workload in grouped.items(): for query_name, file_formats in workload.items(): for file_format, results in file_formats.items(): ref_results = ref_grouped[workload_scale_key][query_name][file_format] change_significant, is_regression = check_perf_change_significance( results, ref_results) if change_significant: full_comparison_str += build_perf_change_str( results, ref_results, is_regression) + '\n' full_comparison_str += build_exec_summary_str(results, ref_results) + '\n' change_row = build_perf_change_row(results, ref_results, is_regression) if is_regression: regression_table_data.append(change_row) else: improvement_table_data.append(change_row) try: save_runtime_diffs(results, ref_results, change_significant, is_regression) except Exception as e: print 'Could not generate an html diff: %s' % e return full_comparison_str, regression_table_data, improvement_table_data def is_result_group_comparable(grouped, ref_grouped): """Given two nested dictionaries generated by get_dict_from_json, return true if they can be compared. grouped can be compared to ref_grouped if ref_grouped contains all the queries that are in grouped. 
""" if ref_grouped is None: return False for workload_scale_key, workload in grouped.items(): for query_name, file_formats in workload.items(): for file_format, results in file_formats.items(): if file_format not in ref_grouped[workload_scale_key][query_name]: return False return True def check_perf_change_significance(stat, ref_stat): absolute_difference = abs(ref_stat[AVG] - stat[AVG]) try: percent_difference = abs(ref_stat[AVG] - stat[AVG]) * 100 / ref_stat[AVG] except ZeroDivisionError: percent_difference = 0.0 stddevs_are_zero = (ref_stat[STDDEV] == 0) and (stat[STDDEV] == 0) if absolute_difference < options.allowed_latency_diff_secs: return False, False if percent_difference < options.min_percent_change_threshold: return False, False if percent_difference > options.max_percent_change_threshold: return True, ref_stat[AVG] < stat[AVG] if options.tval_threshold and not stddevs_are_zero: tval = calculate_tval(stat[AVG], stat[STDDEV], stat[ITERATIONS], ref_stat[AVG], ref_stat[STDDEV], ref_stat[ITERATIONS]) return abs(tval) > options.tval_threshold, tval > options.tval_threshold return False, False def build_summary_header(): summary = "Execution Summary ({0})\n".format(date.today()) if options.report_description: summary += 'Run Description: {0}\n'.format(options.report_description) if options.cluster_name: summary += '\nCluster Name: {0}\n'.format(options.cluster_name) if options.build_version: summary += 'Impala Build Version: {0}\n'.format(options.build_version) if options.lab_run_info: summary += 'Lab Run Info: {0}\n'.format(options.lab_run_info) return summary def get_summary_str(grouped): summary_str = str() for workload_scale, workload in grouped.items(): summary_str += "{0} / {1} \n".format(workload_scale[0][1], workload_scale[1][1]) table = prettytable.PrettyTable(["File Format", "Compression", "Avg (s)"]) table.align = 'l' table.float_format = '.2' for file_format, queries in workload.items(): # Calculate The average time for each file format and 
compression ff = file_format[0][1] compression = file_format[1][1] + " / " + file_format[2][1] avg = calculate_avg([query_results[TIME_TAKEN] for results in queries.values() for query_results in results[RESULT_LIST]]) table.add_row([ff, compression, avg]) summary_str += str(table) + '\n' return summary_str def get_stats_str(grouped): stats_str = str() for workload_scale, workload in grouped.items(): stats_str += "Workload / Scale Factor: {0} / {1}\n".format( workload_scale[0][1], workload_scale[1][1]) table = prettytable.PrettyTable(["Query", "File Format", "Compression", "Avg(s)", "StdDev(s)", "Rel StdDev", "Num Clients", "Iters"]) table.align = 'l' table.float_format = '.2' for file_format, queries in workload.items(): for query_name, results in queries.items(): relative_stddev = results[STDDEV] / results[AVG] if results[AVG] > 0 else 0.0 relative_stddev_str = '{0:.2%}'.format(relative_stddev) if relative_stddev > 0.1: relative_stddev_str = '* ' + relative_stddev_str + ' *' else: relative_stddev_str = ' ' + relative_stddev_str table.add_row([query_name[0][1], file_format[0][1], file_format[1][1] + ' / ' + file_format[2][1], results[AVG], results[STDDEV], relative_stddev_str, results[NUM_CLIENTS], results[ITERATIONS]]) stats_str += str(table) + '\n' return stats_str def all_query_results(grouped): for workload_scale_key, workload in grouped.items(): for query_name, file_formats in workload.items(): for file_format, results in file_formats.items(): yield(results) def write_results_to_datastore(grouped): """ Saves results to a database """ from perf_result_datastore import PerfResultDataStore print 'Saving perf results to database' current_date = datetime.now() data_store = PerfResultDataStore(host=options.db_host, username=options.db_username, password=options.db_password, database_name=options.db_name) run_info_id = data_store.insert_run_info(options.lab_run_info) for results in all_query_results(grouped): first_query_result = results[RESULT_LIST][0] executor_name 
= first_query_result[EXECUTOR_NAME] workload = first_query_result[QUERY][WORKLOAD_NAME] scale_factor = first_query_result[QUERY][SCALE_FACTOR] query_name = first_query_result[QUERY][NAME] query = first_query_result[QUERY][QUERY_STR] file_
format = first_query_result[QUERY][TEST_VECTOR][FILE_FORMAT] compression_codec = first_query_result[QUERY][TEST_VECTOR][COMPRESSION_CODEC] compression_type = first_query_result[QUERY][TEST_VECTOR][COMPRESSION_TYPE] avg_time = results[AVG] stddev = results[STDDEV] num_clients = results[NUM_CLIENTS] num_iterations = results[ITERA
TIONS] runtime_profile = first_query_result[RUNTIME_PROFILE] file_type_id = data_store.get_file_format_id( file_format, compression_codec, compression_type) if file_type_id is None: print 'Skipping unkown file type: %s / %s' % (file_format, compression) continue workload_id = data_store.get_workload_id(workload, scale_factor) if workload_id is None: workload_id = data_store.insert_workload_info(workload, scale_factor) query_id = data_store.get_query_id(query_name, query) if query_id is None: query_id = data_store.insert_query_info(query_name, query) data_store.insert_execution_result( query_id = query_id, workload_id = workload_id, file_type_id = file_type_id, num_clients = num_clients, cluster_name = options.cluster_name, executor_name = executor_name, avg_time = avg_time, stddev = stddev, run_date = current_date, version = options.build_version, notes = options.report_description, run_info_id = run_info_id, num_iterations = num_iterations, runtime_profile = runtime_profile, is_official = options.is_official) def build_perf_summary_table(table_data): table = prettytable.PrettyTable( ['Query', 'Format', 'Original Time (s)', 'Current Time (s)']) table.align = 'l' table.float_format = '.2' for row in table_data: table.add_row(row) return str(table) if __name__ == "__main__": """Workflow: 1. Build a nested dictionary for the current result JS
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for eager-mode `network.Network` and `network.Sequential`."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.eager.python import network
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.layers import core

# pylint: disable=not-callable


class MyNetwork(network.Network):
  # Minimal fixture: a network wrapping one bias-free Dense(1) layer, so the
  # whole network computes y = w * x for a single scalar weight w.

  def __init__(self):
    super(MyNetwork, self).__init__(name="abcd")
    self.l1 = self.add_layer(core.Dense(1, use_bias=False))

  def call(self, x):
    # Forward pass delegates directly to the single tracked layer.
    return self.l1(x)


class NetworkTest(test.TestCase):
  """Tests for the base `network.Network` container."""

  def testTrainableAttribute(self):
    # `trainable` is read-only: assigning must raise AttributeError and the
    # original True value must be preserved afterwards.
    net = network.Network()
    self.assertTrue(net.trainable)
    with self.assertRaises(AttributeError):
      net.trainable = False
    self.assertTrue(net.trainable)

  def testNetworkCall(self):
    net = MyNetwork()
    net(constant_op.constant([[2.0]]))  # Force variables to be created.
    self.assertEqual(1, len(net.trainable_variables))
    net.trainable_variables[0].assign([[17.0]])
    # TODO(josh11b): Support passing Python values to networks.
    result = net(constant_op.constant([[2.0]]))
    # With the weight assigned above: 2.0 * 17.0 == 34.0.
    self.assertEqual(34.0, result.numpy())

  def testNetworkAsAGraph(self):
    self.skipTest("TODO(ashankar,josh11b): FIX THIS")
    # Verify that we're using ResourceVariables

  def testNetworkVariablesDoNotInterfere(self):
    # NOTE(review): skipped; the body below is meant to check that two
    # instances of the same Network class keep independent variables.
    self.skipTest("TODO: FIX THIS")
    net1 = MyNetwork()
    net2 = MyNetwork()
    one = constant_op.constant([[1.]])
    print(type(net1(one)))
    net2(one)
    net1.trainable_weights[0].assign(constant_op.constant([[1.]]))
    net2.trainable_weights[0].assign(constant_op.constant([[2.]]))
    print("NET1")
    print(net1.name)
    print(net1.variables)
    print(net1(one))
    print("NET2")
    print(net2.name)
    print(net2.variables)
    print(net2(one))


class SequentialTest(test.TestCase):
  """Tests for `network.Sequential`."""

  def testTwoLayers(self):
    # Create a sequential network with one layer.
    net = network.Sequential([core.Dense(1, use_bias=False)])

    # Set that layer's weights so it multiplies by 3
    l1 = net.get_layer(index=0)
    net(constant_op.constant([[2.0]]))  # Create l1's variables
    self.assertEqual(1, len(l1.trainable_variables))
    l1.trainable_variables[0].assign([[3.0]])
    self.assertEqual(21.0, net(constant_op.constant([[7.0]])).numpy())

    # Add a second layer to the network.
    l2 = core.Dense(1, use_bias=False)
    net.add_layer(l2)

    # Set the second layer's weights so it multiplies by 11
    net(constant_op.constant([[2.0]]))  # Create l2's variables
    self.assertEqual(1, len(l2.trainable_variables))
    l2.trainable_variables[0].assign([[11.0]])
    # Composition of the two layers: 7.0 * 3.0 * 11.0 == 231.0.
    self.assertEqual(231.0, net(constant_op.constant([[7.0]])).numpy())


if __name__ == "__main__":
  test.main()
from datetime import timedelta
from requests.auth import HTTPBasicAuth
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.core.cache import cache
from ..settings import (
    WFRS_GATEWAY_COMPANY_ID,
    WFRS_GATEWAY_ENTITY_ID,
    WFRS_GATEWAY_API_HOST,
    WFRS_GATEWAY_CONSUMER_KEY,
    WFRS_GATEWAY_CONSUMER_SECRET,
    WFRS_GATEWAY_CLIENT_CERT_PATH,
    WFRS_GATEWAY_PRIV_KEY_PATH,
)
from ..security import encrypt_pickle, decrypt_pickle
import requests
import logging
import uuid

logger = logging.getLogger(__name__)


class BearerTokenAuth(requests.auth.AuthBase):
    """Requests auth hook that attaches an OAuth2 Bearer token header."""

    def __init__(self, api_key):
        self.api_key = api_key

    def __call__(self, request):
        request.headers["Authorization"] = "Bearer %s" % self.api_key
        return request


class WFRSAPIKey:
    """An OAuth2 access token together with its expiration timestamp."""

    def __init__(self, api_key, expires_on):
        self.api_key = api_key
        self.expires_on = expires_on

    @property
    def is_expired(self):
        """Return True if the key is expired (or will be within 10 minutes)."""
        # Force key rotation 10 minutes before it actually expires
        expires_on = self.expires_on - timedelta(minutes=10)
        now = timezone.now()
        return now >= expires_on

    @property
    def ttl(self):
        """Remaining lifetime in whole seconds; used as the cache timeout."""
        return int((self.expires_on - timezone.now()).total_seconds())

    def __str__(self):
        return "<WFRSAPIKey expires_on=[%s]>" % self.expires_on


class WFRSGatewayAPIClient:
    """HTTP client for the Wells Fargo Retail Services gateway API.

    Handles OAuth2 client-credentials token acquisition (with encrypted
    caching in Django's cache), mutual-TLS client certificates, and the
    gateway's required request headers. 400 responses are converted into
    Django ValidationErrors.
    """
    company_id = WFRS_GATEWAY_COMPANY_ID
    entity_id = WFRS_GATEWAY_ENTITY_ID
    api_host = WFRS_GATEWAY_API_HOST
    consumer_key = WFRS_GATEWAY_CONSUMER_KEY
    consumer_secret = WFRS_GATEWAY_CONSUMER_SECRET
    client_cert_path = WFRS_GATEWAY_CLIENT_CERT_PATH
    priv_key_path = WFRS_GATEWAY_PRIV_KEY_PATH
    # OAuth2 scopes requested when generating a token.
    scopes = [
        "PLCCA-Prequalifications",
        "PLCCA-Applications",
        "PLCCA-Payment-Calculations",
        "PLCCA-Transactions-Authorization",
        "PLCCA-Transactions-Charge",
        "PLCCA-Transactions-Authorization-Charge",
        "PLCCA-Transactions-Return",
        "PLCCA-Transactions-Cancel-Authorization",
        "PLCCA-Transactions-Void-Return",
        "PLCCA-Transactions-Void-Sale",
        "PLCCA-Transactions-Timeout-Authorization-Charge",
        "PLCCA-Transactions-Timeout-Return",
        "PLCCA-Account-Details",
    ]
    # Bump to invalidate previously cached keys.
    cache_version = 1

    @property
    def cache_key(self):
        # Scoped per host + consumer key so different environments don't
        # share cached tokens.
        return "wfrs-gateway-api-key-{api_host}-{consumer_key}".format(
            api_host=self.api_host, consumer_key=self.consumer_key
        )

    def api_get(self, path, **kwargs):
        """Perform an authenticated GET request against the gateway."""
        return self.make_api_request("get", path, **kwargs)

    def api_post(self, path, **kwargs):
        """Perform an authenticated POST request against the gateway."""
        return self.make_api_request("post", path, **kwargs)

    def make_api_request(self, method, path, client_request_id=None, **kwargs):
        """Send an authenticated request and return the requests Response.

        Raises django ValidationError for HTTP 400 responses, with one
        sub-error per entry in the response's "errors" list.
        """
        url = "https://{host}{path}".format(host=self.api_host, path=path)
        # Setup authentication
        auth = BearerTokenAuth(self.get_api_key().api_key)
        cert = None
        if self.client_cert_path and self.priv_key_path:
            cert = (self.client_cert_path, self.priv_key_path)
        # Build headers. When the caller supplies a client_request_id it is
        # reused as the gateway request-id; otherwise a random UUID is used.
        request_id = (
            str(uuid.uuid4()) if client_request_id is None else str(client_request_id)
        )
        headers = {
            "request-id": request_id,
            "gateway-company-id": self.company_id,
            "gateway-entity-id": self.entity_id,
        }
        if client_request_id is not None:
            headers["client-request-id"] = str(client_request_id)
        # Send request
        logger.info(
            "Sending WFRS Gateway API request. URL=[%s], RequestID=[%s]",
            url,
            request_id,
        )
        request_fn = getattr(requests, method)
        resp = request_fn(url, auth=auth, cert=cert, headers=headers, **kwargs)
        logger.info(
            "WFRS Gateway API request returned. URL=[%s], RequestID=[%s], Status=[%s]",
            url,
            request_id,
            resp.status_code,
        )
        # Check response for errors
        if resp.status_code == 400:
            resp_data = resp.json()
            errors = []
            for err in resp_data.get("errors", []):
                exc = ValidationError(err["description"], code=err["error_code"])
                errors.append(exc)
            raise ValidationError(errors)
        # Return response
        return resp

    def get_api_key(self):
        """Return a valid API key, generating and caching a new one if needed."""
        # Check for a cached key
        key_obj = self.get_cached_api_key()
        if key_obj is None:
            key_obj = self.generate_api_key()
            self.store_cached_api_key(key_obj)
        return key_obj

    def get_cached_api_key(self):
        """Return the cached WFRSAPIKey, or None if absent/corrupt/expired."""
        # Try to get an API key from cache
        encrypted_obj = cache.get(self.cache_key, version=self.cache_version)
        if encrypted_obj is None:
            return None
        # Try to decrypt the object we got from cache
        try:
            key_obj = decrypt_pickle(encrypted_obj)
        except Exception as e:
            # A corrupt/undecryptable entry is treated as a cache miss.
            logger.exception(e)
            return None
        # Check if the key is expired
        if key_obj.is_expired:
            return None
        # Return the key
        return key_obj

    def store_cached_api_key(self, key_obj):
        """Encrypt and cache a WFRSAPIKey for its remaining lifetime."""
        # Pickle and encrypt the key object
        encrypted_obj = encrypt_pickle(key_obj)
        # Store it in Django's cache for later
        cache.set(
            self.cache_key, encrypted_obj, key_obj.ttl, version=self.cache_version
        )

    def generate_api_key(self):
        """Obtain a fresh OAuth2 token via the client-credentials grant."""
        url = "https://{host}/token".format(host=self.api_host)
        auth = HTTPBasicAuth(self.consumer_key, self.consumer_secret)
        cert = (self.client_cert_path, self.priv_key_path)
        req_data = {
            "grant_type": "client_credentials",
            "scope": " ".join(self.scopes),
        }
        resp = requests.post(url, auth=auth, cert=cert, data=req_data)
        resp.raise_for_status()
        resp_data = resp.json()
        expires_on = timezone.now() + timedelta(seconds=resp_data["expires_in"])
        logger.info("Generated new WFRS API Key. ExpiresIn=[%s]", expires_on)
        key_obj = WFRSAPIKey(api_key=resp_data["access_token"], expires_on=expires_on)
        return key_obj
import pandas as pd
from pandas import DataFrame
from matplotlib import pyplot as plt
from matplotlib import style

style.use('ggplot')

# Load S&P 500 OHLC data, indexed by parsed Date column.
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)

#print df.head()

# Rolling 25-period standard deviation of the closing price.
# FIX: pd.rolling_std() was deprecated in pandas 0.18 and removed in 0.23;
# Series.rolling(...).std() is the exact modern equivalent (same ddof=1).
df['STD'] = df['Close'].rolling(window=25, min_periods=1).std()

ax1 = plt.subplot(2, 1, 1)
df['Close'].plot()
plt.ylabel('Close')

# do not do sharex first
ax2 = plt.subplot(2, 1, 2, sharex = ax1)
df['STD'].plot()
plt.ylabel('Standard Deviation')

plt.show()
# -*- coding: utf-8 -*-

"""
***************************************************************************
    ProcessingPlugin.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""
from processing import interface

__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

import shutil
import inspect
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from processing.commander.CommanderWindow import CommanderWindow
from processing.core.Processing import Processing
from processing.tools import dataobjects
from processing.tools.system import *
from processing.gui.ProcessingToolbox import ProcessingToolbox
from processing.gui.HistoryDialog import HistoryDialog
from processing.gui.ConfigDialog import ConfigDialog
from processing.gui.ResultsDialog import ResultsDialog
from processing.modeler.ModelerDialog import ModelerDialog
import processing.resources_rc

# Make sure this plugin's folder is importable.
cmd_folder = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)

class ProcessingPlugin:
    """QGIS plugin entry point for the Processing framework.

    Wires the Processing toolbox, modeler, history, configuration, results
    viewer and commander into the QGIS main window menu and dock areas.
    """

    def __init__(self, iface):
        # Store the QGIS interface globally for the rest of the framework.
        interface.iface = iface
        Processing.initialize()

    def initGui(self):
        """Create the dock widget, menu and all menu actions."""
        self.commander = None
        self.toolbox = ProcessingToolbox()
        interface.iface.addDockWidget(Qt.RightDockWidgetArea, self.toolbox)
        self.toolbox.hide()
        # Keep the toolbox in sync with changes to the algorithm list.
        Processing.addAlgListListener(self.toolbox)
        self.menu = QMenu(interface.iface.mainWindow())
        self.menu.setTitle(QCoreApplication.translate("Processing", "Processing"))

        self.toolboxAction = self.toolbox.toggleViewAction()
        self.toolboxAction.setIcon(QIcon(":/processing/images/alg.png"))
        self.toolboxAction.setText(QCoreApplication.translate("Processing", "Toolbox"))
        self.menu.addAction(self.toolboxAction)

        self.modelerAction = QAction(QIcon(":/processing/images/model.png"),
            QCoreApplication.translate("Processing", "Graphical modeler"),
            interface.iface.mainWindow())
        self.modelerAction.triggered.connect(self.openModeler)
        self.menu.addAction(self.modelerAction)

        self.historyAction = QAction(QIcon(":/processing/images/history.gif"),
            QCoreApplication.translate("Processing", "History and log"),
            interface.iface.mainWindow())
        self.historyAction.triggered.connect(self.openHistory)
        self.menu.addAction(self.historyAction)

        self.configAction = QAction(QIcon(":/processing/images/config.png"),
            QCoreApplication.translate("Processing", "Options and configuration"),
            interface.iface.mainWindow())
        self.configAction.triggered.connect(self.openConfig)
        self.menu.addAction(self.configAction)

        self.resultsAction = QAction(QIcon(":/processing/images/results.png"),
            QCoreApplication.translate("Processing", "&Results viewer"),
            interface.iface.mainWindow())
        self.resultsAction.triggered.connect(self.openResults)
        self.menu.addAction(self.resultsAction)

        menuBar = interface.iface.mainWindow().menuBar()
        menuBar.insertMenu(interface.iface.firstRightStandardMenu().menuAction(), self.menu)

        self.commanderAction = QAction(QIcon(":/processing/images/commander.png"),
            QCoreApplication.translate("Processing", "&Commander"),
            interface.iface.mainWindow())
        self.commanderAction.triggered.connect(self.openCommander)
        self.menu.addAction(self.commanderAction)
        interface.iface.registerMainWindowAction(self.commanderAction, "Ctrl+Alt+M")

    def unload(self):
        """Tear down GUI elements and clean up temporary output files."""
        self.toolbox.setVisible(False)
        self.menu.deleteLater()
        #delete temporary output files
        folder = tempFolder()
        if QDir(folder).exists():
            shutil.rmtree(folder, True)
        interface.iface.unregisterMainWindowAction(self.commanderAction)

    def openCommander(self):
        """Show the commander window, creating it lazily on first use."""
        if self.commander is None:
            self.commander = CommanderWindow(interface.iface.mainWindow(),
                interface.iface.mapCanvas())
            Processing.addAlgListListener(self.commander)
        self.commander.prepareGui()
        self.commander.show()
        #dlg.exec_()

    def openToolbox(self):
        """Toggle visibility of the Processing toolbox dock."""
        if self.toolbox.isVisible():
            self.toolbox.hide()
        else:
            self.toolbox.show()

    def openModeler(self):
        """Open the graphical modeler dialog and refresh the toolbox if needed."""
        dlg = ModelerDialog()
        dlg.exec_()
        if dlg.update:
            self.toolbox.updateTree()

    def openResults(self):
        """Open the results viewer dialog."""
        dlg = ResultsDialog()
        dlg.exec_()

    def openHistory(self):
        """Open the history and log dialog."""
        dlg = HistoryDialog()
        dlg.exec_()

    def openConfig(self):
        """Open the options and configuration dialog."""
        dlg = ConfigDialog(self.toolbox)
        dlg.exec_()
from django.contrib import admin

import models

# Register your models here.
# Expose this app's models in the Django admin site.
for _model in (models.UserProfile, models.Event):
    admin.site.register(_model)
#!/usr/bin/python

import sys
import pyxbackup as pxb
import pytest


def test__parse_port_param():
    """_parse_port_param accepts 'min,max' ranges and single ports."""
    # A well-formed pair parses and records both bounds as module state.
    assert pxb._parse_port_param('27017,27019') == True
    assert pxb.xb_opt_remote_nc_port_min == 27017
    assert pxb.xb_opt_remote_nc_port_max == 27019
    # Whitespace after the comma is tolerated.
    assert pxb._parse_port_param('27017, 27019') == True
    # Non-numeric, dangling or trailing-space inputs are rejected.
    assert pxb._parse_port_param('abcde, 27019') == False
    assert pxb._parse_port_param('abcde, ') == False
    assert pxb._parse_port_param('9999, ') == False
    assert pxb._parse_port_param('9999 ') == False
    # A single port sets min == max.
    assert pxb._parse_port_param('9999') == True
    assert pxb.xb_opt_remote_nc_port_min == 9999
    assert pxb.xb_opt_remote_nc_port_max == 9999


def test__xb_version():
    """_xb_version parses a dotted version string into parts (or a float)."""
    assert pxb._xb_version(verstr = '2.2.13') == [2, 2, 13]
    # tof=True returns only major.minor as a float.
    assert pxb._xb_version(verstr = '2.2.13', tof = True) == 2.2
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid

import pkg_resources

from pifpaf import drivers


class CephDriver(drivers.Driver):
    """Pifpaf driver that spawns a throwaway single-mon/single-osd Ceph
    cluster (no auth, no replication) inside a temporary directory."""

    DEFAULT_PORT = 6790

    def __init__(self, port=DEFAULT_PORT, **kwargs):
        """Create a new Ceph cluster."""
        super(CephDriver, self).__init__(**kwargs)
        self.port = port

    @classmethod
    def get_options(cls):
        """CLI options exposed by this driver."""
        return [
            {"param_decls": ["--port"],
             "type": int,
             "default": cls.DEFAULT_PORT,
             "help": "port to use for Ceph Monitor"},
        ]

    def _setUp(self):
        """Write a ceph.conf, then mkfs and start the monitor and one OSD,
        blocking until `ceph health` reports HEALTH_OK."""
        super(CephDriver, self)._setUp()
        self._ensure_xattr_support()

        fsid = str(uuid.uuid4())
        conffile = os.path.join(self.tempdir, "ceph.conf")
        mondir = os.path.join(self.tempdir, "mon", "ceph-a")
        osddir = os.path.join(self.tempdir, "osd", "ceph-0")
        os.makedirs(mondir)
        os.makedirs(osddir)

        # Parse the installed Ceph version; config and commands differ
        # across releases (pre/post Luminous 12.0.0, pre/post Hammer 0.94).
        _, version = self._exec(["ceph", "--version"], stdout=True)
        version = version.decode("ascii").split()[2]
        version = pkg_resources.parse_version(version)

        if version < pkg_resources.parse_version("12.0.0"):
            extra = """
mon_osd_nearfull_ratio = 1
mon_osd_full_ratio = 1
osd_failsafe_nearfull_ratio = 1
osd_failsafe_full_ratio = 1
"""
        else:
            extra = """
mon_allow_pool_delete = true
"""

        # FIXME(sileht): check availible space on /dev/shm
        # if os.path.exists("/dev/shm") and os.access('/dev/shm', os.W_OK):
        #     journal_path = "/dev/shm/$cluster-$id-journal"
        # else:
        journal_path = "%s/osd/$cluster-$id/journal" % self.tempdir

        with open(conffile, "w") as f:
            f.write("""[global]
fsid = %(fsid)s
# no auth for now
auth cluster required = none
auth service required = none
auth client required = none
## no replica
osd pool default size = 1
osd pool default min size = 1
osd crush chooseleaf type = 0
## some default path change
run dir = %(tempdir)s
pid file = %(tempdir)s/$type.$id.pid
admin socket = %(tempdir)s/$cluster-$name.asok
mon data = %(tempdir)s/mon/$cluster-$id
osd data = %(tempdir)s/osd/$cluster-$id
osd journal = %(journal_path)s
log file = %(tempdir)s/$cluster-$name.log
mon cluster log file = %(tempdir)s/$cluster.log
# Only omap to have same behavior for all filesystems
filestore xattr use omap = True
# workaround for ext4 and last Jewel version
osd max object name len = 256
osd max object namespace len = 64
osd op threads = 10
filestore max sync interval = 10001
filestore min sync interval = 10000
%(extra)s
journal_aio = false
journal_dio = false
journal zero on create = false
journal block align = false
# run as file owner
setuser match path = %(tempdir)s/$type/$cluster-$id

[mon.a]
host = localhost
mon addr = 127.0.0.1:%(port)d
""" % dict(fsid=fsid, tempdir=self.tempdir, port=self.port,
           journal_path=journal_path, extra=extra))  # noqa

        ceph_opts = ["ceph", "-c", conffile]
        mon_opts = ["ceph-mon", "-c", conffile, "--id", "a", "-d"]
        osd_opts = ["ceph-osd", "-c", conffile, "--id", "0", "-d",
                    "-m", "127.0.0.1:%d" % self.port]

        # Create and start monitor
        self._exec(mon_opts + ["--mkfs"])
        self._touch(os.path.join(mondir, "done"))
        mon, _ = self._exec(
            mon_opts, wait_for_line=r"mon.a@0\(leader\).mds e1 print_map")

        # Create and start OSD
        self._exec(ceph_opts + ["osd", "create"])
        self._exec(ceph_opts + ["osd", "crush", "add", "osd.0", "1",
                                "root=default"])
        self._exec(osd_opts + ["--mkfs", "--mkjournal"])
        # The "ready" log line changed between Ceph releases.
        if version < pkg_resources.parse_version("0.94.0"):
            wait_for_line = "journal close"
        else:
            wait_for_line = "done with init"
        osd, _ = self._exec(osd_opts, wait_for_line=wait_for_line)

        if version >= pkg_resources.parse_version("12.0.0"):
            self._exec(ceph_opts + ["osd", "set-full-ratio", "0.95"])
            self._exec(ceph_opts + ["osd", "set-backfillfull-ratio", "0.95"])
            self._exec(ceph_opts + ["osd", "set-nearfull-ratio", "0.95"])

        # Wait it's ready
        out = b""
        while b"HEALTH_OK" not in out:
            ceph, out = self._exec(ceph_opts + ["health"], stdout=True)
            if b"HEALTH_ERR" in out:
                raise RuntimeError("Fail to deploy ceph")

        # NOTE(review): CEPH_CONF is exported twice with different argument
        # lists — presumably one raw/global and one prefixed form of
        # putenv(); confirm against pifpaf's Driver.putenv signature.
        self.putenv("CEPH_CONF", conffile, True)
        self.putenv("CEPH_CONF", conffile)
        self.putenv("URL", "ceph://localhost:%d" % self.port)
#!/usr/bin/env python3

# ReText
# Copyright 2011-2012 Dmitry Shachnev

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.

import sys
import signal

from ReText import *
from ReText.window import ReTextWindow

def main():
	"""Launch the ReText application: set up translators and the optional
	application style sheet, open any files given on the command line, and
	run the Qt event loop."""
	app = QApplication(sys.argv)
	app.setOrganizationName("ReText project")
	app.setApplicationName("ReText")
	# Load the first matching ReText translation found in the data dirs,
	# plus Qt's own translation for the current locale.
	RtTranslator = QTranslator()
	for path in datadirs:
		if RtTranslator.load('retext_'+QLocale.system().name(), path+'/locale'):
			break
	QtTranslator = QTranslator()
	QtTranslator.load("qt_"+QLocale.system().name(),
		QLibraryInfo.location(QLibraryInfo.TranslationsPath))
	app.installTranslator(RtTranslator)
	app.installTranslator(QtTranslator)
	# Apply a user-configured application style sheet, if any.
	if settings.contains('appStyleSheet'):
		stylename = readFromSettings('appStyleSheet', str)
		sheetfile = QFile(stylename)
		sheetfile.open(QIODevice.ReadOnly)
		app.setStyleSheet(QTextStream(sheetfile).readAll())
		sheetfile.close()
	window = ReTextWindow()
	window.show()
	fileNames = [QFileInfo(arg).canonicalFilePath() for arg in sys.argv[1:]]
	for fileName in fileNames:
		try:
			fileName = QString.fromUtf8(fileName)
		except NameError:
			# FIX: was a bare `except:` which also swallowed
			# KeyboardInterrupt/SystemExit. QString does not exist on
			# Python 3 (or with sip API v2), which raises NameError —
			# the only case this guard is meant for.
			pass
		if QFile.exists(fileName):
			window.openFileWrapper(fileName)
	# Allow Ctrl+C in the terminal to close the window cleanly.
	signal.signal(signal.SIGINT, lambda sig, frame: window.close())
	sys.exit(app.exec_())

if __name__ == '__main__':
	main()
This function ...
    :param ski:
    :param component:
    :return:
    """

    # Get title
    title = component.parameters.title

    # Set the deprojection geometry
    ski.set_stellar_component_geometry(title, component.deprojection)

# -----------------------------------------------------------------

def set_stellar_component_geometry_sed_and_normalization(ski, component):

    """
    This function creates a new stellar component in the ski file from the
    geometry, SED and normalization class names and properties stored on
    the component.
    :param ski: the ski file to modify
    :param component: the stellar component definition
    :return:
    """

    # Get title
    title = component.parameters.title

    # Get class names
    geometry_type = component.parameters.geometry
    sed_type = component.parameters.sed
    normalization_type = component.parameters.normalization

    # Get properties for each of the three classes
    geometry_properties = component.properties["geometry"]
    sed_properties = component.properties["sed"]
    normalization_properties = component.properties["normalization"]

    # Create stellar component
    ski.create_new_stellar_component(title, geometry_type, geometry_properties, sed_type, sed_properties, normalization_type, normalization_properties)

# -----------------------------------------------------------------

def set_stellar_component_mappings(ski, component):

    """
    This function configures a stellar component with a MAPPINGS SED and a
    spectral luminosity normalization at the component filter's center
    wavelength.
    :param ski: the ski file to modify
    :param component: the stellar component definition
    :return:
    """

    # Get title
    title = component.parameters.title

    # Get SED properties
    metallicity = component.parameters.metallicity
    compactness = component.parameters.compactness
    pressure = component.parameters.pressure
    covering_factor = component.parameters.covering_factor

    # Get normalization
    fltr = parse_filter(component.parameters.filter)
    luminosity = component.parameters.luminosity

    # Determine the normalization wavelength
    wavelength = fltr.center

    # Set SED
    ski.set_stellar_component_mappingssed(title, metallicity, compactness, pressure, covering_factor) # SED

    # Set center wavelength of the filter as normalization wavelength (keeps label)
    ski.set_stellar_component_normalization_wavelength(title, wavelength)

    # Set spectral luminosity at that wavelength (keeps label)
    ski.set_stellar_component_luminosity(title, luminosity, filter_or_wavelength=wavelength)

    # Scale height doesn't need to be set as parameter, this is already in the deprojection model

# -----------------------------------------------------------------

def set_stellar_component(ski, component):

    """
    This function configures a stellar component with a template SED
    (template, age, metallicity) and a spectral luminosity normalization
    at the component filter's center wavelength.
    :return:
    :param ski: the ski file to modify
    :param component: the stellar component definition
    """

    # Get title
    title = component.parameters.title

    # Get SED properties
    template = component.parameters.template
    age = component.parameters.age
    metallicity = component.parameters.metallicity

    # Get normalization
    fltr = parse_filter(component.parameters.filter)
    luminosity = component.parameters.luminosity

    # Determine the normalization wavelength
    wavelength = fltr.center

    # Set SED
    ski.set_stellar_component_sed(title, template, age, metallicity)

    # Set center wavelength of the filter as normalization wavelength (keeps label)
    ski.set_stellar_component_normalization_wavelength(title, wavelength)

    # Set spectral luminosity at that wavelength (keeps label)
    ski.set_stellar_component_luminosity(title, luminosity, filter_or_wavelength=wavelength)

    # Scale height doesn't need to be set as parameter, this is already in the deprojection model

# -----------------------------------------------------------------

def add_dust_component(ski, name, component, title=None):

    """
    This function adds a dust component to the ski file, creating a new
    component or adjusting an existing one, and returns the name of the
    input map file if one is required.
    :param ski: the ski file to modify
    :param name: the name of the dust component
    :param component: the dust component definition
    :param title: optional title of the component in the ski file
    :return: the input map filename, or None
    """

    # Debugging
    log.debug("Adding dust component '" + name + "' to the ski file ...")

    # THIS HAS TO COME FIRST!!
    # If an input map is required
    if "map_path" in component: filename = set_dust_input_map(name, component)
    else: filename = None

    # NEW COMPONENT OR ADJUST EXISTING
    if title is not None and not ski.has_dust_component(title): add_new_dust_component(ski, name, component, title=title)
    else: adjust_dust_component(ski, name, component, title=title)

    # Return the map filename
    return filename

# -----------------------------------------------------------------

def add_new_dust_component(ski, name, component, title=None):

    """
    This function adds a new dust component to the ski file, either from a
    full set of properties, or from a geometry plus a named dust mix
    (THEMIS or Zubko) with its parameters.
    :param ski: the ski file to modify
    :param name: the name of the dust component
    :param component: the dust component definition
    :param title: optional title of the component in the ski file
    :return:
    """

    # Debugging
    log.debug("Adding new dust component '" + name + "' to the ski file ...")

    # From properties
    if component.properties is not None:

        # Check if title is given
        if title is None: log.warning("Title of the component '" + name + "' is not given")

        # Add component
        ski.add_dust_component(component.properties, title=title)
        return

    # Initialize properties
    geometry = None
    geometry_type = None
    geometry_properties = None
    mix_type = None
    mix_properties = None
    normalization_type = None
    normalization_properties = None
    mix = None
    mass = None

    # For THEMIS mix
    hydrocarbon_pops = None
    silicate_pops = None

    # For Zubko mix
    graphite_populations = None
    silicate_populations = None
    pah_populations = None

    # Set properties of the component
    if "model" in component: geometry = component.model
    elif "deprojection" in component: geometry = component.deprojection

    # Parameters are defined
    if component.parameters is not None:

        # Check title
        if title is not None and component.parameters.title != title: raise ValueError("The title of the component '" + title + "' doesn't match that defined in the component parameters")

        # Check if this is a new component (geometry not defined above): add geometry, mix and normalization
        if "geometry" in component.parameters:

            # Get class names
            geometry_type = component.parameters.geometry
            mix_type = component.parameters.sed
            normalization_type = component.parameters.normalization

            # Get properties for each of the three classes
            geometry_properties = component.properties["geometry"]
            mix_properties = component.properties["mix"]
            normalization_properties = component.properties["normalization"]

        # Existing component (geometry defined above), THEMIS dust mix
        elif "hydrocarbon_pops" in component.parameters:

            #set_dust_component_themis_mix(ski, component)

            # Set mix name
            mix = "themis"

            # Get parameters
            mass = component.parameters.mass
            hydrocarbon_pops = component.parameters.hydrocarbon_pops
            silicate_pops = component.parameters.silicate_pops

        # Existing component (geometry defined above), Zubko dust mix
        elif "graphite_populations" in component.parameters:

            # Set mix name
            mix = "zubko"

            # Get parameters
            mass = component.parameters.mass
            graphite_populations = component.parameters.graphite_populations
            silicate_populations = component.parameters.silicate_populations
            pah_populations = component.parameters.pah_populations

        # Existing component, not THEMIS dust mix
        else: raise NotImplementedError("Only THEMIS dust mixes are implemented at this moment")

    # Check whether the title is defined
    if title is None: log.warning("The title for the '" + name + "' dust component is not specified")

    # Set dust component properties
    properties = dict()
    properties["geometry"] = geometry
    properties["geometry_type"] = geometry_type
    properties["geometry_properties"] = geometry_properties
    properties["mix_type"] = mix_type
    properties["mix_properties"] = mix_properties
    properties["normalization_type"] = normalization_type
    properties["normalization_properties"] = normalization_properties
    properties["mix"] = mix
    properties["mass"] = mass
    proper
st)
            log.debug('Token request validation ok for %r.', request)
        except errors.OAuth2Error as e:
            # Client-side errors are returned as an OAuth2 error payload
            # rather than raised.
            log.debug('Client error during validation of %r. %r.', request, e)
            return headers, e.json, e.status_code

        token = token_handler.create_token(request, refresh_token=True)
        # An authorization code is single-use: invalidate it once exchanged.
        self.request_validator.invalidate_authorization_code(
            request.client_id, request.code, request)
        return headers, json.dumps(token), 200

    def validate_authorization_request(self, request):
        """Check the authorization request for normal and fatal errors.

        A normal error could be a missing response_type parameter or the client
        attempting to access scope it is not allowed to ask authorization for.
        Normal errors can safely be included in the redirection URI and
        sent back to the client.

        Fatal errors occur when the client_id or redirect_uri is invalid or
        missing. These must be caught by the provider and handled, how this
        is done is outside of the scope of OAuthLib but showing an error
        page describing the issue is a good idea.
        """
        # First check for fatal errors

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.

        # REQUIRED. The client identifier as described in Section 2.2.
        # http://tools.ietf.org/html/rfc6749#section-2.2
        if not request.client_id:
            raise errors.MissingClientIdError(request=request)

        if not self.request_validator.validate_client_id(request.client_id, request):
            raise errors.InvalidClientIdError(request=request)

        # OPTIONAL. As described in Section 3.1.2.
        # http://tools.ietf.org/html/rfc6749#section-3.1.2
        log.debug('Validating redirection uri %s for client %s.',
                  request.redirect_uri, request.client_id)
        if request.redirect_uri is not None:
            request.using_default_redirect_uri = False
            log.debug('Using provided redirect_uri %s', request.redirect_uri)
            if not is_absolute_uri(request.redirect_uri):
                raise errors.InvalidRedirectURIError(request=request)

            if not self.request_validator.validate_redirect_uri(
                    request.client_id, request.redirect_uri, request):
                raise errors.MismatchingRedirectURIError(request=request)
        else:
            # No redirect_uri supplied: fall back to the client's registered
            # default, which must exist.
            request.redirect_uri = self.request_validator.get_default_redirect_uri(
                request.client_id, request)
            request.using_default_redirect_uri = True
            log.debug('Using default redirect_uri %s.', request.redirect_uri)
            if not request.redirect_uri:
                raise errors.MissingRedirectURIError(request=request)

        # Then check for normal errors.

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the query component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B.
        # http://tools.ietf.org/html/rfc6749#appendix-B

        # Note that the correct parameters to be added are automatically
        # populated through the use of specific exceptions.
        if request.response_type is None:
            raise errors.InvalidRequestError(description='Missing response_type parameter.',
                                             request=request)

        for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
                                                 request=request)

        if not self.request_validator.validate_response_type(request.client_id,
                                                             request.response_type,
                                                             request.client, request):
            log.debug('Client %s is not authorized to use response_type %s.',
                      request.client_id, request.response_type)
            raise errors.UnauthorizedClientError(request=request)

        # REQUIRED. Value MUST be set to "code".
        if request.response_type != 'code':
            raise errors.UnsupportedResponseTypeError(request=request)

        # OPTIONAL. The scope of the access request as described by Section 3.3
        # http://tools.ietf.org/html/rfc6749#section-3.3
        self.validate_scopes(request)

        return request.scopes, {
            'client_id': request.client_id,
            'redirect_uri': request.redirect_uri,
            'response_type': request.response_type,
            'state': request.state,
            'request': request,
        }

    def validate_token_request(self, request):
        """Validate an authorization-code token request per RFC 6749 4.1.3."""
        # REQUIRED. Value MUST be set to "authorization_code".
        if request.grant_type != 'authorization_code':
            raise errors.UnsupportedGrantTypeError(request=request)

        if request.code is None:
            raise errors.InvalidRequestError(
                description='Missing code parameter.', request=request)

        for param in ('client_id', 'grant_type', 'redirect_uri'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
                                                 request=request)

        if self.request_validator.client_authentication_required(request):
            # If the client type is confidential or the client was issued client
            # credentials (or assigned other authentication requirements), the
            # client MUST authenticate with the authorization server as described
            # in Section 3.2.1.
            # http://tools.ietf.org/html/rfc6749#section-3.2.1
            if not self.request_validator.authenticate_client(request):
                log.debug('Client authentication failed, %r.', request)
                raise errors.InvalidClientError(request=request)
        elif not self.request_validator.authenticate_client_id(request.client_id, request):
            # REQUIRED, if the client is not authenticating with the
            # authorization server as described in Section 3.2.1.
            # http://tools.ietf.org/html/rfc6749#section-3.2.1
            log.debug('Client authentication failed, %r.', request)
            raise errors.InvalidClientError(request=request)

        if not hasattr(request.client, 'client_id'):
            raise NotImplementedError('Authenticate client must set the '
                                      'request.client.client_id attribute '
                                      'in authenticate_client.')

        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        # REQUIRED. The authorization code received from the
        # authorization server.
        if not self.request_validator.validate_code(request.client_id,
                                                    request.code, request.client, request):
            log.debug('Client, %r (%r), is not allowed access to scopes %r.',
                      request.client_id, request.client, request.scopes)
            raise errors.InvalidGrantError(request=request)

        for attr in ('user', 'state', 'scopes'):
            if getattr(request, attr) is None:
                log.debug('request.%s was not set on code validation.', attr)

        # REQUIRED, if the "redirect_uri" parameter was included in the
        # authorization request as described in Section 4.1.1, and their
        # values MUST be identical.
        if not self.request_validator.confirm_redirect_uri(request.client_id, request.code,
                                                           request.redirect_uri, request.client):
            log.debug('Re
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Extract Soil Moisture Content (SMC) from AMSR2 L3 HDF5 products.

Created on Wed Jul 24 11:30:24 2013

@author: Sat Kumar Tomer
@email: satkumartomer@gmail.com
@website: www.ambhas.com
"""

import numpy as np
import h5py
import os
import datetime as dt


def extract_smc(h5_file, lat, lon):
    """Extract Soil Moisture Content from AMSR2 h5 products.

    Input:
        h5_file: a single file name or a list of file names
        lat: latitude, either a single value or [min, max] limits
             e.g. lat = 12 or lat = [10, 15]
        lon: longitude, either a single value or [min, max] limits,
             as for lat

    Returns:
        smc: scalar (one file, one point), 2-D array (one file, lat/lon
        limits), 1-D array (several files, one point) or 3-D array
        (several files, lat/lon limits). Negative (invalid) retrievals
        are replaced with NaN.
    """
    res = 0.1  # grid resolution of the product, in degrees

    # ----- convert lat, lon into array indices -----
    # Rows run from +90 deg (row 0) southwards, so smaller latitudes map to
    # larger row indices; columns run eastwards over 0..360 deg, so western
    # (negative) longitudes are shifted by +360.
    min_max = isinstance(lat, (list, tuple))
    if min_max:
        lat_min, lat_max = lat[0], lat[1]
        i_lat_min = int(np.floor((90 - lat_min) / res))
        i_lat_max = int(np.floor((90 - lat_max) / res))
        lon_min, lon_max = lon[0], lon[1]
        if lon_min < 0:
            lon_min += 360
        if lon_max < 0:
            lon_max += 360
        j_lon_min = int(np.floor(lon_min / res))
        j_lon_max = int(np.floor(lon_max / res))
    else:
        # only a single value of lat, lon is given
        i_lat = int(np.floor((90 - lat) / res))
        lon1 = lon + 360 if lon < 0 else lon
        j_lon = int(np.floor(lon1 / res))

    # ----- read the data -----
    if isinstance(h5_file, str):
        # 'with' closes the file; the previous implementation leaked the
        # handle in this branch.
        with h5py.File(h5_file, "r") as f:
            if min_max:
                smc = f["Geophysical Data"][i_lat_max:i_lat_min + 1,
                                            j_lon_min:j_lon_max + 1, 0]
            else:
                smc = f["Geophysical Data"][i_lat, j_lon, 0]
    else:
        # a sequence of file names: stack one slice per file
        n = len(h5_file)
        if min_max:
            nlat = i_lat_min + 1 - i_lat_max
            nlon = j_lon_max + 1 - j_lon_min
            smc = np.empty((n, nlat, nlon))
            for i, h5_f in enumerate(h5_file):
                with h5py.File(h5_f, "r") as f:
                    smc[i, :, :] = f["Geophysical Data"][i_lat_max:i_lat_min + 1,
                                                         j_lon_min:j_lon_max + 1, 0]
        else:
            smc = np.empty(n)
            for i, h5_f in enumerate(h5_file):
                with h5py.File(h5_f, "r") as f:
                    smc[i] = f["Geophysical Data"][i_lat, j_lon, 0]

    # ----- flag invalid (negative) retrievals as NaN -----
    if np.ndim(smc) == 0:
        # scalar result
        if smc < 0:
            smc = np.nan
    else:
        # Cast to float first: the previous bare 'except' silently skipped
        # the masking whenever the dataset was integer-typed.
        smc = np.asarray(smc, dtype=float)
        smc[smc < 0] = np.nan
    return smc


def extract_dates(h5_file):
    """Parse the acquisition date (YYYYMMDD at basename[7:15]) of each file."""
    h5_dates = []
    for h5_f in h5_file:
        stamp = os.path.basename(h5_f)[7:15]
        h5_dates.append(dt.datetime.strptime(stamp, '%Y%m%d'))
    return h5_dates


def extract_orbit(h5_file):
    """Return True (Ascending), False (Descending) or None for each file."""
    asc = []
    for h5_f in h5_file:
        with h5py.File(h5_f, "r") as f:
            direction = f.attrs['OrbitDirection'][0]
        if direction == 'Ascending':
            asc.append(True)
        elif direction == 'Descending':
            asc.append(False)
        else:
            asc.append(None)
    return asc


if __name__ == "__main__":
    import glob
    h5_file = glob.glob('/home/tomer/amsr2/data/h5/GW1AM2_201?????_01D*.h5')
    h5_file.sort()
    h5_file = h5_file[:5]
    lat = [8, 38]
    lon = [68, 98]
    sm = extract_smc(h5_file, lat, lon)
    sm_dates = extract_dates(h5_file)
    asc = extract_orbit(h5_file)
# Copyright 2016, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from opsmgr.common import constants
from opsmgr.common import exceptions
from opsmgr.common.utils import entry_exit, execute_command
from opsmgr.inventory.interfaces import IManagerDevicePlugin


class PowerNodePlugin(IManagerDevicePlugin.IManagerDevicePlugin):
    """Device plugin that manages a PowerNode server through its BMC.

    Every operation shells out to the ipmitool binary over the lanplus
    interface and parses its line-oriented stdout/stderr.
    """

    # Absolute path of the ipmitool binary used for every BMC command.
    IPMI_TOOL = "/usr/local/bin/ipmitool"

    def __init__(self):
        # Connection parameters; populated by connect().
        self.host = None
        self.userid = None
        self.password = None
        # Cached device facts; filled by connect() and get_version().
        self.version = None
        self.machine_type_model = ""
        self.serial_number = ""

    @staticmethod
    def get_type():
        """Return the device-type label handled by this plugin."""
        return "PowerNode"

    @staticmethod
    def get_web_url(host):
        """Return the https management URL for the given host."""
        return "https://" + host

    @staticmethod
    def get_capabilities():
        """Only the monitoring capability is advertised by this plugin."""
        return [constants.MONITORING_CAPABLE]

    @entry_exit(exclude_index=[0, 3, 4], exclude_name=["self", "password", "ssh_key_string"])
    def connect(self, host, userid, password=None, ssh_key_string=None):
        """connect to the BMC and store the mtm and serial number
        """
        _method_ = "PowerNodePlugin.connect"
        self.host = host
        self.userid = userid
        self.password = password
        # Only password authentication is implemented for IPMI access.
        if ssh_key_string is not None:
            raise exceptions.AuthenticationException("SSH Key Authentication "
                                                     "is not supported for PowerNode devices")
        # NOTE(review): passing the password via -P exposes it in the host's
        # process list; ipmitool can read it from a file/env instead — confirm
        # whether that matters in this deployment.
        cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", host, "-U", userid,
                     "-P", password, "fru", "print"]
        (_rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
        logging.warning("%s::ipmi query standard error output %s", _method_, stderr)
        # Connection failures are detected by scanning stderr for a marker
        # string; the return code is intentionally ignored here.
        for line in stderr:
            if "Unable to establish IPMI" in line:
                raise exceptions.ConnectionException(
                    "Unable to connect to the device using IPMI")
        # Parse the machine type/model and serial number from the FRU listing
        # (lines of the form "Label : value").
        for line in stdout:
            if "Chassis Part Number" in line:
                self.machine_type_model = line.split(":")[1].strip()
            elif "Chassis Serial" in line:
                self.serial_number = line.split(":")[1].strip()

    @entry_exit(exclude_index=[0], exclude_name=["self"])
    def disconnect(self):
        # Nothing to tear down: each operation spawns its own ipmitool process.
        pass

    @entry_exit(exclude_index=[0], exclude_name=["self"])
    def get_machine_type_model(self):
        """Return the machine type/model captured during connect()."""
        return self.machine_type_model

    @entry_exit(exclude_index=[0], exclude_name=["self"])
    def get_serial_number(self):
        """Return the serial number captured during connect()."""
        return self.serial_number

    @entry_exit(exclude_index=[0], exclude_name=["self"])
    def get_version(self):
        """Return the BMC firmware revision reported by 'mc info'."""
        _method_ = "PowerNodePlugin.get_version"
        cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host,
                     "-U", self.userid, "-P", self.password, "mc", "info"]
        (rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
        if rc != 0:
            logging.warning("%s::ipmi query failed with output %s", _method_, stderr)
            raise exceptions.DeviceException("ipmi query failed with output %s" % stderr)
        for line in stdout:
            if "Firmware Revision" in line:
                self.version = line.split(":")[1].strip()
                break
        return self.version

    @entry_exit(exclude_index=[0], exclude_name=["self"])
    def get_architecture(self):
        # Architecture detection is not implemented for this device type.
        return None

    @entry_exit(exclude_index=[0, 1], exclude_name=["self", "new_password"])
    def change_device_password(self, new_password):
        """Update the password of the ipmi default user on the
           BMC of the openpower server.
        """
        _method_ = "PowerNodePlugin.change_device_password"
        # IPMI addresses users by numeric id, not by name; resolve it first.
        user_number = self._get_user_number()
        cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host,
                     "-U", self.userid, "-P", self.password, "user", "set",
                     "password", user_number, new_password]
        (rc, _stdout, stderr) = execute_command(" ".join(cmd_parms))
        if rc != 0:
            logging.error("%s::ipmi password change failed with output %s", _method_, stderr)
            raise exceptions.DeviceException("ipmi password change failed with output %s" % stderr)

    @entry_exit(exclude_index=[0], exclude_name=["self"])
    def _get_user_number(self):
        """Each user in IPMI has a number associated with that is used
           on the command line when modifying a user. This method will
           find the number associated with the userid
        """
        _method_ = "PowerNodePlugin._get_user_number"
        user_id = None
        cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host,
                     "-U", self.userid, "-P", self.password, "user", "list"]
        (rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
        if rc != 0:
            logging.warning("%s::ipmi query failed with output %s", _method_, stderr)
            raise exceptions.DeviceException("ipmi query failed with output %s" % stderr)
        # 'user list' output: first column is the numeric id, second the name.
        for line in stdout:
            ids = line.split()[0]
            user = line.split()[1]
            if user == self.userid:
                user_id = ids
                break
        if user_id:
            return user_id
        else:
            raise exceptions.DeviceException("Failed to determine the id for the user: %s"
                                             % self.userid)
import os

# Absolute directory containing this module; every other path derives from it.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

# The ``tests`` directory sits next to the project package, one level up.
TEST_ROOT = os.path.join(os.path.dirname(PROJECT_ROOT), "tests")

# Single-core worker scripts shipped in the ``code`` sub-directory.
_CODE_DIR = os.path.join(PROJECT_ROOT, "code")
SINGLE_ORCA_RUN_FILE = os.path.join(_CODE_DIR, "single_core_run_orca.py")
SINGLE_RUN_MOB_KMC_FILE = os.path.join(_CODE_DIR, "single_core_run_mob_KMC.py")
SINGLE_RUN_DEVICE_KMC_FILE = os.path.join(_CODE_DIR, "single_core_run_device_KMC.py")
obj.stance),
                     "orientation": packets.Container(yaw=b_obj.yaw, pitch=b_obj.pitch),
                     "grounded": packets.Container(grounded=b_obj.on_ground)})

    def send_action(self, b_obj):
        """ sneaking, not sneaking, leave bed, start sprinting, stop sprinting """
        # Only send a packet when the desired action differs from the one
        # last sent to the server.
        if b_obj.action != b_obj._action:
            b_obj.action = b_obj._action
            self.world.send_packet("entity action", {"eid": self.eid,
                                                     "action": b_obj._action})

    def turn_to_point(self, b_obj, point):
        # Directly above/below the target: yaw is undefined, keep orientation.
        if point.x == b_obj.x and point.z == b_obj.z:
            return
        yaw, pitch = utils.yaw_pitch_between(point, b_obj.position_eyelevel)
        if yaw is None or pitch is None:
            return
        b_obj.yaw = yaw
        b_obj.pitch = pitch

    def turn_to_direction(self, b_obj, x, y, z):
        # No horizontal component: keep the current orientation.
        if x == 0 and z == 0:
            return
        yaw, pitch = utils.vector_to_yaw_pitch(x, y, z)
        b_obj.yaw = yaw
        b_obj.pitch = pitch

    def turn_to_vector(self, b_obj, vect):
        # Same as turn_to_direction, but takes a vector object.
        if vect.x == 0 and vect.z == 0:
            return
        yaw, pitch = utils.vector_to_yaw_pitch(vect.x, vect.y, vect.z)
        b_obj.yaw = yaw
        b_obj.pitch = pitch

    def clip_abs_velocities(self, b_obj):
        # Zero-out negligible velocities so the bot eventually comes to rest.
        if abs(b_obj.velocities.x) < 0.005:  # minecraft value
            b_obj.velocities.x = 0
        if abs(b_obj.velocities.y) < 0.005:  # minecraft value
            b_obj.velocities.y = 0
        if abs(b_obj.velocities.z) < 0.005:  # minecraft value
            b_obj.velocities.z = 0

    def clip_ladder_velocities(self, b_obj):
        # Ladders cap the sliding speed on all three axes; sneaking sticks
        # the bot to the ladder (no downward motion).
        if self.is_on_ladder(b_obj):
            if b_obj.velocities.y < -0.15:
                b_obj.velocities.y = -0.15
            if abs(b_obj.velocities.x) > 0.15:
                b_obj.velocities.x = math.copysign(0.15, b_obj.velocities.x)
            if abs(b_obj.velocities.z) > 0.15:
                b_obj.velocities.z = math.copysign(0.15, b_obj.velocities.z)
            if self.is_sneaking(b_obj) and b_obj.velocities.y < 0:
                b_obj.velocities.y = 0

    def handle_water_movement(self, b_obj):
        # Returns True when the bot is in water; also adds the combined water
        # current push (scaled by a small constant) to the bot's velocity.
        is_in_water = False
        water_current = utils.Vector(0, 0, 0)
        bb = b_obj.aabb.expand(-0.001, -0.401, -0.001)
        top_y = utils.grid_shift(bb.max_y + 1)
        for blk in self.world.grid.blocks_in_aabb(bb):
            if isinstance(blk, blocks.BlockWater):
                # Only count water whose surface reaches the bot's box.
                if top_y >= (blk.y + 1 - blk.height_percent):
                    is_in_water = True
                    water_current = blk.add_velocity_to(water_current)
        if water_current.size > 0:
            water_current.normalize()
            wconst = 0.014
            water_current = water_current * wconst
            b_obj.velocities = b_obj.velocities + water_current
        return is_in_water

    def handle_lava_movement(self, b_obj):
        # True when any lava block overlaps the (slightly shrunk) bounding box.
        for blk in self.world.grid.blocks_in_aabb(
                b_obj.aabb.expand(-0.1, -0.4, -0.1)):
            if isinstance(blk, blocks.BlockLava):
                return True
        return False

    def move_collisions(self, b_obj, vx, vy, vz):
        # Cobwebs slow movement drastically and zero any stored velocity.
        if self.is_in_web(b_obj):
            vx *= 0.25
            vy *= 0.05000000074505806
            vz *= 0.25
            b_obj.velocities.x = 0
            b_obj.velocities.y = 0
            b_obj.velocities.z = 0
        aabbs = self.world.grid.collision_aabbs_in(b_obj.aabb.extend_to(vx, vy, vz))
        b_bb = b_obj.aabb
        # Resolve the move one axis at a time (y, then x, then z), clipping
        # each displacement against every nearby collision box.
        dy = vy
        if not fops.eq(vy, 0):
            for bb in aabbs:
                dy = b_bb.calculate_axis_offset(bb, dy, 1)
            b_bb = b_bb.offset(dy=dy)
        dx = vx
        if not fops.eq(vx, 0):
            for bb in aabbs:
                dx = b_bb.calculate_axis_offset(bb, dx, 0)
            b_bb = b_bb.offset(dx=dx)
        dz = vz
        if not fops.eq(vz, 0):
            for bb in aabbs:
                dz = b_bb.calculate_axis_offset(bb, dz, 2)
            b_bb = b_bb.offset(dz=dz)
        # Blocked horizontally while moving down: retry the move with a
        # step-up of MAX_STEP_HEIGHT and keep whichever attempt travels
        # further horizontally.
        if vy != dy and vy < 0 and (dx != vx or dz != vz):
            st = config.MAX_STEP_HEIGHT
            aabbs = self.world.grid.collision_aabbs_in(b_obj.aabb.extend_to(vx, st, vz))
            b_bbs = b_obj.aabb
            dys = st
            for bb in aabbs:
                dys = b_bbs.calculate_axis_offset(bb, dys, 1)
            b_bbs = b_bbs.offset(dy=dys)
            dxs = vx
            for bb in aabbs:
                dxs = b_bbs.calculate_axis_offset(bb, dxs, 0)
            b_bbs = b_bbs.offset(dx=dxs)
            dzs = vz
            for bb in aabbs:
                dzs = b_bbs.calculate_axis_offset(bb, dzs, 2)
            b_bbs = b_bbs.offset(dz=dzs)
            if fops.gt(dxs * dxs + dzs * dzs, dx * dx + dz * dz):
                dx = dxs
                dy = dys
                dz = dzs
                b_bb = b_bbs
        # Update collision flags and zero the velocity on every clipped axis.
        b_obj.on_ground = vy != dy and vy < 0
        b_obj.is_collided_horizontally = dx != vx or dz != vz
        b_obj.horizontally_blocked = not fops.eq(dx, vx) and not fops.eq(dz, vz)
        if not fops.eq(vx, dx):
            b_obj.velocities.x = 0
        if not fops.eq(vy, dy):
            b_obj.velocities.y = 0
        if not fops.eq(vz, dz):
            b_obj.velocities.z = 0
        b_obj.set_xyz(b_bb.posx, b_bb.min_y, b_bb.posz)
        self.do_block_collision(b_obj)

    def move(self, b_obj):
        self.clip_abs_velocities(b_obj)
        is_in_water = self.handle_water_movement(b_obj)
        is_in_lava = self.handle_lava_movement(b_obj)
        # Jumping: liquids give a small continuous boost, solid ground a full
        # jump impulse, ladders a climb speed.
        if b_obj.is_jumping:
            if is_in_water or is_in_lava:
                b_obj.velocities.y += config.SPEED_LIQUID_JUMP
            elif b_obj.on_ground:
                b_obj.velocities.y = config.SPEED_JUMP
            elif self.is_on_ladder(b_obj):
                b_obj.velocities.y = config.SPEED_CLIMB
            b_obj.is_jumping = False
        if is_in_water:
            if b_obj.hold_position_flag:
                b_obj.velocities.y = 0
            orig_y = b_obj.y
            self.update_directional_speed(b_obj, 0.02, balance=True)
            self.move_collisions(b_obj, b_obj.velocities.x, b_obj.velocities.y, b_obj.velocities.z)
            b_obj.velocities.x *= 0.8
            b_obj.velocities.y *= 0.8
            b_obj.velocities.z *= 0.8
            b_obj.velocities.y -= 0.02
            # Hop upwards when pressed against an edge while in liquid.
            if b_obj.is_collided_horizontally and \
                    self.is_offset_in_liquid(b_obj, b_obj.velocities.x,
                                             b_obj.velocities.y + 0.6 - b_obj.y + orig_y,
                                             b_obj.velocities.z):
                b_obj.velocities.y = 0.3
        elif is_in_lava:
            if b_obj.hold_position_flag:
                b_obj.velocities.y = 0
            orig_y = b_obj.y
            self.update_directional_speed(b_obj, 0.02)
            self.move_collisions(b_obj, b_obj.velocities.x, b_obj.velocities.y, b_obj.velocities.z)
            b_obj.velocities.x *= 0.5
            b_obj.velocities.y *= 0.5
            b_obj.velocities.z *= 0.5
            b_obj.velocities.y -= 0.02
            # NOTE(review): this branch reads self.velocities/self.y where the
            # water branch above uses b_obj.* — looks like a copy/paste slip;
            # confirm whether 'self' ever has these attributes here.
            if b_obj.is_collided_horizontally and \
                    self.is_offset_in_liquid(b_obj, self.velocities.x,
                                             self.velocities.y + 0.6 - self.y + orig_y,
                                             self.velocities.z):
                self.velocities.y = 0.3
        else:
            # Normal (air/ground) movement; sneak to hold position on ladders.
            if self.is_on_ladder(b_obj) and b_obj.hold_position_flag:
                self.start_sneaking(b_obj)
            slowdown = self.current_slowdown(b_obj)
            self.update_directional_speed(b_obj, self.current_speed_factor(b_obj))
            self.clip_ladder_velocities(b_obj)
            self.move_collisions(b_obj, b_obj.velocities.x, b_obj.velocities.y, b_obj.velocities.z)
            if b_obj.is_collided_horizontally and self.is_on_ladder(b_obj):
                b_obj.velocities.y = 0.2
            b_obj.velocities.y -= config.BLOCK_FALL
            b_obj.velocities.y *= config.DRAG
            b_obj.velocities.x *=
import Deci
mal_pb2 import Log_pb2 import uRPC_pb2 import client import s
erver
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""
Test cases for the TAAR Hybrid recommender
"""

import json

import boto3
from moto import mock_s3

from taar.recommenders.hybrid_recommender import CuratedRecommender
from taar.recommenders.hybrid_recommender import HybridRecommender
from taar.recommenders.ensemble_recommender import EnsembleRecommender
from taar.recommenders.s3config import TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY

# from taar.recommenders.hybrid_recommender import ENSEMBLE_WEIGHTS
from .test_ensemblerecommender import install_mock_ensemble_data
from .mocks import MockRecommenderFactory


def install_no_curated_data(ctx):
    """Provision the S3 whitelist object with an empty body."""
    ctx = ctx.child()
    s3 = boto3.resource("s3", region_name="us-west-2")
    s3.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
    s3.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(Body="")
    return ctx


def install_mock_curated_data(ctx):
    """Provision the S3 whitelist with 20 repeated-digit fake GUIDs."""
    fake_guids = [str(i) * 16 for i in range(20)]
    ctx = ctx.child()
    s3 = boto3.resource("s3", region_name="us-west-2")
    s3.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
    s3.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(
        Body=json.dumps(fake_guids)
    )
    return ctx


def install_ensemble_fixtures(ctx):
    """Wire a mock recommender factory and ensemble recommender into ctx."""
    ctx = install_mock_ensemble_data(ctx)
    factory = MockRecommenderFactory()
    ctx["recommender_factory"] = factory
    ctx["recommender_map"] = {
        name: factory.create(name)
        for name in ("collaborative", "similarity", "locale")
    }
    ctx["ensemble_recommender"] = EnsembleRecommender(ctx.child())
    return ctx


@mock_s3
def test_curated_can_recommend(test_ctx):
    """The curated recommender always claims it can recommend."""
    rec = CuratedRecommender(install_no_curated_data(test_ctx))
    assert rec.can_recommend({})
    assert rec.can_recommend({"installed_addons": []})


@mock_s3
def test_curated_recommendations(test_ctx):
    """Curated recommendations honour the requested limit."""
    rec = CuratedRecommender(install_mock_curated_data(test_ctx))
    for limit in range(1, 5):
        guid_list = rec.recommend({"client_id": "000000"}, limit=limit)
        # Some recommendations must always come back, exactly `limit` of them.
        assert len(guid_list) == limit


@mock_s3
def test_hybrid_recommendations(test_ctx):
    """Hybrid results mix curated and ensemble recommendations."""
    ctx = install_ensemble_fixtures(install_mock_curated_data(test_ctx))
    rec = HybridRecommender(ctx)

    # Lists of the requested size can be generated.
    for limit in range(4, 8):
        assert len(rec.recommend({"client_id": "000000"}, limit=limit)) == limit

    # A mixed list of four has two ensemble entries (weight > 1.0) followed
    # by two curated entries (weight exactly 1.0).
    ranked = rec.recommend({"client_id": "000000"}, limit=4)
    assert ranked[0][1] > 1.0
    assert ranked[1][1] > 1.0
    assert ranked[2][1] == 1.0
    assert ranked[3][1] == 1.0
import unittest
import os

from flow.benchmarks.baselines.bottleneck0 import bottleneck0_baseline
from flow.benchmarks.baselines.bottleneck1 import bottleneck1_baseline
from flow.benchmarks.baselines.bottleneck2 import bottleneck2_baseline
from flow.benchmarks.baselines.figureeight012 import figure_eight_baseline
from flow.benchmarks.baselines.grid0 import grid0_baseline
from flow.benchmarks.baselines.grid1 import grid1_baseline
from flow.benchmarks.baselines.merge012 import merge_baseline

# Presumably signals the benchmark code to run in a shortened test mode —
# confirm against the flow.benchmarks implementation.
os.environ["TEST_FLAG"] = "True"


class TestBaselines(unittest.TestCase):
    """
    Tests that the baselines in the benchmarks folder are running and
    returning expected values (i.e. values that match those in the CoRL
    paper reported on the website, or other).
    """

    def test_bottleneck0(self):
        """
        Tests flow/benchmark/baselines/bottleneck0.py
        """
        # run the bottleneck to make sure it runs
        bottleneck0_baseline(num_runs=1, render=False)

        # TODO: check that the performance measure is within some range

    def test_bottleneck1(self):
        """
        Tests flow/benchmark/baselines/bottleneck1.py
        """
        # run the bottleneck to make sure it runs
        bottleneck1_baseline(num_runs=1, render=False)

        # TODO: check that the performance measure is within some range

    def test_bottleneck2(self):
        """
        Tests flow/benchmark/baselines/bottleneck2.py
        """
        # run the bottleneck to make sure it runs
        bottleneck2_baseline(num_runs=1, render=False)

        # TODO: check that the performance measure is within some range

    def test_figure_eight(self):
        """
        Tests flow/benchmark/baselines/figureeight{0,1,2}.py
        """
        # run the baseline to make sure it runs
        figure_eight_baseline(num_runs=1, render=False)

        # TODO: check that the performance measure is within some range

    def test_grid0(self):
        """
        Tests flow/benchmark/baselines/grid0.py
        """
        # run the baseline to make sure it runs
        grid0_baseline(num_runs=1, render=False)

        # TODO: check that the performance measure is within some range

    def test_grid1(self):
        """
        Tests flow/benchmark/baselines/grid1.py
        """
        # run the baseline to make sure it runs
        grid1_baseline(num_runs=1, render=False)

        # TODO: check that the performance measure is within some range

    def test_merge(self):
        """
        Tests flow/benchmark/baselines/merge{0,1,2}.py
        """
        # run the baseline to make sure it runs
        merge_baseline(num_runs=1, render=False)

        # TODO: check that the performance measure is within some range


if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
from django.conf import settings


class Migration(migrations.Migration):
    """Add the external job portal models and external credentials storage."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0009_dummyprovider'),
    ]

    operations = [
        # Per-user credentials for logging into external job portals.
        migrations.CreateModel(
            name='ExternalCredentials',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('provider_name', models.CharField(max_length=1024)),
                ('username', models.CharField(max_length=1024)),
                # NOTE(review): stored as a plain CharField — confirm the
                # password is encrypted before being saved here.
                ('password', models.CharField(max_length=1024)),
                ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ExternalJobPortal',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=1024)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Forms and form groups build a tree per portal; the 'parent' FKs are
        # attached below via AddField once both models exist.
        migrations.CreateModel(
            name='ExternalJobPortalForm',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=1024)),
                ('template_name', models.CharField(max_length=1024)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ExternalJobPortalFormGroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=1024)),
                ('parent', models.ForeignKey(to='api.ExternalJobPortalFormGroup', null=True)),
                ('portal', models.ForeignKey(to='api.ExternalJobPortal')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ExternalJobPortalSubmission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('data', models.TextField()),
                ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
                ('target', models.ForeignKey(to='api.ExternalJobPortal')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ExternalJobPortalSubmissionStateChange',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('state', models.CharField(max_length=256, choices=[(b'EXTERNAL_SUBMISSION_RUNNING', b'Running'), (b'EXTERNAL_SUBMISSION_FAILED', b'FAILED'), (b'EXTERNAL_SUBMISSION_PENDING', b'Pending'), (b'EXTERNAL_SUBMISSION_PENDING_SUBMISSION', b'Submission in progress'), (b'EXTERNAL_SUBMISSION_SUCCESS', b'Succeeded')])),
                # NOTE(review): this FK targets AUTH_USER_MODEL although the
                # field name suggests ExternalJobPortalSubmission — confirm
                # this is intentional.
                ('external_submission', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='externaljobportalform',
            name='parent',
            field=models.ForeignKey(to='api.ExternalJobPortalFormGroup', null=True),
        ),
        migrations.AddField(
            model_name='externaljobportalform',
            name='portal',
            field=models.ForeignKey(to='api.ExternalJobPortal'),
        ),
    ]
#Equals and hash
#
# Example classes exercising the __eq__/__ne__/__hash__ contract. The
# violations below appear deliberate (this reads like a static-analysis
# test fixture), so they are documented rather than "fixed".

class Eq(object):
    # Defines __eq__ without a matching __hash__ (or __ne__).
    def __init__(self, data):
        self.data = data

    def __eq__(self, other):
        return self.data == other.data


class Ne(object):
    # Defines __ne__ without a matching __eq__.
    def __init__(self, data):
        self.data = data

    def __ne__(self, other):
        return self.data != other.data


class Hash(object):
    # Defines __hash__ without a matching __eq__.
    def __init__(self, data):
        self.data = data

    def __hash__(self):
        return hash(self.data)


class Unhashable1(object):
    # Idiomatic way to declare a class unhashable.
    __hash__ = None


class EqOK1(Unhashable1):
    # __eq__/__ne__ pair on an explicitly unhashable base.
    def __eq__(self, other):
        return False

    def __ne__(self, other):
        return True


class Unhashable2(object):
    #Not the idiomatic way of doing it, but not uncommon either
    def __hash__(self):
        raise TypeError("unhashable object")


class EqOK2(Unhashable2):
    # __eq__/__ne__ pair on a base whose __hash__ raises.
    def __eq__(self, other):
        return False

    def __ne__(self, other):
        return True


class ReflectiveNotEquals(object):
    # __ne__ delegates to __eq__ via negation.
    def __ne__(self, other):
        return not self == other


class EqOK3(ReflectiveNotEquals, Unhashable1):
    # Inherits the reflective __ne__ and the disabled __hash__.
    def __eq__(self, other):
        return self.data == other.data
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.

# NOTE: Python 2 source (print statements, cPickle, SocketServer).
# Pretty-prints SQL statements streamed from Indico's DB logger over a
# logging.handlers.SocketHandler connection.

import argparse
import cPickle
import fcntl
import logging
import logging.handlers
import os
import pprint
import signal
import SocketServer
import struct
import sys
import termios
import textwrap
from threading import Lock

import sqlparse
from pygments import highlight
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.lexers.agile import PythonLexer, PythonTracebackLexer
from pygments.lexers.sql import SqlLexer


# Serializes console output across the per-connection handler threads.
output_lock = Lock()

help_text = textwrap.dedent("""
    To use this script, you need to add the following to your logging.conf:

    [logger_db]
    level=DEBUG
    handlers=db
    qualname=indico.db
    propagate=0

    [handler_db]
    class=handlers.SocketHandler
    level=DEBUG
    args=('localhost', 9020)

    Also add your new logger/handler to the loggers/handlers lists, e.g. like this:

    [loggers]
    keys=root,db

    [handlers]
    keys=indico,db,other,smtp
    """).strip()


class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
    def handle(self):
        # SocketHandler wire format: a 4-byte big-endian length prefix,
        # followed by that many bytes of pickled record data.
        while True:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                break
            size = struct.unpack('>L', chunk)[0]
            chunk = self.connection.recv(size)
            # recv may return fewer bytes than requested; loop until the
            # whole record has arrived.
            while len(chunk) < size:
                chunk = chunk + self.connection.recv(size - len(chunk))
            # NOTE(review): unpickling network data is only acceptable
            # because the server binds to localhost; never expose this port.
            obj = cPickle.loads(chunk)
            self.handle_log(obj)

    def handle_log(self, obj):
        # Records come in pairs: 'start' carries source/statement/params,
        # 'end' carries the measured duration of the statement.
        sql_log_type = obj.get('sql_log_type')
        if sql_log_type == 'start':
            source = prettify_source(obj['sql_source'], self.server.traceback_frames) if obj['sql_source'] else None
            statement = prettify_statement(obj['sql_statement'])
            params = prettify_params(obj['sql_params']) if obj['sql_params'] else None
            with output_lock:
                if source:
                    print prettify_caption('Source')
                    print source
                    print
                print prettify_caption('Statement')
                print statement
                if params:
                    print
                    print prettify_caption('Params')
                    print params
        elif sql_log_type == 'end':
            with output_lock:
                print
                print prettify_caption('Duration')
                print ' {:.06f}s'.format(obj['sql_duration'])
                print_linesep()


class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
    # Allow fast restarts without waiting out TIME_WAIT sockets.
    allow_reuse_address = True

    def __init__(self, host, port, handler=LogRecordStreamHandler, traceback_frames=1):
        SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
        self.timeout = 1
        # Number of stack frames shown for each statement's source.
        self.traceback_frames = traceback_frames


def terminal_size():
    # Query the controlling terminal's window size via the TIOCGWINSZ ioctl
    # on stdin (fd 0); returns (width, height) in characters.
    h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ,
                                                     struct.pack('HHHH', 0, 0, 0, 0)))
    return w, h


def print_linesep():
    # Draw a full-width horizontal rule with the box-drawing character.
    print terminal_size()[0] * u'\N{BOX DRAWINGS LIGHT HORIZONTAL}'


def indent(msg, level=4):
    # Prefix every line of msg with `level` spaces.
    indentation = level * ' '
    return indentation + msg.replace('\n', '\n' + indentation)


def prettify_caption(caption):
    # ANSI 256-color escape: bold-ish blue caption.
    return '\x1b[38;5;75;04m{}\x1b[0m'.format(caption)


def prettify_source(source, traceback_frames):
    # Render up to `traceback_frames` (file, line, function, code) tuples as
    # a syntax-highlighted Python traceback.
    if not traceback_frames:
        return None
    msg = 'Traceback (most recent call last):\n'
    frame_msg = textwrap.dedent("""
        File "{}", line {}, in {}
          {}\n""").strip()
    msg += indent('\n'.join(frame_msg.format(*frame) for frame in source[:traceback_frames]), 2)
    highlighted = highlight(msg, PythonTracebackLexer(), Terminal256Formatter(style='native'))
    # Remove first line (just needed for PythonTracebackLexer)
    highlighted = '\n'.join(highlighted.splitlines()[1:])
    return indent(highlighted, 2).rstrip()


def prettify_statement(statement):
    # Re-indent and upper-case keywords, then syntax-highlight the SQL.
    statement = sqlparse.format(statement, keyword_case='upper', reindent=True)
    return indent(highlight(statement, SqlLexer(), Terminal256Formatter(style='native'))).rstrip()


def prettify_params(args):
    # Pretty-print the bind parameters as highlighted Python literals.
    args = pprint.pformat(args)
    return indent(highlight(args, PythonLexer(), Terminal256Formatter(style='native'))).rstrip()


def parse_args():
    parser = argparse.ArgumentParser()
    # NOTE(review): the help text says "UDP listener" but the server is a
    # ThreadingTCPServer (SocketHandler uses TCP).
    parser.add_argument('-p', dest='port', type=int, default=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                        help='The port to bind the UDP listener to')
    parser.add_argument('-t', dest='traceback_frames', type=int, default=1,
                        help='Number of stack frames to show (max. 3)')
    parser.add_argument('--setup-help', action='store_true', help='Explain how to enable logging for script')
    return parser.parse_args()


def sigint(*unused):
    # os._exit: terminate immediately from the signal handler, skipping
    # normal interpreter shutdown (threads may still be blocked in accept).
    print '\rTerminating'
    os._exit(1)


def main():
    args = parse_args()
    if args.setup_help:
        print help_text
        sys.exit(1)
    signal.signal(signal.SIGINT, sigint)
    print 'Listening on 127.0.0.1:{}'.format(args.port)
    server = LogRecordSocketReceiver('localhost', args.port, traceback_frames=args.traceback_frames)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print


if __name__ == '__main__':
    main()
res. EXC_PATCH = "pyvtk580_tryexcept_and_pyexceptions.diff" # fixes attributes in vtkproperty for shader use in python VTKPRPRTY_PATCH = "vtkProperty_PyShaderVar.diff" # recent segfault with vtk 5.6.1 and wxPython 2.8.11.0 # see here for more info: # http://vtk.1045678.n5.nabble.com/wx-python-scripts-segfault-td1234471.html WXVTKRWI_DISPLAYID_SEGFAULT_PATCH = "wxvtkrwi_displayid_segfault.diff" dependencies = ['CMake'] class VTK58(InstallPackage): def __init__(self): self.source_dir = os.path.join(config.archive_dir, BASENAME) self.build_dir = os.path.join(config.build_dir, '%s-build' % (BASENAME,)) self.inst_dir = os.path.join(config.inst_dir, BASENAME) self.exc_patch_src = os.path.join(config.patches_dir, EXC_PATCH) self.exc_patch_dst = os.path.join(config.archive_dir, EXC_PATCH) self.vtkprprty_patch_filename = os.path.join(config.patches_dir,
VTKPRPRTY_PATCH) self.wxvtkrwi_displayid_segfault_patch_filename = os.path.join( config.patches_dir, WXVTKRWI_DISPLAYID_SEGFAULT_PATCH) config.VTK_LIB = os.path.join(self.inst_dir, 'lib') # whatever the case may be, we have to register VTK variables if os.name == 'nt': # on Win, inst/VTK/bin contains the so files
config.VTK_SODIR = os.path.join(self.inst_dir, 'bin') # inst/VTK/lib/site-packages the VTK python package config.VTK_PYTHON = os.path.join( config.VTK_LIB, 'site-packages') else: # on *ix, inst/VTK/lib contains DLLs config.VTK_SODIR = os.path.join( config.VTK_LIB, VTK_BASE_VERSION) # on *ix, inst/lib/python2.5/site-packages contains the # VTK python package # sys.version is (2, 5, 0, 'final', 0) config.VTK_PYTHON = os.path.join( config.VTK_LIB, 'python%d.%d/site-packages' % \ sys.version_info[0:2]) # this contains the VTK cmake config (same on *ix and Win) config.VTK_DIR = os.path.join(config.VTK_LIB, VTK_BASE_VERSION) def get(self): if os.path.exists(self.source_dir): utils.output("VTK already checked out, skipping step.") else: utils.goto_archive() ret = os.system("git clone %s %s" % (GIT_REPO, BASENAME)) if ret != 0: utils.error("Could not clone VTK repo. Fix and try again.") os.chdir(self.source_dir) ret = os.system("git checkout %s" % (GIT_TAG,)) if ret != 0: utils.error("Could not checkout VTK %s. Fix and try again." % (GIT_TAG,)) if not os.path.exists(self.exc_patch_dst): utils.output("Applying EXC patch") # we do this copy so we can see if the patch has been done yet or not shutil.copyfile(self.exc_patch_src, self.exc_patch_dst) os.chdir(self.source_dir) # default git-generated patch, so needs -p1 ret = os.system( "%s -p1 < %s" % (config.PATCH, self.exc_patch_dst)) if ret != 0: utils.error( "Could not apply EXC patch. Fix and try again.") # # VTKPRPRTY PATCH # utils.output("Applying VTKPRPRTY patch") # os.chdir(os.path.join(self.source_dir, 'Rendering')) # ret = os.system( # "%s -p0 < %s" % (config.PATCH, self.vtkprprty_patch_filename)) # if ret != 0: # utils.error( # "Could not apply VTKPRPRTY patch. 
Fix and try again.") # # WXVTKRWI_DISPLAYID_SEGFAULT patch # utils.output("Applying VTKWXRWI_DISPLAYID_SEGFAULT patch") # os.chdir(self.source_dir) # # default git-generated patch, so needs -p1 # ret = os.system( # "%s -p1 < %s" % (config.PATCH, # self.wxvtkrwi_displayid_segfault_patch_filename)) # if ret != 0: # utils.error( # "Could not apply WXVTKRWI_DISPLAYID_SEGFAULT patch. Fix and try again.") def unpack(self): pass def configure(self): if os.path.exists( os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')): utils.output("VTK build already configured.") return if not os.path.exists(self.build_dir): os.mkdir(self.build_dir) cmake_params = "-DBUILD_SHARED_LIBS=ON " \ "-DBUILD_TESTING=OFF " \ "-DCMAKE_BUILD_TYPE=RelWithDebInfo " \ "-DCMAKE_INSTALL_PREFIX=%s " \ "-DVTK_USE_TK=NO " \ "-DVTK_USE_METAIO=ON " \ "-DVTK_USE_PARALLEL=ON " \ "-DPYTHON_EXECUTABLE=%s " \ "-DPYTHON_LIBRARY=%s " \ "-DPYTHON_INCLUDE_PATH=%s " \ "-DVTK_WRAP_PYTHON=ON " % (self.inst_dir, config.PYTHON_EXECUTABLE, config.PYTHON_LIBRARY, config.PYTHON_INCLUDE_PATH) ret = utils.cmake_command(self.build_dir, self.source_dir, cmake_params) if ret != 0: utils.error("Could not configure VTK. Fix and try again.") def build(self): posix_file = os.path.join(self.build_dir, 'bin/libvtkWidgetsPython.so') nt_file = os.path.join(self.build_dir, 'bin', config.BUILD_TARGET, 'vtkWidgetsPythonD.dll') if utils.file_exists(posix_file, nt_file): utils.output("VTK already built. Skipping build step.") else: os.chdir(self.build_dir) ret = utils.make_command('VTK.sln') if ret != 0: utils.error("Error building VTK. Fix and try again.") def install(self): posix_file = os.path.join(self.inst_dir, 'bin/vtkpython') nt_file = os.path.join(self.inst_dir, 'bin', 'vtkpython.exe') if utils.file_exists(posix_file, nt_file): utils.output("VTK already installed. Skipping build step.") else: # python 2.5.2 setup.py complains that this does not exist # with VTK PV-3-2-1. 
This is only on installations with # EasyInstall / Python Eggs, then the VTK setup.py uses # EasyInstall and not standard distutils. gah! # just tested with VTK 5.8.0 and Python 2.7.2 # it indeed installs VTK_PYTHON/VTK-5.8.0-py2.7.egg # but due to the site.py and easy-install.pth magic in there, # adding VTK_PYTHON to the PYTHONPATH still works. We can keep # pip, yay! if not os.path.exists(config.VTK_PYTHON): os.makedirs(config.VTK_PYTHON) os.chdir(self.build_dir) # we save, set and restore the PP env variable, else # stupid setuptools complains save_env = os.environ.get('PYTHONPATH', '') os.environ['PYTHONPATH'] = config.VTK_PYTHON ret = utils.make_command('VTK.sln', install=True) os.environ['PYTHONPATH'] = save_env if ret != 0: utils.error("Could not install VTK. Fix and try again.") # now do some surgery on VTKConfig.cmake and # VTKLibraryDepends.cmake so builds of VTK-dependent libraries # with only the DRE to link with Just Work(tm) # on windows, we need to replace backslash with forward slash # as that's the style used by the config files. On *ix mostly # harmless idp = re.sub(r'\\','/', config.inst_dir) for fn in [os.path.join(config.VTK_DIR, 'VTKConfig.cmake'),
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import retrying

from nova import exception
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.tests.test_utils import pvmhttp

from nova.virt.powervm import mgmt

# Canned PowerVM REST response used to build a realistic LPAR payload.
LPAR_HTTPRESP_FILE = "lpar.txt"


class TestMgmt(test.TestCase):
    """Tests for the management-partition helpers in nova.virt.powervm.mgmt."""

    def setUp(self):
        super(TestMgmt, self).setUp()
        self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt

        lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
        self.assertIsNotNone(
            lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE)

        self.resp = lpar_http.response

    @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
    def test_mgmt_uuid(self, mock_get_partition):
        """mgmt_uuid caches the management-partition UUID after one lookup."""
        mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
        adpt = mock.Mock()

        # First run should call the partition only once
        self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
        mock_get_partition.assert_called_once_with(adpt)

        # But a subsequent call should effectively no-op
        mock_get_partition.reset_mock()
        self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
        self.assertEqual(mock_get_partition.call_count, 0)

    @mock.patch('glob.glob', autospec=True)
    @mock.patch('nova.privsep.path.writefile', autospec=True)
    @mock.patch('os.path.realpath', autospec=True)
    def test_discover_vscsi_disk(self, mock_realpath, mock_writefile,
                                 mock_glob):
        """Happy path: scan is triggered and the udid-matched devlink found."""
        scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
        udid = ('275b5d5f88fa5611e48be9000098be9400'
                '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
        devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
        mapping = mock.Mock()
        mapping.client_adapter.lpar_slot_num = 5
        mapping.backing_storage.udid = udid

        # Realistically, first glob would return  e.g. .../host0/.../host0/...
        # but it doesn't matter for test purposes.
        mock_glob.side_effect = [[scanpath], [devlink]]
        mgmt.discover_vscsi_disk(mapping)

        # Second glob matches on the *last 32 chars* of the udid only.
        mock_glob.assert_has_calls(
            [mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
        # The scan is kicked off by writing '- - -' to the sysfs scan file.
        mock_writefile.assert_called_once_with(scanpath, 'a', '- - -')
        mock_realpath.assert_called_with(devlink)

    @mock.patch('retrying.retry', autospec=True)
    @mock.patch('glob.glob', autospec=True)
    @mock.patch('nova.privsep.path.writefile', autospec=True)
    def test_discover_vscsi_disk_not_one_result(self, mock_writefile,
                                                mock_glob, mock_retry):
        """Zero or more than one disk is found by discover_vscsi_disk."""
        def validate_retry(kwargs):
            # discover_vscsi_disk polls every 250ms for up to 5 minutes.
            self.assertIn('retry_on_result', kwargs)
            self.assertEqual(250, kwargs['wait_fixed'])
            self.assertEqual(300000, kwargs['stop_max_delay'])

        def raiser(unused):
            raise retrying.RetryError(mock.Mock(attempt_number=123))

        def retry_passthrough(**kwargs):
            # Fake retry decorator: run the polled function undecorated.
            validate_retry(kwargs)

            def wrapped(_poll_for_dev):
                return _poll_for_dev
            return wrapped

        def retry_timeout(**kwargs):
            # Fake retry decorator: simulate the retry deadline expiring.
            validate_retry(kwargs)

            def wrapped(_poll_for_dev):
                return raiser
            return wrapped

        udid = ('275b5d5f88fa5611e48be9000098be9400'
                '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
        mapping = mock.Mock()
        mapping.client_adapter.lpar_slot_num = 5
        mapping.backing_storage.udid = udid

        # No disks found
        mock_retry.side_effect = retry_timeout
        mock_glob.side_effect = lambda path: []
        self.assertRaises(exception.NoDiskDiscoveryException,
                          mgmt.discover_vscsi_disk, mapping)

        # Multiple disks found
        mock_retry.side_effect = retry_passthrough
        mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
        self.assertRaises(exception.UniqueDiskDiscoveryException,
                          mgmt.discover_vscsi_disk, mapping)

    @mock.patch('time.sleep', autospec=True)
    @mock.patch('os.path.realpath', autospec=True)
    @mock.patch('os.stat', autospec=True)
    @mock.patch('nova.privsep.path.writefile', autospec=True)
    def test_remove_block_dev(self, mock_writefile, mock_stat, mock_realpath,
                              mock_sleep):
        """remove_block_dev validates paths, writes '1', and sees it vanish."""
        link = '/dev/link/foo'
        realpath = '/dev/sde'
        delpath = '/sys/block/sde/device/delete'
        mock_realpath.return_value = realpath

        # Good path: device and delete-file exist; after deletion, stat on the
        # real path raises (device gone) so no polling sleep is needed.
        mock_stat.side_effect = (None, None, OSError())
        mgmt.remove_block_dev(link)
        mock_realpath.assert_called_with(link)
        mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
                                    mock.call(realpath)])
        mock_writefile.assert_called_once_with(delpath, 'a', '1')
        self.assertEqual(0, mock_sleep.call_count)

        # Device param not found
        mock_writefile.reset_mock()
        mock_stat.reset_mock()
        mock_stat.side_effect = (OSError(), None, None)
        self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
                          link)
        # stat was called once; exec was not called
        self.assertEqual(1, mock_stat.call_count)
        self.assertEqual(0, mock_writefile.call_count)

        # Delete special file not found
        mock_writefile.reset_mock()
        mock_stat.reset_mock()
        mock_stat.side_effect = (None, OSError(), None)
        self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
                          link)
        # stat was called twice; exec was not called
        self.assertEqual(2, mock_stat.call_count)
        self.assertEqual(0, mock_writefile.call_count)

    @mock.patch('retrying.retry')
    @mock.patch('os.path.realpath')
    @mock.patch('os.stat')
    @mock.patch('nova.privsep.path.writefile')
    def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
                                      mock_realpath, mock_retry):
        """Deletion attempted but the device never disappears: timeout path."""

        def validate_retry(kwargs):
            # remove_block_dev polls every 250ms for up to 10 seconds.
            self.assertIn('retry_on_result', kwargs)
            self.assertEqual(250, kwargs['wait_fixed'])
            self.assertEqual(10000, kwargs['stop_max_delay'])

        def raiser(unused):
            raise retrying.RetryError(mock.Mock(attempt_number=123))

        def retry_timeout(**kwargs):
            validate_retry(kwargs)

            def wrapped(_poll_for_del):
                return raiser
            return wrapped

        # Deletion was attempted, but device is still there
        link = '/dev/link/foo'
        delpath = '/sys/block/sde/device/delete'
        realpath = '/dev/sde'
        mock_realpath.return_value = realpath
        # stat always succeeds => device never goes away.
        mock_stat.side_effect = lambda path: 1
        mock_retry.side_effect = retry_timeout
        self.assertRaises(
            exception.DeviceDeletionException, mgmt.remove_block_dev, link)
        mock_realpath.assert_called_once_with(link)
        mock_dacw.assert_called_with(delpath, 'a', '1')
# This file is formatted with black.
# https://github.com/psf/black
import os
import json
import subprocess
import sys

import simplekml

ALT_MODE = simplekml.AltitudeMode.absolute  # Absolute altitude means from sea floor

# Current commit: image URLs below are pinned to the exact revision this KML
# was generated from.
if os.environ.get("TRAVIS"):
    COMMIT = os.environ["TRAVIS_COMMIT"]
else:
    # For local dev
    proc = subprocess.run(
        ["git", "rev-parse", "HEAD"], capture_output=True, cwd="../../../", text=True
    )
    if proc.returncode != 0:
        print("Git command failed")
        sys.exit(1)
    COMMIT = proc.stdout.strip()


def get_desc(node):
    """Generate HTML description for a node.

    Required keys are indexed directly (KeyError if missing); optional keys
    use ``.get`` and therefore render as ``None`` when absent.  The result is
    wrapped in CDATA so Google Earth renders the HTML verbatim.
    """
    # Required keys
    desc = f"<h1>{node['name']}</h1>"
    desc += f"<h2>{node['status']}</h2>"
    desc += f"Type: {node['type']}<br>"
    desc += f"Altitude: {node['altitude']}<br>"
    desc += f"Date Added: {node['dateAdded']}<br>"
    desc += f"Group: {node['group']}<br>"

    # Optional keys
    desc += f"Model: {node.get('model')}<br>"
    desc += f"IPv4: {node.get('ipv4')}<br>"
    desc += f"IPv6: {node.get('ipv6')}<br>"
    desc += f"Mode: {node.get('mode')}<br>"
    if node["type"] != "router":
        desc += f"Connected Router: {node.get('router')}<br>"

    # Antenna specific keys
    if node["type"] == "antenna":
        desc += f"SSID: {node.get('ssid')}<br>"
        desc += "<br>"
        desc += f"Antenna Type: {node.get('antennaType')}<br>"
        desc += f"Antenna Cone: {node.get('antennaCone')}<br>"
        desc += f"Antenna Direction: {node.get('antennaDirection')}<br>"
        desc += f"Antenna Distance: {node.get('antennaDistance')}<br>"
        desc += f"Antenna Protocol: {node.get('antennaProtocol')}<br>"
    desc += "<br>"

    # Images: served raw from GitHub at the pinned COMMIT.
    if node.get("images") is not None:
        for image in node["images"]:
            url = (
                "https://raw.githubusercontent.com/tomeshnet/node-list/"
                + COMMIT
                + "/images/"
                + image
            )
            desc += f'<a href={url}><img alt={image} src={url} width="300"></a><br>'

    return "<![CDATA[" + desc + "]]>"


with open("../../../tomeshnet-node-list.json", "r") as f:
    nodes = json.load(f)["nodeList"]

kml = simplekml.Kml(name="Toronto Community Network")

# One folder per node status; only inactive nodes start hidden.
active = kml.newfolder(name="Active Nodes", open=0, visibility=1)
proposed = kml.newfolder(name="Proposed Nodes", open=0, visibility=1)
inactive = kml.newfolder(name="Inactive Nodes", open=0, visibility=0)

for node in nodes:
    if node["status"] == "active":
        folder = active
        vis = 1  # Active nodes always visible
        # Yellow
        icon_url = "http://maps.google.com/mapfiles/kml/pushpin/ylw-pushpin.png"
    elif node["status"] == "proposed":
        folder = proposed
        vis = 1
        # Light Blue
        icon_url = "http://maps.google.com/mapfiles/kml/pushpin/ltblu-pushpin.png"
    else:
        # All other nodes are considered inactive
        folder = inactive
        vis = 0
        # Red
        icon_url = "http://maps.google.com/mapfiles/kml/pushpin/red-pushpin.png"

    pnt = folder.newpoint(
        name=node["name"],
        altitudemode=ALT_MODE,
        coords=[(node["longitude"], node["latitude"], node["altitude"])],
        visibility=vis,
        description=get_desc(node),
        snippet=simplekml.Snippet(),  # Empty snippet
    )
    pnt.style.iconstyle.icon.href = icon_url

kml.save("../../build/tomeshnet-node-list-kml.kml")
#! /usr/bin/python
# AnkitBot UAA module entry point (Python 2: print statements).
import sys, localconfig, platform, time

# OS Runtime comments: extend sys.path with the platform-specific library
# location from localconfig before importing the bot framework below.
if platform.system() == "Windows":
    sys.path.append(localconfig.winpath)
    print "You are running the AnkitBot UAA Module for Windows. Sponsored by DQ. :)"
else:
    sys.path.append(localconfig.linuxpath)
    print "You are running the AnkitBot UAA Module for Linux. Sponsored by DQ. :)"

# These imports come after the sys.path setup above — presumably they resolve
# from the appended path (confirm against localconfig).
import wikipedia
import globalfunc as globe

override = False  # set True to bypass the start-permission check

# Abort unless the bot is currently allowed to run.
if not globe.startAllowed(override):
    print "Fatal - System Access Denied."
    sys.exit(1)

print "System Alert - Program is still running."
globe.main()
globe.checkWait()
globe.pageCleanup()
wikipedia.stopme()
from unittest import TestCase

from django.template import Context, Template, VariableNode
from django.test import override_settings


class NodelistTest(TestCase):
    """get_nodes_by_type() must find VariableNodes nested inside block tags."""

    def test_for(self):
        template = Template('{% for i in 1 %}{{ a }}{% endfor %}')
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)

    def test_if(self):
        template = Template('{% if x %}{{ a }}{% endif %}')
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)

    def test_ifequal(self):
        template = Template('{% ifequal x y %}{{ a }}{% endifequal %}')
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)

    def test_ifchanged(self):
        template = Template('{% ifchanged x %}{{ a }}{% endifchanged %}')
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)


class ErrorIndexTest(TestCase):
    """
    Checks whether index of error is calculated correctly in
    template debugger in for loops. Refs ticket #5831
    """
    @override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
    def test_correct_exception_index(self):
        # Each pair is (template source, expected (start, end) character
        # offsets of the failing {% badsimpletag %} in the source string).
        tests = [
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
            ('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
        ]
        context = Context({
            'range': range(5),
            'five': 5,
        })
        for source, expected_error_source_index in tests:
            template = Template(source)
            try:
                template.render(context)
            except (RuntimeError, TypeError) as e:
                # With TEMPLATE_DEBUG on, the exception carries the template
                # source span as django_template_source = (origin, (start, end)).
                error_source_index = e.django_template_source[1]
                self.assertEqual(error_source_index,
                                 expected_error_source_index)
"""Test cases for STEREO Map subclasses. This particular test file pertains to CORMap. @Author: Pritish C. (VaticanCameos) """ import os import glob from sunpy.map.sources.stereo import CORMap from sunpy.map import Map import sunpy.data.test path = sunpy.data.test.rootdir fitspath = glob.glob(os.path.join(path, "cor1_20090615_000500_s4c1A.fts")) cor =
Map(fitspath) # COR Tests def test_fitstoEIT(): """Tests the creation of CORMap using FITS.""" assert isinstance(cor, CORMap) def test_is_datasource_for(): """Test the is_datasource_for method of CORMap. Note that header data to be provided as an argument can be a MapMeta object.""" assert cor.is_datasource_for(c
or.data, cor.meta) def test_measurement(): """Tests the measurement property of the CORMap object.""" assert cor.measurement == "white-light" def test_observatory(): """Tests the observatory property of the CORMap object.""" assert cor.observatory == "STEREO A"
g'): match = re.match(r'\\g<(\S+)>', arg).group(1) groups.append(match) elif arg.startswith('\\'): match = int(re.match(r'\\(\d+)', arg).group(1)) groups.append(match) else: raise errors.AnsibleFilterError('Unknown argument') flags = 0 if kwargs.get('ignorecase'): flags |= re.I if kwargs.get('multiline'): flags |= re.M match = re.search(regex, value, flags) if match: if not groups: return match.group() else: items = list() for item in groups: items.append(match.group(item)) return items def ternary(value, true_val, false_val): ''' value ? true_val : false_val ''' if value: return true_val else: return false_val def regex_escape(string): '''Escape all regular expressions special characters from STRING.''' return re.escape(string) def from_yaml(data): if isinstance(data, string_types): return yaml.safe_load(data) return data @environmentfilter def rand(environment, end, start=None, step=None, seed=None): if seed is None: r = SystemRandom() else: r = Random(seed) if isinstance(end, integer_types): if not start: start = 0 if not step: step = 1 return r.randrange(start, end, step) elif hasattr(end, '__iter__'): if start or step: raise errors.AnsibleFilterError('start and step can only be used with integer values') return r.choice(end) else: raise errors.AnsibleFilterError('random can only be used on sequences and integers') def randomize_list(mylist, seed=None): try: mylist = list(mylist) if seed: r = Random(seed) r.shuffle(mylist) else: shuffle(mylist) except: pass return mylist def get_hash(data, hashtype='sha1'): try: # see if hash is supported h = hashlib.new(hashtype) except: return None h.update(to_bytes(data, errors='surrogate_then_strict')) return h.hexdigest() def get_encrypted_password(password, hashtype='sha512', salt=None): # TODO: find a way to construct dynamically from system cryptmethod= { 'md5': '1', 'blowfish': '2a', 'sha256': '5', 'sha512': '6', } if hashtype in cryptmethod: if salt is None: r = SystemRandom() if hashtype in ['md5']: saltsize = 8 
else: saltsize = 16 saltcharset = string.ascii_letters + string.digits + '/.' salt = ''.join([r.choice(saltcharset) for _ in range(saltsize)]) if not HAS_PASSLIB: if sys.platform.startswith('darwin'): raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin') saltstring = "$%s$%s" % (cryptmethod[hashtype],salt) encrypted = crypt.crypt(password, saltstring) else: if hashtype == 'blowfish': cls = passlib.hash.bcrypt else: cls = getattr(passlib.hash, '%s_crypt' % hashtype) encrypted = cls.encrypt(password, salt=salt) return encrypted return None def to_uuid(string): return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string))) def mandatory(a): from jinja2.runtime import Undefined ''' Make a variable mandatory ''' if isinstance(a, Undefined): raise errors.AnsibleFilterError('Mandatory variable not defined.') return a def combine(*terms, **kwargs): recursive = kwargs.get('recursive', False) if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs): raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument") for t in terms: if not isinstance(t, dict): raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t)) if recursive: return reduce(merge_hash, terms) else: return dict(itertools.chain(*map(iteritems, terms))) def comment(text, style='plain', **kw): # Predefined comment types comment_styles = { 'plain': { 'decoration': '# ' }, 'erlang': { 'decoration': '% ' }, 'c': { 'decoration': '// ' }, 'cblock': { 'beginning': '/*', 'decoration': ' * ', 'end': ' */' }, 'xml': { 'beginning': '<!--', 'decoration': ' - ', 'end': '-->' } } # Pointer to the right comment type style_params = comment_styles[style] if 'decoration' in kw: prepostfix = kw['decoration'] else: prepostfix = style_params['decoration'] # Default params p = { 'newline': '\n', 'beginning': '', 'prefix': (prepostfix).rstrip(), 'prefix_count': 1, 'decoration': '', 'postfix': 
(prepostfix).rstrip(), 'postfix_count': 1, 'end': '' } # Update default params p.update(style_params) p.update(kw) # Compose substrings for the final string str_beginning = '' if p['beginning']: str_beginning = "%s%s" % (p['beginning'], p['newline']) str_prefix = '' if p['prefix']: if p['prefix'] != p['newline']: str_prefix = str( "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count']) else: str_prefix = str( "%s" % (p['newline'])) * int(p['prefix_count']) str_text = ("%s%s" % ( p['decoration'], # Prepend each line of the text with the decorator text.replace( p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace( # Remove trailing spaces when only decorator is on the line "%s%s" % (p['decoration'], p['newline']), "%s%s" % (p['decoration'].rstrip(), p['newline'])) str_postfix = p['newline'].join( [''] + [p['postfix'] for x in range(p['postfix_count'])]) str_end = '' if p['end']: str_end = "%s%s" % (p['newline'], p['end']) # Return the final string return "%s%s%s%s%s" % ( str_beginning, str_prefix, str_text, str_postfix, str_end) def extract(item, container, morekeys=None): from jinja2.runtime import Undefined value = container[item] if value is not Undefined and morekeys is not None: if not isinstance(morekeys, list): morekeys = [morekeys] try: value = reduce(lambda d, k: d[k], morekeys, value) except KeyError: value = Undefined() return value def failed(*a, **kw): ''' Test if task result yields failed ''' item = a[0] if not isinstance(item, MutableMapping): raise errors.AnsibleFilterError("|failed expects a dic
tionary") rc = item.get('rc', 0) failed = item.get('failed', False) if rc != 0 or failed: return True else: return False def success(*a, **kw): ''' Test if task result yields success ''' return not failed(*a, **kw) def changed(*a, **kw): ''' Test if task result yields changed ''' item = a[0] if n
ot isinstance(item, MutableMapping): raise errors.AnsibleFilterError("|changed expects a dictionary") if not 'changed' in item: changed = False if ('results' in item # some modules return a 'results' key and isinstance(item['results'], MutableSequence) and isinstance(item['results'][0], MutableMapping)): for result in item['results']: changed = changed or result.get('changed', False) else: changed = item.get('changed', False) return changed def skipped(*a, **kw): ''' Test if task result yields skipped ''' item = a[0] if not isinstance(item, MutableMapping): raise err
from sa_tools.base.magic import MagicMixin
from sa_tools.inbox import Inbox
from sa_tools.session import SASession
from sa_tools.index import Index

import os
import pickle
import sys


def py_ver() -> str:
    """Return the major Python version as a string (used to tag backups)."""
    return str(sys.version_info.major)


class APSession(object):
    """Holds an SASession, optionally pickled to/from a hidden backup file.

    The backup filename is derived from the username and the Python major
    version, e.g. ``.some_user3.bak``.
    """

    def __init__(self, username: str, passwd: str=None,
                 save_session: bool=False, *args, **kwargs):
        self.username = username
        self.passwd = passwd
        self._session_bak = \
            '.' + username.replace(' ', '_') + py_ver() + '.bak'
        self.session = self._get_session(save_session=save_session)

        # Drop the plaintext password from both the local scope and the
        # instance as soon as the session is established.
        del passwd
        del self.passwd

    def _get_session(self, save_session: bool=True) -> SASession:
        """Load a pickled session if a backup exists, else log in fresh.

        NOTE(review): this default (True) differs from __init__'s
        save_session=False; only __init__'s value is ever passed in here.
        """
        backup_exists = os.path.exists(self._session_bak)
        # session = None

        if backup_exists:
            session = self._load_session()
        else:
            session = SASession(self.username, self.passwd)

            if save_session:
                self._save_session(session)

        return session

    def _load_session(self) -> SASession:
        """Unpickle and return the previously saved session."""
        with open(self._session_bak, 'rb') as old_session:
            print("Loading from backup: " + self._session_bak)
            session = pickle.load(old_session)

        return session

    def _save_session(self, session: SASession) -> None:
        """Pickle the session to the backup file."""
        with open(self._session_bak, 'wb') as session_file:
            pickle.dump(session, session_file)


class AwfulPy(APSession, MagicMixin):
    """Top-level client: an APSession plus forum index and inbox accessors."""

    def __init__(self, username, *args, **kwargs):
        super().__init__(username, *args, **kwargs)
        self.index = Index(self.session)
        self.inbox = Inbox(self.session)
        self.name = "awful.py"
        self.version = "v0.2014.08.24"

    def __repr__(self):
        info = '[' + self.name + ' ' + self.version + '] '
        acct = 'Logged in as ' + self.username
        login_time = ' on ' + self.session.login_time

        return info + acct + login_time
# Authors: Aaron Qiu <zqiu@ulg.ac.be>, # Antonio Sutera <a.sutera@ulg.ac.be>, # Arnaud Joly <a.joly@ulg.ac.be>, # Gilles Louppe <g.louppe@ulg.ac.be>, # Vincent Francois <v.francois@ulg.ac.be> # # License: BSD 3 clause from __future__ import division, print_function, absolute_import from itertools import chain import numpy as np from sklearn.externals.joblib import Parallel, delayed, cpu_count
from utils import scale


def _partition_X(X, n_jobs):
    """Private function used to partition X between jobs.

    Parameters
    ----------
    X : numpy array of shape (n_samples, n_nodes)
        Fluorescence signals.

    n_jobs : int
        Requested number of jobs; -1 means one per CPU core.

    Returns
    -------
    n_jobs : int
        Effective number of jobs (never more than the number of nodes).

    starts : list of int
        Column boundaries; job i processes columns starts[i]:starts[i + 1].
    """
    n_nodes = X.shape[1]

    # Compute the number of jobs
    n_jobs = min(cpu_count() if n_jobs == -1 else n_jobs, n_nodes)

    # Partition estimators between jobs as evenly as possible; the first
    # (n_nodes % n_jobs) jobs take one extra column.
    # `dtype=int` replaces the former `dtype=np.int`: the np.int alias of the
    # builtin was deprecated in NumPy 1.20 and removed in 1.24; semantics are
    # identical.
    n_node_per_job = (n_nodes // n_jobs) * np.ones(n_jobs, dtype=int)
    n_node_per_job[:n_nodes % n_jobs] += 1
    starts = np.cumsum(n_node_per_job)

    return n_jobs, [0] + starts.tolist()


def _parallel_count(X, start, end):
    """Private function used to compute a batch of score within a job.

    For each source column jx in [start, end), count how often every other
    signal at time t+1 lands inside the window
    (X[t, jx] + 0.2, X[t, jx] + 0.5) — a precedence ("jx leads j") count.
    """
    count = np.zeros((end - start, X.shape[1]))
    for index, jx in enumerate(range(start, end)):
        X_jx_bot = X[:-1, jx] + 0.2
        X_jx_top = X[:-1, jx] + 0.5
        for j in range(X.shape[1]):
            if j == jx:
                continue
            count[index, j] = ((X[1:, j] > X_jx_bot)
                               & (X[1:, j] < X_jx_top)).sum()
    return count


def make_prediction_directivity(X, threshold=0.12, n_jobs=1):
    """Score neuron connectivity using a precedence measure

    Parameters
    ----------
    X : numpy array of shape (n_samples, n_nodes)
        Fluorescence signals

    threshold : float, (default=0.12)
        Threshold value for hard thresholding filter:
        x_new[i] = x[i] if x[i] >= threshold else 0.

    n_jobs : integer, optional (default=1)
        The number of jobs to run the algorithm in parallel. If -1,
        then the number of jobs is set to the number of cores.

    Returns
    -------
    score : numpy array of shape (n_nodes, n_nodes)
        Pairwise neuron connectivity score.
    """
    # Perform filtering: weighted sum over the 4 most recent samples, then a
    # first-order difference.
    # NOTE(review): for i in {1, 2}, X[i - 2] / X[i - 3] index from the *end*
    # of the recording (Python negative indexing). Preserved as-is since the
    # method was presumably tuned with this behavior — confirm before fixing.
    X_new = np.zeros(X.shape)
    for i in range(1, X.shape[0] - 1):
        for j in range(X.shape[1]):
            X_new[i, j] = (X[i, j] + 1 * X[i - 1, j] + 0.8 * X[i - 2, j]
                           + 0.4 * X[i - 3, j])
    X_new = np.diff(X_new, axis=0)

    # Hard-threshold, then mildly compress surviving amplitudes (x ** 0.9).
    thresh1 = X_new < threshold * 1
    thresh2 = X_new >= threshold * 1
    X_new[thresh1] = 0
    X_new[thresh2] = pow(X_new[thresh2], 0.9)

    # Score directivity in parallel over batches of source columns.
    n_jobs, starts = _partition_X(X, n_jobs)
    all_counts = Parallel(n_jobs=n_jobs)(
        delayed(_parallel_count)(X_new, starts[i], starts[i + 1])
        for i in range(n_jobs))
    count = np.vstack(list(chain.from_iterable(all_counts)))

    # Antisymmetrize so score[i, j] > 0 means "i precedes j" dominates.
    return scale(count - np.transpose(count))
# Copyright 2011 Kyriakos Zarifis
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX.  If not, see <http://www.gnu.org/licenses/>.

"""
This is an L2 learning switch derived originally from NOX's pyswitch
example.  It is now a demonstration of the ofcommand library for
constructing OpenFlow messages.
"""

from time import time

# TODO: mac_to_str and mact_to_int aren't currently defined in packet_utils...
#from pox.lib.packet.packet_utils import mac_to_str, mac_to_int
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.tcp import tcp
from pox.lib.packet.udp import udp
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.icmp import icmp
from pox.lib.packet.ethernet import ethernet

from pox.core import core
from pox.lib.revent import *
from pox.lib.addresses import EthAddr
log = core.getLogger()

import pox.openflow.ofcommand as ofcommand


class dumb_l2_switch (EventMixin):
  # NOTE: Python 2 module (dict.has_key).  self.st maps each switch
  # connection to {mac-string: (inport, timestamp, packet)}.

  def __init__ (self):
    log.info("Starting")
    self.st = {}
    self.listenTo(core)

  def _handle_GoingUpEvent (self, event):
    # Core is up: start receiving OpenFlow events (PacketIn).
    self.listenTo(core.openflow)

  def _handle_PacketIn (self, event):
    """Packet entry method.
    Drop LLDP packets (or we get confused) and attempt learning and
    forwarding
    """
    con = event.connection
    dpid = event.connection.dpid
    inport = event.port
    packet = event.parse()
    buffer_id = event.ofp.buffer_id

    if not packet.parsed:
      log.warning("%i %i ignoring unparsed packet", dpid, inport)
      return

    if not con in self.st:
      log.info('registering new switch ' + str(dpid))
      self.st[con] = {}

    # don't forward lldp packets
    if packet.type == ethernet.LLDP_TYPE:
      return

    # learn MAC on incoming port
    self.do_l2_learning(con, inport, packet)

    # forward packet
    self.forward_l2_packet(con, inport, packet, packet.arr, buffer_id)

  def do_l2_learning(self, con, inport, packet):
    """Given a packet, learn the source and peg to a switch/inport
    """
    # learn MAC on incoming port
    srcaddr = EthAddr(packet.src)
    #if ord(srcaddr[0]) & 1:
    #  return
    if self.st[con].has_key(srcaddr.toStr()): # change to raw?
      # we had already heard from this switch
      dst = self.st[con][srcaddr.toStr()] # raw?
      if dst[0] != inport:
        # but from a different port
        log.info('MAC has moved from '+str(dst)+'to'+str(inport))
      else:
        return
    else:
      log.info('learned MAC '+srcaddr.toStr()+' on Switch %s, Port %d'%
               (con.dpid,inport))

    # learn or update timestamp of entry
    self.st[con][srcaddr.toStr()] = (inport, time(), packet) # raw?

    # Replace any old entry for (switch,mac).
    #mac = mac_to_int(packet.src)

  def forward_l2_packet(self, con, inport, packet, buf, bufid):
    """If we've learned the destination MAC set up a flow and
    send only out of its inport.  Else, flood.
    """
    dstaddr = EthAddr(packet.dst)
    #if not ord(dstaddr[0]) & 1 and  # what did this do?
    if self.st[con].has_key(dstaddr.toStr()): # raw?
      prt = self.st[con][dstaddr.toStr()] # raw?
      if prt[0] == inport:
        # Learned port equals the arrival port: something is off; flood
        # rather than install a hairpin flow.
        log.warning('**warning** learned port = inport')
        ofcommand.floodPacket(con, inport, packet, buf, bufid)
      else:
        # We know the outport, set up a flow
        log.info('installing flow for ' + str(packet))
        match = ofcommand.extractMatch(packet)
        actions = [ofcommand.Output(prt[0])]
        ofcommand.addFlowEntry(con, inport, match, actions, bufid)

        # Separate bufid, make addFlowEntry() only ADD the entry
        # send/wait for Barrier
        # sendBufferedPacket(bufid)
    else:
      # haven't learned destination MAC. Flood
      ofcommand.floodPacket(con, inport, packet, buf, bufid)


'''
add arp cache timeout?

# Timeout for cached MAC entries
CACHE_TIMEOUT = 5

def timer_callback():
  """Responsible for timing out cache entries. Called every 1 second.
  """
  global st
  curtime = time()
  for con in st.keys():
    for entry in st[con].keys():
      if (curtime - st[con][entry][1]) > CACHE_TIMEOUT:
        con.msg('timing out entry '+mac_to_str(entry)+" -> "+
                str(st[con][entry][0])+' on switch ' + str(con))
        st[con].pop(entry)
'''
# NOTE(review): this module looks machine-generated by the plotly codegen
# (the data_docs text below is emitted verbatim into user-facing docs) —
# confirm before hand-editing; regeneration would overwrite changes.
import _plotly_utils.basevalidators


class DeltaValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Validator for the `indicator.delta` compound property: accepts a
    # Delta instance or a dict with compatible keys.
    def __init__(self, plotly_name="delta", parent_name="indicator", **kwargs):
        super(DeltaValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Delta"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            decreasing
                :class:`plotly.graph_objects.indicator.delta.De
                creasing` instance or dict with compatible
                properties
            font
                Set the font used to display the delta
            increasing
                :class:`plotly.graph_objects.indicator.delta.In
                creasing` instance or dict with compatible
                properties
            position
                Sets the position of delta with respect to the
                number.
            reference
                Sets the reference value to compute the delta.
                By default, it is set to the current value.
            relative
                Show relative change
            valueformat
                Sets the value formatting rule using d3
                formatting mini-language which is similar to
                those of Python. See
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
""",
            ),
            **kwargs
        )
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-11-06 11:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Widen SurveyRequest.survey_name to a CharField of max_length=80."""

    # Must run after the migration that added ClinicalData.active.
    dependencies = [
        ('rdrf', '0081_clinicaldata_active'),
    ]

    operations = [
        migrations.AlterField(
            model_name='surveyrequest',
            name='survey_name',
            field=models.CharField(max_length=80),
        ),
    ]
import os

import chardet
from humanfriendly import format_size
import pygments
import pygments.lexers
import pygments.lexers.special
import pygments.formatters
from pygments.util import ClassNotFound
from mako.lookup import TemplateLookup

from mfr.core import extension
from mfr.extensions.codepygments import settings
from mfr.extensions.codepygments import exceptions


class CodePygmentsRenderer(extension.BaseRenderer):
    """Render a source-code file as syntax-highlighted HTML using Pygments."""

    # Fallback when no lexer can be matched to the file: plain text, no markup.
    DEFAULT_LEXER = pygments.lexers.special.TextLexer

    TEMPLATE = TemplateLookup(
        directories=[
            os.path.join(os.path.dirname(__file__), 'templates')
        ]).get_template('viewer.mako')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.metrics.add('pygments_version', pygments.__version__)

    def render(self):
        """Return the rendered viewer page for ``self.file_path``.

        :raises exceptions.FileTooLargeError: if the file exceeds
            ``settings.MAX_SIZE`` (large text files are download-only).
        """
        file_size = os.path.getsize(self.file_path)
        if file_size > settings.MAX_SIZE:
            raise exceptions.FileTooLargeError(
                'Text files larger than {} are not rendered. Please download '
                'the file to view.'.format(format_size(settings.MAX_SIZE, binary=True)),
                file_size=file_size,
                max_size=settings.MAX_SIZE,
                extension=self.metadata.ext,
            )

        with open(self.file_path, 'rb') as fp:
            body = self._render_html(fp, self.metadata.ext)
        return self.TEMPLATE.render(base=self.assets_url, body=body)

    @property
    def file_required(self):
        # The renderer reads file contents, not just metadata.
        return True

    @property
    def cache_result(self):
        # Output depends only on file bytes, so it is safe to cache.
        return True

    def _render_html(self, fp, ext, *args, **kwargs):
        """Generate an html representation of the file

        Tries utf-8 first, then falls back to chardet detection.

        :param fp: File pointer (binary mode)
        :param ext: File name extension
        :return: Content html
        :raises exceptions.FileDecodingError: if no usable encoding is found.
        """
        formatter = pygments.formatters.HtmlFormatter()
        data = fp.read()

        content, encoding = None, 'utf-8'
        try:
            content = data.decode(encoding)
        except UnicodeDecodeError:
            detected_encoding = chardet.detect(data)
            encoding = detected_encoding.get('encoding', None)
            if encoding is None:
                raise exceptions.FileDecodingError(
                    message='Unable to detect encoding of source file.',
                    extension=ext,
                    category='undetectable_encoding',
                    code=400,
                )
            try:
                content = data.decode(encoding)
            except UnicodeDecodeError as err:
                raise exceptions.FileDecodingError(
                    message='Unable to decode file as {}.'.format(encoding),
                    extension=ext,
                    category='undecodable',
                    original_exception=err,
                    code=400,
                )

        if content is None:
            # Defensive: decode() never returns None, but guard the invariant.
            raise exceptions.FileDecodingError(
                message='File decoded to undefined using encoding "{}"'.format(encoding),
                extension=ext,
                category='decoded_to_undefined',
                code=500,
            )

        self.metrics.merge({'encoding': encoding, 'default_lexer': False})
        try:
            # Explicit lexer-name overrides for obscure extensions; fixed the
            # `ext in dict.keys()` double lookup with a single .get().
            lexer_name = settings.lexer_lib.get(ext)
            if lexer_name is not None:
                lexer = pygments.lexers.get_lexer_by_name(lexer_name)
            else:
                lexer = pygments.lexers.guess_lexer_for_filename(ext, content)
        except ClassNotFound:
            self.metrics.add('default_lexer', True)
            lexer = self.DEFAULT_LEXER()

        self.metrics.add('lexer', lexer.name)
        return pygments.highlight(content, lexer, formatter)
import sys
from math import ceil

# Area covered by one unit (original hard-coded constant, now the default).
DEFAULT_AREA = 1.76


def units_needed(width, height, area=DEFAULT_AREA):
    """Return the number of whole units needed to cover width x height.

    :param width: surface width.
    :param height: surface height.
    :param area: coverage of a single unit (defaults to 1.76).
    :return: ``ceil(width * height / area)`` as an int.
    """
    return ceil(width * height / area)


def main():
    """Read width and height from stdin (one per line) and print the count."""
    w = float(input())
    h = float(input())
    print(units_needed(w, h))


if __name__ == "__main__":
    sys.exit(int(main() or 0))
#!/usr/bin/env python
"""Gradually expand a logic program, alternately introducing objects and
extending the horizon, re-solving after every increment."""

import json
import sys
import argparse

from clingo import Control, Number


class App:
    """Incremental grounding/solving driver.

    Each call to :meth:`ground` grounds one more increment: a new object
    (``kind=True``) or one more horizon step (``kind=False``). With
    ``--scratch`` a fresh Control is built and everything is re-grounded.
    """

    def __init__(self, args):
        self.control = Control()
        self.args = args
        self.horizon = 0   # number of horizon steps grounded so far
        self.objects = 0   # number of objects introduced so far
        self.end = None    # target number of objects (set in run())

    def show(self, model):
        """on_model callback: print the model unless --quiet was given."""
        if not self.args.quiet:
            print("Model: {}".format(model))

    def ground(self, kind):
        """Ground the next increment (object if kind is truthy, else horizon)."""
        count = self.objects + self.horizon + 1
        parts = [("expand", [Number(count)])]
        if self.args.scratch and count > 1:
            # Single-shot mode: rebuild the control and re-ground every
            # previously introduced part.
            self.control = Control()
            for source in self.args.file:
                self.control.load(source)
            # BUG FIX: Number() takes a single integer; the two part
            # arguments must be two separate Number symbols, matching the
            # [Number(...), Number(count)] form used below.
            for i in range(0, self.objects):
                parts.append(("object", [Number(i + 1), Number(count)]))
            for i in range(0, self.horizon):
                parts.append(("horizon", [Number(i + 1), Number(count)]))
        if self.args.scratch or count == 1:
            # Fresh control: apply solver options and ground the base part.
            for option in self.args.option:
                setattr(self.control.configuration, option[0], option[1])
            parts.append(("base", []))
        if kind:
            self.objects += 1
            parts.append(("object", [Number(self.objects), Number(count)]))
        else:
            self.horizon += 1
            parts.append(("horizon", [Number(self.horizon), Number(count)]))
        if self.args.verbose:
            print("")
            print("Objects: {}".format(Number(self.objects)))
            print("Horizon: {}".format(Number(self.horizon)))
        self.control.ground(parts)
        if self.args.verbose:
            print("Solving: {}".format(count))

    def run(self):
        """Load sources, then add objects (and horizon steps as needed) until
        the target object count is reached, solving after each increment."""
        for source in self.args.file:
            self.control.load(source)
        if self.args.maxobj is None:
            # Default target comes from the program constant `n`.
            self.end = self.control.get_const("n").number
        else:
            self.end = self.args.maxobj
        while self.objects < self.end:
            self.ground(True)
            while True:
                ret = self.control.solve(on_model=self.show)
                if self.args.stats:
                    # BUG FIX: json.dumps options are keyword arguments;
                    # `*args` unpacked only the dict KEYS positionally.
                    dump_kwargs = {"sort_keys": True, "indent": 0,
                                   "separators": (',', ': ')}
                    stats = {}
                    for x in ["step", "enumerated", "time_cpu", "time_solve",
                              "time_sat", "time_unsat", "time_total"]:
                        stats[x] = self.control.statistics[x]
                    for x in ["lp", "ctx", "solvers"]:
                        for y in self.control.statistics[x]:
                            stats[y] = self.control.statistics[x][y]
                    print(json.dumps(stats, **dump_kwargs))
                if ret.satisfiable:
                    break
                # Unsatisfiable at this horizon: extend it and retry.
                self.ground(False)


parser = argparse.ArgumentParser(
    description="Gradually expand logic programs.",
    epilog="""Example: main.py -x -q -s -v -m 42 -o solve.models 0 encoding.lp instance.lp""")
parser.add_argument("-x", "--scratch", action='store_true',
                    help="start each step from scratch (single-shot solving)")
parser.add_argument("-q", "--quiet", action='store_true',
                    help="do not print models")
parser.add_argument("-s", "--stats", action='store_true',
                    help="print solver statistics")
parser.add_argument("-v", "--verbose", action='store_true',
                    help="print progress information")
parser.add_argument("-m", "--maxobj", type=int, metavar="NUM", default=None,
                    help="maximum number of introduced objects")
parser.add_argument("-o", "--option", nargs=2, metavar=("OPT", "VAL"),
                    action="append", default=[],
                    help="set solver options")
parser.add_argument("file", nargs="*", default=[],
                    help="gringo source files")

if __name__ == "__main__":
    # Guard added so importing this module does not parse argv or solve.
    args = parser.parse_args()
    if args.maxobj is not None and args.maxobj < 1:
        parser.error("maximum number of objects must be positive")
    App(args).run()
# -*- coding: utf-8 -*- # # This file is part of INSPIRE. # Copyright (C) 2014-2017 CERN. # # INSPIRE is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # INSPIRE is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCH
ANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Lic
ense for more details. # # You should have received a copy of the GNU General Public License # along with INSPIRE. If not, see <http://www.gnu.org/licenses/>. # # In applying this license, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. """DoJSON model and rules for CDS to INSPIRE HEP MARC.""" from __future__ import absolute_import, division, print_function from . import rules # noqa: F401 from .model import cds2hep_marc # noqa: F401
""" Django settings for detest project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '&^ck$n8qsz2e#s+z6%b%(f$r4)2!w4fvz7m9ks@blx=(hq*efu' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'detest_ui', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'detest.urls' WSGI_APPLICATION = 'detest.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'detest', 'USER': 'detest', 'PASSWORD': 'detest', 'HOST': '', } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' LOGIN_REDIRECT_URL = '/' 
LOGIN_URL = '/login/' LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'verbose', }, }, 'formatters': { 'verbose': { 'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s", 'datefmt': "%d/%
b/%Y %H:%M:%S" }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'loggers': { 'django': { 'handlers': ['console'], 'level': 'DEBUG', }, 'detest': { 'handlers': ['console'], 'level': 'DEBUG',
}, 'detest_ui': { 'handlers': ['console'], 'level': 'DEBUG', }, }, } LOGGING = {}
"""Staff-account authentication helpers: Flask-Login/Principal wiring,
credential checks via bcrypt, and SendGrid-backed account recovery."""
from application import CONFIG, app
from .models import *
from flask import current_app, session
from flask.ext.login import login_user, logout_user, current_user
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
import bcrypt
import re
import sendgrid
import time
from itsdangerous import URLSafeTimedSerializer

# NOTE(review): these are Exception *instances* (raised directly below),
# not subclasses — callers cannot catch them by type, only by identity.
AuthenticationError = Exception("AuthenticationError", "Invalid credentials.")
UserExistsError = Exception("UserExistsError", "Email already exists in database.")
UserDoesNotExistError = Exception("UserDoesNotExistError", "Account with given email does not exist.")

# NOTE(review): LoginManager/StaffUserEntry/User are presumably provided by
# the star import from .models — verify.
login_manager = LoginManager()
login_manager.init_app(app)
principals = Principal(app)

sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
# Serializer for signed, time-limited email tokens (account recovery).
ts = URLSafeTimedSerializer(CONFIG["SECRET_KEY"])

@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map a session-stored id to a User, or None.
    user_entries = StaffUserEntry.objects(id = user_id)
    if user_entries.count() != 1:
        return None
    currUser = user_entries[0]
    user = User(currUser.id, currUser.email, currUser.firstname, currUser.lastname, currUser.roles)
    return user

@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
    # Flask-Principal hook: grant a RoleNeed for each of the user's roles.
    identity.user = current_user
    if hasattr(current_user, 'roles'):
        for role in current_user.roles:
            identity.provides.add(RoleNeed(role))

def get_user(email):
    # Return the StaffUserEntry for email, or None if absent/ambiguous.
    entries = StaffUserEntry.objects(email = email)
    if entries.count() == 1:
        return entries[0]
    return None

def verify_user(email, password):
    # Check credentials; return the login User on success, else None.
    currUser = get_user(email)
    if currUser is None:
        return None
    hashed = currUser.hashed
    # bcrypt.hashpw(password, stored_hash) reproduces stored_hash on a match.
    if bcrypt.hashpw(password.encode("utf-8"), hashed.encode("utf-8")) == hashed.encode("utf-8"):
        return load_user(currUser.id)
    else:
        return None

def login(email):
    # Start a session for email (no password check — caller must verify first)
    # and notify Flask-Principal of the new identity.
    user = load_user(get_user(email).id)
    if user != None:
        login_user(user)
        identity_changed.send(current_app._get_current_object(), identity = Identity(user.uid))
    else:
        raise UserDoesNotExistError

def logout():
    # End the session and reset Flask-Principal to an anonymous identity.
    logout_user()
    for key in ('identity.name', 'identity.auth_type'):
        session.pop(key, None)
    identity_changed.send(current_app._get_current_object(), identity = AnonymousIdentity())

def tokenize_email(email):
    # Signed token embedding the email (for recovery links).
    return ts.dumps(email, salt = CONFIG["EMAIL_TOKENIZER_SALT"])

def detokenize_email(token):
    # Inverse of tokenize_email; tokens expire after 24h (86400 s).
    return ts.loads(token, salt = CONFIG["EMAIL_TOKENIZER_SALT"], max_age = 86400)

def send_recovery_email(email):
    # Send the SendGrid template-based recovery email containing the token.
    user = get_user(email)
    if user is None:
        raise UserDoesNotExistError

    token = tokenize_email(email)

    message = sendgrid.Mail()
    message.add_to(email)
    message.set_from("noreply@hackbca.com")
    message.set_subject("hackBCA III - Account Recovery")
    message.set_html("<p></p>")
    message.add_filter("templates", "enable", "1")
    message.add_filter("templates", "template_id", CONFIG["SENDGRID_ACCOUNT_RECOVERY_TEMPLATE"])
    message.add_substitution("prefix", "staff")
    message.add_substitution("token", token)
    # NOTE(review): send status is ignored; failures are silent.
    status, msg = sg.send(message)

def change_name(email, firstname, lastname):
    account = get_user(email)
    if account is None:
        raise UserDoesNotExistError
    account.firstname = firstname
    account.lastname = lastname
    account.save()
    login(email) #To update navbar

def change_password(email, password):
    account = get_user(email)
    if account is None:
        raise UserDoesNotExistError
    # Slice strips the b'...' repr wrapper from the bytes string (py3 repr).
    hashed = str(bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()))[2:-1]
    account.hashed = hashed
    account.save()

def get_user_attr(email, attr):
    # Generic attribute read on the stored user entry.
    user = get_user(email)
    if user is None:
        raise UserDoesNotExistError
    return getattr(user, attr)

def set_user_attr(email, attr, value):
    # Generic attribute write on the stored user entry (persisted).
    user = get_user(email)
    if user is None:
        raise UserDoesNotExistError
    setattr(user, attr, value)
    user.save()
from django.apps import AppConfig


class UserregistrationConfig(AppConfig):
    """Django application configuration for the ``userregistration`` app."""

    # Dotted module path Django uses to register this application.
    name = 'userregistration'
"""Flask blueprint views exposing a REST-ish API over Murmur (Mumble server)
users, channels, ACLs and groups. Python 2 (uses dict.iteritems)."""
import utils
from flask import render_template, redirect, request, session, url_for, json, jsonify

from . import murmurbp
from .User import User

# User Views
@murmurbp.route("/users", methods = ['GET'])
def get_all_users():
    # GET /users -> {"users": [{"UserId": ..., "UserName": ...}, ...]}
    u = User()
    ul = utils.obj_to_dict(u.get_all())
    data = [{'UserId': k, 'UserName': v} for k, v in ul.iteritems()]
    resp = jsonify(users=data)
    return resp, 200

@murmurbp.route("/users/<int:id>", methods = ['GET'])
def get_user(id):
    # GET /users/<id> -> single user as JSON.
    u = User()
    data = utils.obj_to_dict(u.get(id))
    resp = jsonify(data)
    return resp, 200

@murmurbp.route("/users", methods = ['POST'])
def add_user():
    # NOTE(review): creates a hard-coded "TestUser7" — request body is
    # ignored; looks like placeholder/test code, confirm before use.
    u = User()
    user = json.loads('{"UserName": "TestUser7"}')
    new_user = u.add(user)
    data = utils.obj_to_dict(new_user)
    resp = jsonify(data)
    return resp, 200

@murmurbp.route("/users/<int:id>", methods = ['DELETE'])
def delete_user(id):
    u = User()
    u.delete(id)
    return jsonify(), 201

from .Channel import Channel

# Channel Views
@murmurbp.route("/channels", methods = ['GET'])
def get_all_channels():
    # GET /channels -> {"channels": [...]}
    c = Channel()
    cl = utils.obj_to_dict(c.get_all())
    data = [ v for k, v in cl.iteritems()]
    resp = jsonify(channels=data)
    return resp, 200

@murmurbp.route("/channels", methods = ['POST'])
def add_channel():
    # POST form fields: channelName, parent (parent channel id).
    c = Channel()
    name = request.form['channelName']
    parent = request.form['parent']
    new_channel = c.add_channel(name, parent)
    data = utils.obj_to_dict(new_channel)
    resp = jsonify(data)
    return resp, 200

@murmurbp.route("/channels/<int:id>", methods = ['DELETE'])
def delete_channel(id):
    c = Channel()
    c.delete(id)
    return jsonify(), 201

from .ACLGroup import ACL, Group

# ACL and Group Views
@murmurbp.route("/acls/<int:channel_id>", methods = ['GET'])
def get_all_acls(channel_id):
    a = ACL()
    data = utils.obj_to_dict(a.get_all(channel_id))
    resp = jsonify(acls=data)
    return resp, 200

@murmurbp.route("/groups/<int:channel_id>", methods = ['GET'])
def get_all_groups(channel_id):
    g = Group()
    data = utils.obj_to_dict(g.get_all(channel_id))
    resp = jsonify(groups=data)
    return resp, 200

@murmurbp.route("/acls/<int:channel_id>", methods = ['POST'])
def add_acl_to_channel(channel_id):
    # TODO: load json object
    # NOTE(review): ACL payload is hard-coded; request body is ignored.
    a = ACL()
    acl = json.loads('{"applyHere": true,"applySubs": true,"userid": 1,"group": "admin","allow": 1024,"deny": 0}')
    data = a.add(channel_id, acl)
    resp = jsonify(data)
    return resp, 200
#
# This file is part of TSmells
#
# TSmells is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# TSmells is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with TSmells; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Copyright 2007-2008 Manuel Breugelmans <manuel.breugelmans@student.ua.ac.be>
#

from com.hp.hpl.guess.ui import Dockable

# Jython class: a Swing panel that can dock into the GUESS main window.
# NOTE(review): JPanel, Dimension and Rectangle are presumably imported from
# javax.swing / java.awt earlier in the file (outside this chunk) — verify.
class TDockable(JPanel, Dockable):

    #
    # Implementation of Dockable interface
    #

    # Mouse hover callbacks required by Dockable; this panel ignores them.
    def mouseEnterEdge(self, edge):
        pass
    def mouseLeaveNode(self, node):
        pass
    def mouseLeaveEdge(self, edge):
        pass

    # Preferred size of the docked panel (width x height in pixels).
    def getPreferredSize(self):
        return Dimension(200,600)

    # Bounds used when the panel is floated as its own frame.
    def getDefaultFrameBounds(self):
        return Rectangle(50, 50, 300, 600)

    def getDirectionPreference(self):
        ''' prefer vertical orientation '''
        return 2 # vertical, see com.hp.hpl.guess.ui.MainUIWindow.java

    # Called when the dock opens/closes; mirror that state in visibility.
    def opening(self, state):
        self.visible = state

    def attaching(self, state):
        pass

    def getTitle(self):
        return("")

    # The hosting window is injected via setWindow and echoed by getWindow.
    def getWindow(self):
        return self.myParent

    def setWindow(self,gjf):
        self.myParent = gjf
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


# Spack package recipe for the `wrapt` Python library.
class PyWrapt(PythonPackage):
    """Module for decorators, wrappers and monkey patching."""

    homepage = "https://github.com/GrahamDumpleton/wrapt"
    url      = "https://pypi.io/packages/source/w/wrapt/wrapt-1.10.10.tar.gz"

    # md5 checksum of the 1.10.10 source tarball.
    version('1.10.10', '97365e906afa8b431f266866ec4e2e18')
#encoding: utf-8
from __future__ import unicode_literals

# Mapping from a community/site display title (Hebrew) to its URL slug.
# Keys are display names; values must stay stable — they appear in URLs.
TITLE_TO_SLUG = {
    'איגוד הביטקוין': 'bitcoin-org-il',
    'אליאב': 'eliav',
    '* בדיקות פרודקשיין *': 'production-test',
    'בי״ס עמית': 'amit',
    'הבר קיימא': 'barkayma',
    'הסדנא לידע ציבורי': 'hasadna',
    'הפורום לממשל פתוח': 'open-government',
    'התנועה לאיכות השלטון': 'mqg',
    'מעיין ברוך': 'maayan-baruch',
    'מרצ': 'meretz',
    'נטף': 'nataf',
    'נען': 'naan',
    'קהילה פתוחה': 'open-community',
}
#!/usr/bin/env python2.7
import numpy as np
import matplotlib.pyplot as plt

# Measured data: frequency values and the decibel level recorded at each
# (taken at 0.1 V drive, per the plot title). Units of Freq not stated here
# — presumably Hz; verify against the lab notes.
Freq=np.array([30,40,45,50,53,55,60,65,70,80,90,95,98,100,110,120])
Db=np.array([70.5,78.6,83.2,88.4,87.5,86.7,85.2,83.9,85.1,88,95.7,100.4,100.4,99.2,94.7,94.9])

plt.xlabel('Frecuencia')
plt.ylabel('Decibel')
plt.title('DecibelvsFreq a 0.1volts')

# Optional per-point labels (disabled):
#for i in range(len(Freq)):
#    plt.text(Freq[i],Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))

# Fixed axes: x 0-330, y 50-130 dB.
plt.axis([0, 330, 50, 130])
# Plot the points twice: blue markers plus a black connecting line.
plt.plot(Freq,Db,'bo',Freq,Db,'k')
plt.grid(True)
plt.show()
"""Load Project CARS telemetry pickles for one lap and plot the track
position coloured by each control channel (controller vs. raw)."""
#import os
import pickle
import glob
#import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
#from data_control_no_images.read import Read

# Recorded lap data: one .pkl per sample, each holding two pickled objects
# (game state, then controller state).
listing = glob.glob('F:/Project_Cars_Data/1lap-fullspeed/Watkins Glen International - Short Circuit' + '/*.pkl')

x = []
y = []
throttle = []
raw_throttle = []
brake = []
raw_brake = []
steering = []
raw_steering = []
xy = []

for filename in tqdm(listing):
    with open(filename, 'rb') as file_data:
        project_cars_state = pickle.load(file_data)
        controller_state = pickle.load(file_data)

        # remove none flying lap data
        if project_cars_state.mParticipantInfo[0].mCurrentLapDistance == 0.0:
            continue

        position = project_cars_state.mParticipantInfo[0].mWorldPosition
        x.append(round(position[0]))
        y.append(round(position[2]))

        # Normalise controller axes to [0, 1] / [-1, 1].
        throttle.append(controller_state['right_trigger']/255)  # 0 - 255
        brake.append(controller_state['left_trigger']/255)  # 0 - 255
        steering.append(controller_state['thumb_lx']/32767)  # -32768 - 32767
        #steering.append(project_cars_state.mSteering)

        # Unfiltered values as reported by the game itself.
        raw_steering.append(project_cars_state.mUnfilteredSteering)
        raw_brake.append(project_cars_state.mUnfilteredBrake)
        raw_throttle.append(project_cars_state.mUnfilteredThrottle)

        xy.append([position[0], position[2]])


def _plot_track(colour_values, title):
    """Scatter the recorded (x, y) positions coloured by one channel.

    Replaces six previously copy-pasted, identical plotting stanzas.
    """
    plt.figure(figsize=(10, 10))
    plt.scatter(x, y, c=colour_values)
    plt.colorbar()
    plt.axis('equal')
    plt.title(title)
    plt.show()
    plt.close()


_plot_track(steering, 'position and controller steering')
_plot_track(raw_steering, 'position and raw steering')
_plot_track(throttle, 'position and controller throttle')
_plot_track(raw_throttle, 'position and raw throttle')
_plot_track(brake, 'position and controller brake')
_plot_track(raw_brake, 'position and raw brake')

# get_data = Read(True)
# mean, std = get_data.load_mean_and_std('F:/Project_Cars_Data/Full_Speed_Training_none_image')
# print(mean)
# print(std)
# xy = (xy - mean) / std
# print(np.array(xy[:,0]).shape)
# plt.scatter(xy[:,0], xy[:,1])
# plt.axis('equal')
# plt.show()
is #* scikit-beam - data analysis tools for X-ray science # - https://github.com/scikit-beam/scikit-beam #* xray-vision - plotting helper functions for X-ray science # - https://github.com/Nikea/xray-vision import xray_vision import matplotlib.cm as mcm import copy import xray_vision.mpl_plotting as mpl_plot from xray_vision.mpl_plotting import speckle from xray_vision.mask.manual_mask import ManualMask import skbeam.core.roi as roi import skbeam.core.correlation as corr import skbeam.core.utils as utils import numpy as np from datetime import datetime import h5py import pims from pandas import DataFrame import os, sys, time import getpass import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.colors import LogNorm import pickle from lmfit import Model from lmfit import minimize, Parameters, Parameter, report_fit from matplotlib.figure import Figure from matplotlib import gridspec from mpl_toolkits.axes_grid1 import make_axes_locatable from tqdm import tqdm import collections import itertools import random from PIL import Image import warnings from eiger_io.fs_handler2 import EigerHandler2 from eiger_io.fs_handler import LazyEigerHandler fs = db.event_sources[0].fs fs.deregister_handler('AD_EIGER') fs.register_handler('AD_EIGER', LazyEigerHandler) fs.deregister_handler('AD_EIGER2') fs.register_handler('AD_EIGER2', EigerHandler2) mcolors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k','darkgoldenrod','oldlace', 'brown','dodgerblue' ]) markers = itertools.cycle(list(plt.Line2D.filled_markers)) lstyles = itertools.cycle(['-', '--', '-.','.',':']) colors = itertools.cycle(["blue", "darkolivegreen", "brown", "m", "orange", "hotpink", "darkcyan", "red", "gray", "green", "black", "cyan", "purple" , "navy"]) colors_copy = itertools.cycle(["blue", "darkolivegreen", "brown", "m", "orange", "hotpink", "darkcyan", "red", "gray", "green", "black", "cyan", "purple" , "navy"]) markers = itertools.cycle( ["o", "2", "p", "1", "s", "*", "4", "+", "8", "v","3", 
"D", "H", "^",]) markers_copy = itertools.cycle( ["o", "2", "p", "1", "s", "*", "4", "+", "8", "v","3", "D", "H", "^",]) RUN_GUI = False #if True for gui setup; else for notebook; the main code difference is the Figure() or plt.figure(figsize=(8, 6)) markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H', 'h', '*', 'd', '$I$','$L$', '$O$','$V$','$E$', '$c$', '$h$','$x$','$b$','$e$','$a$','$m$','$l$','$i$','$n$', '$e$', '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',] markers = np.array( markers *100 ) markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H', 'h', '*', 'd', '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',] markers = np.array( markers *100 ) colors = np.array( ['darkorange', 'mediumturquoise', 'seashell', 'mediumaquamarine', 'darkblue', 'yellowgreen', 'mintcream', 'royalblue', 'springgreen', 'slategray', 'yellow', 'slateblue', 'darkslateblue', 'papayawhip', 'bisque', 'firebrick', 'burlywood', 'dodgerblue', 'dimgrey', 'chartreuse', 'deepskyblue', 'honeydew', 'orchid', 'teal', 'steelblue', 'limegreen', 'antiquewhite', 'linen', 'saddlebrown', 'grey', 'khaki', 'hotpink', 'darkslategray', 'forestgreen', 'lightsalmon', 'turquoise', 'navajowhite', 'darkgrey', 'darkkhaki', 'slategrey', 'indigo', 'darkolivegreen', 'aquamarine', 'moccasin', 'beige', 'ivory', 'olivedrab', 'whitesmoke', 'paleturquoise', 'blueviolet', 'tomato', 'aqua', 'palegoldenrod', 'cornsilk', 'navy', 'mediumvioletred', 'palevioletred', 'aliceblue', 'azure', 'orangered', 'lightgrey', 'lightpink', 'orange', 'wheat', 'darkorchid', 'mediumslateblue', 'lightslategray', 'green', 'lawngreen', 'mediumseagreen', 'darksalmon', 'pink', 'oldlace', 'sienna', 'dimgray', 'fuchsia', 'lemonchiffon', 'maroon', 'salmon', 'gainsboro', 'indianred', 'crimson', 'mistyrose', 'lightblue', 'darkgreen', 'lightgreen', 'deeppink', 'palegreen', 'thistle', 'lightcoral', 'lightgray', 'lightskyblue', 'mediumspringgreen', 'mediumblue', 'peru', 'lightgoldenrodyellow', 'darkseagreen', 'mediumorchid', 'coral', 
'lightyellow', 'chocolate', 'lavenderblush', 'darkred', 'lightseagreen', 'darkviolet', 'lightcyan', 'cadetblue', 'blanchedalmond', 'midnightblue', 'lightsteelblue', 'darkcyan', 'floralwhite', 'darkgray', 'lavender', 'sandybrown', 'cornflowerblue', 'gray', 'mediumpurple', 'lightslategr
ey', 'seagreen', 'silver', 'darkmagenta', 'darkslategrey', 'darkgoldenrod', 'rosybrown', 'goldenrod', 'darkturquoise', 'plum', 'purple', 'olive', 'gold','powderblue', 'peachpuff','violet', 'lime', 'greenyellow', 'tan', 'skyblue', 'magenta', 'black', 'brown', 'green', 'cyan', 'red','blue'] *100 ) colors = colors[::-1] colors_ = itertools.cycle( colors ) #colors_ = itertools.cycle(sorted_colors_ ) markers_ = itertools.cycle(
markers ) import matplotlib as mpl # Custom colormaps ################################################################################ # ROYGBVR but with Cyan-Blue instead of Blue color_list_cyclic_spectrum = [ [ 1.0, 0.0, 0.0 ], [ 1.0, 165.0/255.0, 0.0 ], [ 1.0, 1.0, 0.0 ], [ 0.0, 1.0, 0.0 ], [ 0.0, 0.2, 1.0 ], [ 148.0/255.0, 0.0, 211.0/255.0 ], [ 1.0, 0.0, 0.0 ] ] cmap_cyclic_spectrum = mpl.colors.LinearSegmentedColormap.from_list('cmap_cyclic_spectrum', color_list_cyclic_spectrum) # classic jet, slightly tweaked # (bears some similarity to mpl.cm.nipy_spectral) color_list_jet_extended = [ [0, 0, 0], [0.18, 0, 0.18], [0, 0, 0.5], [0, 0, 1], [ 0. , 0.38888889, 1. ], [ 0. , 0.83333333, 1. ], [ 0.3046595 , 1. , 0.66308244], [ 0.66308244, 1. , 0.3046595 ], [ 1. , 0.90123457, 0. ], [ 1. , 0.48971193, 0. ], [ 1. , 0.0781893 , 0. ], [1, 0, 0], [ 0.5 , 0. , 0. ], ] cmap_jet_extended = mpl.colors.LinearSegmentedColormap.from_list('cmap_jet_extended', color_list_jet_extended) # Tweaked version of "view.gtk" default color scale color_list_vge = [ [ 0.0/255.0, 0.0/255.0, 0.0/255.0], [ 0.0/255.0, 0.0/255.0, 254.0/255.0], [ 188.0/255.0, 2.0/255.0, 107.0/255.0], [ 254.0/255.0, 55.0/255.0, 0.0/255.0], [ 254.0/255.0, 254.0/255.0, 0.0/255.0], [ 254.0/255.0, 254.0/255.0, 254.0/255.0] ] cmap_vge = mpl.colors.LinearSegmentedColormap.from_list('cmap_vge', color_list_vge) # High-dynamic-range (HDR) version of VGE color_list_vge_hdr = [ [ 255.0/255.0, 255.0/255.0, 255.0/255.0], [ 0.0/255.0, 0.0/255.0, 0.0/255.0], [ 0.0/255.0, 0.0/255.0, 255.0/255.0], [ 188.0/255.0, 0.0/255.0, 107.0/255.0], [ 254.0/255.0, 55.0/255.0, 0.0/255.0], [ 254.0/255.0, 254.0/255.0, 0.0/255.0], [ 254.0/255.0, 254.0/255.0, 254.0/255.0] ] cmap_vge_hdr = mpl.colors.LinearSegmentedColormap.from_list('cmap_vge_hdr', color_list_vge_hdr) # Simliar to Dectris ALBULA default color-scale color_list_hdr_albula = [ [ 255.0/255.0, 255.0/255.0, 255.0/255.0], [ 0.0/255.0, 0.0/255.0, 0.0/255.0], [ 255.0/255.0, 0.0/255.0, 
0.0/255.0], [ 255.0/255.0, 255.0/255.0, 0.0/255.0], #[ 255.0/255.0, 255.0/255.0, 255.0/255.0], ] cmap_hdr_albula = mpl.colors.LinearSegmentedColormap.from_list('cmap_hdr_albula', color_list_hdr_albula) cmap_albula = cmap_hdr_albula # Ugly color-scale, but good for highlighting many features in HDR data color_list_cur_hdr_goldish = [ [ 255.0/255.0, 255.0/255.0, 255.0/255.0], # white [ 0.0/255.0, 0.0/255.0, 0.0/255.0], # black [ 100.0/255.0, 127.0/255.0, 255.0/255.0], # light blue [ 0.0/255.0, 0.0/255.0, 127.0/255.0], # dark blue #[ 0.0/255.0, 127.0/255.0, 0.0/255.0], # dark green [ 127
#!/usr/bin/env python
# Convert a SAM alignment file to GPD (gene prediction) format.
# Python 2 only: uses `print >>file` statements.
import sys,re,time,argparse

def main(args):
#    print >>sys.stdout, "Start analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
    convert(args.input,args.output)
#    print >>sys.stdout, "Finish analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")

def extract_exon_length_from_cigar(cigar):
    # Reference span of one exon segment = sum of M (match) + D (deletion)
    # CIGAR operation lengths; the "0" sentinel keeps the sums well-defined
    # when no such ops are present.
    cigar_m = ["0"] + re.findall(r"(\d+)M",cigar)
    cigar_d = ["0"] + re.findall(r"(\d+)D",cigar)
    cigar_m_s,cigar_d_s = [0,0]
    for m in cigar_m:
        cigar_m_s += int(m)
    for d in cigar_d:
        cigar_d_s += int(d)
    exon_length = cigar_m_s+cigar_d_s
    return exon_length

def extract_soft_clip_from_cigar(cigar):
    # Soft-clip lengths at the 5' (leading S op) and 3' (trailing S op) ends.
    cigar_5 = ["0"] + re.findall(r"^(\d+)S",cigar)
    cigar_3 = ["0"] + re.findall(r"(\d+)S$",cigar)
    cigar_5_s,cigar_3_s = [0,0]
    for s5 in cigar_5:
        cigar_5_s += int(s5)
    for s3 in cigar_3:
        cigar_3_s += int(s3)
    return cigar_5_s,cigar_3_s

def convert(sam_file,gpd_file):
    # Stream the SAM file; skip header lines (@) and unmapped reads (*),
    # keep only alignments carrying a spliced-strand XS:A tag.
    for line in sam_file:
        if line[0] != "@":
            qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq = line.strip().split("\t")[:10]
            tag = "\t".join(line.strip().split("\t")[11:])
            if rname != "*" and re.search(r"XS:A:(\S)",tag):
                s5,s3 = extract_soft_clip_from_cigar(cigar)
                sf = str(s5)+"_"+str(s3)
                strand = (re.search(r"XS:A:(\S)",tag)).group(1)
                cigar_n_l = 0
                exon_length = 0
                # SAM pos is 1-based; GPD coordinates are 0-based.
                exon_start = int(pos)-1
                exon_end = 0
                exon_start_list = []
                exon_end_list = []
                if "N" in cigar:
                    # Spliced alignment: each N (intron) splits an exon; walk
                    # the segments, advancing by exon length + intron length.
                    for exon in cigar.split("N"):
                        exon = exon + "N"
                        exon_start = exon_start + exon_length + cigar_n_l
                        exon_length = extract_exon_length_from_cigar(exon)
                        exon_end = exon_start + exon_length
                        if re.search(r"(\d+)N",exon):
                            cigar_n_l = int((re.search(r"(\d+)N",exon)).group(1))
                        exon_start_list.append(str(exon_start))
                        exon_end_list.append(str(exon_end))
                else:
                    # Single-exon alignment.
                    exon_start = exon_start
                    exon_length = extract_exon_length_from_cigar(cigar)
                    exon_end = exon_start + exon_length
                    exon_start_list.append(str(exon_start))
                    exon_end_list.append(str(exon_end))
                # Trailing "" yields the conventional trailing comma in the
                # joined exon coordinate fields.
                exon_start_list.append("")
                exon_end_list.append("")
                print >>gpd_file, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (qname,qname,rname,strand,str(int(pos)-1),str(exon_end),mapq,sf,str(len(exon_start_list)-1),",".join(exon_start_list),",".join(exon_end_list))
    sam_file.close()
    gpd_file.close()

def do_inputs():
    # Build and parse the CLI; the epilog-style text documents the 11 output
    # columns of the GPD file.
    output_gpd_format = '''
1. read id
2. read id
3. chromosome id
4. strand
5. start site of alignment
6. end site of alignment
7. MAPQ
8. Number of nucleotides that are softly-clipped by aligner; left_right
9. exon count
10. exon start set
11. exon end set'''
    parser = argparse.ArgumentParser(description="Function: convert sam to gpd.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i','--input',type=argparse.FileType('r'),required=True,help="Input: sam file")
    parser.add_argument('-o','--output',type=argparse.FileType('w'),required=True,help="Output: gpd file")
    args = parser.parse_args()
    return args

if __name__=="__main__":
    args = do_inputs()
    main(args)
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. # # """ Methods to calculate ECP values (Endogenous Competition Potential) """ ######################################################## #~ Import libraries. ######################################################## from cellery import exceptions from itertools import product from multiprocessing import Queue,Process from numpy import array,empty,float32,float64,nan,zeros from clcECP import rtrnECP,rtrnECPMskd,rtrnECPDnsty,rtrnECPDnstyMskd import os import sqlite3 ######################################################## #~ Compute ECP values for all combinations of two arrays of arrays with # values. ######################################################## def cmpECP(aMrnVlsDtA,aMrnVlsDtB,aANmsA,aANmsB,fldrOutECPPrws, \ aALenA=False,aALenB=False,aMskRef=False,nThrds=10,intrvlSz=700, \ sqlFl=False,pntrCnts=True): """ Input: aMrnVlsDtA is an array A of arrays with values for miRNAs. aMrnVlsDtB is an array B of arrays with values for miRNAs. aANmsA is the array of variable names in the same position as the numbers in vrblAPos. aANmsB is the array of variable names in the same order as vrblBPos. fldrOutECPPrws is a folder to store partial ECP results. Optionally, aALenA is an array of object lengths in the same order that aAVlsA. aALenB is an array of object lengths in the same order that aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays within array A and B). nThrds is the number of threads to run in parallel. intrvlSz is the size of the interval to run in multithread. sqlFl is a sql database to save the ECP values. If pntrCnts is True aAVlsA and aAVlsB are counts so 0 values shall be considered (excluded in shared counts). Output: aECPVlsAVlsB is an array with the ECP values for all combinations of array A and B. NOTE: The subarrays in arrays A and B must have the same dimensions (i.e. all the miRNA arrays must have the same size.). NOTE: Null values shall be numpy.nan. 
NOTE: aECPVlsAVlsB has arrays in A as rows and in B as columns. NOTE: if aALenA and aALenB ECP density is going to be calculated. NOTE: if aMskRef miRNA is going to be masked. """ def mltECPclc(qInJobs,qOutRslts,mthdECPclc,aMrnVlsDtA,aMrnVlsDtB, \ fldrOutECPPrws,aALenA,aALenB,aMskRef,pntrCnts): """ Input: qInJobs is a queue with pairs of intervals. qOutRslts is the queue to store position in arrayA, position in arrayB, and ECP value. mthdECPclc is the method to calculate the ECP value. aMrnVlsDtA is an array A of arrays with values for miRNAs. aMrnVlsDtB is an array B of arrays with values for miRNAs. fldrOutECPPrws is a folder to store partial ECP results. aALenA is an array of object lengths in the same order that aAVlsA. aALenB is an array of object lengths in the same order that aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays within array A and B). If pntrCnts is True aAVlsA and aAVlsB are counts so 0 values shall be considered (excluded in shared counts). Output: qOutRslts is the queue to store position in arrayA, position in arrayB, and ECP values. 
""" for intrvlA,intrvB in iter(qInJobs.get,'STOP'): lECPVlsAVlsB = mthdECPclc(aMrnVlsDtA,aMrnVlsDtB, \ fldrOutECPPrws,intrvlA,intrvB,pntrCnts,aMskRef,aALenA, \ aALenB) qOutRslts.put(lECPVlsAVlsB) #-------------------------- #~ Check if there is mask for miRNAs if dir(aMskRef)[0]=='T': assert len(aMskRef) == len(aALenB[0]) == len(aALenA[0]) if dir(aALenB)[0]=='T': assert dir(aALenB)[1]=='T' mthdECPclc = rtrnECPDnstyMskd else: assert not aALenA and not aALenB mthdECPclc = rtrnECPMskd else: if dir(aALenB)[0]=='T': assert dir(aALenB)[1]=='T' mthdECPclc = rtrnECPDnsty else: assert not aALenA and not aALenB mthdECPclc = rtrnECP #-------------------------- #~ Create list of intervals for multithreading lenaMrnVlsDtA = len(aMrnVlsDtA) lenaMrnVlsDtB = len(aMrnVlsDtB) intrvlsMrnVlsA = [] for strt in xrange(0,lenaMrnVlsDtA,intrvlSz): cEnd = strt+intrvlSz if cEnd<lenaMrnVlsDtA: end = cEnd else: end = lenaMrnVlsDtA intrvlsMrnVlsA.append([strt,end]) intrvlsMrnVlsB = [] for strt in xrange(0,lenaMrnVlsDtB,intrvlSz): cEnd = strt+intrvlSz if cEnd<lenaMrnVlsDtB: end = cEnd else: end = lenaMrnVlsDtB intrvlsMrnVlsB.append([strt,en
d]) #-------------------------- #~ Run in parallel. aECPVlsAVlsB = zeros((l
enaMrnVlsDtA,lenaMrnVlsDtB),dtype=float32) aECPVlsAVlsB.fill(nan)#fill all ECP with nan to start qInJobs = Queue() qOutRslts = Queue() cntVlABPrs=0 for intrvlA,intrvB in product(intrvlsMrnVlsA,intrvlsMrnVlsB): qInJobs.put((intrvlA,intrvB)) cntVlABPrs += 1 for t in xrange(nThrds): Process(target = mltECPclc,args=(qInJobs,qOutRslts,mthdECPclc, \ aMrnVlsDtA,aMrnVlsDtB,fldrOutECPPrws,aALenA,aALenB, \ aMskRef,pntrCnts)).start() lECPVlsAVlsBGlbl = []#store global results for cnt in range(cntVlABPrs): if cnt%50==0: print 'Running calculations on pair %s out of %s'%(cnt, \ cntVlABPrs) lECPVlsAVlsB = qOutRslts.get() lECPVlsAVlsBGlbl.extend(lECPVlsAVlsB) for t in xrange(nThrds): qInJobs.put('STOP') #-------------------------- #~ create array: aMrnVlsDtA in rows, aMrnVlsDtB in columns. for vlsAPos,vlsBPos,ECP in lECPVlsAVlsBGlbl: aECPVlsAVlsB[vlsAPos,vlsBPos] = ECP if sqlFl: mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB) return aECPVlsAVlsB ######################################################## #~ Make a sqlite3 database for ECP values between genes/lncRNAs of # interest. ######################################################## def mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB): """ Input: lECPVlsAVlsBGlbl is a list of tuples (vrblAPos,vrblBPos,ECP). vrblAPos is the position of the first variables, vrblBPos is the position of the second variable, ECP is the ECP value between vrblAPos and vrblBPos. A sqlite3 database will be created for the input list. aANmsA is the array of variable names in the same position as the numbers in vrblAPos. aANmsB is the array of variable names in the same order as vrblBPos. Output: A sqlite3 database will be created for the input list in the file sqlFl. 
""" conn = sqlite3.connect(sqlFl) c = conn.cursor() c.execute \ ('''CREATE TABLE records (id TEXT, vrblANm TEXT, vrblBNm TEXT, ECP REAL)''') lCnt = 0 for vrblAPos,vrblBPos,ECP in lECPVlsAVlsBGlbl: vrblANm,vrblBNm = aANmsA[vrblAPos],aANmsB[vrblBPos] lCnt+=1 c.execute('insert into records VALUES (?,?,?,?)', (str(lCnt), \ vrblANm,vrblBNm,float64(ECP))) # create indexes. Decrease complexity of querying c.execute("CREATE INDEX index_records on records (id);") conn.commit() conn.close() return 0 ######################################################## #~ Read a sqlite3 database for correlations between genes/lncRNAs of # interest. ######################################################## def rtrnSqlFlECP(sqlFl,srtdVrblANms,srtdVrblBNms,rtrnECPSgnd=False): """ Input: sqlFl is a sqlite3 database with the fields id, vrblANm, vrblBNm, and ECP. srtdVrblANms is a sorted lists of names present in the field vrblANm. srtdVrblBNms is a sorted lists of names present in the field vrblBNm. Optionally, rtrnECPSgnd can have values 'negative' or 'positive', in those cases only 'negative' or 'positive' ECP values are going to be retrieved respectively. Output: aECPVlsAVlsB is an array of size len(srtdVrblANms) x len(srtdVrblBNms) with correlation values ECP. In case the value is not present nan is going to be incldued in the cell. NOTE: If a name is not present in a database, nan values are going to be returned. NOTE: srtdVrblANms are going to be in rows, and srtdVrblBNms in columns. """ if rtrnECPSgnd: try: if rtrnECPSgnd not in {'negative','positive'}: raise exceptions.CelleryWarningObjct \ ('"negative" or "positive" are values, not recognized', \ rtrnECPSgnd) except exceptions.CelleryWarningObjct as err: print err #-------------------------- #~ make a dictionary of names and positions lenaAVlsA = len(srtdVrblANms) lenaAVlsB = len(srtdVrblBNms) dVrblANmP
#!/usr/bin/env python
"""
Copyright (c) 2019 Alex Forencich

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

# Cosimulation testbench for the ptp_clock_cdc Verilog module (64-bit
# timestamp variant): a PTP timestamp is generated in the input clock
# domain and the DUT transfers it to the output clock domain; the check
# process verifies both domains agree even when the clocks drift.

from myhdl import *
import os

import ptp

module = 'ptp_clock_cdc'
testbench = 'test_%s_64' % module

srcs = []

srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)

src = ' '.join(srcs)

# Compile the Verilog sources with Icarus Verilog for MyHDL cosimulation.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)


def bench():
    """Build and return all testbench processes (clocks, DUT, checker)."""

    # Parameters — presumably mirror the DUT's Verilog parameters
    # (several are unused on the Python side); TODO confirm against the RTL.
    TS_WIDTH = 64
    NS_WIDTH = 4
    FNS_WIDTH = 16
    INPUT_PERIOD_NS = 0x6
    INPUT_PERIOD_FNS = 0x6666
    OUTPUT_PERIOD_NS = 0x6
    OUTPUT_PERIOD_FNS = 0x6666
    USE_SAMPLE_CLOCK = 1
    LOG_FIFO_DEPTH = 3
    LOG_RATE = 3

    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])

    input_clk = Signal(bool(0))
    input_rst = Signal(bool(0))
    output_clk = Signal(bool(0))
    output_rst = Signal(bool(0))
    sample_clk = Signal(bool(0))
    input_ts = Signal(intbv(0)[96:])

    # Outputs
    output_ts = Signal(intbv(0)[96:])
    output_ts_step = Signal(bool(0))
    output_pps = Signal(bool(0))

    # PTP clock model driving the input-domain timestamp.
    ptp_clock = ptp.PtpClock(period_ns=INPUT_PERIOD_NS, period_fns=INPUT_PERIOD_FNS)

    ptp_logic = ptp_clock.create_logic(
        input_clk,
        input_rst,
        ts_64=input_ts
    )

    # DUT — compile the RTL, then attach the cosimulation object.
    if os.system(build_cmd):
        raise Exception("Error running build command")

    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        input_clk=input_clk,
        input_rst=input_rst,
        output_clk=output_clk,
        output_rst=output_rst,
        sample_clk=sample_clk,
        input_ts=input_ts,
        output_ts=output_ts,
        output_ts_step=output_ts_step,
        output_pps=output_pps
    )

    # Main clock and input-domain clock toggle together (same frequency).
    @always(delay(3200))
    def clkgen():
        clk.next = not clk
        input_clk.next = not input_clk

    # Output clock half-period is a Signal so the checker can retune the
    # output clock frequency on the fly between test cases.
    output_clk_hp = Signal(int(3200))

    @always(delay(5000))
    def clkgen_sample():
        sample_clk.next = not sample_clk

    @instance
    def clkgen_output():
        while True:
            yield delay(int(output_clk_hp))
            output_clk.next = not output_clk

    @instance
    def check():
        yield delay(100000)
        yield clk.posedge
        rst.next = 1
        input_rst.next = 1
        output_rst.next = 1
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        input_rst.next = 0
        output_rst.next = 0
        yield clk.posedge
        yield delay(100000)
        yield clk.posedge

        # testbench stimulus

        yield clk.posedge
        print("test 1: Same clock speed")
        current_test.next = 1

        yield clk.posedge

        for i in range(20000):
            yield clk.posedge

        # Scale the raw timestamps (16 fractional bits) to seconds —
        # assumes the integer part is in nanoseconds; TODO confirm.
        input_stop_ts = input_ts/2**16*1e-9
        output_stop_ts = output_ts/2**16*1e-9

        print(input_stop_ts-output_stop_ts)

        # The two domains must agree to within 10 ns.
        assert abs(input_stop_ts-output_stop_ts) < 1e-8

        yield delay(100000)
        yield clk.posedge

        print("test 2: Slightly faster")
        current_test.next = 2

        # Shorter half-period -> output clock runs slightly fast.
        output_clk_hp.next = 3100

        yield clk.posedge

        for i in range(20000):
            yield clk.posedge

        input_stop_ts = input_ts/2**16*1e-9
        output_stop_ts = output_ts/2**16*1e-9

        print(input_stop_ts-output_stop_ts)

        assert abs(input_stop_ts-output_stop_ts) < 1e-8

        yield delay(100000)
        yield clk.posedge

        print("test 3: Slightly slower")
        current_test.next = 3

        # Longer half-period -> output clock runs slightly slow.
        output_clk_hp.next = 3300

        yield clk.posedge

        for i in range(20000):
            yield clk.posedge

        input_stop_ts = input_ts/2**16*1e-9
        output_stop_ts = output_ts/2**16*1e-9

        print(input_stop_ts-output_stop_ts)

        assert abs(input_stop_ts-output_stop_ts) < 1e-8

        yield delay(100000)
        yield clk.posedge

        print("test 4: Significantly faster")
        current_test.next = 4

        output_clk_hp.next = 2000

        yield clk.posedge

        for i in range(20000):
            yield clk.posedge

        input_stop_ts = input_ts/2**16*1e-9
        output_stop_ts = output_ts/2**16*1e-9

        print(input_stop_ts-output_stop_ts)

        assert abs(input_stop_ts-output_stop_ts) < 1e-8

        yield delay(100000)
        yield clk.posedge

        print("test 5: Significantly slower")
        current_test.next = 5

        output_clk_hp.next = 5000

        yield clk.posedge

        # Slow output clock: more iterations so the CDC still settles.
        for i in range(30000):
            yield clk.posedge

        input_stop_ts = input_ts/2**16*1e-9
        output_stop_ts = output_ts/2**16*1e-9

        print(input_stop_ts-output_stop_ts)

        assert abs(input_stop_ts-output_stop_ts) < 1e-8

        yield delay(100000)

        raise StopSimulation

    return instances()


def test_bench():
    """Run the full simulation (entry point for test runners)."""
    sim = Simulation(bench())
    sim.run()


if __name__ == '__main__':
    print("Running test...")
    test_bench()
# -*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-01-19 06:49 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('condensed_urls', '0001_initial'), ] operations =
[ migrations.AddField(
model_name='condensedurl', name='visited_count', field=models.IntegerField(default=0), ), ]
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Cyborg common internal object model"""

import netaddr
from oslo_utils import versionutils
from oslo_versionedobjects import base as object_base

from cyborg import objects
from cyborg.objects import fields as object_fields


class CyborgObjectRegistry(object_base.VersionedObjectRegistry):

    def registration_hook(self, cls, index):
        """Keep ``cyborg.objects.<Name>`` pointing at the newest version.

        NOTE(jroll): blatantly stolen from nova
        NOTE(danms): This is called when an object is registered,
        and is responsible for maintaining cyborg.objects.$OBJECT
        as the highest-versioned implementation of a given object.
        """
        version = versionutils.convert_version_to_tuple(cls.VERSION)
        if not hasattr(objects, cls.obj_name()):
            setattr(objects, cls.obj_name(), cls)
        else:
            cur_version = versionutils.convert_version_to_tuple(
                getattr(objects, cls.obj_name()).VERSION)
            if version >= cur_version:
                setattr(objects, cls.obj_name(), cls)


class CyborgObject(object_base.VersionedObject):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """
    OBJ_SERIAL_NAMESPACE = 'cyborg_object'
    OBJ_PROJECT_NAMESPACE = 'cyborg'

    fields = {
        'created_at': object_fields.DateTimeField(nullable=True),
        'updated_at': object_fields.DateTimeField(nullable=True),
    }

    def as_dict(self):
        """Return a dict of all *set* fields and their values."""
        return {k: getattr(self, k)
                for k in self.fields if hasattr(self, k)}

    @staticmethod
    def _from_db_object(obj, db_obj):
        """Converts a database entity to a formal object.

        :param obj: An object of the class.
        :param db_obj: A DB model of the object
        :return: The object of the class with the database entity added
        """
        for field in obj.fields:
            obj[field] = db_obj[field]

        obj.obj_reset_changes()
        return obj

    @classmethod
    def _from_db_object_list(cls, db_objs, context):
        """Converts a list of database entities to a list of formal
        objects.
        """
        return [cls._from_db_object(cls(context), db_obj)
                for db_obj in db_objs]


class CyborgObjectSerializer(object_base.VersionedObjectSerializer):
    # Base class to use for object hydration
    OBJ_BASE_CLASS = CyborgObject


CyborgObjectDictCompat = object_base.VersionedObjectDictCompat


class CyborgPersistentObject(object):
    """Mixin class for Persistent objects.

    This adds the fields that we use in common for most persistent objects.
    """
    fields = {
        'created_at': object_fields.DateTimeField(nullable=True),
        'updated_at': object_fields.DateTimeField(nullable=True),
        'deleted_at': object_fields.DateTimeField(nullable=True),
        'deleted': object_fields.BooleanField(default=False),
    }


class ObjectListBase(object_base.ObjectListBase):

    @classmethod
    def _obj_primitive_key(cls, field):
        """Namespace a field name for this project's serialized form."""
        return 'cyborg_object.%s' % field

    @classmethod
    def _obj_primitive_field(cls, primitive, field,
                             default=object_fields.UnspecifiedDefault):
        """Fetch a namespaced field from a primitive; raise KeyError when
        missing and no default was supplied.
        """
        key = cls._obj_primitive_key(field)
        if default == object_fields.UnspecifiedDefault:
            return primitive[key]
        else:
            return primitive.get(key, default)


def obj_to_primitive(obj):
    """Recursively turn an object into a python primitive.

    A CyborgObject becomes a dict, and anything that implements
    ObjectListBase becomes a list.
    """
    if isinstance(obj, ObjectListBase):
        return [obj_to_primitive(x) for x in obj]
    elif isinstance(obj, CyborgObject):
        result = {}
        for key in obj.obj_fields:
            if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
                result[key] = obj_to_primitive(getattr(obj, key))
        return result
    elif isinstance(obj, (netaddr.IPAddress, netaddr.IPNetwork)):
        # Both netaddr types serialize to their string form.
        return str(obj)
    else:
        return obj


def obj_equal_prims(obj_1, obj_2, ignore=None):
    """Compare two primitives for equivalence ignoring some keys.

    This operation tests the primitives of two objects for equivalence.
    Object primitives may contain a list identifying fields that have been
    changed - this is ignored in the comparison. The ignore parameter lists
    any other keys to be ignored.

    :param:obj1: The first object in the comparison
    :param:obj2: The second object in the comparison
    :param:ignore: A list of fields to ignore
    :returns: True if the primitives are equal ignoring changes
        and specified fields, otherwise False.
    """

    def _strip(prim, keys):
        # Recursively drop the ignored keys from nested dicts/lists.
        if isinstance(prim, dict):
            for k in keys:
                prim.pop(k, None)
            for v in prim.values():
                _strip(v, keys)
        if isinstance(prim, list):
            for v in prim:
                _strip(v, keys)
        return prim

    if ignore is not None:
        keys = ['cyborg_object.changes'] + ignore
    else:
        keys = ['cyborg_object.changes']
    prim_1 = _strip(obj_1.obj_to_primitive(), keys)
    prim_2 = _strip(obj_2.obj_to_primitive(), keys)
    return prim_1 == prim_2


class DriverObjectBase(CyborgObject):
    """Base for driver-side objects whose DB rows lack timestamp columns."""

    @staticmethod
    def _from_db_object(obj, db_obj):
        """Converts a database entity to a formal object, skipping the
        timestamp fields that the driver DB model does not provide.

        :param obj: An object of the class.
        :param db_obj: A DB model of the object
        :return: The object of the class with the database entity added
        """
        # BUG FIX: the previous implementation called obj.fields.pop(...),
        # but ``obj.fields`` is the *class-level* field dictionary shared
        # by all instances.  Popping from it permanently removed
        # created_at/updated_at from the class, and a second call raised
        # KeyError.  Skip the fields instead of mutating the dict.
        for field in obj.fields:
            if field in ('created_at', 'updated_at'):
                continue
            obj[field] = db_obj[field]
        obj.obj_reset_changes()
        return obj
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

"""
Tag editing module for Gramps.
"""

#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk

#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..managedwindow import ManagedWindow
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from ..listmodel import ListModel, TOGGLE

#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Entering_and_Editing_Data:_Detailed_-_part_3' % \
    URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Tags')

#-------------------------------------------------------------------------
#
# EditTagList
#
#-------------------------------------------------------------------------
class EditTagList(ManagedWindow):
    """
    Dialog to allow the user to edit a list of tags.

    After construction, ``self.return_list`` holds the selected tags
    (same pair layout as the input lists) if the user pressed OK, or
    None if the dialog was cancelled/closed.
    """
    def __init__(self, tag_list, full_list, uistate, track):
        """
        Initiate and display the dialog.

        :param tag_list: tags currently attached to the edited object;
            entries look like (handle, name) pairs — presumably, TODO
            confirm against callers.
        :param full_list: all available tags, in the same pair layout.
        :param uistate: Gramps display state passed to ManagedWindow.
        :param track: ManagedWindow tracking list.
        """
        ManagedWindow.__init__(self, uistate, track, self)
        self.namemodel = None
        top = self._create_dialog()
        self.set_window(top, None, _('Tag selection'))

        # Populate the model: [tag[0], already-attached?, tag[1]].
        for tag in full_list:
            self.namemodel.add([tag[0], tag in tag_list, tag[1]])
        self.namemodel.connect_model()

        # The dialog is modal. We don't want to have several open dialogs of
        # this type, since then the user will loose track of which is which.
        # return_list stays None unless the user confirms with OK.
        self.return_list = None
        self.show()
        while True:
            response = self.window.run()
            if response == Gtk.ResponseType.HELP:
                # Help does not close the dialog; loop for another response.
                display_help(webpage=WIKI_HELP_PAGE,
                             section=WIKI_HELP_SEC)
            elif response == Gtk.ResponseType.DELETE_EVENT:
                break
            else:
                if response == Gtk.ResponseType.OK:
                    # Collect the pairs whose checkbox (column 1) is set.
                    self.return_list = [(row[0], row[2])
                                        for row in self.namemodel.model
                                        if row[1]]
                self.close()
                break

    def _create_dialog(self):
        """
        Create a dialog box to select tags.

        :returns: the constructed (but not yet run) Gtk.Dialog.
        """
        # pylint: disable-msg=E1101
        title = _("%(title)s - Gramps") % {'title': _("Edit Tags")}
        top = Gtk.Dialog(title)
        top.set_default_size(360, 400)
        top.set_modal(True)
        top.vbox.set_spacing(5)
        # ListModel columns: hidden first column, a TOGGLE checkbox
        # column, then the visible tag name.
        columns = [('', -1, 300),
                   (' ', -1, 25, TOGGLE, True, None),
                   (_('Tag'), -1, 300)]
        view = Gtk.TreeView()
        self.namemodel = ListModel(view, columns)

        slist = Gtk.ScrolledWindow()
        slist.add_with_viewport(view)
        slist.set_policy(Gtk.PolicyType.AUTOMATIC,
                         Gtk.PolicyType.AUTOMATIC)
        top.vbox.pack_start(slist, 1, 1, 5)

        top.add_button(Gtk.STOCK_HELP, Gtk.ResponseType.HELP)
        top.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
        top.add_button(Gtk.STOCK_OK, Gtk.ResponseType.OK)
        top.show_all()
        return top

    def build_menu_names(self, obj):
        """
        Define the menu entry for the ManagedWindows.
        """
        return (_("Tag selection"), None)
00) s = pd.Series(arr) df = tm.makeDataFrame() class TestTopper(TestCase): def __init__(self, *args, **kwargs): TestCase.__init__(self, *args, **kwargs) def runTest(self): pass def setUp(self): pass def test_topn_largest(self): # get the n largest bn_res = topper.bn_topn(arr, 10) assert bn_res[0] == max(arr) # sanity check pd_res = s.order(ascending=False)[:10] np.testing.assert_almost_equal(bn_res, pd_res) # change result to biggest to smallest bn_res = topper.bn_topn(arr, 10, ascending=True) assert bn_res[-1] == max(arr) # sanity check pd_res = s.order(ascending=True)[-10:] # grab from end since we reversed np.testing.assert_almost_equal(bn_res, pd_res) def test_topn_big_N(self): """ When calling topn where N is greater than the number of non-nan values. This can happen if you're tracking a Frame of returns where not all series start at the same time. It's possible that in the begining or end, or anytime for that matter, you might not have enough values. This screws up the logic. """ # test data arr = np.random.randn(100) arr[5:] = np.nan # only first four are non-na s = pd.Series(arr) # top bn_res = topper.bn_topn(arr, 10) assert bn_res[0] == max(arr) # sanity check pd_res = s.order(ascending=False)[:10].dropna() tm.assert_almost_equal(bn_res, pd_res.values) # bottom bn_res = topper.bn_topn(arr, -10) assert bn_res[0] == min(arr) # sanity check pd_res = s.order()[:10].dropna() # grab from end since we reversed tm.assert_almost_equal(bn_res, pd_res.values) def test_top_smallest(self): # get the nsmallest bn_res = topper.bn_topn(arr, -10) assert bn_res[0] == min(arr) # sanity check pd_res = s.order()[:10] tm.assert_almost_equal(bn_res, pd_res.values) # change ordering bn_res = topper.bn_topn(arr, -10, ascending=False) assert bn_res[-1] == min(arr) # sanity check pd_res = s.order(ascending=False)[-10:] # grab from end since we reversed tm.assert_almost_equal(bn_res, pd_res.values) def test_top_arg(self): # get the nlargest bn_res = topper.bn_topn(arr, 10) bn_args = 
topper.bn_topargn(arr, 10) arg_res = arr[bn_args] tm
.assert_almost_equal(bn_res, arg_res) # get the nsmallest bn_res = topper.bn_topn(arr, -10) bn_args = topper.bn_topargn(arr, -10) arg_res = arr[bn_args] tm.assert_almost_equal(bn_res, arg_res) # get the nsmallest bn_res = topper.bn_topn(arr, -10,
ascending=False) bn_args = topper.bn_topargn(arr, -10, ascending=False) arg_res = arr[bn_args] tm.assert_almost_equal(bn_res, arg_res) def test_nans(self): """ bottleneck.partsort doesn't handle nans. We need to correct for them. the arg version is trickiers since we need to make sure to translate back into the nan-filled array """ nanarr = np.arange(10).astype(float) nanarr[nanarr % 2 == 0] = np.nan test = topper.topn(nanarr, 3) correct = [9,7,5] tm.assert_almost_equal(test, correct) test = topper.topn(nanarr, -3) correct = [1,3,5] tm.assert_almost_equal(test, correct) test = topper.topargn(nanarr, 3) correct = [9,7,5] tm.assert_almost_equal(test, correct) test = topper.topargn(nanarr, -3) correct = [1,3,5] tm.assert_almost_equal(test, correct) test = topper.topargn(nanarr, -3, ascending=False) correct = [5,3,1] tm.assert_almost_equal(test, correct) def test_df_topn(self): # long way of getting the topn tops = df.apply(lambda s: s.topn(2, ascending=False), axis=1) correct = pd.DataFrame(tops, index=df.index) test = topper.topn_df(df, 2, ascending=False) tm.assert_frame_equal(test, correct) # sanity check, make sure first value is right c = df.iloc[0].order()[-1] t = test.iloc[0][0] tm.assert_almost_equal(t, c) # bottom 2 tops = df.apply(lambda s: s.topn(-2), axis=1) correct = pd.DataFrame(tops, index=df.index) test = topper.topn_df(df, -2) tm.assert_frame_equal(test, correct) # sanity check, make sure first value is right c = df.iloc[0].order()[0] t = test.iloc[0][0] tm.assert_almost_equal(t, c) def test_df_topindexn(self): # long way of getting the topindexn top_pos = df.apply(lambda s: s.topargn(2, ascending=False), axis=1) correct = df.columns[top_pos.values] correct = pd.DataFrame(correct, index=df.index) test = topper.topindexn_df(df, 2, ascending=False) tm.assert_frame_equal(test, correct) # sanity check, make sure first value is right c = df.iloc[0].order().index[-1] t = test.iloc[0][0] tm.assert_almost_equal(t, c) # bottom 2 top_pos = df.apply(lambda s: 
s.topargn(-2), axis=1) correct = df.columns[top_pos.values] correct = pd.DataFrame(correct, index=df.index) test = topper.topindexn_df(df, -2) tm.assert_frame_equal(test, correct) # sanity check, make sure first value is right c = df.iloc[0].order().index[0] t = test.iloc[0][0] tm.assert_frame_equal(test, correct) def test_df_topargn(self): # really this is tested via topindexn indirectly pass def test_default_ascending(self): """ Changed ascending to change based on N More intuitive, by default you'd expect the greatest or lowest value would be first, depending on which side you are looking for """ # top should default to asc=False bn_res = topper.bn_topn(arr, 10) pd_res = s.order(ascending=False)[:10] tm.assert_almost_equal(bn_res, pd_res.values) # make sure ascending is still respected bn_res = topper.bn_topn(arr, 10, ascending=True) pd_res = s.order(ascending=True)[-10:] tm.assert_almost_equal(bn_res, pd_res.values) # bottom defaults asc=True bn_res = topper.bn_topn(arr, -10) pd_res = s.order()[:10] tm.assert_almost_equal(bn_res, pd_res.values) # make sure ascending is still respected bn_res = topper.bn_topn(arr, -10, ascending=False) pd_res = s.order()[:10][::-1] tm.assert_almost_equal(bn_res, pd_res.values) def test_test_ndim(self): """ Make sure topn and topargn doesn't accept DataFrame """ try: topper.topn(df, 1) except: pass else: assert False try: topper.topargn(df, 1) except: pass else: assert False def test_too_big_n_df(self): df = pd.DataFrame(np.random.randn(100, 10)) df[df > 0] = np.nan testdf = topper.topn_df(df, 10) for x in range(len(df)): correct = df.iloc[x].order(ascending=False).reset_index(drop=True) test = testdf.iloc[x] tm.assert_almost_equal(test, correct) testdf = topper.topn_df(df, 2) for x in range(len(df)): correct = df.iloc[x].order(ascending=False).reset_index(drop=True)[:2] test = testdf.iloc[x] tm.assert_almost_equal(test, correct) # bottom testdf = topper.topn_df(df, -2) for x in range(len(df)): correct = 
df.iloc[x].order().reset_index(drop=True)[:2] test = testdf.iloc[x] tm.assert_almost_equal(test, correct) # bottom testdf = topper.topn_df(df, -20) for x in range(len(df)): correct = df.iloc[x].order().reset_index(drop=True)[:20] test = testdf.iloc[x] tm.assert_almost_equal(test, correct) if __
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, \
    current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
    PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm


@auth.before_app_request
def before_request():
    """Run before every request in the application.

    Refreshes the user's last-seen timestamp and redirects logged-in but
    unconfirmed accounts to the 'unconfirmed' page for any request outside
    the auth blueprint and static files.
    """
    if current_user.is_authenticated:
        current_user.ping()
        # BUG FIX: request.endpoint is None for URLs that match no route
        # (e.g. 404s), so slicing it unconditionally raised TypeError.
        # Guard on request.endpoint before slicing.
        if not current_user.confirmed \
                and request.endpoint \
                and request.endpoint[:5] != 'auth.' \
                and request.endpoint != 'static':
            return redirect(url_for('auth.unconfirmed'))


@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'please confirm your account' page to unconfirmed users."""
    if current_user.is_anonymous or current_user.confirmed:
        return redirect(url_for('main.index'))
    return render_template('auth/unconfirmed.html')


@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in from the login form."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            # NOTE(review): 'next' comes from the query string unvalidated,
            # which is an open-redirect vector; consider verifying it is a
            # relative URL before redirecting.
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password.')
    return render_template('auth/login.html', form=form)


@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))


@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new account and send its confirmation email."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
        db.session.add(user)
        # Commit now so the new user has an id before generating the token.
        db.session.commit()
        token = user.generate_confirmation_token()
        send_email(user.email, 'Confirm Your Account',
                   'auth/email/confirm', user=user, token=token)
        flash('A confirmation email has been sent to you by email.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)


@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Confirm the current user's account from an emailed token."""
    if current_user.confirmed:
        return redirect(url_for('main.index'))
    if current_user.confirm(token):
        flash('You have confirmed your account. Thanks!')
    else:
        flash('The confirmation link is invalid or has expired.')
    return redirect(url_for('main.index'))


@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Send a fresh confirmation token to the current user."""
    token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=current_user, token=token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))


@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Change the logged-in user's password after verifying the old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.password.data
            db.session.add(current_user)
            flash('Your password has been updated.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid password.')
    return render_template("auth/change_password.html", form=form)


@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Email a password-reset token to the given address, if registered."""
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Note: flashes the same message whether or not the address exists,
        # which avoids leaking which emails are registered.
        if user:
            token = user.generate_reset_token()
            send_email(user.email, 'Reset Your Password',
                       'auth/email/reset_password',
                       user=user, token=token,
                       next=request.args.get('next'))
        flash('An email with instructions to reset your password has been '
              'sent to you.')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)


@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Set a new password from an emailed reset token."""
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None:
            return redirect(url_for('main.index'))
        if user.reset_password(token, form.password.data):
            flash('Your password has been updated.')
            return redirect(url_for('auth.login'))
        else:
            return redirect(url_for('main.index'))
    return render_template('auth/reset_password.html', form=form)


@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Start an email-address change by mailing a token to the new address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.password.data):
            new_email = form.email.data
            token = current_user.generate_email_change_token(new_email)
            send_email(new_email, 'Confirm your email address',
                       'auth/email/change_email',
                       user=current_user, token=token)
            flash('An email with instructions to confirm your new email '
                  'address has been sent to you.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid email or password.')
    return render_template("auth/change_email.html", form=form)


@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Complete an email-address change from an emailed token."""
    if current_user.change_email(token):
        flash('Your email address has been updated.')
    else:
        flash('Invalid request.')
    return redirect(url_for('main.index'))
#!/usr/bin/python import sys from LAPS.MsgBus.Bus import Bus # Create queue with a unique name # insert message # receive msg # delete queue if __name__ == "__main__": # If invoked directly, parse command line arguments for logger information #
and pass the rest to the run() method defined above # -------------------------------------------------------------------------- try: unique_queue_name = sys.argv[1] except: print "Not enough command line arguments: this test needs a unique queue name" exit(1) #msgbus = Bus(broker="lhd002"
, address=unique_queue_name) #parset = """ #key=value #""" #msgbus.send(parset,"Observation123456")
ion = option[:-1] value = getattr(self, option) print indent + "%s = %s" % (option, value) def run (self): """A command's raison d'etre: carry out the action it exists to perform, controlled by the options initialized in 'initialize_options()', customized by other commands, the setup script, the command-line, and config files, and finalized in 'finalize_options()'. All terminal output and filesystem interaction should be done by 'run()'. This method must be implemented by all command classes. """ raise RuntimeError, \ "abstract method -- subclass %s must override" % self.__class__ def announce (self, msg, level=1): """If the current verbosity level is of greater than or equal to 'level' print 'msg' to stdout. """ log.log(level, msg) def debug_print (self, msg): """Print 'msg' to stdout if the global DEBUG (taken from the DISTUTILS_DEBUG environment variable) flag is true. """ from distutils.debug import DEBUG if DEBUG: print msg sys.stdout.flush() # -- Option validation methods ------------------------------------- # (these are very handy in writing the 'finalize_options()' method) # # NB. the general philosophy here is to ensure that a particular option # value meets certain type and value constraints. If not, we try to # force it into conformance (eg. if we expect a list but have a string, # split the string on comma and/or whitespace). If we can't force the # option into conformance, raise DistutilsOptionError. Thus, command # classes need do nothing more than (eg.) # self.ensure_string_list('foo') # and they can be guaranteed that thereafter, self.foo will be # a list of strings. 
def _ensure_stringlike (self, option, what, default=None): val = getattr(self, option) if val is None: setattr(self, option, default) return default elif type(val) is not StringType: raise DistutilsOptionError, \ "'%s' must be a %s (got `%s`)" % (option, what, val) return val def ensure_string (self, option, default=None): """Ensure that 'option' is a string; if not defined, set it to 'default'. """ self._ensure_stringlike(option, "string", default) def ensure_string_list (self, option): """Ensure that 'option' is a list of strings. If 'option' is currently a string, we split it either on /,\s*/ or /\s+/, so "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become ["foo", "bar", "baz"]. """ val = getattr(self, option) if val is None: return elif type(val) is StringType: setattr(self, option, re.split(r',\s*|\s+', val)) else: if type(val) is ListType: types = map(type, val) ok = (types == [StringType] * len(val)) else: ok = 0 if not ok: raise DistutilsOptionError, \ "'%s' must be a list of strings (got %s)" % \ (option, `val`) def _ensure_tested_string (self, option, tester, what, error_fmt, default=None): val = se
lf._ensure_stringlike(option,
what, default) if val is not None and not tester(val): raise DistutilsOptionError, \ ("error in '%s' option: " + error_fmt) % (option, val) def ensure_filename (self, option): """Ensure that 'option' is the name of an existing file.""" self._ensure_tested_string(option, os.path.isfile, "filename", "'%s' does not exist or is not a file") def ensure_dirname (self, option): self._ensure_tested_string(option, os.path.isdir, "directory name", "'%s' does not exist or is not a directory") # -- Convenience methods for commands ------------------------------ def get_command_name (self): if hasattr(self, 'command_name'): return self.command_name else: return self.__class__.__name__ def set_undefined_options (self, src_cmd, *option_pairs): """Set the values of any "undefined" options from corresponding option values in some other command object. "Undefined" here means "is None", which is the convention used to indicate that an option has not been changed between 'initialize_options()' and 'finalize_options()'. Usually called from 'finalize_options()' for options that depend on some other command rather than another option of the same command. 'src_cmd' is the other command from which option values will be taken (a command object will be created for it if necessary); the remaining arguments are '(src_option,dst_option)' tuples which mean "take the value of 'src_option' in the 'src_cmd' command object, and copy it to 'dst_option' in the current command object". 
""" # Option_pairs: list of (src_option, dst_option) tuples src_cmd_obj = self.distribution.get_command_obj(src_cmd) src_cmd_obj.ensure_finalized() for (src_option, dst_option) in option_pairs: if getattr(self, dst_option) is None: setattr(self, dst_option, getattr(src_cmd_obj, src_option)) def get_finalized_command (self, command, create=1): """Wrapper around Distribution's 'get_command_obj()' method: find (create if necessary and 'create' is true) the command object for 'command', call its 'ensure_finalized()' method, and return the finalized command object. """ cmd_obj = self.distribution.get_command_obj(command, create) cmd_obj.ensure_finalized() return cmd_obj # XXX rename to 'get_reinitialized_command()'? (should do the # same in dist.py, if so) def reinitialize_command (self, command, reinit_subcommands=0): return self.distribution.reinitialize_command( command, reinit_subcommands) def run_command (self, command): """Run some other command: uses the 'run_command()' method of Distribution, which creates and finalizes the command object if necessary and then invokes its 'run()' method. """ self.distribution.run_command(command) def get_sub_commands (self): """Determine the sub-commands that are relevant in the current distribution (ie., that need to be run). This is based on the 'sub_commands' class attribute: each tuple in that list may include a method that we call to determine if the subcommand needs to be run for the current distribution. Return a list of command names. 
""" commands = [] for (cmd_name, method) in self.sub_commands: if method is None or method(self): commands.append(cmd_name) return commands # -- External world manipulation ----------------------------------- def warn (self, msg): sys.stderr.write("warning: %s: %s\n" % (self.get_command_name(), msg)) def execute (self, func, args, msg=None, level=1): util.execute(func, args, msg, dry_run=self.dry_run) def mkpath (self, name, mode=0777): dir_util.mkpath(name, mode, dry_run=self.dry_run) def copy_file (self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1): """Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)""" return file_util.copy_file( infile, outfile, preserve_mode, preserve_
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)

"""
ConfigSet: a special dict

The values put in :py:class:`ConfigSet` must be lists
"""

import copy, re, os
from waflib import Logs, Utils

# Matches one "KEY = value" line of a stored ConfigSet file; group 2 is the
# key, group 3 the (repr'd) value. Leading '#' marks are tolerated.
re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)

class ConfigSet(object):
	"""
	A dict that honor serialization and parent relationships. The serialization format
	is human-readable (python-like) and performed by using eval() and repr().
	For high performance prefer pickle. Do not store functions as they are not serializable.

	The values can be accessed by attributes or by keys::

		from waflib.ConfigSet import ConfigSet
		env = ConfigSet()
		env.FOO = 'test'
		env['FOO'] = 'test'
	"""
	# 'parent' is intentionally absent on detached instances: every lookup
	# that follows the parent chain relies on AttributeError as the stop
	# condition, so no 'parent = None' default is set.
	__slots__ = ('table', 'parent')
	def __init__(self, filename=None):
		self.table = {}
		"""
		Internal dict holding the object values
		"""
		#self.parent = None

		if filename:
			self.load(filename)

	def __contains__(self, key):
		"""
		Enable the *in* syntax::

			if 'foo' in env:
				print(env['foo'])
		"""
		# Check this table first, then delegate up the parent chain;
		# AttributeError means we reached a ConfigSet with no parent.
		if key in self.table: return True
		try: return self.parent.__contains__(key)
		except AttributeError: return False # parent may not exist

	def keys(self):
		"""Dict interface (unknown purpose)"""
		# Union of the keys of self and all ancestors, sorted.
		keys = set()
		cur = self
		while cur:
			keys.update(cur.table.keys())
			cur = getattr(cur, 'parent', None)
		keys = list(keys)
		keys.sort()
		return keys

	def __str__(self):
		"""Text representation of the ConfigSet (for debugging purposes)"""
		return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in self.keys()])

	def __getitem__(self, key):
		"""
		Dictionary interface: get value from key::

			def configure(conf):
				conf.env['foo'] = {}
				print(env['foo'])
		"""
		# Walk up the parent chain by rebinding 'self'; a missing key on a
		# parentless ConfigSet raises AttributeError, mapped to [] (the
		# default "empty" value of this class -- lookups never raise).
		try:
			while 1:
				x = self.table.get(key, None)
				if not x is None:
					return x
				self = self.parent
		except AttributeError:
			return []

	def __setitem__(self, key, value):
		"""
		Dictionary interface: get value from key
		"""
		self.table[key] = value

	def __delitem__(self, key):
		"""
		Dictionary interface: get value from key
		"""
		# NOTE(review): deletion masks the key with [] in this table rather
		# than removing it, so a value inherited from a parent stays hidden.
		self[key] = []

	def __getattr__(self, name):
		"""
		Attribute access provided for convenience. The following forms are equivalent::

			def configure(conf):
				conf.env.value
				conf.env['value']
		"""
		if name in self.__slots__:
			return object.__getattr__(self, name)
		else:
			return self[name]

	def __setattr__(self, name, value):
		"""
		Attribute access provided for convenience. The following forms are equivalent::

			def configure(conf):
				conf.env.value = x
				env['value'] = x
		"""
		if name in self.__slots__:
			object.__setattr__(self, name, value)
		else:
			self[name] = value

	def __delattr__(self, name):
		"""
		Attribute access provided for convenience. The following forms are equivalent::

			def configure(conf):
				del env.value
				del env['value']
		"""
		if name in self.__slots__:
			object.__delattr__(self, name)
		else:
			del self[name]

	def derive(self):
		"""
		Returns a new ConfigSet deriving from self. The copy returned
		will be a shallow copy::

			from waflib.ConfigSet import ConfigSet
			env = ConfigSet()
			env.append_value('CFLAGS', ['-O2'])
			child = env.derive()
			child.CFLAGS.append('test') # warning! this will modify 'env'
			child.CFLAGS = ['-O3'] # new list, ok
			child.append_value('CFLAGS', ['-O3']) # ok

		Use :py:func:`ConfigSet.detach` to detach the child from the parent.
		"""
		newenv = ConfigSet()
		newenv.parent = self
		return newenv

	def detach(self):
		"""
		Detach self from its parent (if existing)

		Modifying the parent :py:class:`ConfigSet` will not change the current object
		Modifying this :py:class:`ConfigSet` will not modify the parent one.
		"""
		# Flatten the parent chain into this table, deep-copying the values
		# so later mutations cannot leak back to the (former) parents.
		tbl = self.get_merged_dict()
		try:
			delattr(self, 'parent')
		except AttributeError:
			pass
		else:
			keys = tbl.keys()
			for x in keys:
				tbl[x] = copy.deepcopy(tbl[x])
			self.table = tbl
		return self

	def get_flat(self, key):
		"""
		Return a value as a string. If the input is a list, the value returned is space-separated.

		:param key: key to use
		:type key: string
		"""
		s = self[key]
		if isinstance(s, str): return s
		return ' '.join(s)

	def _get_list_value_for_modification(self, key):
		"""
		Return a list value for further modification.

		The list may be modified inplace and there is no need to do this afterwards::

			self.table[var] = value
		"""
		# Always ends with a private list stored in self.table: inherited
		# values are copied, scalars are wrapped in a one-element list.
		try:
			value = self.table[key]
		except KeyError:
			try: value = self.parent[key]
			except AttributeError: value = []
			if isinstance(value, list):
				value = value[:]
			else:
				value = [value]
		else:
			if not isinstance(value, list):
				value = [value]
		self.table[key] = value
		return value

	def append_value(self, var, val):
		"""
		Appends a value to the specified config key::

			def build(bld):
				bld.env.append_value('CFLAGS', ['-O2'])

		The value must be a list or a tuple
		"""
		if isinstance(val, str): # if there were string everywhere we could optimize this
			val = [val]
		current_value = self._get_list_value_for_modification(var)
		current_value.extend(val)

	def prepend_value(self, var, val):
		"""
		Prepends a value to the specified item::

			def configure(conf):
				conf.env.prepend_value('CFLAGS', ['-O2'])

		The value must be a list or a tuple
		"""
		if isinstance(val, str):
			val = [val]
		self.table[var] = val + self._get_list_value_for_modification(var)

	def append_unique(self, var, val):
		"""
		Append a value to the specified item only if it's not already present::

			def build(bld):
				bld.env.append_unique('CFLAGS', ['-O2', '-g'])

		The value must be a list or a tuple
		"""
		if isinstance(val, str):
			val = [val]
		current_value = self._get_list_value_for_modification(var)

		for x in val:
			if x not in current_value:
				current_value.append(x)

	def get_merged_dict(self):
		"""
		Compute the merged dictionary from the fusion of self and all its parent

		:rtype: a ConfigSet object
		"""
		# Collect tables root-first so children override their ancestors.
		table_list = []
		env = self
		while 1:
			table_list.insert(0, env.table)
			try: env = env.parent
			except AttributeError: break
		merged_table = {}
		for table in table_list:
			merged_table.update(table)
		return merged_table

	def store(self, filename):
		"""
		Write the :py:class:`ConfigSet` data into a file. See :py:meth:`ConfigSet.load` for reading such files.

		:param filename: file to use
		:type filename: string
		"""
		try:
			os.makedirs(os.path.split(filename)[0])
		except OSError:
			pass

		buf = []
		merged_table = self.get_merged_dict()
		keys = list(merged_table.keys())
		keys.sort()

		# ascii() exists on Python 3 only; repr() is the Python 2 fallback.
		try:
			fun = ascii
		except NameError:
			fun = repr

		for k in keys:
			# 'undo_stack' is transient stash() state -- never serialized.
			if k != 'undo_stack':
				buf.append('%s = %s\n' % (k, fun(merged_table[k])))
		Utils.writef(filename, ''.join(buf))

	def load(self, filename):
		"""
		Retrieve the :py:class:`ConfigSet` data from a file. See :py:meth:`ConfigSet.store` for writing such files

		:param filename: file to use
		:type filename: string
		"""
		tbl = self.table
		code = Utils.readf(filename, m='rU')
		# NOTE(review): values are rebuilt with eval() -- the cache file must
		# come from a trusted source (it is produced by store() above).
		for m in re_imp.finditer(code):
			g = m.group
			tbl[g(2)] = eval(g(3))
		Logs.debug('env: %s' % str(self.table))

	def update(self, d):
		"""
		Dictionary interface: replace values from another dict

		:param d: object to use the value from
		:type d: dict-like object
		"""
		for k, v in d.items():
			self[k] = v

	def stash(self):
		"""
		Store the object state, to provide a kind of transaction support::

			env = ConfigSet()
			env.stash()
			try:
				env.append_value('CFLAGS', '-O3')
				call_some_method(env)
			finally:
				env.revert()

		The history is kept in a stack, and is lost during the serialization by :py:meth:`ConfigSet.store`
		"""
		# Keep the original table on the stack and work on a deep copy, so
		# revert() can restore the exact previous state.
		orig = self.table
		tbl = self.table = self.table.copy()
		for x in tbl.keys():
			tbl[x] = copy.deepcopy(tbl[x])
		self.undo_stack = self.undo_stack + [orig]

	def revert(self):
		"""
		Reverts the object to a previous state. See :py:meth:`ConfigSet.stash`
		"""
		self.table = self.undo_stack.pop(-1)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Telemetry benchmark definitions: each class pairs the smoothness
# measurement with a page set; the framework reads the class attributes.
from telemetry import test

from measurements import smoothness
import page_sets


@test.Disabled('linux')  # crbug.com/368767
class SchedulerToughSchedulingCases(test.Test):
  """Measures rendering statistics while interacting with pages that have
  challenging scheduling properties.

  https://docs.google.com/a/chromium.org/document/d/
  17yhE5Po9By0sCdM1yZT3LiUECaUr_94rQt9j-4tOQIM/view"""
  test = smoothness.Smoothness
  page_set = page_sets.ToughSchedulingCasesPageSet


# Pepper plugin is not supported on android.
@test.Disabled('android', 'win')  # crbug.com/384733
class SchedulerToughPepperCases(test.Test):
  """Measures rendering statistics while interacting with pages that have
  pepper plugins"""
  test = smoothness.Smoothness
  page_set = page_sets.ToughPepperCasesPageSet

  def CustomizeBrowserOptions(self, options):
    # This is needed for testing pepper plugin.
    options.AppendExtraBrowserArgs('--enable-pepper-testing')
# Plot a thermistor-resistance log (CSV: date,value,unit) together with two
# smoothed curves: a boxcar average and a Gaussian-weighted average.
import numpy as np
import pylab as pl
import sys
sys.path.append('../../lablib')
import ourgui


def smoothList(list, strippedXs=False, degree=10):
    """Boxcar (moving-average) smoothing of a sequence.

    Returns a plain list of length len(list) - degree + 1 where element i is
    the mean of the degree-wide window starting at i.

    NOTE(review): the parameter shadows the builtin ``list`` (kept for
    backward compatibility with keyword callers), and the strippedXs branch
    references an undefined name ``Xs`` -- it raises NameError if ever taken
    and looks like dead copy-paste code; confirm intent before removing.
    """
    if strippedXs == True:
        return Xs[0:-(len(list) - (len(list) - degree + 1))]
    smoothed = [0] * (len(list) - degree + 1)
    for i in range(len(smoothed)):
        smoothed[i] = sum(list[i:i + degree]) / float(degree)
    return smoothed


def smoothListGaussian(list, strippedXs=False, degree=5):
    """Gaussian-weighted smoothing of a sequence.

    Uses a window of width 2*degree - 1 with weights exp(-(4*frac)**2) for
    frac in (-1, 1); returns a list of length len(list) - window (floats).
    ``strippedXs`` is accepted for signature parity with smoothList but
    is unused here.
    """
    window = degree * 2 - 1
    weight = np.array([1.0] * window)
    weightGauss = []
    for i in range(window):
        i = i - degree + 1
        frac = i / float(window)
        gauss = 1 / (np.exp((4 * (frac)) ** 2))
        weightGauss.append(gauss)
    weight = np.array(weightGauss) * weight
    smoothed = [0.0] * (len(list) - window)
    for i in range(len(smoothed)):
        smoothed[i] = sum(np.array(list[i:i + window]) * weight) / sum(weight)
    return smoothed


# Ask the user for the data file to plot.
filename = ourgui.openFile()

dtypes = {'names': ['date', 'value', 'unit'],
          'formats': ['f8', 'f4', 'S1']}

data = np.loadtxt(filename,
                  delimiter=",",
                  dtype=dtypes,
                  )

# Re-zero the time axis at the first sample and convert seconds -> minutes.
date = data['date']
date -= date[0]
scale = 60
date /= scale

pl.plot(date, data['value'], '.')

degree = 200
sdataG = smoothListGaussian(data['value'], degree=degree)
sdateG = date[degree:(-degree + 1)]
sdata = smoothList(data['value'], degree=degree)
# BUG FIX: degree/2 is a float under Python 3 and slice indices must be
# integers; use floor division, which gives the identical value under
# Python 2's integer division as well.
sdate = date[degree // 2:-degree // 2 + 1]
pl.plot(sdate, sdata, 'g-')
pl.plot(sdateG, sdataG, 'r-')
pl.xlabel("time (min)")
pl.ylabel("thermistor resistance")
pl.show()
reating a new hash. Typically this option is not needed, as the default (``"2a"``) is usually the correct choice. If specified, it must be one of the following: * ``"2"`` - the first revision of BCrypt, which suffers from a minor security flaw and is generally not used anymore. * ``"2a"`` - latest revision of the official BCrypt algorithm, and the current default. * ``"2y"`` - format specific to the *crypt_blowfish* BCrypt implementation, identical to ``"2a"`` in all but name. :type relaxed: bool :param relaxed: By default, providing an invalid value for one of the other keywords will result in a :exc:`ValueError`. If ``relaxed=True``, and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning` will be issued instead. Correctable errors include ``rounds`` that are too small or too large, and ``salt`` strings that are too long. .. versionadded:: 1.6 .. versionchanged:: 1.6 This class now supports ``"2y"`` hashes, and recognizes (but does not support) the broken ``"2x"`` hashes. (see the :ref:`crypt_blowfish bug <crypt-blowfish-bug>` for details). .. versionchanged:: 1.6 Added a pure-python backend. 
""" #=================================================================== # class attrs #=================================================================== #--GenericHandler-- name = "bcrypt" setting_kwds = ("salt", "rounds", "ident") checksum_size = 31 checksum_chars = bcrypt64.charmap #--HasManyIdents-- default_ident = u("$2a$") ident_values = (u("$2$"), IDENT_2A, IDENT_2X, IDENT_2Y) ident_aliases = {u("2"): u("$2$"), u("2a"): IDENT_2A, u("2y"): IDENT_2Y} #--HasSalt-- min_salt_size = max_salt_size = 22 salt_chars = bcrypt64.charmap # NOTE: 22nd salt char must be in bcrypt64._padinfo2[1], not full charmap #--HasRounds-- default_rounds = 12 # current passlib default min_rounds = 4 # bcrypt spec specified minimum max_rounds = 31 # 32-bit integer limit (since real_rounds=1<<rounds) rounds_cost = "log2" #=================================================================== # formatting #=================================================================== @classmethod def from_string(cls, hash): ident, tail = cls._parse_ident(hash) if ident == IDENT_2X: raise ValueError("crypt_blowfish's buggy '2x' hashes are not " "currently supported") rounds_str, data = tail.split(u("$")) rounds = int(rounds_str) if rounds_str != u('%02d') % (rounds,): raise uh.exc.MalformedHashError(cls, "malformed cost field") salt, chk = data[:22], data[22:] return cls( rounds=rounds, salt=salt, checksum=chk or None, ident=ident, ) def to_string(self): hash = u("%s%02d$%s%s") % (self.ident, self.rounds, self.salt, self.checksum or u('')) return uascii_to_str(hash) def _get_config(self, ident=None): "internal helper to prepare config string for backends" if ident is None: ident = self.ident
if ident == IDENT_2Y: ident = IDENT_2A else: assert ident != IDENT_2X config = u("%s%02d$%s") % (ident, self.rounds, self.salt) return uascii_to_str(config) #=================================================================== # specialized salt generation - fixes passlib issue 25 #============================================================
======= @classmethod def _bind_needs_update(cls, **settings): return cls._needs_update @classmethod def _needs_update(cls, hash, secret): if isinstance(hash, bytes): hash = hash.decode("ascii") # check for incorrect padding bits (passlib issue 25) if hash.startswith(IDENT_2A) and hash[28] not in bcrypt64._padinfo2[1]: return True # TODO: try to detect incorrect $2x$ hashes using *secret* return False @classmethod def normhash(cls, hash): "helper to normalize hash, correcting any bcrypt padding bits" if cls.identify(hash): return cls.from_string(hash).to_string() else: return hash def _generate_salt(self, salt_size): # override to correct generate salt bits salt = super(bcrypt, self)._generate_salt(salt_size) return bcrypt64.repair_unused(salt) def _norm_salt(self, salt, **kwds): salt = super(bcrypt, self)._norm_salt(salt, **kwds) assert salt is not None, "HasSalt didn't generate new salt!" changed, salt = bcrypt64.check_repair_unused(salt) if changed: # FIXME: if salt was provided by user, this message won't be # correct. not sure if we want to throw error, or use different warning. 
warn( "encountered a bcrypt salt with incorrectly set padding bits; " "you may want to use bcrypt.normhash() " "to fix this; see Passlib 1.5.3 changelog.", PasslibHashWarning) return salt def _norm_checksum(self, checksum): checksum = super(bcrypt, self)._norm_checksum(checksum) if not checksum: return None changed, checksum = bcrypt64.check_repair_unused(checksum) if changed: warn( "encountered a bcrypt hash with incorrectly set padding bits; " "you may want to use bcrypt.normhash() " "to fix this; see Passlib 1.5.3 changelog.", PasslibHashWarning) return checksum #=================================================================== # primary interface #=================================================================== backends = ("pybcrypt", "bcryptor", "os_crypt", "builtin") @classproperty def _has_backend_pybcrypt(cls): return pybcrypt_hashpw is not None @classproperty def _has_backend_bcryptor(cls): return bcryptor_engine is not None @classproperty def _has_backend_builtin(cls): if os.environ.get("PASSLIB_BUILTIN_BCRYPT") not in ["enable","enabled"]: return False # look at it cross-eyed, and it loads itself _load_builtin() return True @classproperty def _has_backend_os_crypt(cls): # XXX: what to do if only h2 is supported? h1 is *very* rare. h1 = '$2$04$......................1O4gOrCYaqBG3o/4LnT2ykQUt1wbyju' h2 = '$2a$04$......................qiOQjkB8hxU8OzRhS.GhRMa4VUnkPty' return test_crypt("test",h1) and test_crypt("test", h2) @classmethod def _no_backends_msg(cls): return "no bcrypt backends available - please install py-bcrypt" def _calc_checksum_os_crypt(self, secret): config = self._get_config() hash = safe_crypt(secret, config) if hash: assert hash.startswith(config) and len(hash) == len(config)+31 return hash[-31:] else: # NOTE: it's unlikely any other backend will be available, # but checking before we bail, just in case. 
for name in self.backends: if name != "os_crypt" and self.has_backend(name): func = getattr(self, "_calc_checksum_" + name) return func(secret) raise uh.exc.MissingBackendError( "password can't be handled by os_crypt, " "recommend installing py-bcrypt.", ) def _calc_checksum_pybcrypt(self, secret): # py-bcrypt behavior: # py2: unicode secret/hash encoded as ascii bytes before use, # bytes taken as-is; returns ascii bytes. # py3: not supported (patch submitted) if isinstance(secret, unicode): secret = secret.encode("utf-8") if _BNULL in secret:
# Flask blueprint serving a read-only frontend over several HTTP "register"
# services (premises, addresses, food-establishment categories); all data is
# fetched from the register URLs configured on current_app.
import collections

import jinja2
import requests
from flask import (
    current_app,
    Blueprint,
    render_template,
    request,
    jsonify,
    abort
)

frontend = Blueprint('frontend', __name__, template_folder='templates')

# All register endpoints speak JSON.
headers = {"Content-type": "application/json"}


@jinja2.contextfilter
@frontend.app_template_filter()
def format_link(context, value):
    """Template filter: render a 'section:activity' pair as a section link.

    NOTE(review): this builds raw HTML by string interpolation from register
    data without escaping -- if the register values are not fully trusted
    this is an XSS vector; consider markupsafe escaping.
    """
    items = value.split(':')
    register = current_app.config['POAO_SECTION_REGISTER']
    return "<a href='%s/products-of-animal-origin-section/%s'>%s</a> %s" % (register, items[0],items[0],items[1])


@frontend.route('/')
def index():
    """Landing page: an unfiltered search against the premises register."""
    premises_url = current_app.config['PREMISES_REGISTER']
    url = "%s/search?_representation=json" % premises_url
    resp = requests.get(url, headers=headers)
    if resp.status_code != 200:
        abort(resp.status_code)
    return render_template('index.html', data=resp.json())


@frontend.route('/search')
def search():
    """Proxy a paged search query to the premises register, returning JSON.

    NOTE(review): 'query' and 'page' are interpolated into the backend URL
    without URL-encoding; confirm the register tolerates raw values.
    """
    query = request.args.get('query', '')
    page = request.args.get('page', 0)
    premises_url = current_app.config['PREMISES_REGISTER']
    url = "%s/search?_query=%s&_page=%s&_representation=json" % (premises_url, query, page)
    resp = requests.get(url, headers=headers)
    if resp.status_code != 200:
        abort(resp.status_code)
    current_app.logger.info(resp.json())
    return jsonify(resp.json())


@frontend.route('/premises/<int:id>')
def premises(id):
    """Detail page for one premises: joins data from four registers by id."""
    premises_register = current_app.config['PREMISES_REGISTER']
    poao_premises_register = current_app.config['POAO_PREMISES_REGISTER']
    address_register = current_app.config['ADDRESS_REGISTER']
    food_category_register = current_app.config['FOOD_ESTABLISHMENT_CATEGORY_REGISTER']
    try:
        premises_url = '%s/premises/%d.json' % (premises_register, id)
        resp = requests.get(premises_url, headers=headers)
        resp.raise_for_status()
        premises = resp.json()

        poao_premises_url = '%s/premises/%d.json' % (poao_premises_register, id)
        resp = requests.get(poao_premises_url, headers=headers)
        resp.raise_for_status()
        poao_premises = resp.json()

        category_details = _get_category_details(poao_premises)

        address_url = '%s/address/%d.json' % (address_register, id)
        resp = requests.get(address_url, headers=headers)
        resp.raise_for_status()
        address = resp.json()
    except requests.exceptions.HTTPError as e:
        # Propagate the status of whichever register call failed last.
        current_app.logger.info(e)
        abort(resp.status_code)

    return render_template('premises.html',
                           poao_premises_register=poao_premises_register,
                           premises=premises,
                           poao_premises=poao_premises,
                           address=address,
                           category_details=category_details,
                           food_category_register=food_category_register)


# Resolved display names for one 'section:activity' category key.
Category = collections.namedtuple('Category', 'category_key, section_name, activity_name')


# This sort of stuff is a mess.
def _get_category_details(premises):
    """Resolve each 'section:activity' key on the premises to display names.

    Returns a list of Category tuples; on any HTTP error the partial (or
    empty) list gathered so far is returned -- this is deliberate best-effort.
    """
    category_details = []
    try:
        for category in premises['entry']['food-establishment-categories']:
            section_key, activity_key = category.split(':')
            section_url = "%s/products-of-animal-origin-section/%s.json" % (current_app.config['POAO_SECTION_REGISTER'], section_key)
            activity_url = "%s/products-of-animal-origin-activity/%s.json" % (current_app.config['POAO_ACTIVITY_REGISTER'], activity_key)
            section_resp = requests.get(section_url, headers=headers)
            activity_resp = requests.get(activity_url, headers=headers)
            section_resp.raise_for_status()
            activity_resp.raise_for_status()
            section = section_resp.json()['entry']
            activity = activity_resp.json()['entry']
            category = Category(category_key=category, section_name=section['name'], activity_name=activity['name'])
            category_details.append(category)
            current_app.logger.info(category_details)
    except requests.exceptions.HTTPError as e:
        current_app.logger.info(e)
        current_app.logger.info('Not much we can do at this point but return empty category_details')
    return category_details
# Sum of "S-numbers" below N**2: perfect squares whose decimal digits can be
# split into two or more groups summing to the square root (Project-Euler
# style problem).
from math import log10, floor, isqrt

# N = 12
N = 101
# N = 1000001


def n_squares(n):
    """Return the perfect squares i**2 for i in 2..n-1 (the candidates)."""
    return [i**2 for i in range(2, n)]

# print(n_squares(11))
# print(n_squares(100))

##### This block from stackoverflow:
# https://stackoverflow.com/questions/37023774/all-ways-to-partition-a-string
import itertools

# Kept for the commented-out memoization experiment in allPartitions below.
memo = {}

def multiSlice(s, cutpoints):
    """Split sequence s at the given sorted cutpoint indices."""
    k = len(cutpoints)
    if k == 0:
        return [s]
    else:
        multislices = [s[:cutpoints[0]]]
        multislices.extend(s[cutpoints[i]:cutpoints[i+1]] for i in range(k-1))
        multislices.append(s[cutpoints[k-1]:])
        return multislices

def allPartitions(s):
    """Yield every partition of s into two or more contiguous groups.

    Note the trivial partition (the whole sequence in one group) is NOT
    produced, matching the problem's "two or more" requirement.
    """
    # if s in memo:
    #     return memo[s]
    n = len(s)
    cuts = list(range(1, n))
    for k in range(1, n):
        for cutpoints in itertools.combinations(cuts, k):
            yield multiSlice(s, cutpoints)
##### End block

# print(list(allPartitions([int(i) for i in str(1234)])))


def list_sum(num_list):
    """Sum a digit partition, reading each group as a decimal number.

    e.g. [[1, 2], [3, 4]] -> 12 + 34 = 46.
    """
    outer_sum = 0
    for sub_list in num_list:
        inner_sum = 0
        power = 1
        # Rebuild the group's value from its least significant digit up.
        for digit in sub_list[::-1]:
            inner_sum += power * digit
            power *= 10
        outer_sum += inner_sum
    return outer_sum

# print(list_sum([[1, 2], [3, 4]]))
# print(list_sum([[1, 2, 3, 4]]))
# print(list_sum([[1], [2], [3], [4]]))


def is_s_num(num):
    """Return True if num is an S-number.

    An S-number is a perfect square whose digits can be split into two or
    more groups that sum to its square root.

    BUG FIX: the original compared ``num ** 0.5`` (a float) against integer
    partition sums; float square roots lose precision for num >= 2**53,
    silently breaking the test at scale. math.isqrt keeps this exact.
    """
    root = isqrt(num)
    if root * root != num:
        return False
    digits = [int(i) for i in str(num)]
    for part in allPartitions(digits):
        if list_sum(part) == root:
            return True
    return False

# print(81, is_s_num(81))
# print(64, is_s_num(64))
# print(8281, is_s_num(8281))
# print(9801, is_s_num(9801))


def T(N):
    """Return the sum of all S-numbers among the squares of 2..N-1."""
    squares = n_squares(N)
    # Renamed from 'sum' to avoid shadowing the builtin.
    total = 0
    for n in squares:
        if is_s_num(n):
            print(n, "is true")
            total += n
    return total

print(T(N))