import os

import battlenet

try:
    import unittest2 as unittest
except ImportError:
    import unittest

PUBLIC_KEY = os.environ.get('BNET_PUBLIC_KEY')
PRIVATE_KEY = os.environ.get('BNET_PRIVATE_KEY')


class RegionsTest(unittest.TestCase):
    def setUp(self):
        self.connection = battlenet.Connection(public_key=PUBLIC_KEY,
                                               private_key=PRIVATE_KEY)

    def test_us(self):
        realms = self.connection.get_all_realms(battlenet.UNITED_STATES)
        self.assertTrue(len(realms) > 0)

    def test_eu(self):
        realms = self.connection.get_all_realms(battlenet.EUROPE)
        self.assertTrue(len(realms) > 0)

    def test_kr(self):
        realms = self.connection.get_all_realms(battlenet.KOREA)
        self.assertTrue(len(realms) > 0)

    def test_tw(self):
        realms = self.connection.get_all_realms(battlenet.TAIWAN)
        self.assertTrue(len(realms) > 0)

    def test_cn(self):
        realms = self.connection.get_all_realms(battlenet.CHINA)
        self.assertTrue(len(realms) > 0)

    def tearDown(self):
        del self.connection


if __name__ == '__main__':
    unittest.main()
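
# The Connection API exercised by the tests above can also be used directly;
# a minimal standalone sketch (same environment variables and get_all_realms
# call as in the tests; realm objects print via their default repr):
if PUBLIC_KEY and PRIVATE_KEY:
    connection = battlenet.Connection(public_key=PUBLIC_KEY,
                                      private_key=PRIVATE_KEY)
    for realm in connection.get_all_realms(battlenet.UNITED_STATES):
        print(realm)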
"""Disassembler of Python byte code into mnemonics.""" import sys import types from opcode_25 import * from opcode_25 import __all__ as _opcodes_all __all__ = ["dis","disassemble","distb","disco"] + _opcodes_all del _opcodes_all def dis(x=None): """Disassemble classes, methods, functions, or code. With no argument, disassemble the last traceback. """ if x is None: distb() return if type(x) is types.InstanceType: x = x.__class__ if hasattr(x, 'im_func'): x = x.im_func if hasattr(x, 'func_code'): x = x.func_code if hasattr(x, '__dict__'): items = x.__dict__.items() items.sort() for name, x1 in items: if type(x1) in (types.MethodType, types.FunctionType, types.CodeType, types.ClassType): print "Disassembly of %s:" % name try: dis(x1) except TypeError, msg: print "Sorry:", msg print elif hasattr(x, 'co_code'): disassemble(x) elif isinstance(x, str): disassemble_string(x) else: raise TypeError, \ "don't know how to disassemble %s objects" % \ type(x).__name__ def distb(tb=None): """Disassemble a traceback (default: last traceback).""" if tb is None: try: tb = sys.last_traceback except AttributeError: raise RuntimeError, "no last traceback to disassemble" while tb.tb_next: tb = tb.tb_next disassemble(tb.tb_frame.f_code, tb.tb_lasti) def disassemble(co, lasti=-1): """Disassemble a code object.""" code = co.co_code byte_increments = [ord(c) for c in co.co_lnotab[0::2]] line_increments = [ord(c) for c in co.co_lnotab[1::2]] table_length = len(byte_increments) # == len(line_increments) lineno = co.co_firstlineno table_index = 0 while (table_index < table_length and byte_increments[table_index] == 0): lineno += line_increments[table_index] table_index += 1 addr = 0 line_incr = 0 labels = findlabels(code) n = len(code) i = 0 extended_arg = 0 free = None while i < n: c = code[i] op = ord(c) if i >= addr: lineno += line_incr while table_index < table_length: addr += byte_increments[table_index] line_incr = line_increments[table_index] table_index += 1 if line_incr: break else: addr = sys.maxint if i > 0: print print "%3d"%lineno, else: print ' ', if i == lasti: print '-->', else: print ' ', if i in labels: print '>>', else: print ' ', print `i`.rjust(4), print opname[op].ljust(20), i = i+1 if op >= HAVE_ARGUMENT: oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg extended_arg = 0 i = i+2 if op == EXTENDED_ARG: extended_arg = oparg*65536L print `oparg`.rjust(5), if op in hasconst: print '(' + `co.co_consts[oparg]` + ')', elif op in hasname: print '(' + co.co_names[oparg] + ')', elif op in hasjrel: print '(to ' + `i + oparg` + ')', elif op in haslocal: print '(' + co.co_varnames[oparg] + ')', elif op in hascompare: print '(' + cmp_op[oparg] + ')', elif op in hasfree: if free is None: free = co.co_cellvars + co.co_freevars print '(' + free[oparg] + ')', print def disassemble_string(code, lasti=-1, varnames=None, names=None, constants=None): labels = findlabels(code) n = len(code) i = 0 while i < n: c = code[i] op = ord(c) if op == opmap['SET_LINENO'] and i > 0: print # Extra blank line if i == lasti: print '-->', else: print ' ', if i in labels: print '>>', else: print ' ', print `i`.rjust(4), print opname[op].ljust(15), i = i+1 if op >= HAVE_ARGUMENT: oparg = ord(code[i]) + ord(code[i+1])*256 i = i+2 print `oparg`.rjust(5), if op in hasconst: if constants: print '(' + `constants[oparg]` + ')', else: print '(%d)'%oparg, elif op in hasname: if names is not None: print '(' + names[oparg] + ')', else: print '(%d)'%oparg, elif op in hasjrel: print '(to ' + `i + oparg` + ')', elif op in haslocal: if 
varnames: print '(' + varnames[oparg] + ')', else: print '(%d)' % oparg, elif op in hascompare: print '(' + cmp_op[oparg] + ')', print disco = disassemble # XXX For backwards compatibility def findlabels(code): """Detect all offsets in a byte code which are jump targets. Return the list of offsets. """ labels = [] n = len(code) i = 0 while i < n: c = code[i] op = ord(c) i = i+1 if op >= HAVE_ARGUMENT: oparg = ord(code[i]) + ord(code[i+1])*256 i = i+2 label = -1 if op in has
jrel: label = i+oparg elif op in hasjabs: label = oparg if label >= 0: if label not in labels: labels.append(label) return labels def _test(): """Simple test program to disassemble a file.""" if sys.argv[1:]: if sys.argv[2:]: sys.stderr.write("usage: python dis.py [-|file]\n") sys.exit(2) fn = sys.argv[1] if not fn or fn =
= "-": fn = None else: fn = None if fn is None: f = sys.stdin else: f = open(fn) source = f.read() if fn is not None: f.close() else: fn = "<stdin>" code = compile(source, fn, "exec") dis(code) if __name__ == "__main__": _test()
    def __call__(self, x=None):
        o = copy.deepcopy(self)
        if x:
            o.validates(x)
        return o

    def render(self):
        out = ''
        out += self.rendernote(self.note)
        out += '<table class="formtab table table-bordered">\n'
        out += ('<thead ><tr class=active><th>%s</th><th class=rtd>'
                '<a class="btn" href="javascript:history.go(-1);">%s</a>'
                '</th></tr></thead>\n' % (self.title, net.websafe("Back")))
        for i in self.inputs:
            html = safeunicode(i.pre) + i.render() + self.rendernote(i.note) + safeunicode(i.post)
            if i.is_hidden():
                out += '    <tr style="display: none;"><td></td><td>%s</td></tr>\n' % (html)
            else:
                out += '    <tr><td>%s</td><td>%s</td></tr>\n' % (net.websafe(i.description), html)
        if self.error:
            out += '    <tr><td colspan=2>%s</td></tr>\n' % (self.rendernote(self.error))
        out += "</table>"
        return out

    def render_css(self):
        out = []
        out.append(self.rendernote(self.note))
        for i in self.inputs:
            out.append(' <div class="form-group">\n')
            if not i.is_hidden():
                out.append(' <label class="col-sm-4 control-label" id="lab_%s" for="%s">%s</label>\n'
                           % (i.id, i.id, net.websafe(i.description)))
            out.append(i.pre)
            out.append(' <div class="col-sm-6">\n')
            out.append("  %s\n" % i.render())
            out.append(' </div>\n')
            if i.help:
                out.append(' <a id="%s_help" href="javascript:void(0);" data-container="body"'
                           ' data-toggle="popover" data-trigger="focus" data-placement="top"'
                           ' data-content="%s">\n' % (i.id, i.help))
                out.append(' <span class="input-help glyphicon glyphicon-question-sign"></span></a>\n')
            out.append(self.rendernote(i.note))
            out.append(i.post)
            out.append(' </div>\n')
            if i.hr:
                out.append("<hr/>\n")
        return ''.join(out)

    def rendernote(self, note):
        if note:
            return '<span class="wrong">%s</span>' % net.websafe(note)
        else:
            return ""

    def validates(self, source=None, _validate=True, **kw):
        source = source or kw
        out = True
        for i in self.inputs:
            v = attrget(source, i.name)
            if _validate:
                out = i.validate(v) and out
            else:
                i.set_value(v)
        if _validate:
            out = out and self._validate(source)
            self.valid = out
        return out

    def _validate(self, value):
        self.value = value
        for v in self.validators:
            if not v.valid(value):
                self.note = v.msg
                return False
        return True

    def fill(self, source=None, **kw):
        return self.validates(source, _validate=False, **kw)

    def __getitem__(self, i):
        for x in self.inputs:
            if x.name == i:
                return x
        raise KeyError, i

    def __getattr__(self, name):
        # don't interfere with deepcopy
        inputs = self.__dict__.get('inputs') or []
        for x in inputs:
            if x.name == name:
                return x
        raise AttributeError, name

    def get(self, i, default=None):
        try:
            return self[i]
        except KeyError:
            return default

    def _get_d(self):  #@@ should really be form.attr, no?
        return storage([(i.name, i.get_value()) for i in self.inputs])
    d = property(_get_d)


class Input(object):
    def __init__(self, name, *validators, **attrs):
        self.name = name
        self.validators = validators
        self.attrs = attrs = AttributeList(attrs)
        self.description = attrs.pop('description', name)
        self.help = attrs.pop("help", "")
        self.value = attrs.pop('value', None)
        self.pre = attrs.pop('pre', "")
        self.post = attrs.pop('post', "")
        self.hr = attrs.pop('hr', False)
        self.note = None
        self.id = attrs.setdefault('id', self.get_default_id())
        if 'class_' in attrs:
            attrs['class'] = attrs['class_']
            del attrs['class_']
        attrs['placeholder'] = self.description
        for vd in self.validators:
            attrs['placeholder'] += ", " + vd.msg

    def is_hidden(self):
        return False

    def get_type(self):
        raise NotImplementedError

    def get_default_id(self):
        return self.name

    def validate(self, value):
        self.set_value(value)
        for v in self.validators:
            if not v.valid(value):
                self.note = v.msg
                return False
        return True

    def set_value(self, value):
        self.value = value

    def get_value(self):
        return self.value

    def render(self):
        attrs = self.attrs.copy()
        attrs['type'] = self.get_type()
        if self.value is not None:
            attrs['value'] = self.value
        attrs['name'] = self.name
        return '<input %s/>' % attrs

    def rendernote(self, note):
        if note:
            return '<strong class="wrong">%s</strong>' % net.websafe(note)
        else:
            return ""

    def addatts(self):
        # add leading space for backward-compatibility
        return " " + str(self.attrs)


class AttributeList(dict):
    def copy(self):
        return AttributeList(self)

    def __str__(self):
        return " ".join(['%s="%s"' % (k, net.websafe(v)) for k, v in self.items()])

    def __repr__(self):
        return '<attrs: %s>' % repr(str(self))


class Textbox(Input):
    def get_type(self):
        return 'text'


class Password(Input):
    def get_type(self):
        return 'password'


class Textarea(Input):
    def render(self):
        attrs = self.attrs.copy()
        attrs['name'] = self.name
        value = net.websafe(self.value or '')
        return '<textarea %s>%s</textarea>' % (attrs, value)


class Dropdown(Input):
    def __init__(self, name, args, *validators, **attrs):
        self.args = args
        super(Dropdown, self).__init__(name, *validators, **attrs)

    def render(self):
        attrs = self.attrs.copy()
        attrs['name'] = self.name
        x = '<select %s>\n' % attrs
        for arg in self.args:
            x += self._render_option(arg)
        x += ' </select>\n'
        return x

    def _render_option(self, arg, indent='  '):
        if isinstance(arg, (tuple, list)):
            value, desc = arg
        else:
            value, desc = arg, arg
        if self.value == value or (isinstance(self.value, list) and value in self.value):
            select_p = ' selected="selected"'
        else:
            select_p = ''
        return indent + ' <option%s value="%s">%s</option>\n' % (
            select_p, net.websafe(value), net.websafe(desc))


class GroupedDropdown(Dropdown):
    def __init__(self, name, args, *validators, **attrs):
        self.args = args
        super(Dropdown, self).__init__(name, *validators, **attrs)

    def render(self):
        attrs = self.attrs.copy()
        attrs['name'] = self.name
        x = ' <select %s>\n' % attrs
        for label, options in self.args:
            x += '  <optgroup label="%s">\n' % net.websafe(label)
            for arg in options:
                x += self._render_option(arg, indent='   ')
            x += '  </optgroup>\n'
        x += ' </select>\n'
        return x


class Radio(Input):
    def __init__(self, name, args, *validators, **attrs):
        self.args = args
        super(Radio, self).__init__(name, *validators, **attrs)

    def render(self):
        x = '<span>'
        for arg in self.args:
            if isinstance(arg, (tuple, list)):
                value, desc = arg
            else:
                value, desc = arg, arg
            attrs = self.attrs.copy()
            attrs['name'] = self.name
            attrs['type'] = 'radio'
            attrs['value'] = value
            if self.value == valu
        for stream_name, stream_config in stream_info.iteritems():
            self._construct_stream_and_publisher(stream_name, stream_config)

        log.debug("%r: PlatformAgentStreamPublisher complete", self._platform_id)

    def _construct_stream_and_publisher(self, stream_name, stream_config):
        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            log.trace("%r: _construct_stream_and_publisher: "
                      "stream_name:%r, stream_config:\n%s",
                      self._platform_id, stream_name,
                      self._pp.pformat(stream_config))

        decoder = IonObjectDeserializer(obj_registry=get_obj_registry())

        if 'stream_def_dict' not in stream_config:
            # should not happen: PlatformAgent._validate_configuration validates this.
            log.error("'stream_def_dict' key not in configuration for stream %r" % stream_name)
            return

        stream_def_dict = stream_config['stream_def_dict']
        stream_def_dict['type_'] = 'StreamDefinition'
        stream_def_obj = decoder.deserialize(stream_def_dict)
        self._stream_defs[stream_name] = stream_def_obj

        routing_key = stream_config['routing_key']
        stream_id = stream_config['stream_id']
        exchange_point = stream_config['exchange_point']
        parameter_dictionary = stream_def_dict['parameter_dictionary']
        log.debug("%r: got parameter_dictionary from stream_def_dict", self._platform_id)

        self._data_streams[stream_name] = stream_id
        self._param_dicts[stream_name] = ParameterDictionary.load(parameter_dictionary)
        stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
        publisher = self._create_publisher(stream_id, stream_route)
        self._data_publishers[stream_name] = publisher

        log.debug("%r: created publisher for stream_name=%r", self._platform_id, stream_name)

    def _create_publisher(self, stream_id, stream_route):
        publisher = StreamPublisher(process=self._agent,
                                    stream_id=stream_id,
                                    stream_route=stream_route)
        return publisher

    def reset_connection(self):
        self._connection_ID = uuid.uuid4()
        self._connection_index = {stream_name: 0
                                  for stream_name in self._data_streams.keys()}
        log.debug("%r: reset_connection: connection_id=%s, connection_index=%s",
                  self._platform_id, self._connection_ID.hex, self._connection_index)

    def handle_attribute_value_event(self, driver_event):
        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            # show driver_event as retrieved (driver_event.vals_dict might be large)
            log.trace("%r: driver_event = %s", self._platform_id, driver_event)
            log.trace("%r: vals_dict:\n%s", self._platform_id,
                      self._pp.pformat(driver_event.vals_dict))
        elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())

        stream_name = driver_event.stream_name

        publisher = self._data_publishers.get(stream_name, None)
        if not publisher:
            log.warn('%r: no publisher configured for stream_name=%r. '
                     'Configured streams are: %s',
                     self._platform_id, stream_name, self._data_publishers.keys())
            return

        param_dict = self._param_dicts[stream_name]
        stream_def = self._stream_defs[stream_name]

        if isinstance(stream_def, str):
            rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
                                       stream_definition_id=stream_def)
        else:
            rdt = RecordDictionaryTool(stream_definition=stream_def)

        self._publish_granule_with_multiple_params(publisher, driver_event,
                                                   param_dict, rdt)

    def _publish_granule_with_multiple_params(self, publisher, driver_event,
                                              param_dict, rdt):
        stream_name = driver_event.stream_name

        pub_params = {}
        selected_timestamps = None

        for param_name, param_value in driver_event.vals_dict.iteritems():
            param_name = param_name.lower()
            if param_name not in rdt:
                if param_name not in self._unconfigured_params:
                    # an unrecognized attribute for this platform:
                    self._unconfigured_params.add(param_name)
                    log.warn('%r: got attribute value event for unconfigured parameter %r in stream %r'
                             ' rdt.keys=%s',
                             self._platform_id, param_name, stream_name, list(rdt.iterkeys()))
                continue

            # separate values and timestamps:
            vals, timestamps = zip(*param_value)

            self._agent._dispatch_value_alerts(stream_name, param_name, vals)

            # Use fill_value in context to replace any None values:
            param_ctx = param_dict.get_context(param_name)
            if param_ctx:
                fill_value = param_ctx.fill_value
                log.debug("%r: param_name=%r fill_value=%s",
                          self._platform_id, param_name, fill_value)
                # do the replacement:
                vals = [fill_value if val is None else val for val in vals]
                if log.isEnabledFor(logging.TRACE):  # pragma: no cover
                    log.trace("%r: vals array after replacing None with fill_value:\n%s",
                              self._platform_id, self._pp.pformat(vals))
            else:
                log.warn("%r: unexpected: parameter context not found for %r",
                         self._platform_id, param_name)

            # Set values in rdt:
            rdt[param_name] = numpy.array(vals)

            pub_params[param_name] = vals
            selected_timestamps = timestamps

        if selected_timestamps is None:
            # that is, all param_name's were unrecognized; just return:
            return

        self._publish_granule(stream_name, publisher, param_dict, rdt,
                              pub_params, selected_timestamps)

    def _publish_granule(self, stream_name, publisher, param_dict, rdt,
                         pub_params, timestamps):
        # Set timestamp info in rdt:
        if param_dict.temporal_parameter_name is not None:
            temp_param_name = param_dict.temporal_parameter_name
            rdt[temp_param_name] = numpy.array(timestamps)
            # @TODO: Ensure that the preferred_timestamp field is correct
            rdt['preferred_timestamp'] = numpy.array(['internal_timestamp'] * len(timestamps))
            if log.isEnabledFor(logging.DEBUG):  # pragma: no cover
                log.debug('Preferred timestamp is unresolved, using "internal_timestamp"')
        else:
            log.warn("%r: Not including timestamp info in granule: "
                     "temporal_parameter_name not defined in parameter dictionary",
                     self._platform_id)

        g = rdt.to_granule(data_producer_id=self.resource_id,
                           connection_id=self._connection_ID.hex,
                           connection_index=str(self._connection_index[stream_name]))
        try:
            publisher.publish(g)

            if log.isEnabledFor(logging.TRACE):  # pragma: no cover
                log.trace("%r: Platform agent published data granule on stream %r: "
                          "%s timestamps: %s",
                          self._platform_id, stream_name,
                          self._pp.pformat(pub_params), self._pp.pformat(timestamps))
            elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
                summary_params = {attr_id: "(%d vals)" % len(vals)
                                  for attr_id, vals in pub_params.iteritems()}
                summary_timestamps = "(%d vals)" % len(timestamps)
                log.debug("%r: Platform agent published data granule on stream
pe().name

# Commands that list configuration list *all* scopes by default.
default_list_scope = None

# cmd has a submodule called "list" so preserve the python list module
python_list = list

# Patterns to ignore in the commands directory when looking for commands.
ignore_files = r'^\.|^__init__.py$|^#'

SETUP_PARSER = "setup_parser"
DESCRIPTION = "description"

command_path = os.path.join(spack.lib_path, "spack", "cmd")

commands = []
for file in os.listdir(command_path):
    if file.endswith(".py") and not re.search(ignore_files, file):
        cmd = re.sub(r'.py$', '', file)
        commands.append(cmd)
commands.sort()


def remove_options(parser, *options):
    """Remove some options from a parser."""
    for option in options:
        for action in parser._actions:
            if vars(action)['option_strings'][0] == option:
                parser._handle_conflict_resolve(None, [(option, action)])
                break


def get_python_name(name):
    """Commands can have '-' in their names, unlike Python identifiers."""
    return name.replace("-", "_")


def get_module(name):
    """Imports the module for a particular command name and returns it."""
    module_name = "%s.%s" % (__name__, name)
    module = __import__(module_name,
                        fromlist=[name, SETUP_PARSER, DESCRIPTION],
                        level=0)

    attr_setdefault(module, SETUP_PARSER, lambda *args: None)  # null-op
    attr_setdefault(module, DESCRIPTION, "")

    fn_name = get_python_name(name)
    if not hasattr(module, fn_name):
        tty.die("Command module %s (%s) must define function '%s'." %
                (module.__name__, module.__file__, fn_name))
    return module


def get_command(name):
    """Imports the command's function from a module and returns it."""
    python_name = get_python_name(name)
    return getattr(get_module(python_name), python_name)


def parse_specs(args, **kwargs):
    """Convenience function for parsing arguments from specs.  Handles common
       exceptions and dies if there are errors.
    """
    concretize = kwargs.get('concretize', False)
    normalize = kwargs.get('normalize', False)

    try:
        specs = spack.spec.parse(args)
        for spec in specs:
            if concretize:
                spec.concretize()  # implies normalize
            elif normalize:
                spec.normalize()
        return specs

    except spack.parse.ParseError as e:
        tty.error(e.message, e.string, e.pos * " " + "^")
        sys.exit(1)

    except spack.spec.SpecError as e:
        tty.error(e.message)
        sys.exit(1)


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
       replacing intervening elements with '...'.  For example::

           elide_list([1,2,3,4,5,6], 4)

       gives::

           [1, 2, 3, '...', 6]
    """
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list


def disambiguate_spec(spec):
    matching_specs = spack.store.db.query(spec)
    if not matching_specs:
        tty.die("Spec '%s' matches no installed packages." % spec)

    elif len(matching_specs) > 1:
        args = ["%s matches multiple packages." % spec,
                "Matching packages:"]
        args += [colorize("  @K{%s} " % s.dag_hash(7)) + s.cformat('$_$@$%@$=')
                 for s in matching_specs]
        args += ["Use a more specific spec."]
        tty.die(*args)

    return matching_specs[0]


def gray_hash(spec, length):
    return colorize('@K{%s}' % spec.dag_hash(length))


def display_specs(specs, args=None, **kwargs):
    """Display human readable specs with customizable formatting.

    Prints the supplied specs to the screen, formatted according to the
    arguments provided.

    Specs are grouped by architecture and compiler, and columnized if
    possible.  There are three possible "modes":

    * ``short`` (default): short specs with name and version, columnized
    * ``paths``: Two columns: one for specs, one for paths
    * ``deps``: Dependency-tree style, like ``spack spec``; can get long

    Options can add more information to the default display. Options can
    be provided either as keyword arguments or as an argparse namespace.
    Keyword arguments take precedence over settings in the argparse
    namespace.

    Args:
        specs (list of spack.spec.Spec): the specs to display
        args (optional argparse.Namespace): namespace containing
            formatting arguments

    Keyword Args:
        mode (str): Either 'short', 'paths', or 'deps'
        long (bool): Display short hashes with specs
        very_long (bool): Display full hashes with specs (supersedes ``long``)
        namespace (bool): Print namespaces along with names
        show_flags (bool): Show compiler flags with specs
        variants (bool): Show variants with specs
    """
    def get_arg(name, default=None):
        """Prefer kwargs, then args, then default."""
        if name in kwargs:
            return kwargs.get(name)
        elif args is not None:
            return getattr(args, name, default)
        else:
            return default

    mode = get_arg('mode', 'short')
    hashes = get_arg('long', False)
    namespace = get_arg('namespace', False)
    flags = get_arg('show_flags', False)
    full_compiler = get_arg('show_full_compiler', False)
    variants = get_arg('variants', False)

    hlen = 7
    if get_arg('very_long', False):
        hashes = True
        hlen = None

    nfmt = '.' if namespace else '_'
    ffmt = ''
    if full_compiler or flags:
        ffmt += '$%'
        if full_compiler:
            ffmt += '@'
        ffmt += '+'
    vfmt = '$+' if variants else ''
    format_string = '$%s$@%s%s' % (nfmt, ffmt, vfmt)

    # Make a dict with specs keyed by architecture and compiler.
    index = index_by(specs, ('architecture', 'compiler'))

    # Traverse the index and print out each package
    for i, (architecture, compiler) in enumerate(sorted(index)):
        if i > 0:
            print()

        header = "%s{%s} / %s{%s}" % (spack.spec.architecture_color,
                                      architecture, spack.spec.compiler_color,
                                      compiler)
        tty.hline(colorize(header), char='-')

        specs = index[(architecture, compiler)]
        specs.sort()

        abbreviated = [s.cformat(format_string) for s in specs]
        if mode == 'paths':
            # Print one spec per line along with prefix path
            width = max(len(s) for s in abbreviated)
            width += 2
            format = "    %%-%ds%%s" % width

            for abbrv, spec in zip(abbreviated, specs):
                prefix = gray_hash(spec, hlen) if hashes else ''
                print(prefix + (format % (abbrv, spec.prefix)))

        elif mode == 'deps':
            for spec in specs:
                print(spec.tree(
                    format=format_string,
                    indent=4,
                    prefix=(lambda s: gray_hash(s, hlen)) if hashes else None))

        elif mode == 'short':
            # Print columns of output if not printing flags
            if not flags and not full_compiler:
                def fmt(s):
                    string = ""
                    if hashes:
                        string += gray_hash(s, hlen) + ' '
                    string += s.cformat('$-%s$@%s' % (nfmt, vfmt))
                    return string
                colify(fmt(s) for s in specs)
            # Print one entry per line if including flags
            else:
                for spec in specs:
                    # Print the hash if necessary
                    hsh = gray_hash(spec, hlen) + ' ' if hashes else ''
                    print(hsh + spec.cformat(format_string) + '\n')

        else:
            raise ValueError(
                "Invalid mode for display_specs: %s. Must be one of (paths,"
                "deps, short)." % mode)


def spack_is_git_repo():
    """Ensure that this instance of Spack is a git clone."""
    with working_dir(spack.prefix):
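
# Of the helpers above, elide_list is pure list slicing and easy to
# sanity-check on its own:
#
#     print(elide_list([1, 2, 3, 4, 5, 6], 4))   # [1, 2, 3, '...', 6]
#     print(elide_list([1, 2, 3], 4))            # [1, 2, 3] -- short lists pass through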
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Manage a remote server with the paramiko module,
logging in with an SSH key.
'''

import paramiko

private_key_path = 'D:\workspace\Python-oldboy\day07\zhangyage_pass'
#key = paramiko.RSAKey.from_private_key_file(filename, password)
# private_key_path is the location of the key file; '12345678' is the key's passphrase
key = paramiko.RSAKey.from_private_key_file(private_key_path, '12345678')

ssh = paramiko.SSHClient()  # instantiate a client
# Automatically answer "yes": the first time an SSH client connects,
# it normally asks for a yes confirmation
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('192.168.75.133', 22, username='root', pkey=key)

# exec_command returns a tuple by default, unpacked here into three variables
stdin, stdout, stderr = ssh.exec_command('ifconfig')
print stdout.read()

ssh.close()
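
# For comparison, the same flow with plain password authentication instead
# of a key file; the host, user and password below are placeholders, not
# values from the original script:
ssh_pw = paramiko.SSHClient()
ssh_pw.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_pw.connect('192.168.75.133', 22, username='root', password='secret')
stdin, stdout, stderr = ssh_pw.exec_command('uptime')
print(stdout.read())
ssh_pw.close()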
import exifread
import os
import shutil

import dmcutils
from dmcutils import mylog

report = {}
trashFiles = ['.DS_Store', 'Thumbs.db', '.picasa.ini']


def processVideo(file):
    vp = os.path.join(args.targetFolder, 'videos')
    if not os.path.exists(vp):
        os.mkdir(vp)
    outPath = os.path.join(vp, os.path.split(file)[1])
    # check filesize ... if same then SHA -- just to save a little bit of time
    while os.path.exists(outPath):
        print outPath + " exists, generating new name"
        outPath = os.path.join(vp, "dif_" + os.path.split(file)[1])
    move(file, outPath)


def processFile(file):
    if not os.path.isfile(file):
        mylog("File %s does not exist." % file)
        return
    if str(file).lower().endswith(('.jpg', '.jpeg')):
        processImage(file)
        report["processImageCount"] += 1
    elif str(file).lower().endswith(('.mp4', '.mov', '.avi')):
        processVideo(file)
    elif any(bf.lower() in str(file).lower() for bf in trashFiles):
        mylog("Deleting %s because defined as Trash" % file)
        os.remove(file)
    else:
        mylog("Unhandled %s " % file)


def scanAndProcessFolders(inputDir):
    mylog("Starting in " + inputDir)
    fileList = []
    for root, dirs, files in os.walk(inputDir):
        for file in files:
            candidate = os.path.join(root, file)
            fileList.append(candidate)
    for candidate in fileList:
        # route through processFile so videos and trash files are handled too
        processFile(candidate)


def processImage(img):
    with open(img, "rb") as f:
        tags = exifread.process_file(f, stop_tag='EXIF DateTimeOriginal')
    datestr = "0"
    if "EXIF DateTimeOriginal" in tags:
        datestr = str(tags["EXIF DateTimeOriginal"])
    elif "Image DateTime" in tags:
        datestr = str(tags["Image DateTime"])
    if not datestr == "0" and not datestr == " ":
        moveImage(img, datestr)
    else:
        report["processNoExif"] = report["processNoExif"] + 1
        if args.requireExif:
            mylog("Skip %s due to missing EXIF Date" % img)
            return
        mylog("%s - No EXIFDate Found" % img)
        ndd = os.path.join(args.targetFolder, "nodate")
        # maybe old directory structure could be preserved
        if not os.path.exists(ndd):
            os.mkdir(ndd)
        move(img, os.path.join(ndd, os.path.split(img)[1]))


def moveImage(image, datestr):
    dateList = datestr.split(':')
    year, month = createDirsIfNotExist(dateList)
    filename = os.path.split(image)[1]
    newPath = os.path.join(args.targetFolder, year, month, filename)
    if os.path.exists(newPath):
        if not checkForDublette(image, newPath):
            newPath = os.path.join(args.targetFolder, year, month, "dif_" + filename)
            mylog("New filename for conflicting file generated %s" % newPath)
            move(image, newPath)
        else:
            if not args.copyonly:
                mylog("Deleting %s it already exists in %s" % (image, newPath))
                os.remove(image)
    else:
        move(image, newPath)


def move(srcFile, toDir):
    if args.copyonly:
        mylog("copy %s to directory %s" % (srcFile, toDir))
        shutil.copy(srcFile, toDir)
    else:
        mylog("move %s to directory %s" % (srcFile, toDir))
        shutil.move(srcFile, toDir)


def checkForDublette(image, newPath):
    imageHash = dmcutils.fileSha265Sum(image)
    copyedHash = dmcutils.fileSha265Sum(newPath)
    return imageHash == copyedHash


def createDirsIfNotExist(dateList):
    year = os.path.join(args.targetFolder, dateList[0].strip())
    month = os.path.join(year, dateList[1].strip())
    if not os.path.exists(year):
        mylog("Create new Folder %s" % year)
        os.mkdir(year)
    if not os.path.exists(month):
        mylog("Create new Folder %s" % month)
        os.mkdir(month)
    return year, month


def init(commandArgs):
    global args
    report["processImageCount"] = 0
    report["processNoExif"] = 0
    mylog("Init FileUtils")
    args = commandArgs


if __name__ == '__main__':
    dmcutils.init()
    init(dmcutils.commandArgs)
    scanAndProcessFolders(args.inputFolder)
    mylog("Images processed %s" % report["processImageCount"])
    mylog("Images without valid EXIF Date %s" % report["processNoExif"])
class _HSPConsumer(object):
    def start_hsp(self):
        self._hsp = Record.HSP()

    def score(self, line):
        self._hsp.bits, self._hsp.score = _re_search(
            r"Score =\s*([0-9.e+]+) bits \(([0-9]+)\)", line,
            "I could not find the score in line\n%s" % line)
        self._hsp.score = _safe_float(self._hsp.score)
        self._hsp.bits = _safe_float(self._hsp.bits)

        x, y = _re_search(
            r"Expect\(?(\d*)\)? = +([0-9.e\-|\+]+)", line,
            "I could not find the expect in line\n%s" % line)
        if x:
            self._hsp.num_alignments = _safe_int(x)
        else:
            self._hsp.num_alignments = 1
        self._hsp.expect = _safe_float(y)

    def identities(self, line):
        x, y = _re_search(
            r"Identities = (\d+)\/(\d+)", line,
            "I could not find the identities in line\n%s" % line)
        self._hsp.identities = _safe_int(x), _safe_int(y)
        self._hsp.align_length = _safe_int(y)

        if 'Positives' in line:
            x, y = _re_search(
                r"Positives = (\d+)\/(\d+)", line,
                "I could not find the positives in line\n%s" % line)
            self._hsp.positives = _safe_int(x), _safe_int(y)
            assert self._hsp.align_length == _safe_int(y)

        if 'Gaps' in line:
            x, y = _re_search(
                r"Gaps = (\d+)\/(\d+)", line,
                "I could not find the gaps in line\n%s" % line)
            self._hsp.gaps = _safe_int(x), _safe_int(y)
            assert self._hsp.align_length == _safe_int(y)

    def strand(self, line):
        self._hsp.strand = _re_search(
            r"Strand\s?=\s?(\w+)\s?/\s?(\w+)", line,
            "I could not find the strand in line\n%s" % line)

    def frame(self, line):
        # Frame can be in formats:
        # Frame = +1
        # Frame = +2 / +2
        if '/' in line:
            self._hsp.frame = _re_search(
                r"Frame\s?=\s?([-+][123])\s?/\s?([-+][123])", line,
                "I could not find the frame in line\n%s" % line)
        else:
            self._hsp.frame = _re_search(
                r"Frame = ([-+][123])", line,
                "I could not find the frame in line\n%s" % line)

    # Match a space, if one is available.  Masahir Ishikawa found a
    # case where there's no space between the start and the sequence:
    # Query: 100tt 101
    # line below modified by Yair Benita, Sep 2004
    # Note that the colon is not always present. 2006
    _query_re = re.compile(r"Query(:?) \s*(\d+)\s*(.+) (\d+)")

    def query(self, line):
        m = self._query_re.search(line)
        if m is None:
            if line.strip() == "Query ------------------------------------------------------------":
                # Special case - long gap relative to the subject,
                # note there is no start/end present, cannot update those
                self._hsp.query += "-" * 60
                self._query_len = 60  # number of dashes
                self._query_start_index = 13  # offset of first dash
                return
            raise ValueError("I could not find the query in line\n%s" % line)

        # line below modified by Yair Benita, Sep 2004.
        # added the end attribute for the query
        colon, start, seq, end = m.groups()
        seq = seq.strip()
        self._hsp.query += seq
        if self._hsp.query_start is None:
            self._hsp.query_start = _safe_int(start)

        # line below added by Yair Benita, Sep 2004.
        # added the end attribute for the query
        self._hsp.query_end = _safe_int(end)

        # Get index for sequence start (regular expression element 3)
        self._query_start_index = m.start(3)
        self._query_len = len(seq)

    def align(self, line):
        seq = line[self._query_start_index:].rstrip()
        if len(seq) < self._query_len:
            # Make sure the alignment is the same length as the query
            seq += ' ' * (self._query_len - len(seq))
        elif len(seq) > self._query_len:
            raise ValueError("Match is longer than the query in line\n%s" % line)
        self._hsp.match = self._hsp.match + seq

    # To match how we do the query, cache the regular expression.
    # Note that the colon is not always present.
    _sbjct_re = re.compile(r"Sbjct(:?) \s*(\d+)\s*(.+) (\d+)")

    def sbjct(self, line):
        m = self._sbjct_re.search(line)
        if m is None:
            raise ValueError("I could not find the sbjct in line\n%s" % line)
        colon, start, seq, end = m.groups()
        # mikep 26/9/00
        # On occasion, there is a blast hit with no subject match
        # so far, it only occurs with 1-line short "matches"
        # I have decided to let these pass as they appear
        if not seq.strip():
            seq = ' ' * self._query_len
        else:
            seq = seq.strip()
        self._hsp.sbjct += seq
        if self._hsp.sbjct_start is None:
            self._hsp.sbjct_start = _safe_int(start)
        self._hsp.sbjct_end = _safe_int(end)
        if len(seq) != self._query_len:
            raise ValueError(
                "QUERY and SBJCT sequence lengths don't match (%i %r vs %i) in line\n%s"
                % (self._query_len, self._hsp.query, len(seq), line))

        del self._query_start_index  # clean up unused variables
        del self._query_len

    def end_hsp(self):
        pass


class _DatabaseReportConsumer(object):
    def start_database_report(self):
        self._dr = Record.DatabaseReport()

    def database(self, line):
        m = re.search(r"Database: (.+)$", line)
        if m:
            self._dr.database_name.append(m.group(1))
        elif self._dr.database_name:
            # This must be a continuation of the previous name.
            self._dr.database_name[-1] = "%s%s" % (self._dr.database_name[-1],
                                                   line.strip())

    def posted_date(self, line):
        self._dr.posted_date.append(_re_search(
            r"Posted date:\s*(.+)$", line,
            "I could not find the posted date in line\n%s" % line))

    def num_letters_in_database(self, line):
        letters, = _get_cols(
            line, (-1,), ncols=6, expected={2: "letters", 4: "database:"})
        self._dr.num_letters_in_database.append(_safe_int(letters))

    def num_sequences_in_database(self, line):
        sequences, = _get_cols(
            line, (-1,), ncols=6, expected={2: "sequences", 4: "database:"})
        self._dr.num_sequences_in_database.append(_safe_int(sequences))

    def ka_params(self, line):
        self._dr.ka_params = [_safe_float(x) for x in line.split()]

    def gapped(self, line):
        self._dr.gapped = 1

    def ka_params_gap(self, line):
        self._dr.ka_params_gap = [_safe_float(x) for x in line.split()]

    def end_database_report(self):
        pass


class _ParametersConsumer(object):
    def start_parameters(self):
        self._params = Record.Parameters()

    def matrix(self, line):
        self._params.matrix = line[8:].rstrip()

    def gap_penalties(self, line):
        self._params.gap_penalties = [_safe_float(x) for x in _get_cols(
            line, (3, 5), ncols=6, expected={2: "Existence:", 4: "Extension:"})]

    def num_hits(self, line):
        if '1st pass' in line:
            x, = _get_cols(line, (-4,), ncols=11, expected={2: "Hits"})
            self._params.num_hits = _safe_int(x)
        else:
            x, = _get_cols(line, (-1,), ncols=6, expected={2: "Hits"})
            self._params.num_hits = _safe_int(x)

    def num_sequences(self, line):
        if '1st pass' in line:
            x, = _get_cols(line, (-4,), ncols=9, expected={2: "Sequences:"})
            self._params.num_sequences = _safe_int(x)
        else:
            x, = _get_cols(line, (-1,), ncols=4, expected={2: "Sequences:"})
            self._params.num_sequences = _safe_int(x)

    def num_extends(self, line):
        if '1st pass' in line:
            x, = _get_cols(line, (-4,), ncols=9, expected={2: "extensions:"})
            self._params.num_extends = _safe_int(x)
        e
pixel width. Similarly, the number of scanlines must be bigger than or equal to
the pixel height.
Furthermore, we recommend that pitches and lines be multiple of 32
to not break assumptions that might be made by various optimizations
in the video decoders, video filters and/or video converters.
'''

VideoCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
VideoCleanupCb.__doc__ = '''Callback prototype to configure picture buffers format.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() (and possibly modified by @ref libvlc_video_format_cb) [IN]
'''

AudioPlayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int64)
AudioPlayCb.__doc__ = '''Callback prototype for audio playback.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param samples pointer to the first audio sample to play back [IN]
\param count number of audio samples to play back
\param pts expected play time stamp (see libvlc_delay())
'''

AudioPauseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
AudioPauseCb.__doc__ = '''Callback prototype for audio pause.
\note The pause callback is never called if the audio is already paused.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param pts time stamp of the pause request (should be elapsed already)
'''

AudioResumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
AudioResumeCb.__doc__ = '''Callback prototype for audio resumption (i.e. restart from pause).
\note The resume callback is never called if the audio is not paused.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param pts time stamp of the resumption request (should be elapsed already)
'''

AudioFlushCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
AudioFlushCb.__doc__ = '''Callback prototype for audio buffer flush
(i.e. discard all pending buffers and stop playback as soon as possible).
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
'''

AudioDrainCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
AudioDrainCb.__doc__ = '''Callback prototype for audio buffer drain
(i.e. wait for pending buffers to be played).
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
'''

AudioSetVolumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_float, ctypes.c_bool)
AudioSetVolumeCb.__doc__ = '''Callback prototype for audio volume change.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param volume software volume (1. = nominal, 0. = mute)
\param mute muted flag
'''

AudioSetupCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ListPOINTER(ctypes.c_void_p),
                                ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
AudioSetupCb.__doc__ = '''Callback prototype to setup the audio playback.
This is called when the media player needs to create a new audio output.
\param opaque pointer to the data pointer passed to L{libvlc_audio_set_callbacks}() [IN/OUT]
\param format 4 bytes sample format [IN/OUT]
\param rate sample rate [IN/OUT]
\param channels channels count [IN/OUT]
\return 0 on success, anything else to skip audio playback
'''

AudioCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
AudioCleanupCb.__doc__ = '''Callback prototype for audio playback cleanup.
This is called when the media player no longer needs an audio output.
\param opaque data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
'''

cb = CallbackDecorators
# End of generated enum types #

# From libvlc_structures.h

class AudioOutput(_Cstruct):

    def __str__(self):
        return '%s(%s:%s)' % (self.__class__.__name__, self.name, self.description)

AudioOutput._fields_ = [  # recursive struct
    ('name',        ctypes.c_char_p),
    ('description', ctypes.c_char_p),
    ('next',        ctypes.POINTER(AudioOutput)),
]

class LogMessage(_Cstruct):
    _fields_ = [
        ('size',     ctypes.c_uint),
        ('severity', ctypes.c_int),
        ('type',     ctypes.c_char_p),
        ('name',     ctypes.c_char_p),
        ('header',   ctypes.c_char_p),
        ('message',  ctypes.c_char_p),
    ]

    def __init__(self):
        super(LogMessage, self).__init__()
        self.size = ctypes.sizeof(self)

    def __str__(self):
        return '%s(%d:%s): %s' % (self.__class__.__name__, self.severity, self.type, self.message)

class MediaEvent(_Cstruct):
    _fields_ = [
        ('media_name',    ctypes.c_char_p),
        ('instance_name', ctypes.c_char_p),
    ]

class MediaStats(_Cstruct):
    _fields_ = [
        ('read_bytes',          ctypes.c_int),
        ('input_bitrate',       ctypes.c_float),
        ('demux_read_bytes',    ctypes.c_int),
        ('demux_bitrate',       ctypes.c_float),
        ('demux_corrupted',     ctypes.c_int),
        ('demux_discontinuity', ctypes.c_int),
        ('decoded_video',       ctypes.c_int),
        ('decoded_audio',       ctypes.c_int),
        ('displayed_pictures',  ctypes.c_int),
        ('lost_pictures',       ctypes.c_int),
        ('played_abuffers',     ctypes.c_int),
        ('lost_abuffers',       ctypes.c_int),
        ('sent_packets',        ctypes.c_int),
        ('sent_bytes',          ctypes.c_int),
        ('send_bitrate',        ctypes.c_float),
    ]

class MediaTrackInfo(_Cstruct):
    _fields_ = [
        ('codec',              ctypes.c_uint32),
        ('id',                 ctypes.c_int),
        ('type',               TrackType),
        ('profile',            ctypes.c_int),
        ('level',              ctypes.c_int),
        ('channels_or_height', ctypes.c_uint),
        ('rate_or_width',      ctypes.c_uint),
    ]

class AudioTrack(_Cstruct):
    _fields_ = [
        ('channels', ctypes.c_uint),
        ('rate',     ctypes.c_uint),
    ]

class VideoTrack(_Cstruct):
    _fields_ = [
        ('height',         ctypes.c_uint),
        ('width',          ctypes.c_uint),
        ('sar_num',        ctypes.c_uint),
        ('sar_den',        ctypes.c_uint),
        ('frame_rate_num', ctypes.c_uint),
        ('frame_rate_den', ctypes.c_uint),
    ]

class SubtitleTrack(_Cstruct):
    _fields_ = [
        ('encoding', ctypes.c_char_p),
    ]

class MediaTrackTracks(ctypes.Union):
    _fields_ = [
        ('audio',    ctypes.POINTER(AudioTrack)),
        ('video',    ctypes.POINTER(VideoTrack)),
        ('subtitle', ctypes.POINTER(SubtitleTrack)),
    ]

class MediaTrack(_Cstruct):
    _anonymous_ = ("u",)
    _fields_ = [
        ('codec',           ctypes.c_uint32),
        ('original_fourcc', ctypes.c_uint32),
        ('id',              ctypes.c_int),
        ('type',            TrackType),
        ('profile',         ctypes.c_int),
        ('level',           ctypes.c_int),
        ('u',               MediaTrackTracks),
        ('bitrate',         ctypes.c_uint),
        ('language',        ctypes.c_char_p),
        ('description',     ctypes.c_char_p),
    ]

class PlaylistItem(_Cstruct):
    _fields_ = [
        ('id',   ctypes.c_int),
        ('uri',  ctypes.c_char_p),
        ('name', ctypes.c_char_p),
    ]

    def __str__(self):
        return '%s #%d %s (uri %s)' % (self.__class__.__name__, self.id, self.name, self.uri)

class Position(object):
    """Enum-like, immutable window position constants.

       See e.g. VideoMarqueeOption.Position.
    """
    Center       = 0
    Left         = 1
    CenterLeft   = 1
    Right        = 2
    CenterRight  = 2
    Top          = 4
    TopCenter    = 4
    TopLeft      = 5
    TopRight     = 6
    Bottom       = 8
    BottomCenter = 8
    BottomLeft   = 9
    BottomRight  = 10

    def __init__(self, *unused):
        raise TypeError('constants only')

    def __setattr__(self, *unused):  # PYCHOK expected
        raise TypeError('immutable constants')

class Rectangle(_Cstruct):
    _fields_ = [
        ('top',
#!/usr/bin/env python
#
# Jetduino Example for using the Grove Thumb Joystick (http://www.seeedstudio.com/wiki/Grove_-_Thumb_Joystick)
#
# The Jetduino connects the Jetson and Grove sensors.  You can learn more about the Jetduino here:
# http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example?  Ask on the forums here:
# http://www.NeuroRoboticTech.com/Forum
#
'''
## License

The MIT License (MIT)

GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries

Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''

import time
import jetduino
from jetduino_pins import *

# Connect the Grove Thumb Joystick to analog port A0
# GrovePi Port A0 uses Arduino pins 0 and 1
# GrovePi Port A1 uses Arduino pins 1 and 2
# Don't plug anything into port A1 that uses pin 1
# Most Grove sensors only use 3 of their 4 pins, which is why the GrovePi
# shares Arduino pins between adjacent ports
# If the sensor has a pin definition SIG,NC,VCC,GND, the second (white)
# pin is not connected to anything
# If you wish to connect two joysticks, use ports A0 and A2 (skip A1)

# Uses two pins - one for the X axis and one for the Y axis
# This configuration means you are using port A0
xPin = ARD_A0
yPin = ARD_A2

jetduino.pinMode(xPin, INPUT_PIN)
jetduino.pinMode(yPin, INPUT_PIN)

# The Grove Thumb Joystick is an analog device that outputs analog signal
# ranging from 0 to 1023.
# The X and Y axes are two ~10k potentiometers and a momentary push button
# which shorts the x axis.
# My joystick produces slightly different results to the specifications
# found on the url above.  I've listed both here:
#
# Specifications
#     Min  Typ  Max  Click
#  X  206  516  798  1023
#  Y  203  507  797
#
# My Joystick
#     Min  Typ  Max  Click
#  X  253  513  766  1020-1023
#  Y  250  505  769

while True:
    try:
        # Get X/Y coordinates
        x = jetduino.analogRead(xPin)
        y = jetduino.analogRead(yPin)

        # Calculate X/Y resistance
        Rx = float(1023 - x) * 10 / x
        Ry = float(1023 - y) * 10 / y

        # Was a click detected on the X axis?
        click = 1 if x >= 1020 else 0

        print ("x =", x, " y =", y, " Rx =", Rx, " Ry =", Ry, " click =", click)
        time.sleep(.5)

    except IOError:
        print ("Error")
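
# A worked instance of the resistance formula above: the axis potentiometer
# forms a voltage divider with the 10 kohm pot, so a roughly centered X
# reading of 513 gives
#   Rx = (1023 - 513) * 10 / 513 = 5100 / 513, about 9.94 (kohm)
# which can be checked outside the polling loop:
#
#     x = 513
#     print ((1023 - x) * 10.0 / x)   # ~9.94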
# -*- coding: utf-8 -*-
"""
.. module:: MyCapytain.errors
   :synopsis: MyCapytain errors

.. moduleauthor:: Thibault Clérice <leponteineptique@gmail.com>
"""


class MyCapytainException(BaseException):
    """ Namespacing errors """


class JsonLdCollectionMissing(MyCapytainException):
    """ Error thrown when a JSON LD contains no principal collection

    Raised when a json supposed to contain collection is parsed but nothing is found
    """


class DuplicateReference(SyntaxWarning, MyCapytainException):
    """ Error generated when a duplicate is found in CtsReference """


class RefsDeclError(Exception, MyCapytainException):
    """ Error issued when the refsDecl does not succeed in xpath (no results) """
    pass


class InvalidSiblingRequest(Exception, MyCapytainException):
    """ This error is thrown when one attempts to get the previous or next passage
    on a passage with a range of different depth, e.g. 1-2.25
    """
    pass


class InvalidURN(Exception, MyCapytainException):
    """ This error is thrown when URNs are not valid """


class MissingAttribute(Exception, MyCapytainException):
    """ This error is thrown when an attribute is not present in the Object (missing at startup) """


class UnknownObjectError(ValueError, MyCapytainException):
    """ This error is thrown when an object does not exist in an inventory or in an API """


class UnknownNamespace(ValueError, MyCapytainException):
    """ This error is thrown when a namespace is unknown """


class UndispatchedTextError(Exception, MyCapytainException):
    """ This error is thrown when a text has not been dispatched by a dispatcher """


class UnknownCollection(KeyError, MyCapytainException):
    """ A collection is unknown to its ancestor """


class EmptyReference(SyntaxWarning, MyCapytainException):
    """ Error generated when a CtsReference does not exist or is invalid """


class CitationDepthError(UnknownObjectError, MyCapytainException):
    """ Error generated when the depth of a requested citation is deeper than the citation scheme of the text """


class MissingRefsDecl(Exception, MyCapytainException):
    """ A text has no properly encoded refsDecl """


class PaginationBrowsingError(MyCapytainException):
    """ Raised when contacting a remote service and some of the pages were not reachable or parsable """


class CapitainsXPathError(Exception):
    def __init__(self, message):
        super(CapitainsXPathError, self).__init__()
        self.message = message

    def __repr__(self):
        return "CapitainsXPathError(" + self.message + ")"
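
# Since MyCapytainException derives from BaseException rather than Exception,
# a generic `except Exception` handler will not catch every member of this
# hierarchy (PaginationBrowsingError, for instance); catching the family
# explicitly does. A small illustrative sketch (the URN is just a sample string):
#
#     try:
#         raise UnknownCollection("urn:cts:example:collection")
#     except MyCapytainException as exc:
#         print("caught: %r" % exc)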
# Copyright 2016 Internap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask

from ubersmith_remote_module_server import api, router


class Server(object):
    def __init__(self, modules):
        self.router = router.Router()
        self.app = Flask(__name__)
        self.api = api.Api(modules, self.app, self.router)

    def run(self, *args, **kwargs):
        self.app.run(*args, **kwargs)
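
# A minimal usage sketch; what api.Api expects inside `modules` is
# project-specific, so the empty dict below is only a placeholder assumption:
if __name__ == '__main__':
    server = Server(modules={})
    server.run(host='127.0.0.1', port=5000)  # arguments are forwarded to Flask's app.run()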
sed in static content response.

    These headers are contained in an appinfo.HttpHeadersDict, which maps
    header names to values (both strings).
    """
    return self._FirstMatch(path).http_headers or appinfo.HttpHeadersDict()


def ReadDataFile(data_path, openfile=file):
  """Reads a file on disk, returning a corresponding HTTP status and data.

  Args:
    data_path: Path to the file on disk to read.
    openfile: Used for dependency injection.

  Returns:
    Tuple (status, data) where status is an HTTP response code, and data is
    the data read; will be an empty string if an error occurred or the
    file was empty.
  """
  status = httplib.INTERNAL_SERVER_ERROR
  data = ""

  try:
    data_file = openfile(data_path, 'rb')
    try:
      data = data_file.read()
    finally:
      data_file.close()
      status = httplib.OK
  except (OSError, IOError), e:
    logging.error('Error encountered reading file "%s":\n%s', data_path, e)
    if e.errno in FILE_MISSING_EXCEPTIONS:
      status = httplib.NOT_FOUND
    else:
      status = httplib.FORBIDDEN

  return status, data


class FileDispatcher(URLDispatcher):
  """Dispatcher that reads data files from disk."""

  def __init__(self, config, path_adjuster, static_file_config_matcher,
               read_data_file=ReadDataFile):
    """Initializer.

    Args:
      config: AppInfoExternal instance representing the parsed app.yaml file.
      path_adjuster: Instance of PathAdjuster to use for finding absolute
        paths of data files on disk.
      static_file_config_matcher: StaticFileConfigMatcher object.
      read_data_file: Used for dependency injection.
    """
    self._config = config
    self._path_adjuster = path_adjuster
    self._static_file_config_matcher = static_file_config_matcher
    self._read_data_file = read_data_file

  def Dispatch(self, request, outfile, base_env_dict=None):
    """Reads the file and returns the response status and data."""
    full_path = self._path_adjuster.AdjustPath(request.path)
    status, data = self._read_data_file(full_path)
    content_type = self._static_file_config_matcher.GetMimeType(request.path)
    static_file = self._static_file_config_matcher.IsStaticFile(request.path)
    expiration = self._static_file_config_matcher.GetExpiration(request.path)
    current_etag = self.CreateEtag(data)
    if_match_etag = request.headers.get('if-match', None)
    if_none_match_etag = request.headers.get('if-none-match', '').split(',')

    http_headers = self._static_file_config_matcher.GetHttpHeaders(request.path)

    def WriteHeader(name, value):
      if http_headers.Get(name) is None:
        outfile.write('%s: %s\r\n' % (name, value))

    if if_match_etag and not self._CheckETagMatches(if_match_etag.split(','),
                                                    current_etag,
                                                    False):
      outfile.write('Status: %s\r\n' % httplib.PRECONDITION_FAILED)
      WriteHeader('ETag', current_etag)
      outfile.write('\r\n')
    elif self._CheckETagMatches(if_none_match_etag, current_etag, True):
      outfile.write('Status: %s\r\n' % httplib.NOT_MODIFIED)
      WriteHeader('ETag', current_etag)
      outfile.write('\r\n')
    else:
      outfile.write('Status: %d\r\n' % status)
      WriteHeader('Content-Type', content_type)
      if expiration:
        fmt = email.Utils.formatdate
        WriteHeader('Expires', fmt(time.time() + expiration, usegmt=True))
        WriteHeader('Cache-Control', 'public, max-age=%i' % expiration)
      if static_file:
        WriteHeader('ETag', '"%s"' % current_etag)
      for header in http_headers.iteritems():
        outfile.write('%s: %s\r\n' % header)
      outfile.write('\r\n')
      outfile.write(data)

  def __str__(self):
    """Returns a string representation of this dispatcher."""
    return 'File dispatcher'

  @staticmethod
  def CreateEtag(data):
    """Returns string of hash of file content, unique per URL."""
    data_crc = zlib.crc32(data)
    return base64.b64encode(str(data_crc))

  @staticmethod
  def _CheckETagMatches(supplied_etags, current_etag, allow_weak_match):
    """Checks if there is an entity tag match.

    Args:
      supplied_etags: list of input etags
      current_etag: the calculated etag for the entity
      allow_weak_match: Allow for weak tag comparison.

    Returns:
      True if there is a match, False otherwise.
    """
    for tag in supplied_etags:
      if allow_weak_match and tag.startswith('W/'):
        tag = tag[2:]
      tag_data = tag.strip('"')
      if tag_data == '*' or tag_data == current_etag:
        return True
    return False


_IGNORE_RESPONSE_HEADERS = frozenset([
    'connection',
    'content-encoding',
    'date',
    'keep-alive',
    'proxy-authenticate',
    'server',
    'trailer',
    'transfer-encoding',
    'upgrade',
    blobstore.BLOB_KEY_HEADER
])


class AppServerResponse(object):
  """Development appserver response object.

  Object used to hold the full appserver response.  Used as a container that
  is passed through the request rewrite chain and ultimately sent to the web
  client.

  Attributes:
    status_code: Integer HTTP response status (e.g., 200, 302, 404, 500)
    status_message: String containing an informational message about the
      response code, possibly derived from the 'status' header, if supplied.
    headers: mimetools.Message containing the HTTP headers of the response.
    body: File-like object containing the body of the response.
    large_response: Indicates that response is permitted to be larger than
      MAX_RUNTIME_RESPONSE_SIZE.
  """

  __slots__ = ['status_code',
               'status_message',
               'headers',
               'body',
               'large_response']

  def __init__(self, response_file=None, **kwds):
    """Initializer.

    Args:
      response_file: A file-like object that contains the full response
        generated by the user application request handler.  If present
        the headers and body are set from this value, although the values
        may be further overridden by the keyword parameters.
      kwds: All keywords are mapped to attributes of AppServerResponse.
    """
    self.status_code = 200
    self.status_message = 'Good to go'
    self.large_response = False

    if response_file:
      self.SetResponse(response_file)
    else:
      self.headers = mimetools.Message(cStringIO.StringIO())
      self.body = None

    for name, value in kwds.iteritems():
      setattr(self, name, value)

  def SetResponse(self, response_file):
    """Sets headers and body from the response file.

    Args:
      response_file: File like object to set body and headers from.
    """
    self.headers = mimetools.Message(response_file)
    self.body = response_file

  @property
  def header_data(self):
    """Get header data as a string.

    Returns:
      String representation of header with line breaks cleaned up.
    """
    header_list = []
    for header in self.headers.headers:
      header = header.rstrip('\n\r')
      header_list.append(header)
    if not self.headers.getheader('Content-Type'):
      header_list.append('Content-Type: text/html')

    return '\r\n'.join(header_list) + '\r\n'


def IgnoreHeadersRewriter(response):
  """Ignore specific response headers.

  Certain response headers cannot be modified by an Application.  For a
  complete list of these headers please see:

    https://developers.google.com/appengine/docs/python/tools/webapp/responseclass#Disallowed_HTTP_Response_Headers

  This rewriter simply removes those headers.
  """
  for h in _IGNORE_RESPONSE_HEADERS:
    if h in response.headers:
      del response.headers[h]


def ValidHeadersRewriter(response):
  """Remove invalid response headers.

  Response headers must be printable ascii characters.  This is enforced in
  production by http_proto.cc IsValidHeader.

  This rewriter will remove headers that contain non ascii characters.
  """
  for (key, value) in response.headers.items():
    try:
      key.decode('ascii')
      value.decode('ascii')
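
# The weak/strong comparison in _CheckETagMatches can be seen in isolation
# (values below are illustrative, not from a real request):
#
#     print(FileDispatcher._CheckETagMatches(['"abc"'], 'abc', False))    # True: strong match
#     print(FileDispatcher._CheckETagMatches(['W/"abc"'], 'abc', False))  # False: weak tag, strong compare
#     print(FileDispatcher._CheckETagMatches(['W/"abc"'], 'abc', True))   # True: weak match allowed
#     print(FileDispatcher._CheckETagMatches(['*'], 'anything', False))   # True: wildcard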
PORT = {}
PORT['metaManager'] = 10087

CGI_SOCK = {}
CGI_SOCK['metaManager'] = '/tmp/metaManager_fcgi_sock'

SOCK = {}
SOCK['logqueue'] = '/tmp/logqueue_sock'

PIDPATH = {}
PIDPATH['metaManager'] = '/var/run/metaManager.server.pid'
PIDPATH['managerChewer'] = '/var/run/data.chewer.pid'
#!/usr/bin/env python3
"""
bootstrap.py will set up a virtualenv for you and update it as required.

Usage:
    bootstrap.py               # update virtualenv
    bootstrap.py fake          # just update the virtualenv timestamps
    bootstrap.py clean         # delete the virtualenv
    bootstrap.py -h | --help   # print this message and exit

Options for the plain command:
    -f, --force         # do the virtualenv update even if it is up to date
    -r, --full-rebuild  # delete the virtualenv before rebuilding
    -q, --quiet         # don't ask for user input
"""
# a script to set up the virtualenv so we can use fabric and tasks

import sys
import getopt

import ve_mgr


def print_help_text():
    print(__doc__)


def print_error_msg(error_msg):
    print(error_msg)
    print_help_text()
    return 2


def main(argv):
    # check python version is high enough
    ve_mgr.check_python_version(2, 6, __file__)

    force_update = False
    full_rebuild = False
    fake_update = False
    clean_ve = False
    devel = False

    if argv:
        try:
            # 'd' added to the short options so '-d' matches the '--dev'
            # handling below
            opts, args = getopt.getopt(argv[1:], 'hfqrd',
                                       ['help', 'force', 'quiet',
                                        'full-rebuild', 'dev'])
        except getopt.error as msg:
            return print_error_msg('Bad options: %s' % msg)

        # process options
        for o, a in opts:
            if o in ("-h", "--help"):
                print_help_text()
                return 0
            if o in ("-f", "--force"):
                force_update = True
            if o in ("-r", "--full-rebuild"):
                full_rebuild = True
            if o in ("-d", "--dev"):
                devel = True

        if len(args) > 1:
            return print_error_msg(
                "Can only have one argument - you had %s" % (' '.join(args)))
        if len(args) == 1:
            if args[0] == 'fake':
                fake_update = True
            elif args[0] == 'clean':
                clean_ve = True

        # check for incompatible flags
        if force_update and fake_update:
            return print_error_msg("Cannot use --force with fake")
        if full_rebuild and fake_update:
            return print_error_msg("Cannot use --full-rebuild with fake")
        if full_rebuild and clean_ve:
            return print_error_msg("Cannot use --full-rebuild with clean")

    environment = 'dev' if devel is True else None
    updater = ve_mgr.UpdateVE(environment=environment)
    if fake_update:
        return updater.update_ve_timestamp()
    elif clean_ve:
        return updater.delete_virtualenv()
    else:
        updater.update_git_submodule()
        return updater.update_ve(full_rebuild, force_update)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
ystem.DEBIAN: "/etc/mysql/my.cnf", operating_system.SUSE: "/etc/my.cnf"}[OS_NAME] MYSQL_SERVICE_CANDIDATES = ["mysql", "mysqld", "mysql-server"] MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"] MYCNF_OVERRIDES = "/etc/mysql/conf.d/overrides.cnf" MYCNF_OVERRIDES_TMP = "/tmp/overrides.cnf.tmp" MYCNF_REPLMASTER = "/etc/mysql/conf.d/0replmaster.cnf" MYCNF_REPLSLAVE = "/etc/mysql/conf.d/1replslave.cnf" MYCNF_REPLCONFIG_TMP = "/tmp/replication.cnf.tmp" # Create a package impl packager = pkg.Package() def clear_expired_password(): """ Some mysql installations generate random root password and save it in /root/.mysql_secret, this password is expired and should be changed by client that supports expired passwords. """ LOG.debug("Removing expired password.") secret_file = "/root/.mysql_secret" try: out, err = utils.execute("cat", secret_file, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: LOG.exception(_("/root/.mysql_secret does not exist.")) return m = re.match('# The random password set for the root user at .*: (.*)', out) if m: try: out, err = utils.execute("mysqladmin", "-p%s" % m.group(1), "password", "", run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: LOG.exception(_("Cannot change mysql password.")) return operating_system.remove(secret_file, force=True, as_root=True) LOG.debug("Expired password removed.") def get_auth_password(): pwd, err = utils.execute_with_timeout( "sudo", "awk", "/password\\t=/{print $3; exit}", MYSQL_CONFIG) if err: LOG.error(err) raise RuntimeError("Problem reading my.cnf! : %s" % err) return pwd.strip() def get_engine(): """Create the default engine with the updated admin user.""" # TODO(rnirmal):Based on permissions issues being resolved we may revert # url = URL(drivername='mysql', host='localhost', # query={'read_default_file': '/etc/mysql/my.cnf'}) global ENGINE if ENGINE: return ENGINE pwd = get_auth_password() ENGINE = sqlalchemy.create_engine("mysql://%s:%s@localhost:3306" % (ADMIN_USER_NAME, pwd.strip()), pool_recycle=7200, echo=CONF.sql_query_logging, listeners=[KeepAliveConnection()]) return ENGINE def load_mysqld_options(): # find mysqld bin for bin in MYSQL_BIN_CANDIDATES: if os.path.isfile(bin): mysqld_bin = bin break else: return {} try: out, err = utils.execute(mysqld_bin, "--print-defaults", run_as_root=True, root_helper="sudo") arglist = re.split("\n", out)[1].split() args = defaultdict(list) for item in arglist: if "=" in item: key, value = item.split("=", 1) args[key.lstrip("--")].append(value) else: args[item.lstrip("--")].append(None) return args except exception.ProcessExecutionError: return {} def read_mycnf(): with open(MYSQL_CONFIG, 'r') as file: config_contents = file.read() return config_contents def get_datadir(reset_cache=False): """Return the data directory currently used by Mysql.""" global DATADIR if not reset_cache and DATADIR: return DATADIR mycnf_contents = read_mycnf() # look for datadir parameter in my.cnf mycnf = dict(configurations.MySQLConfPa
rser(mycnf_contents).parse()) DATADIR = mycnf['datadir'] return DATADIR class MySqlAppStatus(service.BaseDbStatus): @classmethod def get(cls): if not cl
s._instance: cls._instance = MySqlAppStatus() return cls._instance def _get_actual_db_status(self): try: out, err = utils.execute_with_timeout( "/usr/bin/mysqladmin", "ping", run_as_root=True, root_helper="sudo", log_output_on_error=True) LOG.info(_("MySQL Service Status is RUNNING.")) return rd_instance.ServiceStatuses.RUNNING except exception.ProcessExecutionError: LOG.exception(_("Failed to get database status.")) try: out, err = utils.execute_with_timeout("/bin/ps", "-C", "mysqld", "h") pid = out.split()[0] # TODO(rnirmal): Need to create new statuses for instances # where the mysql service is up, but unresponsive LOG.info(_('MySQL Service Status %(pid)s is BLOCKED.') % {'pid': pid}) return rd_instance.ServiceStatuses.BLOCKED except exception.ProcessExecutionError: LOG.exception(_("Process execution failed.")) mysql_args = load_mysqld_options() pid_file = mysql_args.get('pid_file', ['/var/run/mysqld/mysqld.pid'])[0] if os.path.exists(pid_file): LOG.info(_("MySQL Service Status is CRASHED.")) return rd_instance.ServiceStatuses.CRASHED else: LOG.info(_("MySQL Service Status is SHUTDOWN.")) return rd_instance.ServiceStatuses.SHUTDOWN class LocalSqlClient(object): """A sqlalchemy wrapper to manage transactions.""" def __init__(self, engine, use_flush=True): self.engine = engine self.use_flush = use_flush def __enter__(self): self.conn = self.engine.connect() self.trans = self.conn.begin() return self.conn def __exit__(self, type, value, traceback): if self.trans: if type is not None: # An error occurred self.trans.rollback() else: if self.use_flush: self.conn.execute(FLUSH) self.trans.commit() self.conn.close() def execute(self, t, **kwargs): try: return self.conn.execute(t, kwargs) except Exception: self.trans.rollback() self.trans = None raise class MySqlAdmin(object): """Handles administrative tasks on the MySQL database.""" def _associate_dbs(self, user): """Internal. Given a MySQLUser, populate its databases attribute.""" LOG.debug("Associating dbs to user %s at %s." % (user.name, user.host)) with LocalSqlClient(get_engine()) as client: q = sql_query.Query() q.columns = ["grantee", "table_schema"] q.tables = ["information_schema.SCHEMA_PRIVILEGES"] q.group = ["grantee", "table_schema"] q.where = ["privilege_type != 'USAGE'"] t = text(str(q)) db_result = client.execute(t) for db in db_result: LOG.debug("\t db: %s." % db) if db['grantee'] == "'%s'@'%s'" % (user.name, user.host): mysql_db = models.MySQLDatabase() mysql_db.name = db['table_schema'] user.databases.append(mysql_db.serialize()) def change_passwords(self, users): """Change the passwords of one or more existing users.""" LOG.debug("Changing the password of some users.") with LocalSqlClient(get_engine()) as client: for item in users: LOG.debug("Changing password for user %s." % item) user_dict = {'_name': item['name'], '_host': item['host'], '_password': item['password']} user = models.MySQLUser() user.deserialize(user_dict) LOG.debug
# The MIT License (MIT) # # Copyright (c) 2016 WUSTL ZPLAB # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Authors: Erik Hvatum <ice.rikh@gmail.com> from PyQt5 import Qt from ..shared_resources import UNIQUE_QGRAPHICSITEM_TYPE class PointItem(Qt.QGraphicsRectItem): # Omitting .type() or faili
ng to return a unique value causes PyQt to return a wrapper of the wrong type when retrieving an instance of this item as a base
    # class pointer from C++. For example, if this item has a child and that child calls self.parentItem(), it would receive a Python object of type
    # Qt.QGraphicsRectItem rather than PointItem u
nless PointItem has a correct .type() implementation. QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE() def __init__(self, picker, x, y, w, h, parent_item): super().__init__(x, y, w, h, parent_item) self.picker = picker flags = self.flags() self.setFlags( flags | Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events Qt.QGraphicsItem.ItemIsSelectable | Qt.QGraphicsItem.ItemIsMovable | Qt.QGraphicsItem.ItemSendsGeometryChanges # Necessary in order for .itemChange to be called when item is moved ) def itemChange(self, change, value): if change == Qt.QGraphicsItem.ItemPositionHasChanged: self.picker.point_item_position_has_changed.emit(self) return super().itemChange(change, value) def keyPressEvent(self, event): if event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier: self.picker.delete_selected() def type(self): return self.QGRAPHICSITEM_TYPE # NB: deriving from Qt.QGraphicsObject is necessary in order to be a scene event filter target class SimplePointPicker(Qt.QGraphicsObject): """ex: from ris_widget.ris_widget import RisWidget from ris_widget.examples.simple_point_picker import SimplePointPicker rw = RisWidget() simple_point_picker = SimplePointPicker(rw.main_view, rw.main_scene.layer_stack_item)""" QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE() point_item_position_has_changed = Qt.pyqtSignal(PointItem) point_item_list_content_reset = Qt.pyqtSignal() def __init__(self, general_view, parent_item, points=None): super().__init__(parent_item) self.view = general_view self.view.viewport_rect_item.size_changed.connect(self.on_viewport_size_changed) self.point_items = [] self.pen = Qt.QPen(Qt.Qt.red) self.pen.setWidth(2) color = Qt.QColor(Qt.Qt.yellow) color.setAlphaF(0.5) self.brush = Qt.QBrush(color) self.brush_selected = Qt.QBrush(Qt.QColor(255, 0, 255, 127)) parent_item.installSceneEventFilter(self) if points: for point in points: self.make_and_store_point_item(Qt.QPointF(point[0], point[1])) def boundingRect(self): return Qt.QRectF() def paint(self, QPainter, QStyleOptionGraphicsItem, QWidget_widget=None): pass def type(self): return self.QGRAPHICSITEM_TYPE def make_and_store_point_item(self, pos): point_item = PointItem(self, -7, -7, 15, 15, self.parentItem()) point_item.setScale(1 / self.view.transform().m22()) point_item.setPen(self.pen) point_item.setBrush(self.brush) flags = point_item.flags() point_item.setFlags( flags | Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events Qt.QGraphicsItem.ItemIsSelectable | Qt.QGraphicsItem.ItemIsMovable | Qt.QGraphicsItem.ItemSendsGeometryChanges ) point_item.installSceneEventFilter(self) self.point_items.append(point_item) point_item.setPos(pos) def delete_selected(self): for idx, item in reversed(list(enumerate((self.point_items)))): if item.isSelected(): self.scene().removeItem(item) del self.point_items[idx] self.point_item_list_content_reset.emit() def sceneEventFilter(self, watched, event): if watched is self.parentItem(): if event.type() == Qt.QEvent.GraphicsSceneMousePress and event.button() == Qt.Qt.RightButton: self.make_and_store_point_item(event.pos()) return True if event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier: self.delete_selected() return False def on_viewport_size_changed(self): scale = 1 / self.view.transform().m22() for point_item in self.point_items: point_item.setScale(scale) def clear(self): for point_item in self.point_items: 
self.view.scene().removeItem(point_item) self.point_items = [] self.point_item_list_content_reset.emit() @property def points(self): return [(point_item.pos().x(), point_item.pos().y()) for point_item in self.point_items] @points.setter def points(self, points): self.clear() for point in points: self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
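
A short continuation of the class docstring example above (illustrative; it assumes the RisWidget session built there, since the picker needs a live scene):

simple_point_picker.points = [(10.0, 20.0), (30.0, 40.0)]  # seeds two items
assert simple_point_picker.points == [(10.0, 20.0), (30.0, 40.0)]
simple_point_picker.clear()  # removes both items and emits the reset signal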
y = query def __repr__(self): return f'{self._token}' @staticmethod def tokens2sql(token: Token, query: 'query_module.BaseQuery' ) -> Iterator[all_token_types]: from .functions import SQLFunc if
isinstance(token, Identifier): # Bug fix for sql parse if isinstance(token[0], Parenthesi
s): try: int(token[0][1].value) except ValueError: yield SQLIdentifier(token[0][1], query) else: yield SQLConstIdentifier(token, query) elif isinstance(token[0], Function): yield SQLFunc.token2sql(token, query) else: yield SQLIdentifier(token, query) elif isinstance(token, Function): yield SQLFunc.token2sql(token, query) elif isinstance(token, Comparison): yield SQLComparison(token, query) elif isinstance(token, IdentifierList): for tok in token.get_identifiers(): yield from SQLToken.tokens2sql(tok, query) elif isinstance(token, Parenthesis): yield SQLPlaceholder(token, query) else: raise SQLDecodeError(f'Unsupported: {token.value}') @staticmethod def token2sql(token: Token, query: 'query_module.BaseQuery' ) -> all_token_types: return next(SQLToken.tokens2sql(token, query)) @staticmethod def placeholder_index(token) -> int: return int(re.match(r'%\(([0-9]+)\)s', token.value, flags=re.IGNORECASE).group(1)) class AliasableToken(SQLToken): @abc.abstractmethod def __init__(self, *args): super().__init__(*args) self.token_alias: 'query_module.TokenAlias' = self.query.token_alias if self.alias: self.token_alias.alias2token[self.alias] = self self.token_alias.token2alias[self] = self.alias if self.is_explicit_alias(): self.token_alias.aliased_names.add(self.alias) def __hash__(self): if self.is_explicit_alias(): return hash(self._token[0].value) return hash(self._token.value) def __eq__(self, other): return hash(self) == hash(other) def is_explicit_alias(self): return len(self._token.tokens) == 5 and self._token[2].match(tokens.Keyword, 'AS') @property def alias(self) -> str: # bug fix sql parse if not self._token.get_ordering(): return self._token.get_alias() class SQLIdentifier(AliasableToken): def __init__(self, *args): super().__init__(*args) self._ord = None if self._token.get_ordering(): # Bug fix for sql parse self._ord = self._token.get_ordering() self._token = self._token[0] @property def order(self): if self._ord is None: raise SQLDecodeError return ORDER_BY_MAP[self._ord] @property def field(self) -> str: if self.given_table in self.query.token_alias.aliased_names: return self.given_table if self.table == self.query.left_table: return self.column else: return f'{self.table}.{self.column}' @property def table(self) -> str: name = self.given_table alias2token = self.token_alias.alias2token try: return alias2token[name].table except KeyError: return name @property def given_table(self) -> str: name = self._token.get_parent_name() if name is None: name = self._token.get_real_name() if name is None: raise SQLDecodeError return name @property def column(self) -> str: name = self._token.get_real_name() if name is None: raise SQLDecodeError return name class SQLConstIdentifier(AliasableToken): def __init__(self, *args): super().__init__(*args) @property def value(self) -> int: return int(self._token[0][1].value) def to_mongo(self) -> dict: return {'$literal': self.value} class SQLComparison(SQLToken): @property def left_table(self): lhs = SQLIdentifier(self._token.left, self.query) return lhs.table @property def left_column(self): lhs = SQLIdentifier(self._token.left, self.query) return lhs.column @property def right_table(self): rhs = SQLIdentifier(self._token.right, self.query) return rhs.table @property def right_column(self): rhs = SQLIdentifier(self._token.right, self.query) return rhs.column @property def rhs_indexes(self): if not self._token.right.ttype == tokens.Name.Placeholder: if self._token.right.match(tokens.Keyword, 'NULL'): return None raise SQLDecodeError index = 
self.placeholder_index(self._token.right) return index class SQLPlaceholder(SQLToken): def __iter__(self): tok = self._token[1:-1][0] if isinstance(tok, IdentifierList): for aid in tok.get_identifiers(): yield self.get_value(aid) else: yield self.get_value(tok) def __init__(self, token: Token, query: 'query_module.BaseQuery'): super().__init__(token, query) def get_value(self, tok: Token): if tok.ttype == tokens.Name.Placeholder: return self.placeholder_index(tok) elif tok.match(tokens.Keyword, 'NULL'): return None elif tok.match(tokens.Keyword, 'DEFAULT'): return 'DEFAULT' else: raise SQLDecodeError class SQLStatement: @property def current_token(self) -> Token: return self._statement[self._tok_id] def __init__(self, statement: U[Statement, Token]): self._statement = statement self._tok_id = 0 self._gen_inst = self._generator() def __getattr__(self, item): return getattr(self._statement, item) def __iter__(self) -> Token: yield from self._gen_inst def __repr__(self): return str(self._statement) def __getitem__(self, item: slice): start = (item.start or 0) + self._tok_id stop = item.stop and self._tok_id + item.stop sql = ''.join(str(tok) for tok in self._statement[start:stop]) sql = sqlparse(sql)[0] return SQLStatement(sql) def next(self) -> O[Token]: # self._tok_id, token = self._statement.token_next(self._tok_id) try: return next(self._gen_inst) except StopIteration: return None def skip(self, num): self._tok_id += num @property def prev_token(self) -> Token: return self._statement.token_prev(self._tok_id)[1] @property def next_token(self) -> Token: return self._statement.token_next(self._tok_id)[1] def _generator(self): token = self._statement[self._tok_id] while self._tok_id is not None: yield token self._tok_id, token = self._statement.token_next(self._tok_id) class SQLColumnDef: not_null = object() unique = object() autoincrement = object() primarykey = object() null = object() _map = { 'UNIQUE': unique, 'AUTOINCREMENT': autoincrement, 'PRIMARY KEY': primarykey, 'NOT NULL': not_null, 'NULL': null } supported_data_types = None def __init__(self, name: str = None, data_type: str = None, col_constraints: set = None): self.name = name self.data_type = data_type self.col_constraints = col_constraints @staticmethod def _get_constraints(others: str): while others: try: name, others = others.split(' ', 1) except ValueError: name = others others = None try: yield SQLColumnDef._map[name]
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime import argparse import kudu from kudu.client import Partitioning # Parse arguments parser = argparse.ArgumentParser(description='Basic Example for Kudu Python.') parser.add_argument('--masters', '-m', nargs='+', default='localhost', help='The master address(es) to connect to Kudu.') parser.add_argument('--ports', '-p', nargs='+', default='7051', help='The master server port(s) to connect to Kudu.') arg
s = parser.parse_args() # Connect to Kudu master server(s). client = kudu.connect(host=args.masters, port=args.ports) # Define a schema for
a new table. builder = kudu.schema_builder() builder.add_column('key').type(kudu.int64).nullable(False).primary_key() builder.add_column('ts_val', type_=kudu.unixtime_micros, nullable=False, compression='lz4') schema = builder.build() # Define the partitioning schema. partitioning = Partitioning().add_hash_partitions(column_names=['key'], num_buckets=3) # Delete table if it already exists. if client.table_exists('python-example'): client.delete_table('python-example') # Create a new table. client.create_table('python-example', schema, partitioning) # Open a table. table = client.table('python-example') # Create a new session so that we can apply write operations. session = client.new_session() # Insert a row. op = table.new_insert({'key': 1, 'ts_val': datetime.utcnow()}) session.apply(op) # Upsert a row. op = table.new_upsert({'key': 2, 'ts_val': "2016-01-01T00:00:00.000000"}) session.apply(op) # Update a row. op = table.new_update({'key': 1, 'ts_val': ("2017-01-01", "%Y-%m-%d")}) session.apply(op) # Delete a row. op = table.new_delete({'key': 2}) session.apply(op) # Flush write operations, if failures occur, print them. try: session.flush() except kudu.KuduBadStatus: print(session.get_pending_errors()) # Create a scanner and add a predicate. scanner = table.scanner() scanner.add_predicate(table['ts_val'] == datetime(2017, 1, 1)) # Open scanner and print all tuples. # Note: This doesn't scale for large scans # The expected output: [(1, datetime.datetime(2017, 1, 1, 0, 0, tzinfo=<UTC>))] print(scanner.open().read_all_tuples())
import os
import json

from ss_rule_scheme import update_outbound_rules, init_inbound_rules, init_outbound_rules, msg_clear_all_outbound, ss_process_policy_change

base_path = os.path.abspath(os.path.join(os.path.realpath(__file__), ".."))
test_file = os.path.join(base_path, "blackholing_test.py")

with open(test_file, 'r') as f:
    data = json.load(f)

inbound_policies = []
outbound_policies = []
for element in data['policy']:
    if 'inbound' in element:
        inbound_policies = element
    if 'outbound' in element:
        outbound_policies = element
#print(inbound_policies)

final_switch = "main-in"

rule_msgs = init_inbound_rules(1, inbound_policies, [], final_switch)
print("Rule Messages to be removed INBOUND:: " + str(rule_msgs))

#rule_msgs2 = init_outbound_rules(1, outbound_policies, [], final_switch)
#print("Rule Messages OUTBOUND:: " + str(rule_msgs2))
#if 'changes' in rule_msgs2:
#    if 'changes' not in rule_msgs:
#        rule_msgs['changes'] = []
#    rule_msgs['changes'] += rule_msgs2['changes']

#TODO: Initialize Outbound Policies from RIB

print("Rule Messages:: " + str(rule_msgs))
for rule in rule_msgs['changes']:
    rule['mod_type'] = "remove"

print("XRS_Test: Rule Msgs: %s" % rule_msgs)
######################################
######################################## # # Copyright (C) 2018-2020 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: Emanuel Cino <ecino@compassion.ch> # # The licence is in the file __manifest__.py # ############################################################################## from . i
mport reports
# -*- coding: utf-8 -*- """ Validators for wx widgets. Copyright (c) Karol Będkowski, 2006-2013 This file is part of wxGTD This is free software; you can redistribute it and/or modify it under the t
erms of the GNU General Public License as published by the Free Software Foundation, version 2. """ __author__ = "Karol Będkowski" __copyright__ = "Copyright (c) Karol Będkowski, 2006-2013" __version__ = '2013-04-21' __all__ = ['ValidatorDv', 'Validator', 'ValidatorDate', 'ValidatorTime', 'ValidatorColorStr'] from .va
lidator import Validator, ValidatorDv, ValidatorDate, ValidatorTime, \ ValidatorColorStr
#!/usr/bin/env python import scipy.sparse as sps from apgl.graph.GeneralVertexList import GeneralVertexList from apgl.graph.SparseGraph import SparseGraph numVertices = 10 vList = GeneralVertexList(numVertices) Wght = sps.lil_matrix((numVertices, numVertices)) graph = SparseGraph(vLis
t, W=Wght, undirected=False) # Add some edges to the graph. # Vertices are indexed starting from
0.
graph[0, 1] = 1
graph[0, 2] = 1

# Set the labels of the 0th and 1st vertices.
graph.setVertex(0, "abc")
graph.setVertex(1, 123)

print(graph.inDegreeDistribution())
import numpy as np from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean from sklearn.preprocessing import LabelBinarizer from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_scor
e, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score from boomlet.utils
.aggregators import to_aggregator from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss from boomlet.transform.type_conversion import Discretizer from autocause.feature_functions import * from autocause.converters import NUMERICAL_TO_NUMERICAL, NUMERICAL_TO_CATEGORICAL, BINARY_TO_NUMERICAL, BINARY_TO_CATEGORICAL, CATEGORICAL_TO_NUMERICAL, CATEGORICAL_TO_CATEGORICAL """ Functions used to combine a list of features into one coherent one. Sample use: 1. to convert categorical to numerical, we perform a one hot encoding 2. treat each binary column as a separate numerical feature 3. compute numerical features as usual 4. use each of the following functions to create a new feature (with the input as the nth feature for each of the columns) WARNING: these will be used in various locations throughout the code base and will result in feature size growing at faster than a linear rate """ AGGREGATORS = [ to_aggregator("max"), to_aggregator("min"), to_aggregator("median"), to_aggregator("mode"), to_aggregator("mean"), to_aggregator("sum"), ] """ Boolean flags specifying whether or not to perform conversions """ CONVERT_TO_NUMERICAL = True CONVERT_TO_CATEGORICAL = True """ Functions that compute a metric on a single 1-D array """ UNARY_NUMERICAL_FEATURES = [ normalized_entropy, skew, kurtosis, np.std, shapiro, ] UNARY_CATEGORICAL_FEATURES = [ lambda x: len(set(x)), # number of unique ] """ Functions that compute a metric on two 1-D arrays """ BINARY_NN_FEATURES = [ independent_component, chi_square, pearsonr, correlation_magnitude, braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, sqeuclidean, ansari, mood, levene, fligner, bartlett, mannwhitneyu, ] BINARY_NC_FEATURES = [ ] BINARY_CN_FEATURES = [ categorical_numerical_homogeneity, bucket_variance, anova, ] BINARY_CC_FEATURES = [ categorical_categorical_homogeneity, anova, dice_, jaccard, kulsinski, matching, rogerstanimoto_, russellrao, sokalmichener_, sokalsneath_, yule_, adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score, ] """ Dictionaries of input type (e.g. B corresponds to pairs where binary data is the input) to pairs of converter functions and a boolean flag of whether or not to aggregate over the output of the converter function converter functions should have the type signature: converter(X_raw, X_current_type, Y_raw, Y_type) where X_raw is the data to convert """ NUMERICAL_CONVERTERS = dict( N=NUMERICAL_TO_NUMERICAL["identity"], B=BINARY_TO_NUMERICAL["identity"], C=CATEGORICAL_TO_NUMERICAL["binarize"], ) CATEGORICAL_CONVERTERS = dict( N=NUMERICAL_TO_CATEGORICAL["discretizer10"], B=BINARY_TO_CATEGORICAL["identity"], C=CATEGORICAL_TO_CATEGORICAL["identity"], ) """ Whether or not the converters can result in a 2D output. This must be set to True if any of the respective converts can return a 2D output. 
""" NUMERICAL_CAN_BE_2D = True CATEGORICAL_CAN_BE_2D = False """ Estimators used to provide a fit for a variable """ REGRESSION_ESTIMATORS = [ Ridge(), LinearRegression(), DecisionTreeRegressor(random_state=0), RandomForestRegressor(random_state=0), GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0), KNeighborsRegressor(), ] CLASSIFICATION_ESTIMATORS = [ LogisticRegression(random_state=0), DecisionTreeClassifier(random_state=0), RandomForestClassifier(random_state=0), GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0), KNeighborsClassifier(), GaussianNB(), ] """ Functions to provide a value of how good a fit on a variable is """ REGRESSION_METRICS = [ explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, max_error, error_variance, relative_error_variance, gini_loss, ] + BINARY_NN_FEATURES REGRESSION_RESIDUAL_METRICS = [ ] + UNARY_NUMERICAL_FEATURES BINARY_PROBABILITY_CLASSIFICATION_METRICS = [ roc_auc_score, hinge_loss, ] + REGRESSION_METRICS RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [ ] + REGRESSION_RESIDUAL_METRICS BINARY_CLASSIFICATION_METRICS = [ accuracy_score, average_precision_score, f1_score, matthews_corrcoef, precision_score, recall_score, zero_one_loss, categorical_gini_loss, ] ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification ] + BINARY_CC_FEATURES """ Functions to assess the model (e.g. complexity) of the fit on a numerical variable of type signature: metric(clf, X, y) """ REGRESSION_MODEL_METRICS = [ # TODO model complexity metrics ] CLASSIFICATION_MODEL_METRICS = [ # TODO use regression model metrics on predict_proba ] """ The operations to perform on the A->B features and B->A features. """ RELATIVE_FEATURES = [ # Identity functions, comment out the next 2 lines for only relative features lambda x, y: x, lambda x, y: y, lambda x, y: x - y, ] """ Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A) If this is done and training labels are given, those labels will have to be reflected as well. The reflection is performed through appending at the end. (e.g. if we have N training examples, observation N+1 in the output will be the first example reflected) """ REFLECT_DATA = False """ Whether or not metafeatures based on the types of A and B are generated. e.g. 1/0 feature on whether or not A is Numerical, etc. """ ADD_METAFEATURES = True """ Whether or not to generate combination features between the computed features and metafeatures. e.g. for each feature and metafeature, generate a new feature which is the product of the two WARNING: will generate a LOT of features (approximately 21 times as many) """ COMPUTE_METAFEATURE_COMBINATIONS = False
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be> # Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com> # Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com> # This program is free software
: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # M
ERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.db import transaction from django.db import connection @transaction.atomic def bulk_update_userstory_custom_attribute_order(project, user, data): cursor = connection.cursor() sql = """ prepare bulk_update_order as update custom_attributes_userstorycustomattribute set "order" = $1 where custom_attributes_userstorycustomattribute.id = $2 and custom_attributes_userstorycustomattribute.project_id = $3; """ cursor.execute(sql) for id, order in data: cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);", (order, id, project.id)) cursor.execute("DEALLOCATE bulk_update_order") cursor.close() @transaction.atomic def bulk_update_task_custom_attribute_order(project, user, data): cursor = connection.cursor() sql = """ prepare bulk_update_order as update custom_attributes_taskcustomattribute set "order" = $1 where custom_attributes_taskcustomattribute.id = $2 and custom_attributes_taskcustomattribute.project_id = $3; """ cursor.execute(sql) for id, order in data: cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);", (order, id, project.id)) cursor.execute("DEALLOCATE bulk_update_order") cursor.close() @transaction.atomic def bulk_update_issue_custom_attribute_order(project, user, data): cursor = connection.cursor() sql = """ prepare bulk_update_order as update custom_attributes_issuecustomattribute set "order" = $1 where custom_attributes_issuecustomattribute.id = $2 and custom_attributes_issuecustomattribute.project_id = $3; """ cursor.execute(sql) for id, order in data: cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);", (order, id, project.id)) cursor.execute("DEALLOCATE bulk_update_order") cursor.close()
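
The three functions above differ only in the target table, so a shared helper is a natural refactoring. A sketch (not part of the original module; the table name must come from the three trusted constants above, since it is interpolated into the SQL):

@transaction.atomic
def _bulk_update_custom_attribute_order(table, project, data):
    # `table` must come from a fixed whitelist, never from user input.
    cursor = connection.cursor()
    sql = """
    prepare bulk_update_order as update {table}
    set "order" = $1
    where {table}.id = $2 and
          {table}.project_id = $3;
    """.format(table=table)
    cursor.execute(sql)
    for id, order in data:
        cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
                       (order, id, project.id))
    cursor.execute("DEALLOCATE bulk_update_order")
    cursor.close()

Each public function would then delegate, e.g. with table='custom_attributes_taskcustomattribute'.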
from __future__ import unicode_literals, division, absolute_import from builtins import * # pylint: disable=unused-import, redefined-builtin import os import logging import tempfile from flexget import plugin from flexget.event import event log = logging.getLogger('check_subtitles') class MetainfoSubs(object): """ Set 'subtitles' field for entries, if they are local video files with subs. The field is a list of language codes (3-letter ISO-639-3) for each subtitles file found on disk and/or subs track found inside video (for MKVs). Special "und" code is for unidentified language (i.e. files without language code before extension). """ schema = {'type': 'boolean'} def on_task_start(self, task, config): try: import subliminal except ImportError as e: log.debug('Error importing Subliminal: %s' % e) raise plugin.DependencyError('subliminal', 'subliminal', 'Subliminal module required. ImportError: %s' % e) from subliminal.cli import MutexLock from dogpile.cache.exception import RegionAlreadyConfigured try: subliminal.region.configure('dogpile.cache.dbm', arguments={'filename': os.path.join(tempfile.gettempdir(), 'cachefile.dbm'), 'lock_factory': MutexLock}) except RegionAlreadyConfigured: pass logging.getLogger("subliminal").setLevel(logging.CRITICAL) logging.getLogger("enzyme").setLevel(logging.WARNING) def on_task_meta
info(self, task, config): # check if explicitly disabled (value set to false) if config is False: return for entry in task.entries: entry.register_lazy_func(self.get_subtitles, ['subtitles']) def get_subtitles(self, entry): if entry.get('subtitles',
eval_lazy=False) or not ('location' in entry) or \ ('$RECYCLE.BIN' in entry['location']) or not os.path.exists(entry['location']): return from subliminal.core import search_external_subtitles try: subtitles = list(search_external_subtitles(entry['location']).values()) if subtitles: entry['subtitles'] = subtitles log.debug('Found subtitles %s for %s', '/'.join(subtitles), entry['title']) except Exception as e: log.debug('Error checking local subtitles for %s: %s' % (entry['title'], e)) @event('plugin.register') def register_plugin(): plugin.register(MetainfoSubs, 'check_subtitles', api_ver=2)
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE

# This is a simple test variable for the interaction of neighborhoods and households.

from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label

class hhnper2_nbnper2(Variable):
    """Test variable for the interaction of neighborhoods and households.
    Computes household.nper2 * neighborhood.nper2_m."""

    def dependencies(self):
        return [attribute_label("neighborhood", "nper2_m"),
                attribute_label("household", "nper2")]

    def compute(self, dataset_pool):
        return self.get_dataset().multiply("nper2", "nper2_m")

#if __name__=='__main__':
    #from opus_core.tests import opus_unittest
    #from urbansim.variable_test_toolbox import VariableTestToolbox
    #from numpy import array
    #from numpy import ma
    #class Tests(opus_unittest.OpusTestCase):
        #variable_name = "urbansim.household_x_neighborhood.hhrich_nbpoor"
        #def test_full_tree(self):
            #dept = array([10, 20, 30])
            #prev_dept = array([10, 4, 20, 30])
            #values = VariableTestToolbox().compute_varia
ble(self.variable_name, #{"neighborhood":{ #"dept":dept},
#"household":{ #"prev_dept":prev_dept}}, #dataset = "household_x_neighborhood") #should_be = array([[1, 0, 0], #[0, 0, 0], #[0, 1, 0], #[0, 0, 1]]) #self.assertEqual(ma.allclose(values, should_be, rtol=1e-20), #True, msg = "Error in " + self.variable_name) #opus_unittest.main()
# ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # *************************************
**************************************** import pandas as pd from numba import njit @njit def series_lt(): s1 = pd.Series([5, 4, 3, 2, 1]) s2 = pd.Series([0, 2, 3, 6, 8]) return s1.lt(s2) # Expect series of False, False, False, True, True print(series_lt())
#!/usr/bin/env python ''' ##BOILERPLATE_COPYRIGHT ##BOILERPLATE_COPYRIGHT_END ''' import unittest, copy from testRoot import RootClass from noink.user_db import UserDB from noink.entry_db import EntryDB class AddEntry(Ro
otClass): def test_AddEntry(self): userDB = UserDB() entryDB = EntryDB() u = userDB.add("jontest", "pass", "Jon Q. Testuser") title = 'Little Buttercup' entry = 'There once was a man from Nantucket,' + \ 'who kept his wife in a Bucket.' + \
"Wait... how'd she fit in that bucket anyway?" e = entryDB.add(copy.deepcopy(title), entry, u) self.assertTrue(e.title == title) if __name__ == '__main__': unittest.main()
import io import pytest from contextlib import redirect_stdout from mock import patch from mythril.mythril import MythrilLevelDB, MythrilConfig from mythril.exceptions import CriticalError @patch("mythril.ethereum.interface.leveldb.client.EthLevelDB.search") @patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None) @patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None) @patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None) def test_leveldb_code_search(mock_leveldb, f1, f2, f3): config = MythrilConfig() config.set_api_leveldb("some path") leveldb_search = MythrilLevelDB(leveldb=config.eth_db) leveldb_search.search_db("code#PUSH#") mock_leveldb.assert_called() @patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None) @patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None) @patch
("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None) def test_leveldb_hash_search_incorrect_input(f1, f2, f3): config = MythrilConfig() config.set_api_leveldb("some path") leveldb
_search = MythrilLevelDB(leveldb=config.eth_db) with pytest.raises(CriticalError): leveldb_search.contract_hash_to_address("0x23") @patch( "mythril.ethereum.interface.leveldb.client.EthLevelDB.contract_hash_to_address", return_value="0xddbb615cb2ffaff7233d8a6f3601621de94795e1", ) @patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None) @patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None) @patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None) def test_leveldb_hash_search_correct_input(mock_hash_to_address, f1, f2, f3): config = MythrilConfig() config.set_api_leveldb("some path") leveldb_search = MythrilLevelDB(leveldb=config.eth_db) f = io.StringIO() with redirect_stdout(f): leveldb_search.contract_hash_to_address( "0x0464e651bcc40de28fc7fcde269218d16850bac9689da5f4a6bd640fd3cdf6aa" ) out = f.getvalue() mock_hash_to_address.assert_called() assert out == "0xddbb615cb2ffaff7233d8a6f3601621de94795e1\n"
# -*- coding: utf-8 -*- """ Contains functions to plot the results of the dustydiffusion test. @author: ibackus """ import matplotlib.pyplot as plt import numpy as np import pynbody import diskpy #sim, epsEstimator, ts, runpars = analyze.loadSim(simdir) def crossSection(sim, ts, crossSectionTimes=[0, 1, 10]): """ Reproduces the cross-section plot of dust density of Price & Laibe 2015, fig. 5 Note, sim and ts can be loaded with analyze.loadSim(...) Parameters ---------- sim : list List of SimSnaps for the simulation ts : array-like Snapshot times crossSectionTimes : array-like (optional) Sim times to plot (approximate) """ # Select times to plot at crossSectionTimes = np.asarray(crossSectionTimes) crossS
ectionTimes = crossSectionTimes.reshape(crossSectionTimes.size) if np.ndim(crossSectionTimes) == 0: crossSectionTimes = crossSectionTimes[None] nPlots = len(crossSectionTimes) # Plot axs = diskpy.plot.gridplot(1, nPlots, square=True) fig = plt.gcf() for iPlot in range(nPlo
ts): ax = axs[iPlot] iTime = abs(ts - crossSectionTimes[iPlot]).argmin() t = ts[iTime] f = sim[iTime] im=pynbody.plot.sph.image(f, 'dustFrac', width=1, log=False, vmin=0, vmax = 0.11, cmap='cubehelix_r', show_cbar=False, subplot=ax, ret_im=True) ax.set_xlabel('t={:.2g}'.format(float(t))) fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.85, 0.2, 0.05, 0.6]) fig.colorbar(im, cax=cbar_ax) fig.set_size_inches(8.5, 3.4, forward=True) plt.suptitle('Cross section of dust fraction in z=0 plane\n'\ 'See Price & Laibe (2015)') def dustFracProfile(sim, ts, epsEstimator, epsPlotTimes=[0., 0.1, 0.3, 1, 3, 10], nr=200, colorcode=True, legend=True, rasterized=True): """ Note, sim and ts and epsEstimator can be loaded with analyze.loadSim(...) Parameters ---------- sim : list List of SimSnaps for the simulation ts : array-like Snapshot times epsEstimator : function A function of (r, t) that returns the analytic estimate of the dust fraction density profile of P&L15 dustydiffusion epsPlotTimes : array-like Approximate times to plot at nr : int Number of radial bins colorcode : bool Color-code the times legend : bool Display legend rasterized : bool Rasterize the dots. Useful for saving figures as vector graphics """ # Make plot times an array epsPlotTimes = np.asarray(epsPlotTimes) epsPlotTimes = epsPlotTimes.reshape(epsPlotTimes.size) nt = len(epsPlotTimes) actualPlotTimes = np.zeros(nt) title = 'plot times: ' if colorcode: markercolor = None else: markercolor = 'k' for iPlot in range(nt): iTime = abs(ts - epsPlotTimes[iPlot]).argmin() # Calculate stuff f = sim[iTime] t = ts[[iTime]] actualPlotTimes[iPlot] = t print t r = np.linspace(0, f['r'].max(), nr) epsAnalytic = epsEstimator(r, t) # Plot scatter=plt.plot(f['r'], f['dustFrac'], 'o', markersize=3, markeredgecolor='none', label='t={:.2g}'.format(float(t)), color=markercolor, rasterized=rasterized) line=plt.plot(r, epsAnalytic, 'r') if colorcode: # Make lines and points the same color line[0].set_color(scatter[0].get_color()) title += '{:.2g}, '.format(float(t)) # Set-up plot plt.ylim(0, 0.11) plt.xlim(0, 0.5) plt.ylabel('Dust fraction') plt.xlabel('r') if legend: plt.legend(loc='best', markerscale=2) plt.title(title)
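
A short driver sketch tying the two plotting functions together; it follows the loadSim hint in the comment near the top of this module, and simdir is a placeholder:

import analyze  # companion analysis module referenced in the hint above

simdir = 'path/to/simulation'  # placeholder
sim, epsEstimator, ts, runpars = analyze.loadSim(simdir)
crossSection(sim, ts, crossSectionTimes=[0, 1, 10])
dustFracProfile(sim, ts, epsEstimator)
plt.show()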
s), "1234") self.assertEqual(s.value, 0x1234) s = c_ushort.__ctype_le__(0x1234) self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412") self.assertEqual(bin(s), "3412") self.assertEqual(s.value, 0x1234) def test_endian_int(self): if sys.byteorder == "little": self.assertIs(c_int.__ctype_le__, c_int) self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: self.assertIs(c_int.__ctype_be__, c_int) self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") self.assertEqual(bin(s), "12345678") self.assertEqual(s.value, 0x12345678) s = c_int.__ctype_le__(0x12345678) self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412") self.assertEqual(bin(s), "78563412") self.assertEqual(s.value, 0x12345678) s = c_uint.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678") self.assertEqual(bin(s), "12345678") self.assertEqual(s.value, 0x12345678) s = c_uint.__ctype_le__(0x12345678) self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412") self.assertEqual(bin(s), "78563412") self.assertEqual(s.value, 0x12345678) def test_endian_longlong(self): if sys.byteorder == "little": self.assertIs(c_longlong.__ctype_le__, c_longlong) self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: self.assertIs(c_longlong.__ctype_be__, c_longlong) self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") self.assertEqual(bin(s), "1234567890ABCDEF") self.assertEqual(s.value, 0x1234567890ABCDEF) s = c_longlong.__ctype_le__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412") self.assertEqual(bin(s), "EFCDAB9078563412") self.assertEqual(s.value, 0x1234567890ABCDEF) s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF") self.assertEqual(bin(s), "1234567890ABCDEF") self.assertEqual(s.value, 0x1234567890ABCDEF) s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412") self.assertEqual(bin(s), "EFCDAB9078563412") self.assertEqual(s.value, 0x1234567890ABCDEF) def test_endian_float(self): if sys.byteorder == "little": self.assertIs(c_float.__ctype_le__, c_float) self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: self.assertIs(c_float.__ctype_be__, c_float) self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? self.assertAlmostEqual(s.value, math.pi, places=6) s = c_float.__ctype_le__(math.pi) self.assertAlmostEqual(s.value, math.pi, places=6) self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s)) s = c_float.__ctype_be__(math.pi) self.assertAlmostEqual(s.value, math.pi, places=6) self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s)) def test_endian_double(self): if sys.byteorder == "little":
self.assertIs(c_double.__ctype_le__, c_double) self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: self.assertIs(c_double.__ctype_be__, c_double) self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi
) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) s = c_double.__ctype_le__(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s)) s = c_double.__ctype_be__(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): self.assertIs(c_byte.__ctype_le__, c_byte) self.assertIs(c_byte.__ctype_be__, c_byte) self.assertIs(c_ubyte.__ctype_le__, c_ubyte) self.assertIs(c_ubyte.__ctype_be__, c_ubyte) self.assertIs(c_char.__ctype_le__, c_char) self.assertIs(c_char.__ctype_be__, c_char) def test_struct_fields_1(self): if sys.byteorder == "little": base = BigEndianStructure else: base = LittleEndianStructure class T(base): pass _fields_ = [("a", c_ubyte), ("b", c_byte), ("c", c_short), ("d", c_ushort), ("e", c_int), ("f", c_uint), ("g", c_long), ("h", c_ulong), ("i", c_longlong), ("k", c_ulonglong), ("l", c_float), ("m", c_double), ("n", c_char), ("b1", c_byte, 3), ("b2", c_byte, 3), ("b3", c_byte, 2), ("a", c_int * 3 * 3 * 3)] T._fields_ = _fields_ # these fields do not support different byte order: for typ in c_wchar, c_void_p, POINTER(c_int): _fields_.append(("x", typ)) class T(base): pass self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)]) def test_struct_struct(self): # nested structures with different byteorders # create nested structures with given byteorders and set memory to data for nested, data in ( (BigEndianStructure, b'\0\0\0\1\0\0\0\2'), (LittleEndianStructure, b'\1\0\0\0\2\0\0\0'), ): for parent in ( BigEndianStructure, LittleEndianStructure, Structure, ): class NestedStructure(nested): _fields_ = [("x", c_uint32), ("y", c_uint32)] class TestStructure(parent): _fields_ = [("point", NestedStructure)] self.assertEqual(len(data), sizeof(TestStructure)) ptr = POINTER(TestStructure) s = cast(data, ptr)[0] del ctypes._pointer_type_cache[TestStructure] self.assertEqual(s.point.x, 1) self.assertEqual(s.point.y, 2) def test_struct_fields_2(self): # standard packing in struct uses no alignment. # So, we have to align using pad bytes. # # Unaligned accesses will crash Python (on those platforms that # don't allow it, like sparc solaris). if sys.byteorder == "little": base = BigEndianStructure fmt = ">bxhid" else: base = LittleEndianStructure fmt = "<bxhid" class S(base): _fields_ = [("b", c_byte), ("h", c_short), ("i", c_int), ("d", c_double)] s1 = S(0x12, 0x1234, 0x12345678, 3.14) s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14) self.assertEqual(bin(s1), bin(s2)) def test_unaligned_nonnative_struct_fields(self): if sys.byteorder == "little": base = BigEndianStructure fmt = ">b h xi xd" else: bas
# coding: utf-8 """ An API to insert and retrieve metadata on cloud artifacts. No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: v1alpha1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class DiscoveryDiscoveredDetails(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'operation': 'GooglelongrunningOperation' } attribute_map = { 'operation': 'operation' } def __init__(self, operation=None): # noqa: E501 """DiscoveryDiscoveredDetails - a model defined in Swagger""" # noqa: E501 self._operation = None self.discriminator = None if operation is not None: self.operation = operation @property def operation(self): """Gets the operation of this DiscoveryDiscoveredDetails. # noqa: E501 Output only. An operation tha
t indicates the status of the current scan. # noqa: E501 :return: The operation of this DiscoveryDiscoveredDetails. # noqa: E501 :rtype: GooglelongrunningOperation """ return self._operation @operatio
n.setter def operation(self, operation): """Sets the operation of this DiscoveryDiscoveredDetails. Output only. An operation that indicates the status of the current scan. # noqa: E501 :param operation: The operation of this DiscoveryDiscoveredDetails. # noqa: E501 :type: GooglelongrunningOperation """ self._operation = operation def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DiscoveryDiscoveredDetails, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DiscoveryDiscoveredDetails): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.

import stat
import socket
from irma.common.base.exceptions import IrmaSFTPv2Error
from irma.common.ftp.ftp import FTPInterface
from ssh2.session import Session
from ssh2.sftp import LIBSSH2_FXF_CREAT, LIBSSH2_FXF_WRITE,\
    LIBSSH2_SFTP_S_IRUSR, LIBSSH2_SFTP_S_IWUSR,\
    LIBSSH2_SFTP_S_IRGRP, LIBSSH2_SFTP_S_IROTH,\
    LIBSSH2_SFTP_S_IXUSR


class IrmaSFTPv2(FTPInterface):
    """Irma SFTPv2 handler

    This class handles the connection with an SFTP server and provides
    functions for interacting with it.
    """

    _Exception = IrmaSFTPv2Error

    # ==================================
    #  Constructor and Destructor stuff
    # ==================================

    def __init__(self, host, port, auth, key_path, user, passwd,
                 dst_user=None, upload_path='uploads',
                 hash_check=False, autoconnect=True):
        self._sess = None
        self._client = None
        super().__init__(host, port, auth, key_path, user, passwd, dst_user,
                         upload_path, hash_check, autoconnect)

    def connected(self):
        return self._sess is not None

    # ============================
    #  Overridden private methods
    # ============================

    def _connect(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self._host, self._port))
        self._sess = Session()
        self._sess.handshake(sock)
        if self._auth == 'key':
            # self._pubkey_path must be generated from private key
            # s.userauth_publickey_fromfile(self._user, self._pubkey_path,
            #                               self._key_path, '')
            raise IrmaSFTPv2Error("Pub key authentication not implemented")
        else:
            self._sess.userauth_password(self._user, self._passwd)
        self._client = self._sess.sftp_init()

    def _disconnect(self, *, force=False):
        self._client = None
        if not force:
            self._
sess.disconnect() self._sess = None def _upload(self, remote, fobj): mode = LIBSSH2_SFTP_S_IRUSR | LIBSSH2_SFTP_S_IWUSR | \ LIBSSH2_SFTP_S_IRGRP | LIBSSH2_SFTP_S_IROTH opt = LIBSSH2_FXF_CREAT | LIBSSH2_FXF_WRITE with self._client.open(remote, opt, mode) as rfh:
for chunk in iter(lambda: fobj.read(1024*1024), b""): rfh.write(chunk) def _download(self, remote, fobj): with self._client.open(remote, 0, 0) as rfh: for size, data in rfh: fobj.write(data) def _ls(self, remote): with self._client.opendir(remote) as rfh: paths = (p[1].decode('utf-8') for p in rfh.readdir()) return [p for p in paths if p not in ['.', '..']] def _is_file(self, remote): return not self._is_dir(remote) def _is_dir(self, remote): st = self._client.stat(remote) return stat.S_ISDIR(st.st_mode) def _rm(self, remote): self._client.unlink(remote) def _rmdir(self, remote): self._client.rmdir(remote) def _mkdir(self, remote): mode = LIBSSH2_SFTP_S_IRUSR | \ LIBSSH2_SFTP_S_IWUSR | \ LIBSSH2_SFTP_S_IXUSR self._client.mkdir(remote, mode) def _mv(self, oldremote, newremote): self._client.rename(oldremote, newremote)
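

# A minimal usage sketch for the IrmaSFTPv2 class above. Illustrative only:
# the host, port, credentials and file names are hypothetical, and the
# private _upload/_ls/_disconnect helpers are called directly for brevity
# (the FTPInterface base class presumably exposes public wrappers).
if __name__ == '__main__':
    ftp = IrmaSFTPv2("sftp.example.com", 22, auth="password", key_path=None,
                     user="irma", passwd="secret")
    with open("sample.bin", "rb") as fobj:
        ftp._upload("uploads/sample.bin", fobj)  # streamed in 1 MiB chunks
    print(ftp._ls("uploads"))                    # entries minus '.' and '..'
    ftp._disconnect()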
#!/usr/bin/env python

import QtTesting
import QtTestingImage

object1 = 'pqClientMainWindow/MainControlsToolbar/actionOpenData'
QtTesting.playCommand(object1, 'activate', '')
object2 = 'pqClientMainWindow/FileOpenDialog'
QtTesting.playCommand(object2, 'filesSelected', '$PARAVIEW_DATA_ROOT/SPCTH/Dave_Karelitz_Small/spcth_a')
object3 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/CellArrayStatus/1QHeaderView0'
QtTesting.playCommand(object3, 'mousePress', '1,1,0,0,0,0')
QtTesting.playCommand(object3, 'mouseRelease', '1,0,0,0,0,0')
object4 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/Accept'
QtTesting.playCommand(object4, 'activate', '')
object5 = 'pqClientMainWindow/representationToolbar/displayRepresentation/comboBox'
QtTesting.playCommand(object5, 'set_string', 'Surface')
object6 = 'pqClientMainWindow/variableToolbar/displayColor/Variables'
QtTesting.playCommand(object6, 'set_string', 'Pressure (dynes/cm^2^)')
object7 = 'pqClientMainWindow/cameraToolbar/actionPositiveX'
QtTesting.playCommand(object7, 'activate', '')
object8 = 'pqClientMainWindow/menubar/menuFilters/pqProxyGroupMenuManager0/Cut'
QtTesting.playCommand(object8, 'activate', '')
QtTesting.playComm
and(object4, 'activate', '') object9 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/CutFunction/pqImplicitPlaneWidget/show3DWidget' QtTesting.playCommand(object9, 'set_boolean', 'false') # DO_IMAGE_COMPARE snapshotWidget = 'pqClientMainWindow/centralwidget/MultiViewWidget/CoreWidget/qt_tabwidget_stackedwidget/MultiViewWidget1/Frame.0/Viewport' QtTestingI
mage.compareImage(snapshotWidget, 'CTHAMRClip.png', 300, 300)
# -*- coding: utf-8 -*- from __future__ import uni
code_literals from django.apps import AppConfig class PluginrepoCo
nfig(AppConfig): name = 'pluginrepo'
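

# To activate the app, the config is referenced from the project's settings
# module. A sketch (the surrounding entries are hypothetical):
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'pluginrepo.apps.PluginrepoConfig',  # or simply 'pluginrepo'
]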
from ritoapi.endpoints.match_v3 import MatchV3 import threading def _load_matches(match_v3, sample_region, sample_match_id, count): for i
in range(count): data = match_v3.matches(sample_region, sample_match_id) assert(data['gameId'] == sample_match_id) def test_matches_stress(sample_api_key, sample_rate_limit, sample_region, sample_match_id): match_v3 = MatchV3(sample_api_key, sample_rate_limit) threads = [] for i in range(10): t = threading.Thread(target=_load_matches, args=(match_v3, sample_region, sample_match_id, 20)) threads.append(t) t.start(
) for t in threads: t.join()
from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue

from ..common import log


class Scheduler(object):
    """
    A simple scheduler which schedules periodic or one-shot events
    """

    import sortedcontainers as sc

    max_delay_time = 60

    def __init__(self):
        self._jobs = Scheduler.sc.SortedSet()
        self._wakeup_q = queue.Queue()
        self._lock = threading.Lock()
        self._thr = threading.Thread(target=self._do_jobs)
        self._thr.daemon = True
        self._started = False

    def start(self):
        """
        Start the scheduler, which will start the internal thread for
        scheduling jobs. Please call tear_down when doing cleanup.
        """

        if self._started:
            log.logger.info("Scheduler already started.")
            return
        self._started = True

        self._thr.start()

    def tear_down(self):
        """
        Stop the scheduler, which will stop the internal thread for
        scheduling jobs.
        """

        if not self._started:
            log.logger.info("Scheduler already torn down.")
            return

        self._wakeup_q.put(True)

    def _do_jobs(self):
        while 1:
            (sleep_time, jobs) = self.get_ready_jobs()
            self._do_execution(jobs)
            try:
                done = self._wakeup_q.get(timeout=sleep_time)
            except queue.Empty:
                pass
            else:
                if done:
                    break
        self._started = False
        log.logger.info("Scheduler exited.")

    def get_ready_jobs(self):
        """
        @return: a 2 element tuple. The first element is the next ready
                 duration. The second element is ready jobs list
        """

        now = time()
        ready_jobs = []
        sleep_time = 1

        with self._lock:
            job_set = self._jobs
            total_jobs = len(job_set)
            for job in job_set:
                if job.get_expiration() <= now:
                    ready_jobs.append(job)

            if ready_jobs:
                del job_set[:len(ready_jobs)]

            for job in ready_jobs:
                if job.get_interval() != 0 and not job.stopped():
                    # repeated job, calculate next due time and enqueue
                    job.update_expiration()
                    job_set.add(job)

            if job_set:
                sleep_time = job_set[0].get_expiration() - now
                if sleep_time < 0:
                    log.logger.warn("Scheduler saturation, sleep_time=%s",
                                    sleep_time)
                    sleep_time = 0.1
if ready_jobs: log.logger.info("Get %d ready jobs, next duration is %f, " "and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs) ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True) return (sleep_time, ready_jobs) def add_jobs(self, jobs): with self._lock: now = time() job_set = self._jobs for job in jobs: delay_time = random.randrange(0, self.max_delay_time) job.set_initial_due_time(now + delay_time) job_set.add(job) self._wakeup() def update_jobs(self, jobs): with self._lock: job_set = self._jobs for njob in jobs: job_set.discard(njob) job_set.add(njob) self._wakeup() def remove_jobs(self, jobs): with self._lock: job_set = self._jobs for njob in jobs: njob.stop() job_set.discard(njob) self._wakeup() def number_of_jobs(self): with self._lock: return len(self._jobs) def disable_randomization(self): self.max_delay_time = 1 def _wakeup(self): self._wakeup_q.put(None) def _do_execution(self, jobs): for job in jobs: job()
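

# The Scheduler above only assumes a small job protocol: jobs are callable,
# ordered by due time inside the SortedSet, and expose the accessors used in
# get_ready_jobs()/add_jobs(). A minimal sketch of a conforming job; the
# class name and fields are illustrative, not part of the original module.
class IntervalJob(object):
    def __init__(self, func, interval, priority=0):
        self._func = func
        self._interval = interval
        self.priority = priority
        self._when = 0
        self._stopped = False

    def get_expiration(self):
        return self._when

    def set_initial_due_time(self, when):
        self._when = when

    def update_expiration(self):
        # next due time for a repeated job
        self._when += self._interval

    def get_interval(self):
        return self._interval

    def stopped(self):
        return self._stopped

    def stop(self):
        self._stopped = True

    def get(self, key, default=None):
        # the ready-jobs priority sort calls job.get("priority", 0)
        return getattr(self, key, default)

    def __lt__(self, other):
        # keeps the SortedSet ordered by expiration time
        return self.get_expiration() < other.get_expiration()

    def __call__(self):
        self._func()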
]]) def test_assignment_indexerror(self): self.assertRaises(IndexError, self.phi.assignment, [10]) self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5]) self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5])) self.assertRaises(IndexError, self.phi4.assignment, [2, 24]) self.assertRaises(IndexError, self.phi4.assignment, np.array([24, 2, 4, 30])) def test_get_cardinality(self): self.assertEqual(self.phi.get_cardinality(['x1']), {'x1': 2}) self.assertEqual(self.phi.get_cardinality(['x2']), {'x2': 2}) self.assertEqual(self.phi.get_cardinality(['x3']), {'x3': 2}) self.assertEqual(self.phi.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 2}) self.assertEqual(self.phi.get_cardinality(['x1', 'x3']), {'x1': 2, 'x3': 2}) self.assertEqual(self.phi.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 2, 'x3': 2}) self.assertEqual(self.phi4.get_cardinality(
[self.tup1, self.tup3]), {self.tup1: 2, self.tup3: 4}) def test_get_cardinality_scopeerror(self): self.assertRaises(ValueError, self.phi.get_cardinality, ['x4']) self.assertRaises(ValueError, self.phi4.get_cardinality, [('x1', 'x4')]) self.assertRaises(ValueError, self.phi4.get_cardinality, [('x3', (2,
'x4'))]) def test_get_cardinality_typeerror(self): self.assertRaises(TypeError, self.phi.get_cardinality, 'x1') def test_marginalize(self): self.phi1.marginalize(['x1']) np_test.assert_array_equal(self.phi1.values, np.array([[6, 8], [10, 12], [14, 16]])) self.phi1.marginalize(['x2']) np_test.assert_array_equal(self.phi1.values, np.array([30, 36])) self.phi1.marginalize(['x3']) np_test.assert_array_equal(self.phi1.values, np.array(66)) self.phi5.marginalize([self.tup1]) np_test.assert_array_equal(self.phi5.values, np.array([[12, 14, 16, 18], [20, 22, 24, 26], [28, 30, 32, 34]])) self.phi5.marginalize([self.tup2]) np_test.assert_array_equal(self.phi5.values, np.array([60, 66, 72, 78])) self.phi5.marginalize([self.tup3]) np_test.assert_array_equal(self.phi5.values, np.array([276])) def test_marginalize_scopeerror(self): self.assertRaises(ValueError, self.phi.marginalize, ['x4']) self.phi.marginalize(['x1']) self.assertRaises(ValueError, self.phi.marginalize, ['x1']) self.assertRaises(ValueError, self.phi4.marginalize, [('x1', 'x3')]) self.phi4.marginalize([self.tup2]) self.assertRaises(ValueError, self.phi4.marginalize, [self.tup2]) def test_marginalize_typeerror(self): self.assertRaises(TypeError, self.phi.marginalize, 'x1') def test_marginalize_shape(self): values = ['A', 'D', 'F', 'H'] phi3_mar = self.phi3.marginalize(values, inplace=False) # Previously a sorting error caused these to be different np_test.assert_array_equal(phi3_mar.values.shape, phi3_mar.cardinality) phi6_mar = self.phi6.marginalize([self.tup1, self.tup2], inplace=False) np_test.assert_array_equal(phi6_mar.values.shape, phi6_mar.cardinality) self.phi6.marginalize([self.tup1, self.tup3 + self.tup1], inplace=True) np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality) def test_normalize(self): self.phi1.normalize() np_test.assert_almost_equal(self.phi1.values, np.array([[[0, 0.01515152], [0.03030303, 0.04545455], [0.06060606, 0.07575758]], [[0.09090909, 0.10606061], [0.12121212, 0.13636364], [0.15151515, 0.16666667]]])) self.phi5.normalize() np_test.assert_almost_equal(self.phi5.values, [[[0., 0.00362319, 0.00724638, 0.01086957], [0.01449275, 0.01811594, 0.02173913, 0.02536232], [0.02898551, 0.0326087, 0.03623188, 0.03985507]], [[0.04347826, 0.04710145, 0.05072464, 0.05434783], [0.05797101, 0.0615942, 0.06521739, 0.06884058], [0.07246377, 0.07608696, 0.07971014, 0.08333333]]]) def test_reduce(self): self.phi1.reduce([('x1', 0), ('x2', 0)]) np_test.assert_array_equal(self.phi1.values, np.array([0, 1])) self.phi5.reduce([(self.tup1, 0), (self.tup3, 1)]) np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9])) def test_reduce1(self): self.phi1.reduce([('x2', 0), ('x1', 0)]) np_test.assert_array_equal(self.phi1.values, np.array([0, 1])) self.phi5.reduce([(self.tup3, 1), (self.tup1, 0)]) np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9])) def test_reduce_shape(self): values = [('A', 0), ('D', 0), ('F', 0), ('H', 1)] phi3_reduced = self.phi3.reduce(values, inplace=False) # Previously a sorting error caused these to be different np_test.assert_array_equal(phi3_reduced.values.shape, phi3_reduced.cardinality) values = [(self.tup1, 2), (self.tup3, 0)] phi6_reduced = self.phi6.reduce(values, inplace=False) np_test.assert_array_equal(phi6_reduced.values.shape, phi6_reduced.cardinality) self.phi6.reduce(values, inplace=True) np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality) def test_complete_reduce(self): self.phi1.reduce([('x1', 0), ('x2', 0), ('x3', 1)]) 
np_test.assert_array_equal(self.phi1.values, np.array([1])) np_test.assert_array_equal(self.phi1.cardinality, np.array([])) np_test.assert_array_equal(self.phi1.variables, OrderedDict()) self.phi5.reduce([(('x1', 'x2'), 1), (('x2', 'x3'), 0), (('x3', (1, 'x4')), 3)]) np_test.assert_array_equal(self.phi5.values, np.array([15])) np_test.assert_array_equal(self.phi5.cardinality, np.array([])) np_test.assert_array_equal(self.phi5.variables, OrderedDict()) def test_reduce_typeerror(self): self.assertRaises(TypeError, self.phi1.reduce, 'x10') self.assertRaises(TypeError, self.phi1.reduce, ['x10']) self.assertRaises(TypeError, self.phi1.reduce, [('x1', 'x2')]) self.assertRaises(TypeError, self.phi1.reduce, [(0, 'x1')]) self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 'x1')]) self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 0.1)]) self.assertRaises(TypeError, self.phi1.reduce, [('x1', 0.1)]) self.assertRaises(TypeError, self.phi5.reduce, [(('x1', 'x2'), 0), (('x2', 'x3'), 0.2)]) def test_reduce_scopeerror(self): self.assertRaises(ValueError, self.phi1.reduce, [('x4', 1)]) self.assertRaises(ValueError, self.phi5.reduce, [((('x1', 0.1), 0))]) def test_reduce_sizeerror(self): self.assertRaises(IndexError, self.phi1.reduce, [('x3', 5)]) self.assertRaises(IndexError, self.phi5.reduce, [(('x2', 'x3'), 3)]) def test_identity_factor(self): identity_factor = self.phi.identity_factor() self.assertEqual(list(identity_factor.variables), ['x1', 'x2', 'x3']) np_test.assert_array_equal(identity_factor.cardinality, [2, 2, 2]) np_test.assert_array_equal(identity_factor.values, np.ones(8).reshape(2, 2, 2)) identity_factor1 = self.phi5.identity_factor() self.assertEqual(list(identity_factor1.variables), [self.tup1, self.tup2, self.tup3]) np_test.assert_array_equal(identity_factor1.cardinality, [2, 3, 4]) np_test.assert_array_
from collections import deque
from threading import RLock, Condition, currentThread
import sys
import time


class OnRequestQueue:
    ListUsedModFunctions = ("append", "popleft")

    class QueueEnd:
        def __init__(self, queueList=None):
            if queueList is not None:
                self.q = queueList
            else:
                self.q = deque()
            self.cond = Condition()
            self.cancel = False

        def __repr__(self):
            with self.cond:
                return "<QueueEnd %r>" % self.q

        def put(self, item):
            with self.cond:
                if self.cancel:
                    return False
                self.q.append(item)
                self.cond.notifyAll()

        def setCancel(self):
            with self.cond:
                self.cancel = True
                self.cond.notifyAll()

    def __init__(self):
        self.queues = set()

    def put(self, item):
        for q in list(self.queues):
            q.put(item)

    def cancelAll(self):
        for q in list(self.queues):
            q.setCancel()

    def read(self, *otherQueues, **kwargs):
        q = self.QueueEnd(**kwargs)
        thread = currentThread()
        thread.waitQueue = q
        if thread.cancel:
            # This is to avoid a small race condition for the case
            # that the thread which wants to join+cancel us was faster
            # and didn't get the waitQueue. In that case, it would
            # have set the cancel already to True.
            return
        for reqqu in otherQueues:
            assert(isinstance(reqqu, OnRequestQueue))
        reqQueues = (self,) + otherQueues
        for reqqu in reqQueues:
            reqqu.queues.add(q)
        while True:
            with q.cond:
                # Note on cancel-behavior:
                # Earlier, we always still yielded all left items in the queue
                # before breaking out here. This behavior doesn't fit if you
                # want to cancel as fast as possible and when you have a
                # persistent queue anyway - while you might hang at some entry.
                if q.cancel:
                    break
                l = list(q.q)
                if not l:
                    q.cond.wait()
            for item in l:
                if q.cancel:
                    break
                yield item
                with q.cond:
                    popitem = q.q.popleft()
                    assert popitem is item
        for reqqu in reqQueues:
            reqqu.queues.remove(q)


class EventCallback:
    def __init__(self, targetQueue, name=None
, reprname=None, extraCall=None): self.targetQueue = targetQueue self.name = name self.rep
rname = reprname self.extraCall = extraCall def __call__(self, *args, **kwargs): if not "timestamp" in kwargs: kwargs["timestamp"] = time.time() if self.extraCall: self.extraCall(*args, **kwargs) self.targetQueue.put((self, args, kwargs)) def __repr__(self): if self.reprname: return self.reprname else: return "<EventCallback %s>" % self.name class Event: def __init__(self): self.lock = RLock() self.targets = [] def push(self, *args): with self.lock: targets = self.targets for weakt in targets: t = weakt() # resolve weakref if t: t(*args) else: self.targets.remove(weakt) def register(self, target): assert sys.getrefcount(target) > 1, "target will be weakrefed, thus we need more references to it" import weakref with self.lock: self.targets.append(weakref.ref(target))
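

# A usage sketch for the classes above (the names and the 0.1s delay are
# illustrative): a second thread fires an EventCallback while the current
# thread consumes the resulting (callback, args, kwargs) tuple via read().
def _demo_on_request_queue():
    from threading import Thread
    events = OnRequestQueue()
    onHit = EventCallback(events, name="hit")
    Thread(target=lambda: (time.sleep(0.1), onHit("payload"))).start()
    currentThread().cancel = False  # read() expects this attribute to exist
    for cb, args, kwargs in events.read():
        print(cb, args, kwargs["timestamp"])
        events.cancelAll()  # terminates the otherwise endless read() loop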
import random from cloudbot.util import http, formatting def api_get(kind, query): """Use the RESTful Google Search API""" url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \ 'v=1.0&safe=moderate' return http.get_json(url % kind, q=query) # @hook.command("googleimage", "gis", "image") def googleimage(text): """<query> - returns the first google image result for <query>""" parsed = api_get('images', text) if not 200 <= parsed['responseStatus'] < 300: raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'no images found' return random.choice(parsed['responseData']['results'][:10])['unescapedUrl'] # @hook.command("google", "g", "search") def google(text): """<query> - returns the first google search result for <query>""" parsed = api_get('web', text) if not 200 <= par
sed['responseStatus'] < 300: raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'No
results found.' result = parsed['responseData']['results'][0] title = http.unescape(result['titleNoFormatting']) title = formatting.truncate_str(title, 60) content = http.unescape(result['content']) if not content: content = "No description available." else: content = http.html.fromstring(content).text_content() content = formatting.truncate_str(content, 150).replace('\n', '') return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
#!/usr/bin/env python2 import os import sys import json import yaml def main(): with open(sys.argv[1], 'rb') as f: known_issues = yaml.safe_load(f.read()) skipstrings = [ 'passed in strict mode', 'passed in non-strict mode', 'failed in strict mode as expected', 'failed in non-strict mode as expected' ] in_failed_tests = False tofix_count = 0 # count of bugs that will be fixed (no uncertainty about proper behavior etc) known_errors = [] diagnosed_errors = [] unknown_errors = [] other_errors = [] for line in sys.stdin: if len(line) > 1 and line[-1] == '\n': line = line[:-1] # Skip success cases skip = False for sk in skipstrings: if sk in line: skip = True if skip: continue # Augment error list with "known bugs"
print(line) # print error list as is, then refined version later if 'failed tests' in line.lower(): in_failed_tests = True continue if in_failed_tests and line.strip() == '': in_failed_tests = False continue if in_failed_tests: # " intl402/ch12/12.2/12.2.3_c in non-strict mode"
tmp = line.strip().split(' ') test = tmp[0] matched = False for kn in known_issues: if kn.get('test', None) != test: continue if kn.has_key('diagnosed'): tofix_count += 1 diagnosed_errors.append(line + ' // diagnosed: ' + kn['diagnosed']) elif kn.has_key('knownissue'): # Don't bump tofix_count, as testcase expected result is not certain known_errors.append(line + ' // KNOWN: ' + kn['knownissue']) else: tofix_count += 1 unknown_errors.append(line + ' // ??? (rule matches)') kn['used'] = True # mark rule used matched = True break if matched: continue # no match, to fix other_errors.append(line) tofix_count += 1 print('') print('=== CATEGORISED ERRORS ===') print('') # With ES2015+ semantic changes to ES5 there are too many known # issues to print by default. #for i in known_errors: # print(i) for i in diagnosed_errors: print(i) for i in unknown_errors: print(i) for i in other_errors: print(i) # Check for unused rules (e.g. bugs fixed) print('') for kn in known_issues: if not kn.has_key('used'): print('WARNING: unused rule: ' + json.dumps(kn)) # Used by testclient if len(unknown_errors) > 0 or len(other_errors) > 0: print('TEST262 FAILED') elif len(known_errors) > 0 or len(diagnosed_errors) > 0: # Known and diagnosed errors don't indicate test failure # as far as GitHub status is concerned. print('TEST262 SUCCESS') else: print('TEST262 SUCCESS') # To fix count print('') print('KNOWN ISSUE COUNT: ' + str(len(known_errors))) print('TO-FIX COUNT: ' + str(tofix_count)) print(' = test case failures which need fixing (Duktape bugs, uninvestigated)') if __name__ == '__main__': main()
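

# A sketch of the known-issues YAML this script consumes. The field names
# ('test', 'diagnosed', 'knownissue') are the ones main() matches on; the
# concrete test paths and messages below are hypothetical examples.
SAMPLE_KNOWN_ISSUES = """
- test: intl402/ch12/12.2/12.2.3_c
  diagnosed: "Intl locale data not bundled"
- test: ch15/15.4/15.4.4/15.4.4.18_example
  knownissue: "ES2015+ semantics intentionally differ from this ES5 test"
"""
# yaml.safe_load(SAMPLE_KNOWN_ISSUES) yields the rule list main() iterates.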
import unittest from katas.kyu_6.longest_2_character_substring import substring class SubstringTestCase(unittest.TestCase): def test_equals(self): self.assertEqual(substring(''), '') def test_equals_2(self): self.assertEqual(substring('a'), 'a') def test_equals_3(self): self.assertEqual(substring('aa'), 'aa') def test_equals_4(self): self.assertEqual(substring('aaa'), 'aaa') def test_equals_5(self): self.assertEqual(substring('ab'), 'ab') def test_equals_6(self): self.assertEqual(substring('aba'), 'aba') def test_equals_7
(self): self.assertEqual(substring('abc'), 'ab') def test_equals_8(self): self.assertEqual(substring('abacd'), 'aba') def test_equals_9(self): self.assertEqual(substring('abcba'
), 'bcb') def test_equals_10(self): self.assertEqual(substring('bbacc'), 'bba') def test_equals_11(self): self.assertEqual(substring('ccddeeff'), 'ccdd') def test_equals_12(self): self.assertEqual(substring('abacddcd'), 'cddcd') def test_equals_13(self): self.assertEqual(substring('cefageaacceaccacca'), 'accacca')
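

# The tests above pin down the contract: return the first, longest substring
# of the input that uses at most two distinct characters. A sliding-window
# sketch that satisfies every case above (the real kata solution may differ):
def substring_sketch(s):
    best = ''
    start = 0      # left edge of the current two-character window
    run_start = 0  # start of the latest run of identical characters
    for i, ch in enumerate(s):
        if i > 0 and ch != s[i - 1]:
            if len(set(s[start:i])) == 2 and ch not in s[start:i]:
                start = run_start  # drop the older character's block
            run_start = i
        if i - start + 1 > len(best):  # strict '>' keeps the first winner
            best = s[start:i + 1]
    return best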
import sip sip.setdestroyonexit(0) import os, shutil import numpy as np from sparkle.QtWrapper import Q
tGui tempfolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), u"tmp") app = None # executes once before all tests def setup(): if not os.path.exists(tempfolder): os.mkdir(tempfolder) np.warnings.filterwarnings('ignore', "All-NaN axis encountered", RuntimeWarning) global app app = QtGui.QApplication([]) def teardown(): shutil.rmtree(tempfolder, ignore_errors=True) np.warnings.resetwarnings() global app app.exit(0)
app = None
# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.7.1) # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore qt_resource_data = b"\ \x00\x00\x05\x96\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\ \x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\ \x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\ \x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\ \x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\ \x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x18\x74\x45\ \x58\x74\x54\x69\x74\x6c\x65\x00\x47\x49\x53\x20\x69\x63\x6f\x6e\ \x20\x74\x68\x65\x6d\x65\x20\x30\x2e\x32\xee\x53\xa0\xa0\x00\x00\ \x00\x18\x74\x45\x58\x74\x41\x75\x74\x68\x6f\x72\x00\x52\x6f\x62\ \x65\x72\x74\x20\x53\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x5f\x56\ \xb1\x08\x00\x00\x00\x27\x74\x45\x58\x74\x44\x65\x73\x63\x72\x69\ \x70\x74\x69\x6f\x6e\x00\x68\x74\x74\x70\x3a\x2f\x2f\x72\x6f\x62\ \x65\x72\x74\x2e\x73\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x2e\x70\ \
x6c\x90\x59\x48\x60\x00\x00\x00\x18\x74\x45\x58\x74\x43\x72\x65\ \x61\x74\x69\x6f\x6e\x20\x54\x69\x6d
\x65\x00\x32\x30\x30\x38\x2d\ \x31\x32\x2d\x31\x32\x58\x2e\x3b\xbf\x00\x00\x00\x52\x74\x45\x58\ \x74\x43\x6f\x70\x79\x72\x69\x67\x68\x74\x00\x43\x43\x20\x41\x74\ \x74\x72\x69\x62\x75\x74\x69\x6f\x6e\x2d\x53\x68\x61\x72\x65\x41\ \x6c\x69\x6b\x65\x20\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\ \x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\ \x6c\x69\x63\x65\x6e\x73\x65\x73\x2f\x62\x79\x2d\x73\x61\x2f\x33\ \x2e\x30\x2f\x5e\x83\x5a\xbc\x00\x00\x04\x16\x49\x44\x41\x54\x48\ \x89\x95\x93\x6d\x6c\x53\x55\x18\xc7\x7f\xf7\xde\xde\x7b\xdb\xb5\ \xb7\x4c\xde\xc6\x14\x37\x32\x12\x08\x0b\x64\xf8\x01\xb2\xb8\xad\ \xc9\x3e\x30\x12\x83\xd9\x10\x05\xb7\x25\x0c\x21\x42\x0c\x0c\x48\ \xcc\x28\x16\x0d\x01\x92\x45\x8a\x1f\xe4\x25\x82\xa0\x4b\xc0\x28\ \xcc\x28\x26\xc2\x17\x08\xfb\x00\x63\xc9\xfa\xc1\xc0\x4c\xd4\xac\ \xdb\x9c\x61\x71\x05\x9c\xb3\x8c\x5b\xa0\x5d\x6f\x8f\x1f\x48\x47\ \x5b\xee\xdc\xfc\x27\x27\xf7\xe5\xf9\x9f\xf3\x3b\xcf\xf3\x9c\x23\ \xbd\xb5\x61\xdd\x25\x24\x0a\x99\x4a\x82\xc8\xb7\x17\xbe\x7b\x7d\ \x4a\x5f\x8e\x1c\x48\x14\xbe\xdd\xb0\x01\xc3\x6d\x00\xf0\x30\xf6\ \x90\x0b\xdf\xb4\x63\xf3\x6f\xea\x4d\xd8\x02\x60\x62\xa1\x47\x4f\ \xc6\x67\x27\x92\xca\x3c\x21\x39\xee\x1a\x6e\x63\x24\x6d\x4a\xc7\ \x27\xd3\x9a\x1d\x07\x36\x63\x59\x01\x14\xa5\xf5\xf2\x89\xfd\x6d\ \x99\x31\x39\xf3\xe3\x71\x7c\x7c\xde\xec\x39\x73\x74\x21\x29\xff\ \x6f\xb7\x96\xb5\xbf\x65\x4f\xcb\x42\x2c\x2b\x90\x1b\x92\xe1\x69\ \x09\x00\x5c\xba\x7a\xf7\xf7\xc1\x41\x00\x69\xcc\x1c\x93\xd2\xa6\ \x74\xdc\x4e\xd5\xd5\x07\x1c\xe3\x56\xd2\x71\xf8\xe3\xc3\xa0\x28\ \xad\xb9\x71\x07\x82\x48\x46\x7d\x47\xc6\x51\x8b\x9d\x4e\x5d\x39\ \x7f\xfe\xfb\x17\x65\xac\x3f\x27\x9c\x82\x88\x1d\x40\x29\x36\x0f\ \xce\x9f\xbf\x60\x46\xb8\x37\x4c\xe7\xd7\x47\xdb\x9e\x33\x08\x21\ \xb2\x46\x65\xc3\x4e\x71\x2d\x3c\x2a\x56\x6d\xf1\xc7\x2a\x1a\x9b\ \xcb\x73\xe3\x99\xa3\xa2\xb1\xb9\x7c\xd5\x16\x7f\xec\x5a\x78\x54\ \x54\x36\xec\x14\x76\x9e\xac\x1e\xac\xd9\x71\x60\xb3\xe1\x31\xe8\ \x1f\x18\xa0\xbe\xbe\x3e\xcf\xa9\xea\x17\xab\xd7\x6f\xf7\xd8\x96\ \x66\xfd\x76\x8f\x53\xd5\x2f\xd6\xd7\xd7\xe7\xf5\x0f\x0c\x60\x78\ \x8c\xa7\xcd\xce\x51\x16\x00\xcb\x0a\xf8\xf7\xfa\xe9\xbc\x7e\x83\ \xd2\xd2\x52\xca\xca\x96\xe7\x2b\x86\xfb\x94\x6d\x69\x0c\xf7\xa9\ \xb2\xb2\xe5\xf9\xa5\xa5\xa5\x74\x5e\xbf\x81\x7f\xaf\x9f\x49\x9b\ \xfc\x6c\x96\xd2\x1a\x0c\x06\x1f\x18\x5e\x4f\x12\xa0\x6e\x6d\x9d\ \xcb\xa9\xeb\x75\xbe\xc6\x5d\xb5\x99\x36\x5f\xe3\xae\x5a\xa7\xae\ \xd7\xd5\xad\xad\x73\x01\x18\x5e\x4f\x32\x18\x0c\x3e\xb0\x6b\x72\ \x16\xe0\xf2\x89\xfd\x6d\xd1\xd8\xc8\xa2\x70\xb8\x2f\x61\x9a\x26\ \x9a\xa6\xd1\xd4\xb4\xc9\xad\x6a\xda\xd9\xf2\x86\xdd\x05\x00\xe5\ \x0d\xbb\x0b\x54\x4d\x3b\xdb\xd4\xb4\xc9\xad\x69\x1a\xa6\x69\x12\ \x0e\xf7\x25\xa2\xb1\x91\x45\xb9\x77\xe0\xf9\x0c\x80\xae\x73\x27\ \xef\xcb\x92\xdc\xd6\xd1\xd1\x11\x07\x28\x2a\x2a\xc2\xe7\xab\xca\ \x33\x9c\x7a\xbb\x24\x49\x92\xe1\xd4\xdb\x7d\xbe\xaa\xbc\xa2\xa2\ \x22\x00\x3a\x3a\x3a\xe2\xb2\x24\xb7\x75\x9d\x3b\x79\xdf\xae\x94\ \xcf\x01\x00\x1e\x25\xc7\x0e\x85\x42\x21\xcb\x34\x4d\x00\x6a\x6a\ \x56\xab\x86\xd7\x5b\xfe\xda\xb6\x7d\x31\xc3\xeb\x2d\xaf\xa9\x59\ \xad\x02\x98\xa6\x49\x28\x14\xb2\x1e\x25\xc7\x0e\xd9\xad\x03\x20\ \x09\x21\x6c\x03\xab\x36\xfb\x8f\xaf\x58\xb9\xe2\xdd\xda\xda\x5a\ \x1d\xe0\xd8\xd1\x63\x6c\xdd\xb6\x95\xd3\x9f\x9f\xa6\xf9\x9d\x4a\ \x52\x7d\x1f\x91\xf8\xbb\x0b\x91\x4a\x24\x34\x39\xf9\xd8\x66\x89\ \x90\x80\xc0\xa4\x80\x8a\x8d\xef\xcd\x75\x2a\x9e\xc1\x40\x20\x90\ \xe7\xf1\x78\xb8\xdd\xd3\xc3\xcd\xce\x4e\x2a\xab\xaa\x58\x16\xdf\ \x8d\x14\xfb\x19\x97\x51\x82\x2c\x3b\x91\x15\x27\x92\xa2\x21\xac\ 
\x04\x42\x58\xcc\x5a\xd8\xc8\x9d\x9f\x3e\xc0\x4a\x44\x6f\x4e\x0a\ \xb0\xcb\x22\xad\xe4\x55\x0d\x63\x6e\x05\x0e\xed\x85\x2c\xbf\xaa\ \xcf\xa6\x70\xe9\xfb\xb8\xf2\x97\x32\x32\xf0\x15\xfd\x37\x37\xda\ \xf7\x20\xad\xdc\x5e\x64\x4a\x76\x64\xdf\x3f\xe7\x8c\xc5\xcc\x7f\ \xe5\x20\xae\xfc\xa5\xc4\xcd\x41\x46\x87\x2e\x3d\xf5\xfd\x17\x20\ \xf7\x44\x65\x01\x64\x75\xe2\xdd\x53\xe0\xe3\xa5\x65\x01\x34\xf7\ \xcb\x24\xe3\xa3\xdc\xfd\xf5\x18\xc2\x7a\x3c\x35\xc0\x2e\x8b\xdc\ \x6c\xbc\xf3\xaa\x29\x5c\xd2\x8c\x43\x9f\x49\xca\x8a\xf3\x57\xdf\ \x97\x3c\x79\xd8\xff\x6c\x23\x53\x01\x72\xb3\x48\x3f\xa3\xc3\x57\ \x70\xcf\x59\x49\x74\xf8\x2a\xff\x0c\xfd\x08\x08\x22\xbf\x7c\xc2\ \x9d\x5b\xfb\x88\x0e\x5f\x21\x3a\x7c\x65\x7a\x80\xcc\x2c\x22\x91\ \x08\xa1\x50\xc8\x02\x40\xa4\x98\x55\xfc\x26\x25\xaf\x9e\xe6\x5e\ \xef\x29\x06\xbb\x77\x30\x74\xeb\x43\x44\x6a\x7c\x62\x4c\x1b\xd0\ \x75\xee\xe4\x7d\x4d\xd5\xbb\xbf\x38\x73\x06\x4d\xd5\xbb\x01\x66\ \x2d\x58\x8f\x6b\xc6\x12\x24\x49\x61\x66\xf1\x1b\xdc\xeb\xfd\xcc\ \x76\xee\xb4\x00\x00\x8a\x22\x97\xb4\xec\xd9\x83\xa2\xc8\x25\x48\ \xf4\xa8\xae\x02\x06\xbb\xb7\x73\xfb\x87\x45\xfc\x11\x6a\xb6\x9f\ \x24\xd1\xe3\x98\x2e\x00\x45\x39\x74\x24\x78\x24\x80\xa2\xb4\x92\ \x62\x28\xf2\xdb\xa7\xc7\x11\x2c\x9e\xd4\x2f\xd1\x4b\x8a\x96\x7f\ \x01\xb3\x71\xdb\xcb\x12\x7d\x31\x70\x00\x00\x00\x00\x49\x45\x4e\ \x44\xae\x42\x60\x82\ " qt_resource_name = b"\ \x00\x07\ \x07\x3b\xe0\xb3\ \x00\x70\ \x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\ \x00\x08\ \x09\xcb\x6f\x53\ \x00\x44\ \x00\x73\x00\x67\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\ \x00\x0a\ \x0b\x6f\x47\xe0\ \x00\x44\ \x00\x73\x00\x67\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\x00\x4f\x00\x70\ \x00\x0d\ \x01\xed\x72\x73\ \x00\x4d\ \x00\x69\x00\x6c\x00\x69\x00\x74\x00\x61\x00\x72\x00\x79\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\ \x00\x13\ \x0c\xc0\x02\x64\ \x00\x6e\ \x00\x75\x00\x6d\x00\x65\x00\x72\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x56\x00\x65\x00\x72\x00\x74\x00\x65\x00\x78\x00\x45\x00\x64\ \x00\x69\x00\x74\ \x00\x18\ \x0a\x0d\x3f\x47\ \x00\x76\ \x00\x65\x00\x63\x00\x74\x00\x6f\x00\x72\x00\x2d\x00\x65\x00\x64\x00\x69\x00\x74\x00\x2d\x00\x6b\x00\x65\x00\x79\x00\x62\x00\x6f\ \x00\x61\x00\x72\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\ " qt_resource_struct = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\ \x00\x00\x00\x2a\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\ \x00\x00\x00\x44\x00\x02\x00\x00\x00\x01\x00\x00\x00\x05\ \x00\x00\x00\x64\x00\x02\x00\x00\x00\x01\x00\x00\x00\x06\ \x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ " def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()
#!/usr/bin/env python from efl import evas import unittest class TestBoxBasics(u
nittest.TestCase): def setUp(self): self.canvas = evas.Canvas(method="buffer", size=(400, 500), viewport=(0, 0, 400, 500)) self.canvas.engine_info_set(self.canvas.engine_info_get()) def tearDown(self): self.canvas.delete() def testConst
ructor(self):
        box = evas.Box(self.canvas)
        self.assertEqual(type(box), evas.Box)
        box.delete()

    def testConstructorBaseParameters(self):
        size = (20, 30)
        pos = (40, 50)
        geometry = (60, 70, 80, 90)
        color = (110, 120, 130, 140)

        # create box using size/pos
        box1 = evas.Box(self.canvas, name="box1", color=color, size=size, pos=pos)
        self.assertEqual(box1.name, "box1")
        self.assertEqual(box1.color, color)
        self.assertEqual(box1.size, size)
        self.assertEqual(box1.pos, pos)
        box1.delete()

        # create box2 using geometry
        box2 = evas.Box(self.canvas, name="box2", color=color, geometry=geometry)
        self.assertEqual(box2.name, "box2")
        self.assertEqual(box2.color, color)
        self.assertEqual(box2.geometry, geometry)
        box2.delete()

    def testRemoveAll(self):
        box = evas.Box(self.canvas)
        r1 = evas.Rectangle(self.canvas)
        r2 = evas.Rectangle(self.canvas)
        box.append(r1)
        box.append(r2)
        box.remove_all(True)
        self.assertEqual(r1.is_deleted(), True)
        self.assertEqual(r2.is_deleted(), True)
        box.delete()


if __name__ == '__main__':
    unittest.main(verbosity=2)
    evas.shutdown()
""" Return the values at the given indices. """ @_convert @_vcheckother @schemed(BACKEND_PREFIX) def dot(self, other): """ Return the dot product""" @schemed(BACKEND_PREFIX) def _getvalue(self, index): """Helper function to return a single value from an array. May be very slow if the memory is on a gpu. """ @_memoize_single @_returntype def _getslice(self, index): return self._return(self._data[index]) @_convert def __getitem__(self, index): """ Return items from the Array. This not guaranteed to be fast for returning single values. """ if isinstance(index, slice): return self._getslice(index) else: return self._getvalue(index) @_convert def resize(self, new_size): """Resize self to new_size """ if new_size == len(self): return else: self._saved = {} new_arr = zeros(new_size, dtype=self.dtype) if len(self) <= new_size: new_arr[0:len(self)] = self else: new_arr[:] = self[0:new_size] self._data = new_arr._data @_convert def roll(self, shift): """shift vector """ self._saved = {} new_arr = zeros(len(self), dtype=self.dtype) if shift == 0: return if shift < 0: shift=len(self) + shift new_arr[0:shift] = self[len(self)-shift: len(self)] new_arr[shift:len(self)] = self[0:len(self)-shift] self._data = new_arr._data @_returntype @_convert def astype(self, dtype): if _numpy.dtype(self.dtype) == _numpy.dtype(dtype): return self else: return self._data.astype(dtype) @schemed(BACKEND_PREFIX) def _copy(self, self_ref, other_ref): """Helper function to copy between two arrays. The arrays references should be bare array types and not `Array` class instances. """ @_convert def __setitem__(self, index, other): if isinstance(other,Array): _convert_to_scheme(other) if self.kind is 'real' and other.kind is 'complex': raise ValueError('Cannot set real value with complex') if isinstance(index,slice): self_ref = self._data[index] other_ref = other._data else: self_ref = self._data[index:index+1] other_ref = other._data self._copy(self_ref, other_ref) elif type(other) in _ALLOWED_SCALARS: if isinstance(index, slice): self[index].fill(other) else: self[index:index+1].fill(other) else: raise TypeError('Can only copy data from another Array') @property def precision(self): if self.dtype == float32 or self.dtype == complex64: return 'single' else: return 'double' @property def kind(self): if self.dtype == float32 or self.dtype == float64: return 'real' elif self.dtype == complex64 or self.dtype == complex128: return 'complex' else: return 'unknown' @property @_convert def data(self): """Returns the internal python array """ return self._data @data.setter def data(self,other): dtype = None if hasattr(other,'dtype'): dtype = other.dtype temp = Array(other, dtype=dtype) self._data = temp._data @property @_convert @schemed(BACKEND_PREFIX) def ptr(self): """ Returns a pointer to the memory of this array """ @property def itemsize(self): return self.dtype.itemsize @property def nbytes(self): return len(self.data) * self.itemsize @property @cpuonly @_convert def _swighelper(self): """ Used internally by SWIG typemaps to ensure @_convert is called and scheme is correct """ return self; @_convert @schemed(BACKEND_PREFIX) def numpy(self): """ Returns a Numpy Array that contains this data """ @_convert def lal(self): """ Returns a LAL Object that contains this data """ lal_data = None if self._data.dtype == float32: lal_data = _lal.CreateREAL4Vector(len(self)) elif self._data.dtype == float64: lal_data = _lal.CreateREAL8Vector(len(self)) elif self._data.dtype == complex64: lal_data = _lal.CreateCOMPLEX8Vector(len(self)) elif 
self._data.dtype == complex128: lal_data = _lal.CreateCOMPLEX16Vector(len(self)) lal_data.data[:] = self.numpy() return lal_data @property def dtype(self): return self._data.dtype def save(self, path, group=None): """ Save array to a Numpy .npy, hdf, or text file. When saving a complex array as text, the real and imaginary parts are saved as the first and second column respectively. When using hdf format, the data is stored as a single vector, along with relevant attributes. Parameters ---------- path: string Destination file path. Must end with either .hdf, .npy or .txt. group: string Additional name for internal storage use. Ex. hdf storage uses this as the key value. Raises ------ ValueError If path does not end in .npy or .txt. """ ext = _os.path.splitext(path)[1] if ext == '.npy': _numpy.save(path, self.numpy()) elif ext == '.txt': if self.kind == 'real': _numpy.savetxt(path, self.numpy()) elif self.kind == 'complex': output = _numpy.vstack((self.numpy().real, self.numpy().imag)).T _numpy.savetxt(path, output) elif ext == '.hdf': key = 'data' if group is None else group f = h5py.File(path) f.create_dataset(key, data=self.numpy(), compression='gzip', compression_opts=9, shuffle=True) else: raise ValueError('Path must end with .npy, .txt, or .hdf')
@_convert def trim_zeros(self): """Remove the leading and trailing zeros. """ tmp = self.numpy() f = len(self)-len(_numpy.trim_zeros(tmp, trim='f')) b = len(self)-len(_numpy.trim_zeros(tmp, trim='b')) r
eturn self[f:len(self)-b]

    @_returntype
    @_convert
    def view(self, dtype):
        """
        Return a 'view' of the array with its bytes now interpreted according
        to 'dtype'. The location in memory is unchanged and changing elements
        in a view of an array will also change the original array.

        Parameters
        ----------
        dtype : numpy dtype (one of float32, float64, complex64 or complex128)
            The new dtype that should be used to interpret the bytes of self
        """
        return self._data.view(dtype)

    def copy(self):
        """ Return copy of this array """
        return self._return(self.data.copy())


# Convenience functions for determining dtypes
# (string comparison uses ==, not identity via 'is', which only happens to
# work when CPython interns the literals)
def real_same_precision_as(data):
    if data.precision == 'single':
        return float32
    elif data.precision == 'double':
        return float64

def complex_same_precision_as(data):
    if data.precision == 'single':
        return complex64
    elif data.precision == 'double':
        return complex128

@decorator
def _return_array(fn, *args, **kwds):
    return Array(fn(*args, **kwds), copy=False)

@_return_array
@schemed(BACKEND_PREFIX)
def zeros(length, dtype=float64):
    """ Return an Array filled with zeros.
    """
    return

def load
#!/usr/bin/env python import socket, ssl # This is a copy of _RESTRICTED_SERVER_CIPHERS from the current tip of ssl.py # <https://hg.python.org/cpython/file/af793c7580f1/Lib/ssl.py#l174> except that # RC4 has been added back in, since it was removed in Python 2.7.10, # but SSLStreamConnection only supports RC4 ciphers. CIPHERS = ( 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:' 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:' '!eNULL:!MD5:!DSS:RC4' ) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(('localhost', 54443)) s.listen(5) while True: newsocket, fromaddr = s.accept() try: connstream = ssl.wrap_socket(newsocket, server_side=True, certfile="cert.pem", keyfile="cert.pem", ciphers=CIPHERS) except ssl.SSLError as e: # Catch occurrences of: # ssl.SSLEOFError: EOF occurred in violation of protocol (_ssl.c:581) # # In theory, setting ssl_version to ssl.PROTOCOL_TLSv1 will resolve # t
he problem, but it didn't do so for me, and it cause
d the error: # ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:581) # # Whereas the SSLEOFError doesn't prevent the server from working # (it seems to happen only when the server is first started, and it # stops happening if we simply ignore it and try again a few times) # so we leave ssl_version at ssl.PROTOCOL_SSLv3 and ignore that error. # # If we catch SSLEOFError specifically, then Travis fails with: # AttributeError: 'module' object has no attribute 'SSLEOFError' # So we catch the more general exception SSLError. continue try: data = connstream.read() while data: connstream.write(data) data = connstream.read() finally: try: connstream.shutdown(socket.SHUT_RDWR) except socket.error as e: # On Mac, if the other side has already closed the connection, # then socket.shutdown will fail, but we can ignore this failure. pass connstream.close()
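

# A matching client sketch for the echo server above: the host/port mirror
# the bind() call, and certificate verification stays at the wrap_socket()
# default (CERT_NONE) since the server uses a self-signed cert.pem. The
# server loop above never returns, so a client like this would run from a
# separate process.
def echo_once(payload=b"hello"):
    raw = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn = ssl.wrap_socket(raw, ciphers=CIPHERS)  # client-side wrap
    conn.connect(('localhost', 54443))            # handshake happens here
    conn.write(payload)
    echoed = conn.read()
    conn.close()
    return echoed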
from pystorm.bolt import BasicBolt class SentenceSplitterBolt(BasicBolt): def p
rocess(self, tup): sentence = tup.values[0]
        for word in sentence.split(' '):
            # emit() is an instance method and expects a list of output
            # values, so the original BasicBolt.emit(word) call would fail
            self.emit([word])


if __name__ == '__main__':
    SentenceSplitterBolt().run()
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Handles request to add a Campaign to a client account.""" __author__ = 'Mark Saniscalchi' import os from handlers.api_handler import APIHandler from handlers.ndb_handler import InitUser import webapp2 from google.appengine.api import users from google.appengine.ext.webapp import templ
ate class AddCampaign(webapp2.RequestHandler): """View that either adds a Campaign or displays an error message.""" def post(self):
"""Handle post request.""" client_customer_id = self.request.get('clientCustomerId') campaign_name = self.request.get('campaignName') ad_channel_type = self.request.get('adChannelType') budget = self.request.get('budget') template_values = { 'back_url': '/showCampaigns?clientCustomerId=%s' % client_customer_id, 'back_msg': 'View Campaigns', 'logout_url': users.create_logout_url('/'), 'user_nickname': users.get_current_user().nickname() } try: app_user = InitUser() # Load Client instance. handler = APIHandler(app_user.client_id, app_user.client_secret, app_user.refresh_token, app_user.mcc_cid, app_user.developer_token) # Create new campaign. handler.AddCampaign(client_customer_id, campaign_name, ad_channel_type, budget) self.redirect('/showCampaigns?clientCustomerId=%s' % client_customer_id) except Exception, e: template_values['error'] = str(e) # Use template to write output to the page. path = os.path.join(os.path.dirname(__file__), '../templates/base_template.html') self.response.out.write(template.render(path, template_values))
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """add ti job_id index Revision ID: 94745
4bf1dff Revises: bdaa763e6c56 Create Date: 2017-08-15 15:12:13.845074 """ # revision identifiers, used by Alembic. revision = '947454bf1dff' down_revision = 'bdaa763e6c56
' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False) def downgrade(): op.drop_index('ti_job_id', table_name='task_instance')
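

# The migration above is normally applied from the command line
# ('alembic upgrade 947454bf1dff', or 'alembic upgrade head'); it can also
# be driven programmatically. A sketch, assuming an alembic.ini exists at
# the given (hypothetical) path:
def apply_ti_job_id_index(ini_path='alembic.ini'):
    from alembic import command
    from alembic.config import Config
    command.upgrade(Config(ini_path), '947454bf1dff')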
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-12-08 19:59 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Driver', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(max_length=50)), ('last_name', models.CharField(max_length=50)), ('is_online', models.BooleanField(default=False)), ('last_seen', models.DateTimeField(blank=True, null=True)), ('auto_back', models.CharField(blank=True, max_length=50, null=True)), ('auto_model', models.CharField(blank=True, max_length=50, null=True)), ('auto_manufacturer', models.CharField(blank=True, max_length=50, null=True)), ], ), migrations.CreateModel( name='DriverStats', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('driver', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='logistics.Driver')), ], ), migrations.CreateModel( name='Fleet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('description', models.TextField(blank=True, null=True)), ('creation_date', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Owner', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(max_length=50)), ('last_name', models.CharField(max_length=50)), ('is_confirmed', models.BooleanField(default=False)), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Trip', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('description', models.TextField(blank=True, null=True)), ('passenger_phone', models.CharField(blank=True, max_length=50, null=True)), ('passenger_name', models.CharField(blank=True, max_length=50, null=True)), ('start_position', models.CharField(blank=True, max_length=50, null=True)), ('end_position', models.CharField(blank=True, max_length=50, null=True)), ('start_date', models.DateTimeField()), ('end_date', models.DateTimeField(blank=True, null=True)), ('is_finished', models.BooleanField(default=False)), ('problem', models.IntegerField(choices=[(1, 'none'), (2, 'crash'), (3, 'jam'), (4, 'other')], default=1)), ('problem_description', models.CharField(blank=True, max_length=50, null=True)), ('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='logistics.Driver')), ('fleet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='logistics.Fleet')), ], ), migrations.CreateModel( name='TripStats', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('trip', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='logistics.Trip')), ], ), migrations.AddField( model_name='fleet', name='owner', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='logistics.Owner'), ), migrations.AddField( model_name='driver', name='fleets', field=models.ManyToManyField(blank=True, 
related_name='fleets', to='lo
gistics.Fleet'), ), migrations.AddField( model_name='driver', name='pending_fleets', field=models.ManyToManyField(blank=True, related_name='pending_fleets', to='logistics.Fleet'), ), migrations.AddField(
model_name='driver', name='user', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
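

# This initial migration is applied with 'python manage.py migrate logistics'.
# From code (e.g. a test harness), the equivalent call is sketched below:
def apply_logistics_migrations():
    from django.core.management import call_command
    call_command('migrate', 'logistics')  # runs this and later migrations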
import sys


def find_motif_locations(dna, motif):
    motif_locations = []
    if len(dna) < len(motif):
        raise ValueError('Motif can\'t be longer than sequence')
    if len(motif) == len(dna) and motif != dna:
        return motif_locations
    for i in range(len(dna) - len(motif) + 1):
        if dna[i:i + len(motif)] == motif:
            motif_locations.append(i + 1)
    return motif_locations


if __name__ == '__main__':
    sequences = open(sys.argv[1]).read().split()
    print(
        ' '.join(
            str(_)
for _ in find_motif_locations( sequences[0], sequences[1] ) ) )
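

# Example: the classic Rosalind SUBS sample. 'ATAT' occurs at the 1-based
# positions 2, 4 and 10 (overlaps included), matching the '+ 1' offset used
# when appending locations above.
assert find_motif_locations('GATATATGCATATACTT', 'ATAT') == [2, 4, 10]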
""" ============== Blob Detection ============== Blobs are bright on dark or dark on bright regions in an image. In this example, blobs are detected using 3 algorithms. The image used in this case is the Hubble eXtreme Deep Field. Each bright dot in the image is a star or a galaxy. Laplacian of Gaussian (LoG) ----------------------------- This is the most accurate and slowest approach. It computes the Laplacian of Gaussian images with successively increasing standard deviation and stacks them up in a cube. Blobs are local maximas in this cube. Detecting larger blobs is especially slower because of larger kernel sizes during convolution. Only bright blobs on dark backgrounds are detected. See :py:meth:`skimage.feature.blob_log` for usage. Difference of Gaussian (DoG) ---------------------------- This is a faster approximation of LoG approach. In this case the image is blurred with increasing standard deviations and the difference between two successively blurred images are stacked up in a cube. This method suffers from the same disadvantage as LoG approach for detecting larger blobs. Blobs are again assumed to be bright on dark. See :py:meth:`skimage.feature.blob_dog` for usage. Determinant of Hessian (DoH) ---------------------------- This is the fastest approach. It detects blobs by finding maximas in the matrix of the Determinant of Hessian of the image. The detection speed is independent of the size of blobs as internally the implementation uses box filters instead of convolutions. Bright on dark as well as dark on bright blobs are detected. The downside is that small blobs (<3px) are not detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage. """ from math import sqrt from skimage import data from skimage.feature import blob_dog, blob_log, blob_doh from skimage.color import rgb2gray import matplotlib.pyplot as plt image = data.hubble_deep_field()[0:500, 0:500] image_gray = rgb2gray(image) blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1) # Compute radii in the 3rd column. blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2) blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1) blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2) blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01) blobs_list = [blobs_log, blobs_dog, blobs_doh] colors = ['yellow', 'lime', 'red'] titles = ['Laplacian of Gaussian', 'Difference of Gaussian', 'Determinant of Hessian'] sequence = zip(blobs_list, colors, titles) fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sh
arey=True, subplot_kw={'adjustable': 'box-forced'}) ax = axes.ravel() for idx, (blobs, color, title) in enumerate(sequence): ax[idx].set_title(title) ax[idx].imshow(image, interpolation='nearest') for blob in blobs: y, x, r = blob c = plt.Circle((x,
y), r, color=color, linewidth=2, fill=False) ax[idx].add_patch(c) ax[idx].set_axis_off() plt.tight_layout() plt.show()
__author__ = 'yusaira-khan' import unittest import un_iife_ize.un_iife_ize as un_iife_ize class CheckVar(unittest.TestCase): def test_simple(self): statement = [('var hello,world=5;', 0)] exp = [('hello=undefined,world=5;', 0)] v = un_iife_ize.Var(statement) v.extract_all() ret = v.all self.assertEqual(ret, exp) def test_multiple(self): statement = [('var hello,world=5;\nvar bye,nope;', 0)] exp = [('hello=undefined,world=5;', 0), ('bye=undefined,nope=undefined;', 19)] v = un_iife_ize.Var(statement) v.extract_all() ret = v.all self.assertEqual(ret, exp) def test_sections(self): statement = [('var hello,world=5;\nvar bye,nope;', 0), ('var hello,world=5;\nvar bye,nope;', 30)] exp = [('hello=undefined,world=5;', 0), ('bye=undefined,nope=undefined;', 19), ('hello=undefined,world=5;', 30), ('bye=undefined,nope=undefined;', 49)] v = un_iife_ize.Var(statement) v.extract_all() ret = v.all self.assertEqual(ret, exp) def test_deliberate_iife(self): statement = [('var hello=function(){;}', 0)] exp = [('hello=function(){;}', 0)] v = un_iife_ize.Var(statement) v.extract_all() ret = v.all
print(ret) self.assertEqual(ret, exp) def test_deliberate_iife_barc(self): statement = [('var hello = (function(){;}())', 0)] exp = [(' hello = (function(){;}())', 0)] v = un_iife_ize.Var(statement) v.extract_all() ret = v.all print(ret, len(exp[0][0]), len(ret[0]
[0])) self.assertEqual(ret, exp) def test_double_assignment(self): statement = [('var hello=wow=;', 0)] exp = [('hello=wow=', 0)] v = un_iife_ize.Var(statement) v.extract_all() ret = v.all print(ret) self.assertEqual(ret, exp) def test_inside_function(self): statement = [('function(a){var hello=5;}', 30)] v = un_iife_ize.Var(statement) v.extract_all() ret = v.unmodified print("woadh", ret, v.unmodified) self.assertEqual(ret, statement) def test_sections_unmodified(self): statement = [('var hello,world=5;\nfunction(){}\nvar bye,nope;', 0), ('var hello,world=5;\nvar bye,nope;', 30)] exp = [('\nfunction(){}\n', 18), ('', len(statement[0][0]) + statement[0][1]), ('\n', 48), ('', len(statement[1][0]) + statement[1][1])] v = un_iife_ize.Var(statement) v.extract_all() ret = v.unmodified print("ret", ret) print("expt", exp) self.assertEqual(ret, exp) if __name__ == '__main__': unittest.main()
i, ei, ei_start_pc) i += 2 ei_end_pc, = struct.unpack('>H', s[i : i + 2]) print '0x%08x exception_table[%d].end_pc=%d' % ( i, ei, ei_end_pc) i += 2 ei_handler_pc, = struct.unpack('>H', s[i : i + 2]) print '0x%08x exception_table[%d].handler_pc=%d' % ( i, ei, ei_end_pc) i += 2 ei_catch_type, = struct.unpack('>H', s[i : i + 2]) print '0x%08x exception_table[%d].catch_type=%d' % ( i, ei, ei_catch_type) i += 2 attributes_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x attributes_count=%d' % (i, attributes_count) i += 2 for ai in xrange(attributes_count): ai_name_index, = struct.unpack('>H', s[i : i + 2]) print '0x%08x attribute[%d].name=%r' % ( i, ai, constant_utf8[ai_name_index]) i += 2 assert constant_utf8[ai_name_index] != 'Code' ai_attribute_length, = struct.unpack('>L', s[i : i + 4]) i += 4 ai_info = s[i : i + ai_attribute_length] print '0x%08x attribute[%d].info=%r' % ( i, ai, ai_info) i += ai_attribute_length # TODO(pts): Parse the attribute. assert i == iend, 'end-of-code-attr expected at %d, len=%d' % (i, iend) print '0x%08x end-of-code-attr' % i def ParseClass(file_name, max_lines=0, offset=0): try: f = open(file_name) s = f.read() finally: f.close() i = 0 magic, = struct.unpack('>L', s[i : i + 4]) print '0x%08x magic=0x%08x' % (i, magic) assert magic == 0xcafebabe i += 4 major_version, = struct.unpack('>H', s[i : i + 2]) print '0x%08x major_version=%d' % (i, major_version) i += 2 minor_version, = struct.unpack('>H', s[i : i + 2]) print '0x%08x minor_version=%d' % (i, minor_version) i += 2 constant_pool_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x constant_pool_count=%d' % (i, constant_pool_count) i += 2 # Maps a CONSTANT_Class_info index to a name_index constant_class = {} # Maps a CONSTANT_Utf8 index to a string constant_utf8 = {} # Maps a CONSTANT_NameAndType index to (name_index, descriptor_index) constant_name_and_type = {} # Maps a CONSTANT_Methodref index to (class_index, name_and_type_index) constant_method_ref = {} # Maps a CONSTANT_InterfaceMethodref index to # (class_index, name_and_type_index) constant_interface_method_ref = {} # Maps a name to its CONSTANT_utf8 index name_to_constant_idx = {} ci = 1 while ci < constant_pool_count: tag = ord(s[i]) print '0x%08x constant %d tag=%s' % ( i, ci, TAG_TO_CONSTANT_TYPE.get(tag, tag)), assert tag in TAG_TO_CONSTANT_TYPE if tag == 7: #CONSTANT_Class_info i += 1 j, = struct.unpack('>H', s[i : i + 2]) constant_class[ci] = j print j, i += 2 elif tag == 9: #CONSTANT_Fieldref i += 5 elif tag == 10: #CONSTANT_Methodref i += 1 constant_method_ref[ci] = struct.unpack('>HH', s[i : i + 4]) print constant_method_ref[ci][0], constant_method_ref[ci][1], i += 4 elif tag == 11: #CONSTANT_InterfaceMethodref i += 1 constant_interface_method_ref[ci] = struct.unpack('>HH', s[i : i + 4]) print constant_interface_method_ref[ci][0], print constant_interface_method_ref[ci][1], i += 4 elif tag == 8: #CONSTANT_String i += 3 elif tag == 3: #CONSTANT_Integer i += 5 elif tag == 4: #CONSTANT_Float i += 5 elif tag == 5: #CONSTANT_Long i += 9 ci += 1 elif tag == 6: #CONSTANT_Double i += 9 ci += 1 elif tag == 12: #CONSTANT_NameAndType i += 1 constant_name_and_type[ci] = struct.unpack('>HH', s[i : i + 4]) print constant_name_and_type[ci][0], constant_name_and_type[ci][1], i += 4 elif tag == 1: #CONSTANT_Utf8 blen = struct.unpack('>H', s[i + 1 : i + 3])[0] name = s[i + 3 : i + 3 + blen] name_to_constant_idx[name] = ci constant_utf8[ci] = name print repr(name), i += 3 + blen else: assert 0 print ci += 1 access_flags, = struct.unpack('>H', s[i : 
i + 2]) print '0x%08x access_flags=%s' % ( i, FormatAccessFlags(access_flags, is_class=True)) i += 2 this_class, = struct.unpack('>H', s[i : i + 2]) print '0x%08x this_class=%r' % ( i, constant_utf8[constant_class[this_class]]) i += 2 super_class, = struct.unpack('>H', s[i : i + 2]) print '0x%08x super_class=%r' % ( i, constant_utf8[constant_class[super_class]]) i += 2 interfaces_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x interfaces_count=%d' % (i, interfaces_count) i += 2 for ii in xrange(interfaces_count): interface, = struct.unpack('>H', s[i : i + 2]) print '0x%08x interface[%d]=%r' % ( i, ii, constant_utf8[constant_class[interface]]) i += 2 fields_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x fields_count=%d' % (i, fields_count) i += 2 for fi in xrange(fields_count): fi_access_flags, = struct.unpack('>H', s[i : i + 2]) print '0x%08x field[%d].access_flags=%s' % ( i, fi, FormatAccessFlags(fi_access_flags)) i += 2 fi_name_index, = struct.unpack('>H', s[i : i + 2]) print '0x%08x field[%d].name=%r' % ( i, fi, constant_utf8[fi_name_index]) i += 2 fi_descriptor_index, = struct.unpack('>H', s[i : i + 2]) print '0x%08x field[%d].descriptor=%r' % ( i, fi, constant_utf8[fi_descriptor_index]) i += 2 fi_attributes_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x field[%d].attributes_count=%d' % (i, fi, fi_attributes_count) i += 2 for ai in xrange(fi_attributes_count): ai_name_index, = struct.unpack('>H', s[i : i + 2]) print '0x%08x field[%d].attribute[%d].name=%r' % ( i, fi, ai, constant_utf8[ai_name_index]) i += 2 assert constant_utf8[ai_name_index] != 'Code' ai_attribute_length, = struct.unpack('>L', s[i : i + 4]) i += 4 ai_info = s[i : i + ai_attribute_length] print '0x%08x field[%d].attribute[%d].info=%r' % ( i, fi, ai, ai_info) i += ai_attribute_length # TODO(pts): Parse the attribute. methods_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x methods_count=%d' % (i, methods_count) i += 2 for fi in xrange(methods_count): fi_access_flags, = struct.unpack('>H', s[i : i + 2]) print '0x%08x method[%d].access_flags=%s' % ( i, fi, FormatAccessFlags(fi_access_flags)) i += 2 fi_name_index, = struct.unpack('>H', s[i : i + 2]) print '0x%08x method[%d].name=%r' % ( i, fi, constant_utf8[fi_name_index]) i += 2 fi_descriptor_index, = struct.unpack('>H', s[i : i + 2]) print '0x%08x method[%d].descriptor=%r' % ( i, fi, constant_utf8[fi_descriptor_index]) i += 2 fi_attributes_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x method[%d].attributes_count=%d' % (i, fi, fi_attributes_count) i += 2 for ai in xrange(fi_attributes_count): ai_name_index, = struct.unpack('>H', s[i : i + 2]) print '0x%08x method[%d].attribute[%d].name=%r' % ( i, fi, ai, constant_utf8[ai_name_index]) i += 2 ai_attribute_length, = struct.unpack('>L', s[i : i + 4]) i += 4 if constant_utf8[ai_name_index] == 'Code': print '0x%08x method[%d].attribute[%d].code:' % (i, fi, ai) # TODO(pts): limit s[:ai_info...] DumpCode(
s, i, i + ai_attribute_length, constant_class=constant_class, constant_utf8=constant_utf8, const
ant_name_and_type=constant_name_and_type, constant_method_ref=constant_method_ref, constant_interface_method_ref=constant_interface_method_ref) else: ai_info = s[i : i + ai_attribute_length] print '0x%08x method[%d].attribute[%d].info=%r' % ( i, fi, ai, ai_info) i += ai_attribute_length # TODO(pts): Parse the attribute. attributes_count, = struct.unpack('>H', s[i : i + 2]) print '0x%08x attributes_count=%d' % (i, attributes_count) i += 2 for ai in xrange(attributes_count): ai_name_index, = struct.unpack('>H', s[i : i + 2]
import socket
from selectors import DefaultSelector, EVENT_WRITE, EVENT_READ

sock = socket.socket()
sock.setblocking(False)

selector = DefaultSelector()
urls_todo = set(['/'])
seen_urls = set(['/'])
stopped = False


class Fetcher:
    def __init__(self, url):
        self.response = b''
        self.url = url
        self.sock = None

    def fetch(self):
        # This method fetches the url
        self.sock = socket.socket()
        self.sock.setblocking(False)
        try:
            self.sock.connect(('xkcd.com', 80))
        except BlockingIOError:
            pass
        selector.register(self.sock.fileno(), EVENT_WRITE, self.connected)

    def connected(self, key, mask):
        print('connected!')
        selector.unregister(key.fd)
        request = 'GET {} HTTP/1.0\r\nHost: xkcd.com\r\n\r\n'.format(self.url)
        self.sock.send(request.encode('ascii'))
        # Register the next callback
        selector.register(key.fd, EVENT_READ, self.read_response)

    def read_response(self, key, mask):
        global stopped
        chunk = self.sock.recv(4096)  # 4K chunks of data
        if chunk:
            self.response += chunk
        else:
            selector.unregister(key.fd)  # Done reading
            links = self.parse_links()
            # Set logic
            for link in links.difference(seen_urls):
                urls_todo.add(link)
                Fetcher(link).fetch()
            seen_urls.update(links)
            urls_todo.remove(self.url)
            if not urls_todo:
                stopped = True

    def parse_links(self):
        # Stub: return no links so the event loop can terminate.
        return set()


if __name__ == '__main__':
    fetcher = Fetcher('/353')
    fetcher.fetch()
    while not stopped:
        events = selector.select()
        for event_key, event_mask in events:
            callback = event_key.data
            callback(event_key, event_mask)
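# --- Illustrative sketch (not from the original): one plausible
# parse_links implementation for the Fetcher above. The regex and the
# latin-1 decoding are assumptions, not the program's actual parser.
import re

def parse_links_example(response):
    """Return the set of site-relative hrefs (like '/353') in a raw response."""
    body = response.decode('latin-1')
    return set(re.findall(r'href="(/[^"]*)"', body))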
ue = batch_size//repeats # Batch it up. patches = tf.train.shuffle_batch( [patches], batch_size=unique, num_threads=2, capacity=min_queue + 3 * batch_size, enqueue_many=True, min_after_dequeue=min_queue) print('PATCHES =================',patches.get_shape().as_list()) patches = make_batch_hqjitter(patches, BURST_LENGTH, batch_size, repeats, height, width, to_shift, upscale, jitter, smalljitter) return patches def make_noised(image, height, width, sig_range): y = tf.random_uniform([1], 0, tf.shape(image)[0]-height, tf.int32) x = tf.random_uniform([1], 0, tf.shape(image)[1]-width, tf.int32) y, x = y[0], x[0] noised = tf.reshape(image[y:y+height, x:x+width, :], (1, height, width, 1, 3)) denoised = noised sig = tf.random_uniform([1], 0, sig_range, tf.float32) noised = tf.clip_by_value(noised + tf.random_normal(tf.shape(noised),mean=0.,stddev=sig[0]),0.,1.) return noised, denoised, tf.reshape(sig, [1,1]) def make_stack_noised(image, height, width, depth, sig_range): v_error = tf.maximum(height - tf.shape(image)[0] + 1, 0) h_error = tf.maximum(width - tf.shape(image)[1] + 1, 0) image = tf.pad(image, [[0,v_error],[0,h_error],[0,0]]) noised_stack, denoised_stack, sig_stack = make_noised(image, height, width, sig_range) for i in range(depth-1): noised, denoised, sig = make_noised(image, height, width, sig_range) noised_stack = tf.concat((noised_stack, noised), axis=0) denoised_stack = tf.concat((denoised_stack, denoised), axis=0) sig_stack = tf.concat((sig_stack, sig), axis=0) return noised_stack, denoised_stack, sig_stack def load_batch_noised(depth, dataset_dir, batch_size=32, height=64, width=64, degamma=1., sig_range=20.): filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)] filename_queue = tf.train.string_input_producer(filenames) noised_stack = None while noised_stack == None: _, image_file = tf.WholeFileReader().read(filename_queue) image = tf.image.decode_image(image_file) noised_stack, denoised_stack, sig_stack = make_stack_noised((tf.cast(image[0], tf.float32) / 255.)**degamma, height, width, depth, sig_range) # Batch it up. noised, denoised, sig = tf.train.shuffle_batch( [noised_stack, denoised_stack, sig_stack], batch_size=batch_size, num_threads=2, capacity=1024 + 3 * batch_size, enqueue_many=True, min_after_dequeue=500) return noised, denoised, sig def decode(tfr_features): burst = tf.decode_raw(tfr_features['burst_raw'], tf.float32) merged = tf.decode_raw(tfr_features['merge_raw'], tf.float32) readvar = tf.decode_raw(tfr_features['readvar'], tf.float32) shotfactor = tf.decode_raw(tfr_features['shotfactor'], tf.float32) channelgain = tf.decode_raw(tfr_features['channelgain'], tf.float32) blacklevels = tf.decode_raw(tfr_features['blacklevels'], tf.float32) depth = tf.cast(tfr_features['depth'], tf.int32) # 0 height = tf.cast(tfr_features['height'], tf.int32) # 1 width = tf.cast(tfr_features['width'], tf.int32) # 2 # depth = width_ # height = depth_ # width = height_ # WIDTH=4032 # HEIGHT=3024 # payload_raw_c = (payload_raw-bl/16) * ch burst = tf.reshape(burst, (height,width,depth)) sh = tf.shape(burst) ch = tf.tile(tf.reshape(channelgain, (2,2,1)), (sh[0]/2, sh[1]/2, sh[2])) bl = tf.tile(tf.reshape(blacklevels, (2,2,1)), (sh[0]/2, sh[1]/2, sh[2])) burst = (burst - bl/16.) * ch merged = tf.reshape(merged, (height,width,3)) / 16. scale = tf.reduce_max(merged) burst = tf.clip_by_value(burst, 0., scale) scale = 1024. 
burst = burst / scale merged = merged / scale readvar = tf.reshape(readvar * channelgain * channelgain, [4]) / scale / scale shotfactor = tf.reshape(shotfactor * channelgain, [4]) / scale return burst, merged, readvar, shotfactor def decode_patches(tfr_features): burst = tf.decode_raw(tfr_features['burst_raw'], tf.float32) merged = tf.decode_raw(tfr_features['merge_raw'], tf.float32) demosaic = tf.decode_raw(tfr_features['demosaic_raw'], tf.float32) readvar = tf.decode_raw(tfr_features['readvar'], tf.float32) shotfactor = tf.decode_raw(tfr_features['shotfactor'], tf.float32) channelgain = tf.decode_raw(tfr_features['channelgain'], tf.float32) blacklevels = tf.decode_raw(tfr_features['blacklevels'], tf.float32) depth = tf.cast(tfr_features['depth'], tf.int32) # 0 height = tf.cast(tfr_features['height'], tf.int32) # 1 width = tf.cast(tfr_features['width'], tf.int32) # 2 patches = tf.cast(tfr_features['patches'], tf.int32) burst = tf.reshape(burst, (patches, height,width,depth)) sh = tf.shape(burst) ch = tf.tile(tf.reshape(channelgain, (2,2,1)), (sh[1]/2, sh[2]/2, sh[3])) bl = tf.tile(tf.reshape(blacklevels, (2,2,1)), (sh[1]/2, sh[2]/2, sh[3])) burst = (burst - bl/16./2**10) * ch merged = tf.reshape(merged, (patches,height,width)) demosaic = tf.reshape(demosaic, (patches,height,width,3)) demosaic = demosaic burst = tf.clip_by_value(burst, -10, 1.) merged = tf.clip_by_value(merged, -10, 1.) scale = 2.**10 readvar = tf.reshape(readvar, [4]) / scale / scale shotfactor = tf.reshape(shotfactor, [4]) / scale return burst, merged, demosaic, readvar, shotfactor def read_and_decode_single(filename): e = tf.python_io.tf_record_iterator(filename).next() features = tf.parse_single_example(e, features={ 'readvar': tf.FixedLenFeature([], tf.string), 'shotfactor': tf.FixedLenFeature([], tf.string), 'blacklevels': tf.FixedLenFeature([], tf.string), 'channelgain': tf.FixedLenFeature([], tf.string), 'burst_raw': tf.FixedLenFeature([], tf.string), 'merge_raw': tf.FixedLenFeature([], tf.string), 'depth': tf.FixedLenFeature([], tf.int64), 'height': tf.FixedLenFeature([], tf.int64), 'width': tf.FixedLenFeature([], tf.int64), }) return decode(features) def read_and_decode(filename_queue): reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, features={ 'readvar': tf.FixedLenFeature([], tf.string), 'shotfactor': tf.FixedLenFeature([], tf.string), 'blacklevels': tf.FixedLenFeature([], tf.st
ring), 'channelgain': tf.FixedLenFeatur
e([], tf.string), 'burst_raw': tf.FixedLenFeature([], tf.string), 'merge_raw': tf.FixedLenFeature([], tf.string), 'depth': tf.FixedLenFeature([], tf.int64), 'height': tf.FixedLenFeature([], tf.int64), 'width': tf.FixedLenFeature([], tf.int64), }) return decode(features) def read_and_decode_patches(filename_queue): reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, features={ 'readvar': tf.FixedLenFeature([], tf.string), 'shotfactor': tf.FixedLenFeature([], tf.string), 'blacklevels': tf.FixedLenFeature([], tf.string), 'channelgain': tf.FixedLenFeature([], tf.string), 'burst_raw': tf.FixedLenFeature([], tf.string), 'merge_raw': tf.FixedLenFeature([], tf.string), 'demosaic_raw': tf.FixedLenFeature([], tf.string), 'depth': tf.FixedLenFeature([], tf.int64), 'height': tf.FixedLenFeature([], tf.int64), 'width': tf.FixedLenFeature([], tf.int64), 'patches': tf.FixedLenFeature([], tf.int64), }) return decode_patches(features) def read_and_decode_str(filename_queue): reader = tf.TFRecordReader() s, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, features={ 'readvar': tf.FixedLenFeature([], tf.string), 'shotfactor': tf.FixedLenFeature([], tf.string), 'blacklevels': tf.FixedLenFeature([], tf.string), 'channelgain': tf.FixedLenFeature([], tf.string), 'burst_raw': tf.FixedLenFeature([], tf.string), 'merge_raw': tf.FixedLenFeature([
#!/usr/bin/python

__version__ = '0.0.1'

import pysimplesoap.client
import pysimplesoap.simplexml

from zimbrasoap.soap import soap, admin, mail
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('lumos', '0009_proglang_slug'),
    ]

    operations = [
        migrations.AddField(
            model_name='softskills',
            name='slug',
            field=models.SlugField(default=''),
            preserve_default=False,
        ),
    ]
from __future__ import absolute_import

import momoko
from tornado import gen
from psycopg2.extras import RealDictConnection


def initialize_database():
    db = momoko.Pool(
        dsn='''dbname=nightson user=vswamy password=vswamy host=localhost port=5432''',
        size=5,
        connection_factory=RealDictConnection,
    )
    db.connect()
    return db


class BaseEntityManager(object):

    db = initialize_database()

    def __init__(self, request):
        self.request = request

    @gen.coroutine
    def execute_sql(self, sql):
        ''' Executes an sql statement and returns the value '''
        cursor = yield BaseEntityManager.db.execute(sql)
        raise gen.Return(cursor)

    def get_value(self, key):
        ''' Gets a value given dictionary like arguments '''
        params = {}
        if (self.request.method == 'GET'):
            params = self.request.query_arguments
        elif (self.request.method == 'POST'):
            params = self.request.body_arguments
        elif (self.request.method == 'PUT'):
            params = self.request.arguments
        elif (self.request.method == 'DELETE'):
            params = self.request.body_arguments
        if (key not in params):
            return None
        ''' Params will always be of the form key:[values] '''
        return params.get(key)[0]
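# --- Hedged usage sketch (handler and table names are hypothetical, not part
# of this module): a Tornado request handler could drive BaseEntityManager so:
#
#     class EventHandler(tornado.web.RequestHandler):
#         @gen.coroutine
#         def get(self):
#             manager = BaseEntityManager(self.request)
#             cursor = yield manager.execute_sql('SELECT * FROM events')
#             self.write({'events': cursor.fetchall()})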
from enum import Enum


class EnumContainerStatus(Enum):
    running = "running"
    halted = "halted"
    networkKilled = "networkKilled"
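# --- Hedged usage sketch (added for illustration): the value-based
# constructor maps raw strings, e.g. from an API payload, onto the enum.
def _demo_container_status():
    status = EnumContainerStatus("running")  # value-based lookup
    assert status is EnumContainerStatus.running
    assert EnumContainerStatus.halted.value == "halted"
    return status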
# # This file is part of pysmi software. # # Copyright (c) 2015-2016, Ilya Etingof <ilya@glas.net> # License: http://pysmi.sf.net/license.html # import os import sys import time from pysmi.reader.base import AbstractReader from pysmi.mibinfo import MibInfo from pysmi.compat import decode from pysmi import debug from pysmi import error class FileReader(AbstractReader): """Fetch ASN.1 MIB text by name from local file. *FileReader* class instance tries to locate ASN.1 MIB files by name, fetch and return their contents to caller. """ useIndexFile = True # optional .index file mapping MIB to file name indexFile = '.index' def __init__(self, path, recursive=True, ignoreErrors=True): """Create an instance of *FileReader* serving a directory. Args: path (str): directory to search MIB files Keyword Args: recursive (bool): whether to include subdirectories
ignoreErrors (bool): ignore filesystem access errors """ self._path = os.path.normpath(path) self._recursive = recursive self._ignoreErrors = ignoreErrors self._indexLoaded = False def __str__(self): return '%
s{"%s"}' % (self.__class__.__name__, self._path) def getSubdirs(self, path, recursive=True, ignoreErrors=True): if not recursive: return [path] dirs = [path] try: subdirs = os.listdir(path) except OSError: if ignoreErrors: return dirs else: raise error.PySmiError('directory %s access error: %s' % (path, sys.exc_info()[1])) for d in subdirs: d = os.path.join(decode(path), decode(d)) if os.path.isdir(d): dirs.extend(self.getSubdirs(d, recursive)) return dirs def loadIndex(self, indexFile): mibIndex = {} if os.path.exists(indexFile): try: mibIndex = dict( [x.split()[:2] for x in open(indexFile).readlines()] ) debug.logger & debug.flagReader and debug.logger('loaded MIB index map from %s file, %s entries' % (indexFile, len(mibIndex))) except IOError: pass return mibIndex def getMibVariants(self, mibname): if self.useIndexFile: if not self._indexLoaded: self._mibIndex = self.loadIndex( os.path.join(self._path, self.indexFile) ) self._indexLoaded = True if mibname in self._mibIndex: debug.logger & debug.flagReader and debug.logger('found %s in MIB index: %s' % (mibname, self._mibIndex[mibname])) return [(mibname, self._mibIndex[mibname])] return super(FileReader, self).getMibVariants(mibname) def getData(self, mibname): debug.logger & debug.flagReader and debug.logger('%slooking for MIB %s' % (self._recursive and 'recursively ' or '', mibname)) for path in self.getSubdirs(self._path, self._recursive, self._ignoreErrors): for mibalias, mibfile in self.getMibVariants(mibname): f = os.path.join(decode(path), decode(mibfile)) debug.logger & debug.flagReader and debug.logger('trying MIB %s' % f) if os.path.exists(f) and os.path.isfile(f): try: mtime = os.stat(f)[8] debug.logger & debug.flagReader and debug.logger('source MIB %s mtime is %s, fetching data...' % (f, time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(mtime)))) return MibInfo(path='file://%s' % f, file=mibfile, name=mibalias, mtime=mtime), decode(open(f, mode='rb').read(self.maxMibSize)) except (OSError, IOError): debug.logger & debug.flagReader and debug.logger('source file %s open failure: %s' % (f, sys.exc_info()[1])) if not self._ignoreErrors: raise error.PySmiError('file %s access error: %s' % (f, sys.exc_info()[1])) raise error.PySmiReaderFileNotModifiedError('source MIB %s is older than needed' % f, reader=self) raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
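# --- Hedged usage sketch (the MIB directory is an assumption): fetching one
# MIB module from a local directory with the FileReader above.
#
#     reader = FileReader('/usr/share/snmp/mibs', recursive=True)
#     mibinfo, text = reader.getData('SNMPv2-MIB')
#     print(mibinfo.name, mibinfo.mtime, len(text))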
# -*- coding: utf-8 -*- # This file is part of emesene. # # emesene is free software; you can redistribute it and/or m
odify # it under the terms of the GNU General Public License as published by #
the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # emesene is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with emesene; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import xml.parsers.expat import e3 import logging class RichWidget(object): '''a base widget that allows to add formatted text based on a xhtml subset''' def put_text(self, text, fg_color=None, bg_color=None, font=None, size=None, bold=False, italic=False, underline=False, strike=False): '''insert text at the current position with the style defined by the optional parameters''' raise NotImplementedError('Not implemented') def put_formatted(self, text, fg_color=None, bg_color=None, font=None, size=None, bold=False, italic=False, underline=False, strike=False): '''insert text at the current position with the style defined inside text''' try: result = e3.common.XmlParser.XmlParser( #'<span>' + text.replace('\n', '') + '</span>').result '<span>' + text + '</span>').result except xml.parsers.expat.ExpatError: logging.getLogger("gtkui.RichWidget").debug("cant parse '%s'" % \ (text, )) return dct = e3.common.XmlParser.DictObj(result) self._put_formatted(dct, fg_color, bg_color, font, size, bold, italic, underline, strike) def _put_formatted(self, dct, fg_color=None, bg_color=None, font=None, size=None, bold=False, italic=False, underline=False, strike=False): '''insert text at the current position with the style defined inside text, using the parsed structure stored on dct''' # override the values if defined, keep the old ones if no new defined bold = dct.tag == 'b' or dct.tag == 'strong' or bold italic = dct.tag == 'i' or dct.tag == 'em' or italic underline = dct.tag == 'u' or underline strike = dct.tag == 's' or strike if dct.tag == 'span' and dct.style: style = e3.common.XmlParser.parse_css(dct.style) font = style.font_family or font try: # TODO: handle different units? size = int(style.font_size) or size except ValueError: pass except TypeError: pass fg_color = style.color or fg_color bg_color = style.background_color or bg_color if dct.childs is None: return for child in dct.childs: if isinstance(child, basestring): self.put_text(child, fg_color, bg_color, font, size, bold, italic, underline, strike) elif child.tag == 'img': self.put_image(child.src, child.alt) elif child.tag == 'br': self.new_line() elif child.tag == 'a': self.put_link(child.href) else: self._put_formatted(child, fg_color, bg_color, font, size, bold, italic, underline, strike) def put_image(self, path, tip=None): '''insert an image at the current position tip it's the alt text on mouse over''' raise NotImplementedError('Not implemented') def new_line(self): '''put a new line on the text''' raise NotImplementedError('Not implemented') def put_link(self, link): '''insert a link at the current position''' raise NotImplementedError('Not implemented')
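class PlainTextRichWidget(RichWidget):
    '''Hedged example subclass (not part of emesene): collect the runs that
    put_formatted produces into a plain string, which is convenient for
    testing the parser. Only the hooks needed for text and <br/> are filled.'''

    def __init__(self):
        self.chunks = []

    def put_text(self, text, *args, **kwargs):
        self.chunks.append(text)

    def new_line(self):
        self.chunks.append('\n')

    def as_string(self):
        return ''.join(self.chunks)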
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('My Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS

import tempfile, os
from django import contrib

tempdata = tempfile.mkdtemp()
approot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
adminroot = os.path.join(contrib.__path__[0], 'admin')

DATABASES = {
    'default': {
        'NAME': os.path.join(tempdata, 'signalqueue-test.db'),
        'TEST_NAME': os.path.join(tempdata, 'signalqueue-test.db'),
        'ENGINE': 'django.db.backends.sqlite3',
        'USER': '',
        'PASSWORD': '',
    }
}

TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False

MEDIA_ROOT = os.path.join(approot, 'static')
MEDIA_URL = '/face/'
STATIC_ROOT = os.path.join(adminroot, 'static', 'admin')
STATIC_URL = '/staticfiles/'
ADMIN_MEDIA_PREFIX = '/admin-media/'
ROOT_URLCONF = 'signalqueue.settings.urlconf'

TEMPLATE_DIRS = (
    os.path.join(approot, 'templates'),
    os.path.join(adminroot, 'templates'),
    os.path.join(adminroot, 'templates', 'admin'),
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.gzip.GZipMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.request",
    "django.core.context_processors.debug",
    #"django.core.context_processors.i18n", this is AMERICA
    "django.core.context_processors.media",
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django_nose',
    'djcelery',
    'delegate',
    'signalqueue',
)

LOGGING = dict(
    version=1,
    disable_existing_loggers=False,
    formatters={
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    handlers={
        'default': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
        'nil': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
    },
    loggers={
        'signalqueue': {
            'handlers': ['default'], 'level': 'INFO', 'propagate': False
        },
    },
    root={
        'handlers': ['default'], 'level': 'INFO', 'propagate': False
    },
)

SQ_QUEUES = {
    'default': {    # you need at least one dict named 'default' in SQ_QUEUES
        'ENGINE': 'signalqueue.worker.backends.RedisSetQueue',  # required - full path to a QueueBase subclass
        'INTERVAL': 30,  # 1/3 sec
        'OPTIONS': dict(port=8356),
    },
    'listqueue': {
        'ENGINE': 'signalqueue.worker.backends.RedisQueue',
        'INTERVAL': 30,  # 1/3 sec
        'OPTIONS': dict(port=8356),
    },
    'db': {
        'ENGINE': 'signalqueue.worker.backends.DatabaseQueueProxy',
        'INTERVAL': 30,  # 1/3 sec
        'OPTIONS': dict(app_label='signalqueue', modl_name='EnqueuedSignal'),
    },
    'celery': {
        'ENGINE': 'signalqueue.worker.celeryqueue.CeleryQueue',
        'INTERVAL': 30,  # 1/3 sec
        'OPTIONS': dict(celery_queue_name='inactive', transport='redis', port=8356),
    },
}

SQ_ADDITIONAL_SIGNALS = ['signalqueue.tests']
SQ_WORKER_PORT = 11201

TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'

try:
    from kombu import Queue
except ImportError:
    pass
else:
    CELERY_DEFAULT_QUEUE = 'default'
    CELERY_DEFAULT_ROUTING_KEY = 'default'
    CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
    CELERY_QUEUES = (
        Queue('default', routing_key='default.#'),
        Queue('yodogg', routing_key='yodogg.#'),
    )
    CELERY_ALWAYS_EAGER = True

BROKER_URL = 'redis://localhost:8356/0'
BROKER_HOST = "localhost"
BROKER_BACKEND = "redis"
REDIS_PORT = 8356
REDIS_HOST = "localhost"
BROKER_USER = ""
BROKER_PASSWORD = ""
BROKER_VHOST = "0"
REDIS_DB = 0
REDIS_CONNECT_RETRY = True
CELERY_SEND_EVENTS = True
CELERY_RESULT_BACKEND = "redis://localhost:8356/0"
CELERY_TASK_RESULT_EXPIRES = 10
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"

try:
    import djcelery
except ImportError:
    pass
else:
    djcelery.setup_loader()

# package path-extension snippet.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
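# --- Hedged sketch (illustration only; this is not signalqueue's actual
# loader): a worker could resolve a backend class from the SQ_QUEUES mapping
# above via its dotted 'ENGINE' path.
#
#     from importlib import import_module
#
#     def resolve_queue_backend(name='default'):
#         conf = SQ_QUEUES[name]
#         module_path, class_name = conf['ENGINE'].rsplit('.', 1)
#         return getattr(import_module(module_path), class_name)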
# -*- coding:utf-8 -*-

'''Driver script for the fio benchmark tool.'''

import os, shutil, re, time, sys, copy
from test import BaseTest
from lpt.lib.error import *
from lpt.lib import lptxml
from lpt.lib import lptlog
from lpt.lib.share import utils
from lpt.lib import lptreport


class TestControl(BaseTest):
    '''Inherits the attributes and methods of BaseTest.'''

    def __init__(self, jobs_xml, job_node, tool, tarball='fio-2.1.10.tar.bz2'):
        super(TestControl, self).__init__(jobs_xml, job_node, tool, tarball)

    def setup(self):
        '''Compile the sources and set up the program.'''
        if not self.check_bin(self.processBin):
            self.tar_src_dir = self.extract_bar()
            os.chdir(self.tar_src_dir)
            self.compile(configure_status=True, make_status=True)
        os.chdir(self.lpt_root)

    def run(self):
        tool_node = self.check_tool_result_node()

        self.config_file = os.path.join(self.tar_src_dir, self.get_config_value(tool_node, "config_file", "./fio-mixed.job", valueType=str))
        lptlog.info("Using config file: %s" % self.config_file)

        self.result_tmp_file = os.path.join(self.tmp_dir, "fio_output")

        self.filesize = self.get_config_value(tool_node, "filesize", "100M", valueType=str)
        lptlog.info("Size of the test read/write file: %s" % self.filesize)

        f = open(self.config_file, 'r')
        lines = f.read()
        f.close()
        f = open(self.config_file, 'w')
        lines = re.sub('size=(\d+)M', 'size=%s' % self.filesize, lines)
        f.write(lines)
        f.close()

        self.mainParameters["parameters"] = "./fio --output %s %s" % (self.result_tmp_file, self.config_file)

        lptlog.info("---------- Starting the test")
        os.chdir(self.tar_src_dir)
        utils.system("./fio --output %s %s" % (self.result_tmp_file, self.config_file))

    def create_result(self):
        lptlog.info("---------- Creating the results")
        self.result_list = self.__match_index(self.result_tmp_file)

    def __match_index(self, file):
        if not os.path.isfile(file):
            return []
        lptlog.debug("Searching %s for test metrics" % file)
        results_lines = utils.read_all_lines(file)
        labels = ('io', 'aggrb', 'minb', 'maxb', 'mint', 'maxt')
        parallel_template = {'parallels': '1,2,3,4', 'parallel': '1', 'iter': '1', 'times': '2'}
        result_list = []
        count = 0
        for line in results_lines:
            if 'READ:' in line:
                tmp_list = []
                parallel_dict = copy.deepcopy(parallel_template)
                parallel_dict['parallel'] = str(count / 2 + 1)
                parallel_dict['iter'] = 'READ'
                tmp_list.append(parallel_dict)
                tmp_list.append(self.dict_generator(labels, line))
                result_list.append(tmp_list)
                count = count + 1
            elif 'WRITE:' in line:
                tmp_list = []
                parallel_dict = copy.deepcopy(parallel_template)
                parallel_dict['parallel'] = str(count / 2 + 1)
                parallel_dict['iter'] = 'WRITE'
                tmp_list.append(parallel_dict)
                tmp_list.append(self.dict_generator(labels, line))
                result_list.append(tmp_list)
                count = count + 1
            if count in [2, 4, 6, 8]:
                tmp_list = []
                dict2 = result_list[-1][1]
                dict1 = result_list[-2][1]
                parallel_dict = copy.deepcopy(parallel_template)
                parallel_dict['parallel'] = str(count / 2)
                parallel_dict['iter'] = 'Average'
                tmp_list.append(parallel_dict)
                tmp_list.append(self.dict_average(dict1, dict2))
                result_list.append(tmp_list)
        return result_list

    def dict_generator(self, labels, line):
        result_dict = {}
        line = line.replace(',', '')
        line = line.split()
        for l, v in zip(labels, (line[1].split('=')[1][:-2],
                                 line[2].split('=')[1][:-4],
                                 line[3].split('=')[1][:-4],
                                 line[4].split('=')[1][:-4],
                                 line[5].split('=')[1][:-4],
                                 line[6].split('=')[1][:-4])):
            result_dict[l] = "%s" % v
        return result_dict

    def dict_average(self, dict1, dict2):
        result_dict = {}
        for k, v in dict1.items():
            try:
                result_dict[k] = str(float(dict1[k]) * 0.33 + float(dict2[k]) * 0.67)
            except Exception as e:
                raise e
        return result_dict
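# --- Worked example (numbers are made up): dict_average weights the first
# sample 0.33 and the second 0.67, so aggrb readings of '100' and '200'
# average to str(0.33 * 100.0 + 0.67 * 200.0) == '167.0'.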
0: self.upper_preview_node = (point, i)
                    m = Marker(vector3D([point]), dynamic=False)
                    m.set_color('blue')
                    self.temp_point += [m]
                    break

    def add_line(self, event_callback):
        event = event_callback.getEvent()
        if (event.getKey() == ord('l') and event.getState() == 1):
            objs = self.shape.selected_objects
            if len(objs) == 2:
                if (isinstance(objs[0], NodeMarker) and isinstance(objs[1], NodeMarker)):
                    line = ConnectionLine(objs[0], objs[1])
                    line.layer = self.layer_combobox.currentText()
                    self.shape += [line]
            elif len(objs) == 1:
                if (isinstance(objs[0], NodeMarker)):
                    marker2 = self.node_cb(event_callback, force=True)
                    if marker2:
                        line = ConnectionLine(objs[0], marker2)
                        self.shape += [line]
                        self.shape.Select(marker2)
                        self.shape.selection_changed()
                        line.layer = self.layer_combobox.currentText()

    def add_node(self, event_callback, force=False):
        event = event_callback.getEvent()
        if ((event.getKey() == ord('i') or force) and (event.getState() == 1)):
            objs = self.shape.selected_objects
            if len(objs) == 1 and (isinstance(objs[0], Lower_Att_Marker)):
                node = objs[0].node
                point = Lower_Att_Marker(node, self.parametric_glider)
                point.layer = self.layer_combobox.currentText()
                self.shape += [point]
                self.shape.Select(point)
                self.shape.grab_cb(event_callback, force=True)
            elif self.upper_preview_node:
                self.add_attachment_point(self.upper_preview_node[0])
            else:
                pos = event.getPosition()
                pos_3D = list(self.view.getPoint(*pos))
                pos_3D[-1] = 0.
                if event.wasCtrlDown():
                    node = LowerNode2D(pos_3D[:-1], [0, 0, 0])
                    point = Lower_Att_Marker(node, self.parametric_glider)
                    point.layer = self.layer_combobox.currentText()
                else:
                    node = BatchNode2D(pos_3D[:-1])
                    point = NodeMarker(node, self.parametric_glider)
                    point.layer = self.layer_combobox.currentText()
                self.shape += [point]
                return point

    def copy_node(self, event_callback, force=False):
        event = event_callback.getEvent()
        if ((event.getKey() == ord('c')) and (event.getState() == 1)):
            # get selection
            objs = self.shape.selected_objects
            if len(objs) == 1 and (isinstance(objs[0], Upper_Att_Marker)):
                node = objs[0].node
                ap = Upper_Att_Marker(node, self.parametric_glider)
                ap.layer = self.layer_combobox.currentText()
                self.shape += [ap]

    def add_attachment_point(self, pos):
        x, y = pos
        rib_nr = self.xpos.index(x)
        pos = float(self.Qhl_pos.value())
        node = UpperNode2D(rib_nr, pos / 100)
        node_pos = node.get_2D(self.parametric_glider.shape)
        ap = Upper_Att_Marker(node, self.parametric_glider)
        ap.layer = self.layer_combobox.currentText()
        self.shape += [ap]

    def selection_changed(self):
        # Depending on which widget is currently selected, show the matching
        # tool widget; when several elements (or none) are selected, show nothing.
        def show_line_widget(objs):
            for obj in objs:
                if not (isinstance(obj, ConnectionLine)):
                    return False
            return True

        def has_uppermost_line(objs):
            for obj in objs:
                if obj.is_uppermost_line():
                    return True
            return False

        def show_upper_att_widget(objs):
            for obj in objs:
                if not isinstance(obj, Upper_Att_Marker):
                    return False
            return True

        def show_lower_att_widget(objs):
            for obj in objs:
                if not isinstance(obj, Lower_Att_Marker):
return False return True selected_objs = self.shape.selected_objects if selected_objs: self.layer_selection.setEnabled(True) self.target_length.setEnabled(True) self.layer_selection.setItemByText(selected_objs[0].layer) # self.layer_combobox.blockSignals(True) # self.layer_combobox.setItemByText(selected_objs[0].layer) # self.layer_combobox.blockSignals(False) if show_line_widget(selected_objs): self.tool_widget.setCurrentWidget(self.line_widget) if has_uppermost_line(selected_objs): self.target_length.setEnabled(False) else: self.target_length.setValue(selected_objs[0].target_length) line_type_item = self.Qline_list.findItems( selected_objs[0].line_type, QtCore.Qt.MatchExactly)[0] self.Qline_list.setCurrentItem(line_type_item) if len(selected_objs) != 1: self.QLineName.setDisabled(True) else: self.QLineName.blockSignals(True) self.QLineName.setText(selected_objs[0].name) self.QLineName.blockSignals(False) self.QLineName.setEnabled(True) elif show_lower_att_widget(selected_objs): self.tool_widget.setCurrentWidget(self.lw_att_wid) x, y, z = selected_objs[0].pos_3D self.attach_x_val.setValue(x) self.attach_y_val.setValue(y) self.attach_z_val.setValue(z) elif show_upper_att_widget(selected_objs): self.tool_widget.setCurrentWidget(self.up_att_wid) self.up_att_force.setValue(selected_objs[0].force) rib_nr = set([i.rib_nr for i in selected_objs]) if len(rib_nr) > 1: self.up_att_rib.setDisabled(True) else: self.up_att_rib.setValue(list(rib_nr)[0]) self.up_att_rib.setEnabled(True) pos = selected_objs[0].rib_pos self.up_att_pos.setValue(pos) self.up_att_pos.setEnabled(True) else: self.tool_widget.setCurrentWidget(self.none_widget) else: self.tool_widget.setCurrentWidget(self.none_widget) self.layer_selection.setEnabled(False) def update_target_length(self, *args): l = float(self.target_length.value()) for obj in self.shape.selected_objects: obj.target_length = l def update_line_type(self, *args): for obj in self.shape.selected_objects: obj.line_type = self.Qline_list.currentItem().line_type.name def update_lw_att_pos(self, *args): x = self.attach_x_val.value() y = self.attach_y_val.value() z = self.attach_z_val.value() for obj in self.shape.selected_objects: obj.pos_3D = [x, y, z] def update_up_att_force(self, *args): for obj in self.shape.selected_objects: obj.force = self.up_att_force.value() def update_up_att_rib(self, *args): for obj in self.shape.selected_objects: obj.rib_nr = self.up_att_rib.value() def update_up_att_pos(self, *args): # print('update pos') for obj in self.shape.selected_objects: obj.rib_pos = self.up_att_pos.value() def draw_shape(self): self.shape.removeAllChildren() self.shape += [Line(vector3D(self.front)), Line(vector3D(self.back))] self.shape += list(map(Line, vector3D(self.ribs))) shape = self.pa
from proboscis import test


@test(groups=['benchmark.discovery'])
class BenchmarkDiscoveryTests(object):

    def __init__(self):
        pass
'''
Created on May 26, 2012

@author: Charlie
'''

class MyPackageMod01(object):
    def __init__(self):
        pass
"""Controllers for
the mo
zzarella application."""
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-12 17:08
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('themes', '0002_auto_20170110_1809'),
    ]

    operations = [
        migrations.AlterField(
            model_name='themes',
            name='css_style',
            field=models.CharField(choices=[('green', 'Green'), ('red', 'Red'), ('black', 'Black')], default='green', max_length=50, verbose_name='Css Style'),
        ),
    ]
from django.shortcuts import redirect from django.template.response import TemplateResponse from ..forms import AnonymousUserShippingForm, ShippingAddressesForm from ...userprofile.forms import get_address_form from ...userprofile.models import Address from ...teamstore.utils import get_team def anonymous_user_shipping_address_view(request, checkout): team = get_team(request.session['team']) if team.group_shipping: address_form, preview = get_address_form( request.POST or None, country_code=request.country.code, autocomplete_type='shipping', initial={'country': request.country.code}, instance=team.shipping_address) else: address_form, preview = get_address_form( request.POST or None, country_code=request.country.code, autocomplete_type='shipping', initial={'country': request.country.code}, instance=checkout.shipping_address) user_form = AnonymousUserShippingForm( not preview and request.POST or None, initial={'email': checkout.email} if not preview else request.POST.dict()) if team.group_shipping and user_form.is_valid(): checkout.shipping_address = team.shipping_address checkout.email = user_form.cleaned_data['email'] return redirect('checkout:shipping-method') elif all([user_form.is_valid(), address_form.is_valid()]): checkout.shipping_address = address_form.instance checkout.email = user_form.cleaned_data['email'] return redirect('checkout:shipping-method') return TemplateResponse( request, 'checkout/shipping_address.html', context={ 'address_form': address_form, 'user_form': user_form, 'group_shipping': team.group_shipping, 'checkout': checkout}) def user_shipping_address_view(request, checkout): data = request.POST or None additional_addresses = request.user.addresses.all() checkout.email = request.user.email shipping_address = checkout.shipping_address if shipping_address is not None and shipping_address.id: address_form, preview = get_address_form( data, country_code=request.
country.code, initial={'country': request.country}) addresses_form = ShippingAddressesForm( data, additional_addresses=additional_addresses, initial={'address': shipping_address.id}) elif shipping_address: address
_form, preview = get_address_form( data, country_code=shipping_address.country.code, instance=shipping_address) addresses_form = ShippingAddressesForm( data, additional_addresses=additional_addresses) else: address_form, preview = get_address_form( data, initial={'country': request.country}, country_code=request.country.code) addresses_form = ShippingAddressesForm( data, additional_addresses=additional_addresses) if addresses_form.is_valid() and not preview: if addresses_form.cleaned_data['address'] != ShippingAddressesForm.NEW_ADDRESS: address_id = addresses_form.cleaned_data['address'] checkout.shipping_address = Address.objects.get(id=address_id) return redirect('checkout:shipping-method') elif address_form.is_valid(): checkout.shipping_address = address_form.instance return redirect('checkout:shipping-method') return TemplateResponse( request, 'checkout/shipping_address.html', context={ 'address_form': address_form, 'user_form': addresses_form, 'checkout': checkout, 'additional_addresses': additional_addresses})
ice, SwitchEntity): """Represent a Rachio state that can be toggled.""" def __init__(self, controller): """Initialize a new Rachio switch.""" super().__init__(controller) self._state = None @property def name(self) -> str: """Get a name for this switch.""" return f"Switch on {self._controller.name}" @property def is_on(self) -> bool: """Return whether the switch is currently on.""" return self._state @callback def _async_handle_any_update(self, *args, **kwargs) -> None: """Determine whether an update event applies to this device.""" if args[0][KEY_DEVICE_ID] != self._controller.controller_id: # For another device return # For this device self._async_handle_update(args, kwargs) @abstractmethod def _async_handle_update(self, *args, **kwargs) -> None: """Handle incoming webhook data.""" class RachioStandbySwitch(RachioSwitch): """Representation of a standby status/button.""" @property def name(self) -> str: """Return the name of the standby switch.""" return f"{self._controller.name} in standby mode" @property def unique_id(self) -> str: """Return a unique id by combining controller id and purpose.""" return f"{self._controller.controller_id}-standby" @property def icon(self) -> str: """Return an icon for the standby switch.""" return "mdi:power" @callback def _async_handle_update(self, *args, **kwargs) -> None: """Update the state using webhook data.""" if args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_ON: self._state = True elif args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_OFF: self._state = False self.async_write_ha_state() def turn_on(self, **kwargs) -> None: """Put the controller in standby mode.""" self._controller.rachio.device.turn_off(self._controller.controller_id) def turn_off(self, **kwargs) -> None: """Resume controller functionality.""" self._controller.rachio.device.turn_on(self._controller.controller_id) async def async_added_to_hass(self): """Subscribe to updates.""" if KEY_ON in self._controller.init_data: self._state = not self._controller.init_data[KEY_ON] self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_RACHIO_CONTROLLER_UPDATE, self._async_handle_any_update, ) ) class RachioRainDelay(RachioSwitch): """Representation of a rain delay status/switch.""" def __init__(self, controller): """Set up a Rachio rain delay switch.""" self._cancel_update = None super().__init__(controller) @property def name(self) -> str: """Return the name of the switch.""" return f"{self._controller.name} rain delay" @property def unique_id(self) -> str: """Return a unique id by combining controller id and purpose.""" return f"{self._controller.controller_id}-delay" @property def icon(self) -> str: """Return an icon for rain delay.""" return "mdi:camera-timer" @callback def _async_handle_update(self, *args, **kwargs) -> None: """Update the state using webhook data.""" if self._cancel_update: self._cancel_update() self._cancel_update = None if args[0][0][KEY_SUBTYPE] == SUBTYPE_RAIN_DELAY_ON: endtime = parse_datetime(args[0][0][KEY_RAIN_DELAY_END]) _LOGGER.debug("Rain delay expires at %s", endtime) self._state = True assert endtime is not None self._cancel_update = async_track_point_in_utc_time( self.hass, self._delay_expiration, endtime ) elif args[0][0][KEY_SUBTYPE] == SUBTYPE_RAIN_DELAY_OFF: self._state = False self.async_write_ha_state() @callback def _delay_expiration(self, *args) -> None: """Trigger when a rain delay expires.""" self._state = False self._cancel_update = None self.async_write_ha_state() def turn_on(self, **kwargs) -> None: """Activate a 24 hour rain delay on the 
controller.""" self._controller.rachio.device.rain_delay(self._controller.controller_id, 86400) _LOGGER.debug("Starting rain delay for 24 hours") def turn_off(self, **kwargs) -> None: """Resume controller functionality.""" self._controller.rachio.device.rain_delay(self._controller.controller_id, 0) _LOGGER.debug("Canceling rain delay") async def async_added_to_hass(self): """Subscribe to updates.""" if KEY_RAIN_DELAY in self._controller.init_data: self._state = self._controller.init_data[ KEY_RAIN_DELAY ] / 1000 > as_timestamp(now()) # If the controller was in a rain delay state during a reboot, this re-sets the timer if self._state is True: delay_end = utc_from_timestamp( self._controller.init_data[KEY_RAIN_DELAY] / 1000 ) _LOGGER.debug("Re-setting rain delay timer for %s", delay_end) self._cancel_update = async_track_point_in_utc_time( self.hass, self._delay_expiration, delay_end ) self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_RACHIO_RAIN_DELAY_UPDATE, self._async_handle_any_update, ) ) class RachioZone(RachioSwitch): """Representation of one zone of sprinklers connected to the Rachio Iro.""" def __init__(self, person, controller, data, current_schedule): """Initialize a new Rachio Zone.""" self.id = data[KEY_ID] self._zone_name = data[KEY_NAME] self._zone_number = data[KEY_ZONE_NUMB
ER] self._zone_enabled =
data[KEY_ENABLED] self._entity_picture = data.get(KEY_IMAGE_URL) self._person = person self._shade_type = data.get(KEY_CUSTOM_SHADE, {}).get(KEY_NAME) self._zone_type = data.get(KEY_CUSTOM_CROP, {}).get(KEY_NAME) self._slope_type = data.get(KEY_CUSTOM_SLOPE, {}).get(KEY_NAME) self._summary = "" self._current_schedule = current_schedule super().__init__(controller) def __str__(self): """Display the zone as a string.""" return f'Rachio Zone "{self.name}" on {str(self._controller)}' @property def zone_id(self) -> str: """How the Rachio API refers to the zone.""" return self.id @property def name(self) -> str: """Return the friendly name of the zone.""" return self._zone_name @property def unique_id(self) -> str: """Return a unique id by combining controller id and zone number.""" return f"{self._controller.controller_id}-zone-{self.zone_id}" @property def icon(self) -> str: """Return the icon to display.""" return "mdi:water" @property def zone_is_enabled(self) -> bool: """Return whether the zone is allowed to run.""" return self._zone_enabled @property def entity_picture(self): """Return the entity picture to use in the frontend, if any.""" return self._entity_picture @property def extra_state_attributes(self) -> dict: """Return the optional state attributes.""" props = {ATTR_ZONE_NUMBER: self._zone_number, ATTR_ZONE_SUMMARY: self._summary} if self._shade_type: props[ATTR_ZONE_SHADE] = self._shade_type if self._zone_type: props[ATTR_ZONE_TYPE] = self._zone_type if self._slope_type: if self._slope_type == SLOPE_FLAT: props[ATTR_ZONE_SLOPE] = "Flat" elif self._slope_type == SLOPE_SLIGHT: props[ATTR_ZONE_SLOPE] = "Slight" elif self._slope_type == SLOPE_MODERATE: props[ATTR_ZONE_SLOPE] =
#!/usr/bin/python

import time
import RPi.GPIO as GPIO

# remember to change the GPIO values below to match your sensors
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor


def reading(sensor):
    # Disable any warning message such as GPIO pins in use
    GPIO.setwarnings(False)

    # use the values of the GPIO pins, and not the actual pin number
    # so if you connect to GPIO 25 which is on pin number 22, the
    # reference in this code is 25, which is the number of the GPIO
    # port and not the number of the physical pin
    GPIO.setmode(GPIO.BCM)

    if sensor == 0:
        # point the software to the GPIO pins the sensor is using
        # change these values to the pins you are using
        # GPIO output = the pin that's connected to "Trig" on the sensor
        # GPIO input = the pin that's connected to "Echo" on the sensor
        GPIO.setup(22, GPIO.OUT)
        GPIO.setup(27, GPIO.IN)
        GPIO.output(22, GPIO.LOW)

        # found that the sensor can crash if there isn't a delay here
        # no idea why. If you have odd crashing issues, increase delay
        time.sleep(0.3)

        # sensor manual says a pulse length of 10Us will trigger the
        # sensor to transmit 8 cycles of ultrasonic burst at 40kHz and
        # wait for the reflected ultrasonic burst to be received

        # to get a pulse length of 10Us we need to start the pulse, then
        # wait for 10 microseconds, then stop the pulse. This will
        # result in the pulse length being 10Us.

        # start the pulse on the GPIO pin
        # change this value to the pin you are using
        # GPIO output = the pin that's connected to "Trig" on the sensor
        GPIO.output(22, True)

        # wait 10 micro seconds (this is 0.00001 seconds) so the pulse
        # length is 10Us as the sensor expects
        time.sleep(0.00001)

        # stop the pulse after the time above has passed
        # change this value to the pin you are using
        # GPIO output = the pin that's connected to "Trig" on the sensor
        GPIO.output(22, False)

        # listen to the input pin. 0 means nothing is happening. Once a
        # signal is received the value will be 1 so the while loop
        # stops and has the last recorded time the signal was 0
        # change this value to the pin you are using
        # GPIO input = the pin that's connected to "Echo" on the sensor
        while GPIO.input(27) == 0:
            signaloff = time.time()

        # listen to the input pin. Once a signal is received, record the
        # time the signal came through
        # change this value to the pin you are using
        # GPIO input = the pin that's connected to "Echo" on the sensor
        while GPIO.input(27) == 1:
            signalon = time.time()

        # work out the difference in the two recorded times above to
        # calculate the distance of an object in front of the sensor
        timepassed = signalon - signaloff

        # we now have our distance but it's not in a useful unit of
        # measurement. So now we convert this distance into centimetres
        distance = timepassed * 17000

        # return the distance of an object in front of the sensor in cm
        return distance

        # we're no longer using the GPIO, so tell software we're done
        GPIO.cleanup()
    else:
        print "Incorrect usonic() function variable."
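# --- Hedged usage sketch: poll sensor 0 (the only wiring handled above)
# once per second; Ctrl-C stops the loop.
#
#     if __name__ == '__main__':
#         while True:
#             print "distance: %.1f cm" % reading(0)
#             time.sleep(1)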
import configparser

CONFIG_PATH = 'accounting.conf'


class MyConfigParser():

    def __init__(self, config_path=CONFIG_PATH):
        self.config = configparser.ConfigParser(allow_no_value=True)
        self.config.read(config_path)

    def config_section_map(self, section):
        """ returns all configuration options in 'section' in a dict with
        key: config_option and value: the read value in the file"""
        dict1 = {}
        options = self.config.options(section)
        for option in options:
            try:
                dict1[option] = self.config.get(section, option)
            except configparser.Error:
                dict1[option] = None
        return dict1

    # getint(section, option)
    # getboolean(section, option)
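# --- Hedged usage sketch (section and option names are hypothetical):
#
#     parser = MyConfigParser()                 # reads accounting.conf
#     db_opts = parser.config_section_map('database')
#     print(db_opts.get('host'))                # None when the option is absent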
import os

import pandas as pd
from bs4 import BeautifulSoup

count = pd.DataFrame(columns=['filename', 'count'])

for folder, subs, files in os.walk('data/xml'):
    for filename in files:
        try:
            if ('.xml' in filename) and (filename[0] != '.'):
                f = open(os.path.join(folder, filename))
                soup = BeautifulSoup(f.read())
                f.close()
                tokens = soup.findAll('token')
                tokens_arr = [token.text for token in tokens]
                text = ' '.join(tokens_arr)
                f = open('data/text/' + filename, 'w')
                f.write(text)
                f.close()
        except Exception as e:
            print e
            continue
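# --- Hedged sketch: the `count` frame above is created but never filled in
# the original; one plausible completion records a row per converted file
# inside the try block:
#
#     count.loc[len(count)] = [filename, len(tokens_arr)]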
Cache jedi environments to avoid startup cost.""" try: return _cached_jedi_environments[venv] except KeyError: logger.info('Creating jedi environment: %s', venv) if venv is None: jedienv = jedi.api.environment.get_default_environment() else: jedienv = jedi.create_environment(venv, safe=safe) _cached_jedi_environments[venv] = jedienv return jedienv def get_venv_sys_path(venv): if jedi_create_environment is not None: return jedi_create_environment(venv).get_sys_path() from jedi.evaluate.sys_path import get_venv_path return get_venv_path(venv) class JediEPCHandler(object): def __init__(self, sys_path=(), virtual_envs=(), sys_path_append=()): self.script_kwargs = self._get_script_path_kwargs( sys_path=sys_path, virtual_envs=virtual_envs, sys_path_append=sys_path_append, ) def get_sys_path(self): environment = self.script_kwargs.get('environment') if environment is not None: return environment.get_sys_path() sys_path = self.script_kwargs.get('sys_path') if sys_path is not None: return sys_path return sys.path @classmethod def _
get_script_path_kwargs(cls, sys_path, virtual_envs, sys_path_append): result = {} if jedi_create_environment: # Need to specify some environment explicitly to workaround # https://github.com/davidhalter/jedi/issues/1242. Otherwise jedi # will create a lot of child processes. if virtual_envs:
primary_env, virtual_envs = virtual_envs[0], virtual_envs[1:] primary_env = path_expand_vars_and_user(primary_env) else: primary_env = None try: result['environment'] = jedi_create_environment(primary_env) except Exception: logger.warning( 'Cannot create environment for %r', primary_env, exc_info=1 ) if primary_env is not None: result['environment'] = jedi_create_environment(None) if not sys_path and not virtual_envs and not sys_path_append: # No additional path customizations. return result # Either multiple environments or custom sys_path extensions are # specified, or jedi version doesn't support environments. final_sys_path = [] final_sys_path.extend(path_expand_vars_and_user(p) for p in sys_path) for p in virtual_envs: final_sys_path.extend(get_venv_sys_path(path_expand_vars_and_user(p))) final_sys_path.extend( path_expand_vars_and_user(p) for p in sys_path_append ) dupes = set() def not_seen_yet(val): if val in dupes: return False dupes.add(val) return True result['sys_path'] = [p for p in final_sys_path if not_seen_yet(p)] return result def jedi_script(self, source, line, column, source_path): if NEED_ENCODE: source = source.encode('utf-8') source_path = source_path and source_path.encode('utf-8') return jedi.Script( source, line, column, source_path or '', **self.script_kwargs ) def complete(self, *args): def _wrap_completion_result(comp): try: docstr = comp.docstring() except Exception: logger.warning( "Cannot get docstring for completion %s", comp, exc_info=1 ) docstr = "" return dict( word=comp.name, doc=docstr, description=candidates_description(comp), symbol=candidate_symbol(comp), ) return [ _wrap_completion_result(comp) for comp in self.jedi_script(*args).completions() ] def get_in_function_call(self, *args): sig = self.jedi_script(*args).call_signatures() call_def = sig[0] if sig else None if not call_def: return [] return dict( # p.description should do the job. But jedi-vim use replace. # So follow what jedi-vim does... params=[PARAM_PREFIX_RE.sub('', p.description).replace('\n', '') for p in call_def.params], index=call_def.index, call_name=call_def.name, ) def _goto(self, method, *args): """ Helper function for `goto_assignments` and `usages`. :arg method: `jedi.Script.goto_assignments` or `jedi.Script.usages` :arg args: Arguments to `jedi_script` """ # `definitions` is a list. Each element is an instances of # `jedi.api_classes.BaseOutput` subclass, i.e., # `jedi.api_classes.RelatedName` or `jedi.api_classes.Definition`. definitions = method(self.jedi_script(*args)) return [dict( column=d.column, line_nr=d.line, module_path=d.module_path if d.module_path != '__builtin__' else [], module_name=d.module_name, description=d.description, ) for d in definitions] def goto(self, *args): return self._goto(jedi.Script.goto_assignments, *args) def related_names(self, *args): return self._goto(jedi.Script.usages, *args) def get_definition(self, *args): definitions = self.jedi_script(*args).goto_definitions() return [definition_to_dict(d) for d in definitions] def defined_names(self, *args): # XXX: there's a bug in Jedi that returns returns definitions from inside # classes or functions even though all_scopes=False is set by # default. Hence some additional filtering is in order. 
# # See https://github.com/davidhalter/jedi/issues/1202 top_level_names = [ defn for defn in jedi.api.names(*args) if defn.parent().type == 'module' ] return list(map(get_names_recursively, top_level_names)) def get_jedi_version(self): return [dict( name=module.__name__, file=getattr(module, '__file__', []), version=get_module_version(module) or [], ) for module in [sys, jedi, epc, sexpdata]] def candidate_symbol(comp): """ Return a character representing completion type. :type comp: jedi.api.Completion :arg comp: A completion object returned by `jedi.Script.completions`. """ try: return comp.type[0].lower() except (AttributeError, TypeError): return '?' def candidates_description(comp): """ Return `comp.description` in an appropriate format. * Avoid return a string 'None'. * Strip off all newlines. This is required for using `comp.description` as candidate summary. """ desc = comp.description return _WHITESPACES_RE.sub(' ', desc) if desc and desc != 'None' else '' _WHITESPACES_RE = re.compile(r'\s+') PARAM_PREFIX_RE = re.compile(r'^param\s+') """RE to strip unwanted "param " prefix returned by param.description.""" def definition_to_dict(d): return dict( doc=d.docstring(), description=d.description, desc_with_module=d.desc_with_module, line_nr=d.line, column=d.column, module_path=d.module_path, name=getattr(d, 'name', []), full_name=getattr(d, 'full_name', []), type=getattr(d, 'type', []), ) def get_names_recursively(definition, parent=None): """ Fetch interesting defined names in sub-scopes under `definition`. :type names: jedi.api_classes.Definition """ d = definition_to_dict(definition) try: d['local_name'] = parent['local_name'] + '.' + d['name'] except (AttributeError, TypeError): d['local_name'] = d['name'] if definition.type == 'class': ds = definition.defined_names() retu
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license. # See LICENSE in the project root for license information. #!/usr/bin/env python # -*- coding:utf-8 -*- import urllib.parse import requests from testutils import prefix, api_v0 HOUR = 60 * 60 DAY = HOUR * 24 @prefix('test_v0_schedules') def test_api_v0_schedules(team, roster, role): tuesday9am = 2 * DAY + 9 * HOUR tuesday9pm = tuesday9am + 12 * HOUR wednesday9am = tuesday9pm + 12 * HOUR wednesday9pm = wednesday9am + 12 * HOUR team_name = team.create() team_name_2 = team.create() roster_name = roster.create(team_name) roster_name_2 = roster.create(team_name_2) role_name = role.create() role_name_2 = role.create() # test create schedule events = [{'start': tuesday9am, 'duration': 12 * HOUR}, {'start': tuesday9pm, 'duration': 12 * HOUR}, {'start': wednesday9am, 'duration': 12 * HOUR}, {'start': wednesday9pm, 'duration': 12 * HOUR}] re = requests.post(api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name)), json={ 'role': role_name, 'events': events, 'advanced_mode': 1 }) assert re.status_code == 201 schedule_id = str(re.json()['id']) # verify schedule created properly re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name))) assert re.status_code == 200 data = re.json() assert len(data) == 1 schedule = data[0] assert schedule['role'] == role_name # check consecutive events have been merged assert len(schedule['events']) == 1 assert schedule['events'][0]['start'] == tuesday9am assert schedule['events'][0]['duration'] == 48 * HOUR assert schedule['advanced_mode'] == 1 # test 'schedule' endpoint re = requests.get(api_v0('schedules/%s' % (schedule_id))) assert re.status_code == 200 assert re.json() == data[0] updated_events = [{'start': 0, 'duration': 100}, {'start': 150, 'duration': 200}] # verify schedule updates properly re = requests.put(api_v0('schedules/' + schedule_id), json={'role': role_name_2, 'team': team_name_2, 'roster': roster_name_2, 'auto_populate_threshold': 28, 'events': updated_events, 'advanced_mode': 1}) assert re.status_code == 200 re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name_2, roster_name_2))) assert re.status_code == 200 data = re.json() assert len(data) == 1 schedule = data[0] assert schedule['roster'] == roster_name_2 assert schedule['role'] == role_name_2 assert schedule['auto_populate_threshold'] == 28 assert schedule['events'] == updated_events assert schedule['advanced_mode'] == 1 re = requests.put(api_v0('schedu
les/' + schedule_id), json={'team': team_name, 'roster': roster_name}) assert re.status_code == 200 # test delete schedule re = requests.delete(api_v0('schedules/' + schedule_id)) assert re.status_code == 200 # verify schedule was deleted re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name_2, roster_name_2))) assert re.status_code == 200 data = re.json() assert data == [] @prefix('te
st_v0_advanced_schedule') def test_api_v0_advanced_schedule(team, roster, role, schedule): team_name = team.create() roster_name = roster.create(team_name) role_name = role.create() schedule_id = schedule.create(team_name, roster_name, {'role': role_name, 'events': [{'start': 0, 'duration': 100}, {'start': 200, 'duration': 300}], 'advanced_mode': 1}) # check invalid schedule updates re = requests.put(api_v0('schedules/%d' % schedule_id), json={'events': [{'start': 0, 'duration': 100}, {'start': 150, 'duration': 300}], 'advanced_mode': 0}) assert re.status_code == 400 re = requests.put(api_v0('schedules/%d' % schedule_id), json={'advanced_mode': 0}) assert re.status_code == 400 @prefix('test_v0_invalid_schedule_event') def test_api_v0_invalid_schedule_event(team, roster, role, schedule): team_name = team.create() roster_name = roster.create(team_name) role_name = role.create() api_url = api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name)) re = requests.post(api_url, json={ 'role': role_name, 'events': [{'duration': 100}, {'start': 150, 'duration': 300}], 'advanced_mode': 1 }) assert re.status_code == 400 re = requests.post(api_url, json={ 'role': role_name, 'events': [{'start': 150}], 'advanced_mode': 1 }) assert re.status_code == 400 re = requests.post(api_url, json={ 'role': role_name, 'events': [{'start': 150, 'duration': 300}], 'advanced_mode': 0 }) assert re.status_code == 400 re = requests.post(api_url, json={ 'role': role_name, 'events': 7 * [{'start': 150, 'duration': 300}], 'advanced_mode': 0 }) assert re.status_code == 400 @prefix('test_v0_schedules_spaces') def test_api_v0_schedules_with_spaces_in_roster_name(team): team_name = 'test_v0 spaces team foo' roster_name = 'test_v0 spaces roster foo' re = requests.post(api_v0('teams'), json={'name': team_name, 'scheduling_timezone': 'UTC'}) assert re.status_code == 201 team.mark_for_cleaning(team_name) re = requests.post(api_v0('teams/%s/rosters' % team_name), json={'name': roster_name}) assert re.status_code == 201 re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name, urllib.parse.quote(roster_name, safe='')))) assert re.status_code == 200
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.

"""
    pypm.common.util
    ~~~~~~~~~~~~~~~~

    Assorted utility code
"""

import os
from os import path as P
import sys
import re
from contextlib import contextmanager
import logging
import time
import textwrap
from datetime import datetime

from pkg_resources import Requirement
from pkg_resources import resource_filename
import six

import pypm
from zclockfile import LockFile

LOG = logging.getLogger(__name__)


# Language/library utilities
#####################################################################

def wrapped(txt, prefix='', **options):
    """Return wrapped text suitable for printing to terminal"""
    MAX_WIDTH = 70  # textwrap.wrap's default
    return '\n'.join([
        '{0}{1}'.format(prefix, line)
        for line in textwrap.wrap(txt, width=MAX_WIDTH - len(prefix), **options)])


def lazyproperty(func):
    """A property decorator for lazy evaluation"""
    cache = {}

    def _get(self):
        """Return the property value from cache once it is calculated"""
        try:
            return cache[self]
        except KeyError:
            cache[self] = value = func(self)
            return value
    return property(_get)


def memoize(fn):
    """Memoize functions that take simple arguments

    The arguments of this function must be 'hashable'.
    Keywords are not supported.
    """
    memo = {}

    def wrapper(*args):
        key = tuple(args)
        if key not in memo:
            memo[key] = fn(*args)
        return memo[key]
    return wrapper


class ConfigParserNamedLists(object):
    """Parse a named mapping from the configuration file.

    Example input (config file):

        [packages]
        free = http://pypm-free.as.com
        be = http://pypm-be.as.com
        staging = http://pypm-staging.as.com
        default = be free
        QA = staging default

    What this class produces (self.mapping):

        {
            'free': [factory('free', 'http://pypm-free.as.com')],
            'be': [factory('be', 'http://pypm-be.as.com')],
            'staging': [factory('staging', 'http://pypm-staging.as.com')],
            'default': [factory('be', 'http://pypm-be.as.com'),
                        factory('free', 'http://pypm-free.as.com')],
            'QA': [factory('staging', 'http://pypm-staging.as.com'),
                   factory('be', 'http://pypm-be.as.com'),
                   factory('free', 'http://pypm-free.as.com')],
        }
    """

    VALUE_SEP = re.compile(r'[\s,]+')

    def __init__(self, option_items, factory, is_sentinel):
        """
        - option_items: ConfigParser.items('yoursection')
        - factory: a function that produces the value object
        - is_sentinel: a function that returns True for sentinels
        """
        self.option_items = option_items
        self.factory = factory
        self.is_sentinel = is_sentinel
        self.mapping = {}
        self._init()

    def _init(self):
        for name, value in self.option_items:
            if name in self.mapping:
                raise ValueError('duplicate option key found: {0}'.format(name))
            else:
                self.mapping[name] = value

        # substitute references
        _processed = set()
        for name in self.mapping:
            self._expand_rvalue(name, _processed)

    def _expand_rvalue(self, name, processed):
        if name in processed:
            return
        value = self.mapping[name]
        if isinstance(value, list):
            processed.add(name)
            return
        if name not in self.mapping:
            raise ValueError('unknown option reference: {0}'.format(name))
        if self.is_sentinel(value):
            self.mapping[name] = [self.factory(name, value)]
        else:
            self.mapping[name] = []
            for part in self.VALUE_SEP.split(value):
                self._expand_rvalue(part, processed)
                self.mapping[name].extend(self.mapping[part])


# System routines
######################################################################

@contextmanager
def locked(lockfile):
    """'with' context to lock a file"""
    lock = LockFile(lockfile)
    try:
        yield
    finally:
        lock.close()


@contextmanager
def dlocked(directory):
    """Lock based on a directory

    You need this function if you do not want more than one process to be
    operating on a directory.
    """
    if not P.exists(directory):
        os.makedirs(directory)
    lockfile = P.join(directory, '.lock')
    with locked(lockfile):
        yield


def get_user_agent(default):
    """Return a user agent string representing PyPM

    Retain the default user-agent for backward-compat.
    """
    return '{0} (PyPM {1.__version__})'.format(default, pypm)


# Path routines #
########################################################################

def existing(path):
    """Return path, but assert its presence first"""
    assert isinstance(path, (six.string_types, six.text_type)), \
        'not of string type: %s <%s>' % (path, type(path))
    assert P.exists(path), 'file/directory not found: %s' % path
    return path


def concise_path(pth):
    """Return a concise, but human-understandable, version of ``pth``

    Compresses the home directory (~) and %APPDATA%.
    """
    aliases = [
        ('%APPDATA%', os.getenv('APPDATA', None)),
        ('~', P.expanduser('~')),
    ]
    for alias, pthval in aliases:
        if pthval and pth.startswith(pthval):
            return P.join(alias, P.relpath(pth, pthval))
    return pth


def abs2rel(absolute_path):
    """Convert an absolute path to a relative path, assuming the topmost
    directory is the base dir.

    >>> abs2rel('/opt/ActivePython/')
    'opt/ActivePython/'
    >>> abs2rel('/opt/ActivePython')
    'opt/ActivePython'
    """
    assert os.path.isabs(absolute_path), \
        '`%s` is not an absolute path' % absolute_path
    if sys.platform.startswith('win'):
        assert absolute_path[1:3] == ':\\'
        return absolute_path[3:]  # remove the DRIVE
    else:
        assert absolute_path[0] == '/'
        return absolute_path[1:]  # remove the '/'


def url_join(url, components):
    """Join URL components .. always with a forward slash"""
    assert type(components) is list
    assert '\\' not in url, \
        'URL is not supposed to contain backslashes. Is this a windows path? ' + url
    return url + '/' + '/'.join(components)


def path_to_url(path):
    """Convert local path to remote url"""
    if sys.platform.startswith('win'):
        assert '/' not in path, \
            'windows path cannot contain forward slash: ' + path
        drive, path = os.path.splitdrive(path)
        return url_join('file:///' + drive, path.split('\\'))
    else:
        return 'file://' + P.abspath(path)


def pypm_file(*paths):
    """Return absolute path to a file residing inside the pypm package using
    pkg_resources API"""
    return resource_filename(Requirement.parse('pypm'), P.join(*paths))


class BareDateTime(six.text_type):
    """Wrapper around the DateTime object

    with our own standard string representation
    """

    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"
    FORMAT = DATE_FORMAT + ' ' + TIME_FORMAT

    @classmethod
    def to_string(cls, dt):
        """Convert the datetime object `dt` to a string with format as
        defined by this class
        """
        return dt.strftime(cls.FORMAT)

    @classmethod
    def to_datetime(cls, dt_string):
        """Convert dt_string, formatted by `to_string()` method above"""
        ts = time.mktime(time.strptime(dt_string, cls.FORMAT))
        return datetime.fromtimestamp(ts)
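

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It exercises only `memoize` and `lazyproperty` from above; every other
# name here is hypothetical.
if __name__ == '__main__':
    @memoize
    def fib(n):
        # Naive recursion; the memo dict makes each n computed only once.
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print(fib(30))  # 832040, without exponential blowup

    class Report(object):
        @lazyproperty
        def data(self):
            print('computing...')  # printed on first access only
            return [1, 2, 3]

    r = Report()
    r.data  # triggers the computation
    r.data  # second access is served from the decorator's cache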
from typing import Any # Copyright (c)
2016 Google Inc. (under http://www.apache.org/licenses/LICENSE-2.0)

# When not running with annotate_pep484, the info in pyi files is augmented
# with heuristics to decide whether un-annotated arguments are "Any" or ""
# (like "self").


class B(object):
  def __init__(self):
    pass

  def f(self, x):
    # type: (e1) -> None
    pass


class C(object):
  def __init__(self, x):
    # type: (e2) -> None
    pass

  @staticmethod
  def f2():
    pass

  @staticmethod
  def f3(x, y):
    # type: (Any, e3) -> None
    pass

  @classmethod
  def f4(cls):
    pass
# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

"""
Context of the running Ansible.

In the future we *may* create Context objects to allow running multiple
Ansible plays in parallel with different contexts, but that is currently out
of scope, as the Ansible library is just for running the ansible command
line tools.

These APIs are still in flux, so do not use them unless you are willing to
update them with every Ansible release.
"""

from ansible.module_utils.common._collections_compat import Mapping, Set
from ansible.module_utils.common.collections import is_sequence
from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs


__all__ = ('CLIARGS',)

# Note: this is not the singleton version. The Singleton is only created
# once the program has actually parsed the args
CLIARGS = CLIArgs({})


# This should be called immediately after cli_args are processed (parsed,
# validated, and any normalization performed on them). No other code should
# call it
def _init_global_context(cli_args):
    """Initialize the global context objects"""
    global CLIARGS
    CLIARGS = GlobalCLIArgs.from_options(cli_args)


def cliargs_deferred_get(key, default=None, shallowcopy=False):
    """Closure over getting a key from CLIARGS with shallow copy functionality

    Primarily used in ``FieldAttribute`` where we need to defer setting the
    default until after the CLI arguments have been parsed

    This function is not directly bound to ``CliArgs`` so that it works with
    ``CLIARGS`` being replaced
    """
    def inner():
        value = CLIARGS.get(key, default=default)
        if not shallowcopy:
            return value
        elif is_sequence(value):
            return value[:]
        elif isinstance(value, (Mapping, Set)):
            return value.copy()
        return value
    return inner
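

# Usage sketch (added for illustration; not part of the original module).
# `parsed_options` below is hypothetical -- in real use it is whatever the
# CLI layer produces before calling _init_global_context().
#
#   getter = cliargs_deferred_get('tags', default=[], shallowcopy=True)
#   getter()                         # [] -- CLIARGS is still the empty stub
#   _init_global_context(parsed_options)
#   getter()                         # shallow copy of parsed_options['tags']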
from django.test.testcases import SimpleTestCase from corehq.apps.app_manager.const import APP_V2 from corehq.apps.app_manager.models import Application, Module, OpenCaseAction, ParentSelect, OpenSubCaseAction, \ AdvancedModule, LoadUpdateAction, AdvancedOpenCaseAction from mock import patch class CaseMetaTest(SimpleTestCase): def setUp(self): self.is_usercase_in_use_patch = patch('corehq.apps.app_manager.models.is_usercase_in_use') self.is_usercase_in_use_mock = self.is_usercase_in_use_patch.start() def tearDown(self): self.is_usercase_in_use_patch.stop() def _make_module(self, app, module_id, case_type): m = app.add_module(Module.new_module('Module{}'.format(module_id), lang='en')) m.case_type = case_type mf = app.new_form(module_id, 'form {}'.format(case_type), lang='en') mf.actions.open_case = OpenCaseAction(name_path="/data/question1", external_id=None) mf.actions.open_case.condition.type = 'always' return m def test_hierarchy(self): app, expected_hierarchy = self.get_test_app() meta = app.get_case_metadata() self.assertDictEqual(meta.type_hierarchy, expected_hierarchy) def get_test_app(self): app = Application.new_app('domain', 'New App', APP_V2) app.version = 1 m0 = self._make_module(app, 0, 'parent') m0.get_form(0).actions.subcases.append(OpenSubCaseAction( case_type='child', reference_id='parent' )) m1 = self._make_module(app, 1, 'child') m1.get_form(0).actions.subcases.append(OpenSubCaseAction( case_type='grand child', refe
rence_id='parent' )) m2 = self._make_module(app, 2, 'grand child') m3 = app.add_module(AdvancedModule.new_mod
ule('Module3', lang='en')) m3.case_type = 'other grand child' m3f0 = m3.new_form('other form', 'en') m3f0.actions.load_update_cases.append(LoadUpdateAction( case_type='child', case_tag='child')) m3f0.actions.open_cases.append(AdvancedOpenCaseAction( name_path='/data/question1', case_type='other grand child', parent_tag='child' )) m3f0.actions.open_cases[0].open_condition.type = 'always' m2.parent_select = ParentSelect(active=True, module_id=m1.unique_id) m1.parent_select = ParentSelect(active=True, module_id=m0.unique_id) expected_hierarchy = { 'parent': { 'child': { 'grand child': {}, 'other grand child': {} } } } return app, expected_hierarchy
import aifc
import sndhdr

import utils


class Aiff:
    def __init__(self, filename):
        assert sndhdr.what(filename).filetype == 'aiff'
        x = aifc.open(filename)
        data = x.readframes(x.getnframes())
        self.nchannels = x.getnchannels()
        self.sampwidth = x.getsampwidth()
        self.framerate = x.getframerate()
        # One column per channel: the raw frame data is interleaved.
        self.sig = utils.from_buffer(data).reshape(-1, x.getnchannels())
        x.close()

    def save(self, filename):
        y = aifc.open(filename, 'wb')
        y.setnchannels(self.nchannels)
        y.setsampwidth(self.sampwidth)
        y.setframerate(self.framerate)
        y.writeframes(self.sig.flatten().tobytes())
        # aifc only patches the frame count into the header on close;
        # without it the written file is left truncated/invalid.
        y.close()

    def save_channel(self, filename, channel):
        y = aifc.open(filename, 'wb')
        y.setnchannels(1)
        y.setsampwidth(self.sampwidth)
        y.setframerate(self.framerate)
        y.writeframes(self.sig[:, channel].flatten().tobytes())
        y.close()
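

# Usage sketch (added for illustration; not part of the original module).
# The file names are hypothetical, and `utils.from_buffer` is assumed to
# return a numpy-style array, as the class above already requires.
if __name__ == '__main__':
    snd = Aiff('stereo_take.aiff')           # load an existing stereo file
    snd.save('copy.aiff')                    # round-trip all channels
    snd.save_channel('left_only.aiff', 0)    # write channel 0 as a mono file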
from pythonforandroid.recipe import PythonRecipe class Asn1cryptoRecipe(PythonRecipe): name = 'asn1crypto' version = '0.23.0' url = 'https://pypi.python.org/packages/31/53/8bca924b30cb79d6d70dbab6a99e8731d1e4dd3b090b7f3d8412a8d8ffbc/asn1crypto-0.23.0.tar.gz#md5=97d54665c397b72b165768398dfdd876' depends = ['python2', 'setuptools'] call_hostpython_via_targetpython = Fa
lse recipe = Asn1cryptoRecipe()
# is set to its default value, we don't write it out. if value: if key in self.fields and self.fields[key].is_set_on(self): try: xml.set(key, six.text_type(value)) except UnicodeDecodeError: exception_message = format_xml_exception_message(self.location, key, value) log.exception(exception_message) # If exception is UnicodeDecodeError set value using unicode 'utf-8' scheme. log.info("Setting xml value using 'utf-8' scheme.") xml.set(key, six.text_type(value, 'utf-8')) except ValueError: exception_message = format_xml_exception_message(self.location, key, value) log.exception(exception_message) raise for source in self.html5_sources: ele = etree.Element('source') ele.set('src', source) xml.append(ele) if self.track: ele = etree.Element('track') ele.set('src', self.track) xml.append(ele) if self.handout: ele = etree.Element('handout') ele.set('src', self.handout) xml.append(ele) transcripts = {} if self.transcripts is not None: transcripts.update(self.transcripts) edx_video_id = clean_video_id(self.edx_video_id) if edxval_api and edx_video_id: try: # Create static dir if not created earlier. resource_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True) # Backward compatible exports # edxval exports new transcripts into the course OLX and returns a transcript # files map so that it can also be rewritten in old transcript metadata fields # (i.e. `self.transcripts`) on import and older open-releases (<= ginkgo), # who do not have deprecated contentstore yet, can also import and use new-style # transcripts into their openedX instances. exported_metadata = edxval_api.export_to_xml( video_id=edx_video_id, resource_fs=resource_fs, static_dir=EXPORT_IMPORT_STATIC_DIR, course_id=six.text_type(self.runtime.course_id.for_branch(None)) ) # Update xml with edxval metadata xml.append(exported_metadata['xml']) # we don't need sub if english transcript # is also in new transcripts. new_transcripts = exported_metadata['transcripts'] transcripts.update(new_transcripts) if new_transcripts.get('en'): xml.set('sub', '') # Update `transcripts` attribute in the xml xml.set('transcripts', json.dumps(transcripts)) except edxval_api.ValVideoNotFoundError: pass # Sorting transcripts for easy testing of resulting xml for transcript_language in sorted(transcripts.keys()): ele = etree.Element('transcript') ele.set('language', transcript_language) ele.set('src', transcripts[transcript_language]) xml.append(ele) # handle license specifically self.add_license_to_xml(xml) return xml def create_youtube_url(self, youtube_id): """ Args: youtube_id: The ID of the video to create a link for Returns: A full youtube url to the video whose ID is passed in """ if youtube_id: return u'https://www.youtube.com/watch?v={0}'.format(youtube_id) else: return u'' def get_context(self): """ Extend context by data for transcript basic tab. """ _context = super(VideoBlock, self).get_context() metadata_fields = copy.deepcopy(self.editable_metadata_fields) display_name = metadata_fields['display_name'] video_url = metadata_fields['html5_sources'] video_id = metadata_fields['edx_video_id'] youtube_id_1_0 = metadata_fields['youtube_id_1_0'] def get_youtube_link(video_id): """ Returns the fully-qualified YouTube URL for the given video identifier """ # First try a lookup in VAL. If we have a YouTube entry there, it overrides the # one passed in. 
if self.edx_video_id and edxval_api: val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube") if val_youtube_id: video_id = val_youtube_id return self.create_youtube_url(video_id) _ = self.runtime.service(self, "i18n").ugettext video_url.update({ 'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or .webm video file hosted elsewhere on the Internet.'), # pylint: disable=line-too-long 'display_name': _('Default Video URL'), 'field_name': 'video_url', 'type': 'VideoList', 'default_value': [get_youtube_link(youtube_id_1_0['default_value'])] }) source_url = self.create_youtube_url(youtube_id_1_0['value']) # First try a lookup in VAL. If any video encoding is found given the video id then # override the source_url with it. if self.edx_video_id and edxval_api: val_profiles = ['youtube', 'desktop_webm', 'desktop_mp4'] if HLSPlaybackEnabledFlag.feature_enabled(self.runtime.course_id.for_branch(None)): val_profiles.append('hls') # Get video encodings for val profiles. val_video_encodings = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles) # VAL's youtube source has greater priority over external youtube source. if val_video_encodings.get('youtube'): source_url = self.create_youtube_url(val_video_encodings['youtube']) # If no youtube source is provided externally or in VAl, update source_url in order: hls > mp4 and webm if not source_url: if val_video_encodings.get('hls'): source_url = val_video_encodings['hls'] elif val_video_encodings.get('desktop_mp4'): source_url = val_video_encodings['desktop_mp4'] elif val_video_encodings.get('desktop_webm'): source_url = val_video_encodings['desktop_webm'] # Only add if html5 sources do not already contain source_url. if source_url and source_url not in video_url['value']: video_url['value'].insert(0, source_url) metadata = { 'display_name': display_name, 'video_url': video_url, 'edx_video_id': video_id } _context.update({'transcripts_basic_tab_metadata': metadata}) return _context @classmethod def _parse_youtube(cls, data): """ Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD" into a dictionary. Necessary for backwards compatibility with XML-based courses. """ ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''} videos = data.split(',') for video in videos: pieces = video.split(':') try: speed = '%.2f' % float(pieces[0]) # normalize speed # Handle the fact that youtube IDs got double-quoted for a period of time. # Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
# it doesn't matter what the actual speed is for the purposes of deserializing. youtube_id = deserialize_field(cls.youtube_id_1_0, pieces[1]) ret[speed] = youtube_id ex
cept (ValueError, IndexError): log.warning('I
"""Provide functionality to keep track of devices.""" import asyncio import voluptuous as vol from homeassistant.loader import bind_hass from homeassistant.components import group from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import GPSType, ConfigType, HomeAssistantType from homeassistant.helpers.event import async_track_utc_time_change from homeassistant.const import ATTR_GPS_ACCURACY, STATE_HOME
from . import legacy, setup from .config_entry import ( # noqa # pylint: disable=unused-import async_setup_entry, async_unload_entry, ) from .legacy import DeviceScanner # noqa # pylint: disable=unused-import from .const import ( ATTR_ATTRIBUTES, ATTR_BATTERY, ATTR_CONSIDER_HOME, ATTR_DEV_ID, ATTR_GPS, ATTR_HOST_NAME, ATTR_LOCATION_NAME, ATTR_MAC, AT
TR_SOURCE_TYPE, CONF_AWAY_HIDE, CONF_CONSIDER_HOME, CONF_NEW_DEVICE_DEFAULTS, CONF_SCAN_INTERVAL, CONF_TRACK_NEW, DEFAULT_AWAY_HIDE, DEFAULT_CONSIDER_HOME, DEFAULT_TRACK_NEW, DOMAIN, PLATFORM_TYPE_LEGACY, SOURCE_TYPE_BLUETOOTH_LE, SOURCE_TYPE_BLUETOOTH, SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER, ) ENTITY_ID_ALL_DEVICES = group.ENTITY_ID_FORMAT.format("all_devices") SERVICE_SEE = "see" SOURCE_TYPES = ( SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER, SOURCE_TYPE_BLUETOOTH, SOURCE_TYPE_BLUETOOTH_LE, ) NEW_DEVICE_DEFAULTS_SCHEMA = vol.Any( None, vol.Schema( { vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean, vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean, } ), ) PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend( { vol.Optional(CONF_SCAN_INTERVAL): cv.time_period, vol.Optional(CONF_TRACK_NEW): cv.boolean, vol.Optional(CONF_CONSIDER_HOME, default=DEFAULT_CONSIDER_HOME): vol.All( cv.time_period, cv.positive_timedelta ), vol.Optional(CONF_NEW_DEVICE_DEFAULTS, default={}): NEW_DEVICE_DEFAULTS_SCHEMA, } ) PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema) SERVICE_SEE_PAYLOAD_SCHEMA = vol.Schema( vol.All( cv.has_at_least_one_key(ATTR_MAC, ATTR_DEV_ID), { ATTR_MAC: cv.string, ATTR_DEV_ID: cv.string, ATTR_HOST_NAME: cv.string, ATTR_LOCATION_NAME: cv.string, ATTR_GPS: cv.gps, ATTR_GPS_ACCURACY: cv.positive_int, ATTR_BATTERY: cv.positive_int, ATTR_ATTRIBUTES: dict, ATTR_SOURCE_TYPE: vol.In(SOURCE_TYPES), ATTR_CONSIDER_HOME: cv.time_period, # Temp workaround for iOS app introduced in 0.65 vol.Optional("battery_status"): str, vol.Optional("hostname"): str, }, ) ) @bind_hass def is_on(hass: HomeAssistantType, entity_id: str = None): """Return the state if any or a specified device is home.""" entity = entity_id or ENTITY_ID_ALL_DEVICES return hass.states.is_state(entity, STATE_HOME) def see( hass: HomeAssistantType, mac: str = None, dev_id: str = None, host_name: str = None, location_name: str = None, gps: GPSType = None, gps_accuracy=None, battery: int = None, attributes: dict = None, ): """Call service to notify you see device.""" data = { key: value for key, value in ( (ATTR_MAC, mac), (ATTR_DEV_ID, dev_id), (ATTR_HOST_NAME, host_name), (ATTR_LOCATION_NAME, location_name), (ATTR_GPS, gps), (ATTR_GPS_ACCURACY, gps_accuracy), (ATTR_BATTERY, battery), ) if value is not None } if attributes: data[ATTR_ATTRIBUTES] = attributes hass.services.call(DOMAIN, SERVICE_SEE, data) async def async_setup(hass: HomeAssistantType, config: ConfigType): """Set up the device tracker.""" tracker = await legacy.get_tracker(hass, config) legacy_platforms = await setup.async_extract_config(hass, config) setup_tasks = [ legacy_platform.async_setup_legacy(hass, tracker) for legacy_platform in legacy_platforms ] if setup_tasks: await asyncio.wait(setup_tasks) tracker.async_setup_group() async def async_platform_discovered(p_type, info): """Load a platform.""" platform = await setup.async_create_platform_type(hass, config, p_type, {}) if platform is None or platform.type != PLATFORM_TYPE_LEGACY: return await platform.async_setup_legacy(hass, tracker, info) discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered) # Clean up stale devices async_track_utc_time_change( hass, tracker.async_update_stale, second=range(0, 60, 5) ) async def async_see_service(call): """Service to see a device.""" # Temp workaround for iOS, introduced in 0.65 data = dict(call.data) data.pop("hostname", None) data.pop("battery_status", None) await tracker.async_see(**data) 
hass.services.async_register( DOMAIN, SERVICE_SEE, async_see_service, SERVICE_SEE_PAYLOAD_SCHEMA ) # restore await tracker.async_setup_tracked_device() return True
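

# Usage sketch (added for illustration; not part of the original module):
# reporting a device from synchronous code via the `see` helper above.
# `hass` is assumed to be a running HomeAssistant instance; the values are
# made up.
#
#   see(hass, dev_id='demo_phone', location_name=STATE_HOME,
#       gps=(52.38, 4.89), gps_accuracy=20, battery=87)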
import logging
import os
from tempfile import mkdtemp

from .repositories import Repository, AuthenticatedRepository

log = logging.getLogger(__name__)


class RepoManager(object):
    """
    Manages creation and deletion of `Repository` objects.
    """
    to_cleanup = {}

    def __init__(self, authenticated=False, cache_directory=None,
                 tools=None, executor=None, shallow_clone=False):
        self.should_cleanup = cache_directory is None
        self.authenticated = authenticated
        self.cache_directory = cache_directory
        self.tools = tools or []
        self.executor = executor
        # Keep a single attribute name for the shallow-clone flag; it is
        # read back in set_up_clone() and toggled by the clone_repo()
        # implementations below.
        self.shallow_clone = shallow_clone

    def get_repo_class(self):
        if self.authenticated:
            return AuthenticatedRepository
        return Repository

    def clone_dir(self, repo_name):
        dired_repo_name = repo_name.replace('/', '__')
        if not self.cache_directory:
            dirname = mkdtemp(suffix=dired_repo_name)
        else:
            dirname = os.path.abspath("%s/%s" % (
                self.cache_directory, dired_repo_name))
        return dirname

    def fetch(self, dirname, remote_name, ref):
        log.debug("Fetching %s %s", remote_name, ref)
        self.executor("cd %s && git fetch --depth=1 %s %s" %
                      (dirname, remote_name, ref))

    def pull(self, dirname):
        log.debug("Pulling all %s", dirname)
        self.executor("cd %s && git pull --all" % dirname)

    def add_remote(self, dirname, name, url):
        log.debug("Adding remote %s url: %s", name, url)
        self.executor("cd %s && git remote add %s %s" %
                      (dirname, name, url))

    def set_up_clone(self, repo_name, remote_repo):
        """Sets up the working directory and returns a tuple of
        (dirname, repo)"""
        dirname = self.clone_dir(repo_name)
        self.to_cleanup[repo_name] = dirname
        klass = self.get_repo_class()
        repo = klass(repo_name,
                     dirname,
                     self.tools,
                     self.executor,
                     shallow=self.shallow_clone)
        return (dirname, repo)

    def clone_repo(self, repo_name, remote_repo, ref):
        """Clones the given repo and returns the Repository object."""
        self.shallow_clone = False
        dirname, repo = self.set_up_clone(repo_name, remote_repo)
        if os.path.isdir("%s/.git" % dirname):
            log.debug("Updating %s to %s", repo.download_location, dirname)
            self.executor(
                "cd %s && git checkout master" % dirname)
            self.pull(dirname)
        else:
            log.debug("Cloning %s to %s", repo.download_location, dirname)
            self.executor(
                "git clone %s %s" % (repo.download_location, dirname))

        if remote_repo is not None:
            log.debug("Pulling remote branch from %s", remote_repo.url)
            self.add_remote(dirname,
                            remote_repo.name,
                            remote_repo.url)
            self.pull(dirname)
        return repo

    def cleanup(self):
        if self.should_cleanup:
            for repo_dir in self.to_cleanup.values():
                log.debug("Cleaning up %s", repo_dir)
                self.executor('rm -rf %s' % repo_dir)


class ShallowRepoManager(RepoManager):
    def __init__(self, *args, **kwargs):
        super(ShallowRepoManager, self).__init__(*args, **kwargs)

    def clone_repo(self, repo_name, remote_repo, ref):
        self.shallow_clone = True
        dirname, repo = self.set_up_clone(repo_name, remote_repo)
        remote_name = 'origin'
        log.debug("Shallow cloning.")
        download_location = repo.download_location
        log.debug("Creating stub git repo at %s" % (dirname))
        self.executor("mkdir -p %s" % (dirname, ))
        self.executor("cd %s && git init" % (dirname, ))
        log.debug("Adding origin repo %s " % (download_location))
        self.add_remote(dirname, 'origin', download_location)
        if remote_repo:
            self.add_remote(dirname, remote_repo.name, remote_repo.url)
            remote_name = remote_repo.name
        self.fetch(dirname, 'origin', 'HEAD')
        self.fetch(dirname, remote_name, ref)
        return repo
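

# Usage sketch (added for illustration; not part of the original module).
# Because the shell executor is injected, the git commands the manager would
# run can simply be collected instead of executed. All names below are
# hypothetical.
#
#   commands = []
#   manager = RepoManager(cache_directory='/tmp/repo_cache',
#                         executor=commands.append)
#   manager.clone_repo('octocat/hello-world', remote_repo=None, ref='master')
#   print(commands)   # ['git clone <download_location> /tmp/repo_cache/...']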
"""Default website configurations, used only for testing. """ from donut import environment # Public Test Database
TEST = environment.Environment( db_hostname="localhost", db_name="donut_test", db_user="donut_test", db_password="public", debug=True, testing=True, secret_key="1234567890", imgur_api={ "id": "b579f690cacf867", "secret": "***************************
*************" }, restricted_ips=r"127\.0\.0\.1")
e(b, (list, tuple)):
                setattr(self, a, [dict_wrapper(x) if isinstance(x, dict) else x for x in b])
            else:
                setattr(self, a, dict_wrapper(b) if isinstance(b, dict) else b)


class spritesheet(object):
    """
    A sprite sheet is a series of images (usually animation frames) combined
    into a larger image.

    A dictionary is usually spread into the object constructor parameters
    with the following top-level attributes:

    :param image: A path to a sprite map image.
    :type image: str

    :param frames: A dictionary of settings that defines how to extract
        individual frames from the supplied image, as follows

        - ``width`` & ``height`` are required and specify the dimensions of
          the frames
        - ``regX`` & ``regY`` indicate the registration point or "origin" of
          the frames
        - ``count`` allows you to specify the total number of frames in the
          spritesheet; if omitted, this will be calculated based on the
          dimensions of the source images and the frames. Frames will be
          assigned indexes based on their position in the source images
          (left to right, top to bottom).

    :type frames: dict

    :param animations: A dictionary of key/value pairs where the key is the
        name of the animation sequence, and the value are settings that
        defines an animation sequence as follows:

        - ``frames`` is a list of frames to show in sequence. Usually this
          consists of frame numbers, but can refer to other animation
          sequences (which are handled much like a subroutine call).
        - ``speed`` determines how quickly the animation frames are cycled
          through compared to how often the animation sequence yields.
        - ``next`` is optional, but if supplied, determines what happens when
          the animation sequence is exhausted. Typically this can be used to
          self-reference, so that it forms an infinite loop, but can hand off
          to any other animation sequence.

    :type animations: dict

    Loosely based on https://www.createjs.com/docs/easeljs/classes/SpriteSheet.html
    """
    def __init__(self, image, frames, animations):
        with open(image, 'rb') as fp:
            self.image = Image.open(fp)
            self.image.load()
        self.frames = dict_wrapper(frames)
        self.animations = dict_wrapper(animations)
        # Reframe the sprite map in terms of the registration point (if set)
        regX = self.frames.regX if hasattr(self.frames, "regX") else 0
        regY = self.frames.regY if hasattr(self.frames, "regY") else 0
        self.image = self.image.crop((regX, regY, self.image.width - regX, self.image.height - regY))
        self.width, self.height = self.image.size

        assert(self.width % self.frames.width == 0)
        assert(self.height % self.frames.height == 0)

        self.frames.size = (self.frames.width, self.frames.height)
        if not hasattr(self.frames, 'count'):
            self.frames.count = (self.width * self.height) // (self.frames.width * self.frames.height)

        self.cache = {}

    def __getitem__(self, frame_index):
        """
        Returns (and caches) the frame for the given index.

        :param frame_index: The index of the frame.
        :type frame_index: int
        :returns: A Pillow image cropped from the main image corresponding to
            the given frame index.
        :raises TypeError: if the ``frame_index`` is not numeric
        :raises IndexError: if the ``frame_index`` is less than zero or more
            than the largest frame.
        """
        if not isinstance(frame_index, int):
            raise TypeError("frame index must be numeric")

        # Indexes are zero-based, so the last valid frame is count - 1.
        if frame_index < 0 or frame_index >= self.frames.count:
            raise IndexError("frame index out of range")

        cached_frame = self.cache.get(frame_index)
        if cached_frame is None:
            offset = frame_index * self.frames.width
            left = offset % self.width
            top = (offset // self.width) * self.frames.height
            right = left + self.frames.width
            bottom = top + self.frames.height

            bounds = [left, top, right, bottom]
            cached_frame = self.image.crop(bounds)
            self.cache[frame_index] = cached_frame

        return cached_frame

    def __len__(self):
        """
        The number of frames in the sprite sheet
        """
        return self.frames.count

    def animate(self, seq_name):
        """
        Returns a generator which "executes" an animation sequence for the
        given ``seq_name``, inasmuch as the next frame for the given animation
        is yielded when requested.

        :param seq_name: The name of a previously defined animation sequence.
        :type seq_name: str
        :returns: A generator that yields all frames from the animation
            sequence.
        :raises AttributeError: If the ``seq_name`` is unknown.
        """
        while True:
            index = 0
            anim = getattr(self.animations, seq_name)
            speed = anim.speed if hasattr(anim, "speed") else 1
            num_frames = len(anim.frames)
            while index < num_frames:
                frame = anim.frames[int(index)]
                index += speed
                if isinstance(frame, int):
                    yield self[frame]
                else:
                    for subseq_frame in self.animate(frame):
                        yield subseq_frame
            if not hasattr(anim, "next"):
                break
            seq_name = anim.next


class framerate_regulator(object):
    """
    Implements a variable sleep mechanism to give the appearance of a
    consistent frame rate. Using a fixed-time sleep will cause animations to
    be jittery (looking like they are speeding up or slowing down, depending
    on what other work is occurring), whereas this class keeps track of when
    the ``sleep()`` method was last called, and calculates a sleep period to
    smooth out the jitter.

    :param fps: The desired frame rate, expressed numerically in
        frames-per-second. By default, this is set at 16.67, to give a frame
        render time of approximately 60ms. This can be overridden as
        necessary, and if no FPS limiting is required, the ``fps`` can be set
        to zero.
    :type fps: float
    """
    def __init__(self, fps=16.67):
        if fps == 0:
            fps = -1

        self.max_sleep_time = 1.0 / fps
        self.total_transit_time = 0
        self.called = 0
        self.start_time = None
        self.last_time = None

    def __enter__(self):
        self.enter_time = perf_counter()
        if not self.start_time:
            self.start_time = self.enter_time
            self.last_time = self.enter_time

        return self

    def __exit__(self, *args):
        """
        Sleeps for a variable amount of time (dependent on when it was last
        called), to give a consistent frame rate. If it cannot meet the
        desired frame rate (i.e. too much time has elapsed since the last
        call), then it simply exits without blocking.
        """
        self.called += 1
        self.total_transit_time += perf_counter() - self.enter_time
        if self.max_sleep_time >= 0:
            elapsed = perf_counter() - self.last_time
            sleep_for = self.max_sleep_time - elapsed
            if sleep_for > 0:
                sleep(sleep_for)

        self.last_time = perf_counter()

    def effective_FPS(self):
        """
        Calculates the effective frames-per-second - this should largely
        correlate to the desired FPS supplied in the constructor, but no
        guarantees are given.

        :returns: The effective frame rate.
        :rtype: float
        """
        if self.start_time is None:
            self.start_time = 0
        elapsed = perf_counter() - self.start_time
        return self.called / elapsed

    def average_transit_time(self):
        """
        Calculates the average transit time between the enter and exit
        methods, and return t
" klass = self._select_struct_union_class(p[1]) p[0] = klass( name=p[2], decls=p[4], coord=self._coord(p.lineno(2))) def p_struct_or_union(self, p): """ struct_or_union : STRUCT | UNION """ p[0] = p[1] # Combine all declarations into a single list # def p_struct_declaration_list(self, p): """ struct_declaration_list : struct_declaration | struct_declaration_list struct_declaration """ p[0] = p[1] if len(p) == 2 else p[1] + p[2] def p_struct_declaration_1(self, p): """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI """ spec = p[1] assert 'typedef' not in spec['storage'] if p[2] is not None: decls = self._build_declarations( spec=spec, decls=p[2]) elif len(spec['type']) == 1: # Anonymous struct/union, gcc extension, C1x feature. # Although the standard only allows structs/unions here, I see no # reason to disallow other types since some compilers have typedefs # here, and pycparser isn't about rejecting all invalid code. # node = spec['type'][0] if isinstance(node, c_ast.Node): decl_type = node else: decl_type = c_ast.IdentifierType(node) decls = self._build_declarations( spec=spec, decls=[dict(decl=decl_type)]) else: # Structure/union members can have the same names as typedefs. # The trouble is that the member's name gets grouped into # specifier_qualifier_list; _build_declarations compensates. # decls = self._build_declarations( spec=spec, decls=[dict(decl=None, init=None)]) p[0] = decls def p_struct_declaration_2(self, p): """ struct_declaration : specifier_qualifier_list abstract_declarator SEMI """ # "Abstract declarator?!", you ask? Structure members can have the # same names as typedefs. The trouble is that the member's name gets # grouped into specifier_qualifier_list, leaving any remainder to # appear as an abstract declarator, as in: # typedef int Foo; # struct { Foo Foo[3]; }; # p[0] = self._build_declarations( spec=p[1], decls=[dict(decl=p[2], init=None)]) def p_struct_declarator_list(self, p): """ struct_declarator_list : struct_declarator | struct_declarator_list COMMA struct_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] # struct_declarator passes up a dict with the keys: decl (for # the underlying declarator) and bitsize (for the bitsize) # def p_struct_declarator_1(self, p): """ struct_declarator : declarator """ p[0] = {'decl': p[1], 'bitsize': None} def p_struct_declarator_2(self, p): """ struct_declarator : declarator COLON constant_expression | COLON constant_expression """ if len(p) > 3: p[0] = {'decl': p[1], 'bitsize': p[3]} else: p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]} def p_enum_specifier_1(self, p): """ enum_specifier : ENUM ID | ENUM TYPEID """ p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1))) def p_enum_specifier_2(self, p): """ enum_specifier : ENUM brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1))) def p_enum_specifier_3(self, p): """ enum_specifier : ENUM ID brace_open enumerator_list brace_close | ENUM T
YPEID brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1
))) def p_enumerator_list(self, p): """ enumerator_list : enumerator | enumerator_list COMMA | enumerator_list COMMA enumerator """ if len(p) == 2: p[0] = c_ast.EnumeratorList([p[1]], p[1].coord) elif len(p) == 3: p[0] = p[1] else: p[1].enumerators.append(p[3]) p[0] = p[1] def p_enumerator(self, p): """ enumerator : ID | ID EQUALS constant_expression """ if len(p) == 2: enumerator = c_ast.Enumerator( p[1], None, self._coord(p.lineno(1))) else: enumerator = c_ast.Enumerator( p[1], p[3], self._coord(p.lineno(1))) self._add_identifier(enumerator.name, enumerator.coord) p[0] = enumerator def p_declarator_1(self, p): """ declarator : direct_declarator """ p[0] = p[1] def p_declarator_2(self, p): """ declarator : pointer direct_declarator """ p[0] = self._type_modify_decl(p[2], p[1]) # Since it's impossible for a type to be specified after a pointer, assume # it's intended to be the name for this declaration. _add_identifier will # raise an error if this TYPEID can't be redeclared. # def p_declarator_3(self, p): """ declarator : pointer TYPEID """ decl = c_ast.TypeDecl( declname=p[2], type=None, quals=None, coord=self._coord(p.lineno(2))) p[0] = self._type_modify_decl(decl, p[1]) def p_direct_declarator_1(self, p): """ direct_declarator : ID """ p[0] = c_ast.TypeDecl( declname=p[1], type=None, quals=None, coord=self._coord(p.lineno(1))) def p_direct_declarator_2(self, p): """ direct_declarator : LPAREN declarator RPAREN """ p[0] = p[2] def p_direct_declarator_3(self, p): """ direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=p[3], coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=arr) # Special for VLAs # def p_direct_declarator_4(self, p): """ direct_declarator : direct_declarator LBRACKET TIMES RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=c_ast.ID(p[3], self._coord(p.lineno(3))), coord=p[1].coord) p[0] = self._type_modify_decl(decl=p[1], modifier=arr) def p_direct_declarator_5(self, p): """ direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN | direct_declarator LPAREN identifier_list_opt RPAREN """ func = c_ast.FuncDecl( args=p[3], type=None, coord=p[1].coord) # To see why _get_yacc_lookahead_token is needed, consider: # typedef char TT; # void foo(int TT) { TT = 10; } # Outside the function, TT is a typedef, but inside (starting and # ending with the braces) it's a parameter. The trouble begins with # yacc's lookahead token. We don't know if we're declaring or # defining a function until we see LBRACE, but if we wait for yacc to # trigger a rule on that token, then TT will have already been read # and incorrectly interpreted as TYPEID. We need to add the # parameters to the scope the moment the lexer sees LBRACE. # if self._get_yacc_lookahead_token().type == "LBRACE": if func.args is not None: for param in func.args.params: if isinstance(param, c_ast.EllipsisParam): break self._add_identifier(param.name,
BODY_SCOPE), ('a', 'a[0]'), ('a[0]',)) def test_return_vars_are_read(self): def test_fn(a, b, c): # pylint: disable=unused-argument return c node, _ = self._parse_and_analyze(test_fn) fn_node = node self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ()) self.assertScopeIs( anno.getanno(node.body[0], anno.Static.SCOPE), ('c',), ()) def test_raise_names_are_read(self): def test_fn(a, b, c): # pylint: disable=unused-argument raise b node, _ = self._parse_and_analyze(test_fn) fn_node = node self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('b',), ()) self.assertScopeIs( anno.getanno(node.body[0], anno.Static.SCOPE), ('b',), ()) def test_except_expo
ses_names(self): def test_fn(a, b, c): # pylint: disable=unused-argument try: pass except: # pylint: disable=bare-except b = c node, _ = self._parse_and_analyze(test_fn) fn_node = node self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ('b',))

  def test_except_hides_exception_var_name(self):

    def test_fn(a, b, c):  # pylint: disable=unused-argument
      try:
        pass
      except a as e:
        b = e

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a',), ('b',))

  def test_aug_assign(self):

    def test_fn(a, b):
      a += b

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'b'), ('a',))

  def test_aug_assign_rvalues(self):

    a = dict(bar=3)

    def foo():
      return a

    def test_fn(x):
      foo()['bar'] += x

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('foo', 'x'), ())

  def test_lambda(self):

    def test_fn(a, b):
      return lambda: (a + b)

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(scope, ('a', 'b'), ())
    lam_def_node = node.body[0].value
    scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
    self.assertScopeIs(scope, (), ())
    scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(scope, ('a', 'b'), ())
    scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
    self.assertScopeIs(scope, ('a', 'b'), ())
    self.assertSymbolSetsAre((), scope.bound, 'BOUND')
    scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
    self.assertSymbolSetsAre((), scope.params.keys(), 'lambda params')

  def test_lambda_params_args(self):

    def test_fn(a, b):  # pylint: disable=unused-argument
      return lambda a: a + b

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    # Note: `a` in `a + b` is not "read" here because it's hidden by the `a`
    # argument.
    self.assertScopeIs(scope, ('b',), ())
    lam_def_node = node.body[0].value
    scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
    self.assertScopeIs(scope, (), ())
    scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(scope, ('a', 'b'), ())
    scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
    self.assertScopeIs(scope, ('a', 'b'), ())
    self.assertSymbolSetsAre(('a',), scope.bound, 'BOUND')
    scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
    self.assertSymbolSetsAre(('a',), scope.params.keys(), 'lambda params')

  def test_lambda_params_arg_defaults(self):

    def test_fn(a, b, c):  # pylint: disable=unused-argument
      return lambda b=c: a + b

    node, _ = self._parse_and_analyze(test_fn)
    fn_node = node
    scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
    # Note: `b` is not "read" here because it's hidden by the argument.
self.assertScopeIs(scope, ('a', 'c'), ()) lam_def_node = node.body[0].value scope = anno.getanno(lam_def_node, anno.Static.SCOPE) self.assertScopeIs(scope, ('c',), ()) scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE) self.assertScopeIs(scope, ('a', 'b'), ()) scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE) self.assertScopeIs(scope, ('a', 'b'), ()) self.assertSymbolSetsAre(('b',), scope.bound, 'BOUND') scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE) self.assertSymbolSetsAre(('b',), scope.params.keys(), 'lambda params') def test_lambda_complex(self): def test_fn(a, b, c, d, e): # pylint: disable=unused-argument a = (lambda a, b, c=e: a + b + c)(d, 1, 2) + b node, _ = self._parse_and_analyze(test_fn) fn_node = node scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE) self.assertScopeIs(scope, ('d', 'b', 'e'), ('a',)) lam_def_node = node.body[0].value.left.func scope = anno.getanno(lam_def_node, anno.Static.SCOPE) self.assertScopeIs(scope, ('e',), ()) scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE) self.assertScopeIs(scope, ('a', 'b', 'c'), ()) scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE) self.assertScopeIs(scope, ('a', 'b', 'c'), ()) self.assertSymbolSetsAre(('a', 'b', 'c'), scope.bound, 'BOUND') scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE) self.assertSymbolSetsAre( ('a', 'b', 'c'), scope.params.keys(), 'lambda params') def test_lambda_nested(self): def test_fn(a, b, c, d, e, f): # pylint: disable=unused-argument a = lambda a, b: d(lambda b=f: a + b + c) # pylint: disable=undefined-variable node, _ = self._parse_and_analyze(test_fn) fn_node = node scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE) self.assertScopeIs(scope, ('d', 'c', 'f'), ('a',)) outer_lam_def = node.body[0].value scope = anno.getanno(outer_lam_def, anno.Static.SCOPE) self.assertScopeIs(scope, (), ()) scope = anno.getanno(outer_lam_def, NodeAnno.BODY_SCOPE) self.assertScopeIs(scope, ('d', 'f', 'a', 'c'), ()) scope = anno.getanno(outer_lam_def, NodeAnno.ARGS_AND_BODY_SCOPE) self.assertScopeIs(scope, ('d', 'f', 'a', 'c'), ()) self.assertSymbolSetsAre(('a', 'b'), scope.bound, 'BOUND') scope = anno.getanno(outer_lam_def.args, anno.Static.SCOPE) self.assertSymbolSetsAre(('a', 'b'), scope.params.keys(), 'lambda params') inner_lam_def = outer_lam_def.body.args[0] scope = anno.getanno(inner_lam_def, anno.Static.SCOPE) self.assertScopeIs(scope, ('f',), ()) scope = anno.getanno(inner_lam_def, NodeAnno.BODY_SCOPE) self.assertScopeIs(scope, ('a', 'b', 'c'), ()) scope = anno.getanno(inner_lam_def, NodeAnno.ARGS_AND_BODY_SCOPE) self.assertScopeIs(scope, ('a', 'b', 'c'), ()) self.assertSymbolSetsAre(('b',), scope.bound, 'BOUND') scope = anno.getanno(inner_lam_def.args, anno.Static.SCOPE) self.assertSymbolSetsAre(('b',), scope.params.keys(), 'lambda params') def test_comprehension_targets_are_isolated(self): def test_fn(a): b = {c for c in a} # pylint:disable=unused-variable node, _ = self._parse_and_analyze(test_fn) fn_node = node body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE) self.assertScopeIs(body_scope, ('a',), ('b',)) def test_comprehension_targets_are_isolated_list_function_w_generator(self): def test_fn(a): b = list(c for c in a) # pylint:disable=unused-variable node, _ = self._parse_and_analyze(test_fn) fn_node = node body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE) self.assertScopeIs(body_scope, ('a', 'list'), ('b',)) def test_list_comprehension_targets_are_sometimes_isolated(self): def test_fn(a): b = [c for c in a] # 
pylint:disable=unused-variable node, _ = self._parse_and_analyze(test_fn) fn_node = node body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE) self.assertScopeIs(body_scope, ('a',), ('b',)) def test_comprehension_targets_are_isolated_in_augassign(self): def test_fn(
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging

import emission.core.wrapper.transition as et
import emission.net.usercache.formatters.common as fc
import attrdict as ad

state_map = {
    "STATE_START": et.State.START,
    "STATE_WAITING_FOR_TRIP_START": et.State.WAITING_FOR_TRIP_START,
    "STATE_ONGOING_TRIP": et.State.ONGOING_TRIP,
    "STATE_TRACKING_STOPPED": et.State.TRACKING_STOPPED
}

transition_map = {
    "booted": et.TransitionType.BOOTED,
    "T_INITIALIZE": et.TransitionType.INITIALIZE,
    "T_INIT_COMPLETE": et.TransitionType.INIT_COMPLETE,
    "T_EXITED_GEOFENCE": et.TransitionType.EXITED_GEOFENCE,
    "T_TRIP_STARTED": et.TransitionType.TRIP_STARTED,
    "T_RECEIVED_SILENT_PUSH": et.TransitionType.RECEIVED_SILENT_PUSH,
    "T_TRIP_END_DETECTED": et.TransitionType.TRIP_END_DETECTED,
    "T_TRIP_RESTARTED": et.TransitionType.TRIP_RESTARTED,
    "T_END_TRIP_TRACKING": et.TransitionType.END_TRIP_TRACKING,
    "T_DATA_PUSHED": et.TransitionType.DATA_PUSHED,
    "T_TRIP_ENDED": et.TransitionType.STOPPED_MOVING,
    "T_FORCE_STOP_TRACKING": et.TransitionType.STOP_TRACKING,
    "T_TRACKING_STOPPED": et.TransitionType.TRACKING_STOPPED,
    "T_VISIT_STARTED": et.TransitionType.VISIT_STARTED,
    "T_VISIT_ENDED": et.TransitionType.VISIT_ENDED,
    "T_NOP": et.TransitionType.NOP,
    "T_START_TRACKING": et.TransitionType.START_TRACKING
}

def format(entry):
    formatted_entry = ad.AttrDict()
    formatted_entry["_id"] = entry["_id"]
    formatted_entry.user_id = entry.user_id

    m = entry.metadata
    fc.expand_metadata_times(m)
    formatted_entry.metadata = m

    data = ad.AttrDict()
    data.curr_state = state_map[entry.data.currState].value
    logging.debug("Mapped %s -> %s" % (entry.data.currState, data.curr_state))

    # The iOS state diagram is significantly more complex than the android state diagram
    # So there are a lot more transitions. But some of the intermediate states are
    # not interesting, so it seems like it should be possible to collapse them to the
    # simple 2-state android state machine. But that requires looking at a window of
    # transitions, which we don't have here. Let's focus on simply mapping here and
    # deal with collapsing later
    # data.transition_raw = entry.data.transition
    if entry.data.transition is not None:
        data.transition = transition_map[entry.data.transition].value
    else:
        data.transition = None
    logging.debug("Mapped %s -> %s" % (entry.data.transition, data.transition))

    if "ts" not in data:
        data.ts = formatted_entry.metadata.write_ts
        logging.debug("No existing timestamp, copied from metadata %s" % data.ts)
        data.local_dt = formatted_entry.metadata.write_local_dt
        data.fmt_time = formatted_entry.metadata.write_fmt_time
    else:
        logging.debug("Retaining existing timestamp %s" % data.ts)
        fc.expand_data_times(data, m)

    formatted_entry.data = data
    return formatted_entry
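

# Usage sketch (added for illustration; not part of the original module).
# A minimal iOS transition entry as the formatter above expects it; the
# field values are made up, and the metadata must carry whatever fields
# fc.expand_metadata_times() needs (e.g. write_ts).
#
#   entry = ad.AttrDict({"_id": "someid", "user_id": "someuser",
#                        "metadata": {"write_ts": 1436826360},
#                        "data": {"currState": "STATE_ONGOING_TRIP",
#                                 "transition": "T_TRIP_END_DETECTED"}})
#   formatted = format(entry)
#   assert formatted.data.curr_state == et.State.ONGOING_TRIP.value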
#!/usr/bin/env python
"""
A toolkit for identifying and advertising service resources.

Uses a specific naming convention for the Task Definition of services. If you
name the Task Definition ending with "-service", no configuration is needed.
This also requires that you not use that naming convention for task
definitions that are not services.

For example: A Task Definition with the family name of 'cache-service' will
have its hosting Container Instance's internal ip added to a Route53 private
Zone as cache.local and other machines on the same subnet can address it that
way.
"""

import argparse
import logging
import os
import re
import json

import boto
import boto.ec2
import boto.route53
import requests
from etcd.client import Client
from time import sleep

region = os.environ.get('ECS_REGION', 'us-east-1')
ecs = boto.connect_ec2containerservice(
    host='ecs.{0}.amazonaws.com'.format(region))
ec2 = boto.ec2.connect_to_region(region)
route53 = boto.route53.connect_to_region(region)

logging.basicConfig(format='%(asctime)s %(message)s',
                    datefmt='%Y/%m/%d %I:%M:%S %p')

if 'ECS_CLUSTER' in os.environ:
    cluster = os.environ['ECS_CLUSTER']
elif os.path.exists('/etc/ecs/ecs.config'):
    pat = re.compile(r'\bECS_CLUSTER\b\s*=\s*(\w*)')
    cluster = pat.findall(open('/etc/ecs/ecs.config').read())[-1]
else:
    cluster = None


def get_task_arns(family):
    """
    Get the ARNs of the running tasks for the given family name.
    """
    response = ecs.list_tasks(cluster=cluster, family=family)
    arns = response['ListTasksResponse']['ListTasksResult']['taskArns']
    if len(arns) == 0:
        return None
    return arns


def get_ec2_interface(container_instance_arn):
    """
    Get the ec2 interface from a container instance ARN.
    """
    response = ecs.describe_container_instances(container_instance_arn,
                                                cluster=cluster)
    ec2_instance_id = response['DescribeContainerInstancesResponse'] \
        ['DescribeContainerInstancesResult']['containerInstances'] \
        [0]['ec2InstanceId']

    response = ec2.get_all_instances(filters={'instance-id': ec2_instance_id})
    return response[0].instances[0].interfaces[0]


def get_zone_for_vpc(vpc_id):
    """
    Identify the Hosted Zone for the given VPC.

    Assumes a 1 to 1 relationship.

    NOTE: There is an existing bug.
        https://github.com/boto/boto/issues/3061
    When that changes, I expect to have to search ['VPCs'] as a list of
    dictionaries rather than a dictionary. This has the unfortunate side
    effect of not working for Hosted Zones that are associated with more than
    one VPC. (But, why would you expect internal DNS for 2 different private
    networks to be the same anyway?)
    """
    response = route53.get_all_hosted_zones()['ListHostedZonesResponse']
    for zone in response['HostedZones']:
        zone_id = zone['Id'].split('/')[-1]
        detail = route53.get_hosted_zone(zone_id)['GetHostedZoneResponse']
        try:
            if detail['VPCs']['VPC']['VPCId'] == vpc_id:
                return {'zone_id': zone_id, 'zone_name': zone['Name']}
        except KeyError:
            pass


def get_service_info(service_name):
    info = {
        "name": service_name,
        "tasks": []
    }
    if service_name[-8:] == '-service':
        info['name'] = service_name[:-8]
    task_arns = get_task_arns(service_name)
    if not task_arns:
        logging.info('{0} is NOT RUNNING'.format(service_name))
        return None
    else:
        logging.info('{0} is RUNNING'.format(service_name))

    data = ecs.describe_tasks(task_arns, cluster=cluster)
    tasks = data['DescribeTasksResponse']['DescribeTasksResult']['tasks']
    for task in tasks:
        interface = get_ec2_interface(task['containerInstanceArn'])
        task_info = {
            'ip': interface.private_ip_address,
            'ports': {}
        }
        for container in task['containers']:
            if container['networkBindings']:
                for port in container['networkBindings']:
                    if port['protocol'] == 'tcp':
                        task_info['ports'][port['containerPort']] = port['hostPort']
        info['tasks'].append(task_info)
    info['vpc_id'] = interface.vpc_id
    return info


def update_dns(zone_id, zone_name, service_name, service_ips, ttl=20):
    """
    Insert or update DNS record.
    """
    host_name = '.'.join([service_name, zone_name])
    record_set = boto.route53.record.ResourceRecordSets(route53, zone_id)
    record = record_set.add_change('UPSERT', host_name, 'A', ttl)
    for service_ip in service_ips:
        record.add_value(service_ip)
    record_set.commit()
    return record_set


def update_service(service_name, method, prefix):
    """
    Update DNS to allow discovery of properly named task definitions.
    """
    info = get_service_info(service_name)
    if not info:
        return None
    if method == 'dns':
        network = get_zone_for_vpc(info["vpc_id"])
        ips = [t['ip'] for t in info['tasks']]
        logging.info('Registering {0}.{1} as {2}'.format(
            info['name'], network['zone_name'], ','.join(ips)))
        update_dns(network['zone_id'], network['zone_name'],
                   info['name'], ips)
    elif method == 'etcd':
        data = json.dumps(info['tasks'])
        logging.info('Registering {0} as {1}'.format(
            info['name'], data))
        host = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4").content
        client = Client(host=host, port=4001)
        key = '/' + '/'.join([i for i in ['tasks', prefix, info['name']] if i])
        client.node.set(key, data)


def main():
    """
    Main function that handles running the command.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('service_name', nargs=1,
                        help='name of the service to register')
    parser.add_argument('method', nargs=1,
                        help='method of registering service')
    parser.add_argument('-p', '--prefix', action='store', default=False,
                        help='prefix when saving to etcd')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='suppress output')
    parser.add_argument('-r', '--rerun', action='store_true',
                        help='run again after a 60 second pause')
    args = parser.parse_args()
    if not args.quiet:
        logging.getLogger().setLevel(logging.INFO)
    update_service(args.service_name[0], args.method[0], args.prefix)
    if args.rerun:
        sleep(60)
        update_service(args.service_name[0], args.method[0], args.prefix)


if __name__ == '__main__':
    main()
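

# Usage sketch (added for illustration; not part of the original script).
# Assuming this file is saved as register_services.py (a hypothetical name)
# on an ECS container instance, typical invocations look like:
#
#   python register_services.py cache-service dns
#   python register_services.py cache-service etcd --prefix staging --rerun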
# -*- coding: utf8 -*-
from urllib.request import Request, urlopen
import logging

import parsing

__author__ = 'carlos'


class Downloader(object):
    def __init__(self, url):
        self.url = url

    def read(self):
        request = Request(
            self.url
        )
        # 'Accept' declares the media type we want; 'Accept-Encoding' is for
        # compression schemes (gzip, deflate), not MIME types.
        request.add_header('Accept', 'text/html')
        response = urlopen(request)
        charset = response.headers.get_content_charset()
        data = response.read()
        logging.debug('Read %u bytes from %s (%s)' % (len(data), self.url, charset))
        # Decode to text so that downstream HTML parsers receive str, not bytes.
        return data.decode(charset or 'utf-8')


class StocksInfoUpdater(object):
    def __init__(self, url):
        self.downloader = Downloader(url)
        self.parser = parsing.StockParser()

    def update(self):
        dataread = self.downloader.read()
        self.parser.feed(dataread)
        return self.parser.stocks

    @property
    def stocks(self):
        return self.parser.stocks

    @property
    def url(self):
        return self.downloader.url
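

# Usage sketch (added for illustration; not part of the original module).
# The URL is hypothetical; any page that parsing.StockParser understands
# would work the same way.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    updater = StocksInfoUpdater('http://example.com/stocks.html')
    for stock in updater.update():
        print(stock)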
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Controllers for suggestions."""

from constants import constants
from core.controllers import base
from core.domain import acl_decorators
from core.domain import suggestion_services
from core.platform import models

(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])


class SuggestionHandler(base.BaseHandler):
    """Handles operations relating to suggestions."""

    @acl_decorators.can_suggest_changes
    def post(self):
        if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
            raise self.PageNotFoundException
        suggestion_services.create_suggestion(
            self.payload.get('suggestion_type'),
            self.payload.get('target_type'), self.payload.get('target_id'),
            self.payload.get('target_version_at_submission'), self.user_id,
            self.payload.get('change_cmd'), self.payload.get('description'),
            self.payload.get('final_reviewer_id'))
        self.render_json(self.values)


class SuggestionToExplorationActionHandler(base.BaseHandler):
    """Handles actions performed on suggestions to explorations."""

    ACTION_TYPE_ACCEPT = 'accept'
    ACTION_TYPE_REJECT = 'reject'

    # TODO (nithesh): Add permissions for users with enough scores to review.
    # Will be added as part of milestone 2 of the generalized review system
    # project.
    @acl_decorators.can_edit_exploration
    def put(self, exploration_id, suggestion_id):
        if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
            raise self.PageNotFoundException
        if len(suggestion_id.split('.')) != 3:
            raise self.InvalidInputException(
                'Invalid format for suggestion_id. It must contain 3 parts '
                'separated by \'.\'')
        if suggestion_id.split('.')[0] != 'exploration':
            raise self.InvalidInputException(
                'This handler allows actions only on suggestions to '
                'explorations.')
        if suggestion_id.split('.')[1] != exploration_id:
            raise self.InvalidInputException(
                'The exploration id provided does not match the exploration '
                'id present as part of the suggestion_id.')
        action = self.payload.get('action')
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        if action == self.ACTION_TYPE_ACCEPT:
            suggestion_services.accept_suggestion(
                suggestion, self.user_id, self.payload.get('commit_message'),
                self.payload.get('review_message'))
        elif action == self.ACTION_TYPE_REJECT:
            suggestion_services.reject_suggestion(
                suggestion, self.user_id, self.payload.get('review_message'))
        else:
            raise self.InvalidInputException('Invalid action.')
        self.render_json(self.values)


class SuggestionListHandler(base.BaseHandler):
    """Handles list operations on suggestions."""

    LIST_TYPE_AUTHOR = 'author'
    LIST_TYPE_ID = 'id'
    LIST_TYPE_REVIEWER = 'reviewer'
    LIST_TYPE_STATUS = 'status'
    LIST_TYPE_SUGGESTION_TYPE = 'type'
    LIST_TYPE_TARGET_ID = 'target'

    LIST_TYPES_TO_SERVICES_MAPPING = {
        LIST_TYPE_AUTHOR: suggestion_services.get_suggestions_by_author,
        LIST_TYPE_ID: suggestion_services.get_suggestion_by_id,
        LIST_TYPE_REVIEWER: suggestion_services.get_suggestions_reviewed_by,
        LIST_TYPE_STATUS: suggestion_services.get_suggestions_by_status,
        LIST_TYPE_SUGGESTION_TYPE: suggestion_services.get_suggestion_by_type,
        LIST_TYPE_TARGET_ID: suggestion_services.get_suggestions_by_target_id
    }

    PARAMS_FOR_LIST_TYPES = {
        LIST_TYPE_AUTHOR: ['author_id'],
        LIST_TYPE_ID: ['suggestion_id'],
        LIST_TYPE_REVIEWER: ['reviewer_id'],
        LIST_TYPE_STATUS: ['status'],
        LIST_TYPE_SUGGESTION_TYPE: ['suggestion_type'],
        LIST_TYPE_TARGET_ID: ['target_type', 'target_id']
    }

    def get_params_from_request(self, request, list_type):
        return [request.get(param_name)
                for param_name in self.PARAMS_FOR_LIST_TYPES[list_type]]

    @acl_decorators.open_access
    def get(self):
        if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
            raise self.PageNotFoundException
        list_type = self.request.get('list_type')
        if list_type not in self.LIST_TYPES_TO_SERVICES_MAPPING:
            raise self.InvalidInputException('Invalid list type.')
        params = self.get_params_from_request(self.request, list_type)
        suggestions = self.LIST_TYPES_TO_SERVICES_MAPPING[list_type](*params)
        # When querying by ID, only a single suggestion is retrieved, so we
        # make it a list.
        if list_type == self.LIST_TYPE_ID:
            suggestions = [suggestions]
        self.values.update({'suggestions': [s.to_dict() for s in suggestions]})
        self.render_json(self.values)
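# Illustrative only: the suggestion_id format enforced by
# SuggestionToExplorationActionHandler.put() above is three dot-separated
# parts. The helper below mirrors those checks for demonstration; it is not
# part of the Oppia codebase.
def _check_suggestion_id(suggestion_id, exploration_id):
    parts = suggestion_id.split('.')
    assert len(parts) == 3, 'suggestion_id must contain 3 parts'
    assert parts[0] == 'exploration', 'only exploration suggestions allowed'
    assert parts[1] == exploration_id, 'exploration id mismatch'

_check_suggestion_id('exploration.exp1.ab12cd', 'exp1')  # passes silently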
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <kapitan-admins@googlegroups.com>
#
# SPDX-License-Identifier: Apache-2.0

import logging

logger = logging.getLogger(__name__)


class Validator(object):
    def __init__(self, cache_dir, **kwargs):
        self.cache_dir = cache_dir

    def validate(self, validate_obj, **kwargs):
        raise NotImplementedError
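# A hedged sketch of a concrete subclass showing the intended contract of
# validate(); this class is illustrative and not one of Kapitan's real
# validators.
class AcceptAllValidator(Validator):
    def validate(self, validate_obj, **kwargs):
        # A real validator would check validate_obj against a schema cached
        # under self.cache_dir and raise on mismatch.
        logger.debug("accepting %s without checks", validate_obj)
        return True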
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import torch


class GradMultiply(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x, scale):
        # Remember the scale for the backward pass; the forward output is an
        # unscaled copy of the input.
        ctx.scale = scale
        res = x.new(x)
        return res

    @staticmethod
    def backward(ctx, grad):
        # Scale the incoming gradient; the second return value is the
        # (non-existent) gradient w.r.t. `scale`.
        return grad * ctx.scale, None
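# Usage sketch: scale the gradient flowing back into `x` by 0.5 while leaving
# the forward value untouched (a common trick for down-weighting one branch of
# a network).
if __name__ == '__main__':
    x = torch.ones(3, requires_grad=True)
    y = GradMultiply.apply(x, 0.5)
    y.sum().backward()
    print(x.grad)  # tensor([0.5000, 0.5000, 0.5000])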
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _


class WagtailTestsAppConfig(AppConfig):
    name = 'wagtail.tests.modeladmintest'
    label = 'modeladmintest'
    verbose_name = _("Test Wagtail Model Admin")
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from absl.testing import parameterized
import numpy as np

from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
                                 parameterized.TestCase):

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=1, mode=["graph"]),
          combinations.combine(slices=[[
              [1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
          ], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
  def testFromSparseTensorSlices(self, slices):
    """Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer
    get_next = sparse_tensor.SparseTensor(*iterator.get_next())

    with self.cached_session() as sess:
      # Test with sparse tensor in the appropriate order.
      # pylint: disable=g-complex-comprehension
      indices = np.array(
          [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
      values = np.array([val for s in slices for val in s])
      # pylint: enable=g-complex-comprehension
      dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
      sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
                                                    dense_shape)
      sess.run(init_op, feed_dict={st: sparse_feed})
      for i, s in enumerate(slices):
        results = sess.run(get_next)
        self.assertAllEqual(s, results.values)
        expected_indices = np.array(
            [[j] for j in range(len(slices[i]))]).reshape([-1, 1])
        self.assertAllEqual(expected_indices, results.indices)
        self.assertAllEqual(dense_shape[1:], results.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=1, mode=["graph"]),
          combinations.combine(slices=[[
              [1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
          ], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
  def testFromSparseTensorSlicesInReverse(self, slices):
    """Test a dataset based on slices of a `tf.sparse.SparseTensor` in
    reverse order."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer

    with self.cached_session() as sess:
      # pylint: disable=g-complex-comprehension
      indices = np.array(
          [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
      values = np.array([val for s in slices for val in s])
      # pylint: enable=g-complex-comprehension
      dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])

      # Test with sparse tensor in the reverse order, which is not
      # currently supported.
      reverse_order_indices = indices[::-1, :]
      reverse_order_values = values[::-1]
      sparse_feed = sparse_tensor.SparseTensorValue(
          reverse_order_indices, reverse_order_values, dense_shape)
      with self.assertRaises(errors.UnimplementedError):
        sess.run(init_op, feed_dict={st: sparse_feed})

  @combinations.generate(
      combinations.combine(tf_api_version=1, mode=["graph"]))
  def testEmptySparseTensorSlices(self):
    """Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer
    get_next = sparse_tensor.SparseTensor(*iterator.get_next())

    with self.cached_session() as sess:
      # Test with an empty sparse tensor.
      empty_indices = np.empty((0, 4), dtype=np.int64)
      empty_values = np.empty((0,), dtype=np.float64)
      empty_dense_shape = [0, 4, 37, 9]
      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
                                                    empty_values,
                                                    empty_dense_shape)
      sess.run(init_op, feed_dict={st: sparse_feed})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  @combinations.generate(
      combinations.combine(tf_api_version=1, mode=["graph"]))
  def testEmptySparseTensorSlicesInvalid(self):
    """Test a dataset based on an invalid `tf.sparse.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer

    with self.cached_session() as sess:
      # Test with an empty sparse tensor but with non-empty values.
      empty_indices = np.empty((0, 4), dtype=np.int64)
      non_empty_values = [1, 2, 3, 4]
      empty_dense_shape = [0, 4, 37, 9]
      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
                                                    non_empty_values,
                                                    empty_dense_shape)
      # Here, we expect the test to fail when running the feed.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={st: sparse_feed})

  @combinations.generate(
      combinations.combine(tf_api_version=1, mode=["graph"]))
  def testEmptySparseTensorSlicesInvalid2(self):
    """Test a dataset based on an invalid `tf.sparse.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer

    with self.cached_session() as sess:
      # Test with malformed indices: a single empty index tuple, which is
      # inconsistent with the rank-2 dense shape.
      empty_indices = [[]]
      empty_values = []
      dense_shape = [1, 1]
      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
                                                    empty_values,
                                                    dense_shape)
      # Here, we expect the test to fail when running the feed.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={st: sparse_feed})

  @combinations.generate(
      combinations.combine(tf_api_version=2, mode=["eager"]))
  def testFromSparseTensorSlicesError(self):
    with self.assertRaises(AttributeError):
      dataset_ops.Dataset.from_sparse_tensor_slices(None)


class FromSparseTensorSlicesCheckpointTest(
    checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):

  def _build_sparse_tensor_slice_dataset(self, slices):
    # pylint: disable=g-complex-comprehension
    indices = np.array(
        [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
        dtype=np.int64)
    values = np.array([val for s in slices for val in s], dtype=np.float64)
    # pylint: enable=g-complex-comprehension
    dense_shape = np.array(
        [len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
    sparse_components = sparse_tensor.SparseTensor(indices, values,
                                                   dense_shape)
    return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)

  @combinations.generate(
      combinations.times(test_base.v1_only_combinations(),
                         checkpoint_test_base.default_test_combinations()))
  def test(self, verify_fn):
    slices
from __future__ import absolute_import, division, print_function

from builtins import *  # @UnusedWildImport

from mcculw import ul
from mcculw.ul import ULError
from mcculw.enums import (BoardInfo, InfoType, ErrorCode, EventType,
                          ExpansionInfo)

from .ai_info import AiInfo
from .ao_info import AoInfo
from .ctr_info import CtrInfo
from .daqi_info import DaqiInfo
from .daqo_info import DaqoInfo
from .dio_info import DioInfo


class DaqDeviceInfo:
    """Provides hardware information for the DAQ device configured with the
    specified board number.

    NOTE: This class is primarily used to provide hardware information for
    the library examples and may change some hardware configuration values.
    It is recommended that values provided by this class be hard-coded in
    production code.

    Parameters
    ----------
    board_num : int
        The board number associated with the device when created with
        :func:`.create_daq_device` or configured with Instacal.
    """

    def __init__(self, board_num):
        self._board_num = board_num
        self._board_type = ul.get_config(InfoType.BOARDINFO, board_num, 0,
                                         BoardInfo.BOARDTYPE)
        if self._board_type == 0:
            raise ULError(ErrorCode.BADBOARD)

        self._ai_info = AiInfo(self._board_num)
        self._ao_info = AoInfo(self._board_num)
        self._ctr_info = CtrInfo(self._board_num)
        self._daqi_info = DaqiInfo(self._board_num)
        self._daqo_info = DaqoInfo(self._board_num)
        self._dio_info = DioInfo(self._board_num)

    @property
    def board_num(self):  # -> int
        return self._board_num

    @property
    def product_name(self):  # -> str
        return ul.get_board_name(self._board_num)

    @property
    def unique_id(self):  # -> str
        return ul.get_config_string(InfoType.BOARDINFO, self._board_num, 0,
                                    BoardInfo.DEVUNIQUEID, 32)

    @property
    def supports_analog_input(self):  # -> boolean
        return self._ai_info.is_supported

    @property
    def supports_temp_input(self):  # -> boolean
        return self._ai_info.temp_supported

    def get_ai_info(self):  # -> AiInfo
        return self._ai_info

    @property
    def supports_analog_output(self):  # -> boolean
        return self._ao_info.is_supported

    def get_ao_info(self):  # -> AoInfo
        return self._ao_info

    @property
    def supports_counters(self):  # -> boolean
        return self._ctr_info.is_supported

    def get_ctr_info(self):  # -> CtrInfo
        return self._ctr_info

    @property
    def supports_daq_input(self):  # -> boolean
        return self._daqi_info.is_supported

    def get_daqi_info(self):  # -> DaqiInfo
        return self._daqi_info

    @property
    def supports_daq_output(self):  # -> boolean
        return self._daqo_info.is_supported

    def get_daqo_info(self):  # -> DaqoInfo
        return self._daqo_info

    @property
    def supports_digital_io(self):  # -> boolean
        return self._dio_info.is_supported

    def get_dio_info(self):  # -> DioInfo
        return self._dio_info

    @property
    def supported_event_types(self):  # -> list[EventType]
        # Probe each event type: disable_event raises ULError when the board
        # does not support the type, so the ones that succeed are supported.
        event_types = []
        for event_type in EventType:
            try:
                ul.disable_event(self._board_num, event_type)
                event_types.append(event_type)
            except ULError:
                pass
        return event_types

    @property
    def num_expansions(self):  # -> int
        return ul.get_config(InfoType.BOARDINFO, self.board_num, 0,
                             BoardInfo.NUMEXPS)

    @property
    def exp_info(self):  # -> list[ExpInfo]
        exp_info = []
        for expansion_num in range(self.num_expansions):
            exp_info.append(ExpInfo(self._board_num, expansion_num))
        return exp_info


class ExpInfo:
    def __init__(self, board_num, expansion_num):
        self._board_num = board_num
        self._expansion_num = expansion_num

    @property
    def board_type(self):
        return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
                             self._expansion_num, ExpansionInfo.BOARDTYPE)

    @property
    def mux_ad_chan(self):
        return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
                             self._expansion_num, ExpansionInfo.MUX_AD_CHAN1)
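# A minimal usage sketch, assuming board number 0 has already been configured
# in InstaCal or created via ul.create_daq_device(), and that the package's
# relative imports resolve; illustrative only.
if __name__ == '__main__':
    device_info = DaqDeviceInfo(0)
    print('Product:', device_info.product_name)
    print('Supports analog input:', device_info.supports_analog_input)
    print('Supports digital I/O:', device_info.supports_digital_io)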