text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def _cleanup(self): """Clean up resources used by the session. """ self.exit() workspace = osp.join(os.getcwd(), 'octave-workspace') if osp.exists(workspace): os.remove(workspace)
[ "def", "_cleanup", "(", "self", ")", ":", "self", ".", "exit", "(", ")", "workspace", "=", "osp", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'octave-workspace'", ")", "if", "osp", ".", "exists", "(", "workspace", ")", ":", "os", ".", ...
33
10.857143
def stop(self): """ Force the next() method to return while in another thread. The return value of next() will be None. """ with self.condition: self.running = False self.condition.notify_all()
[ "def", "stop", "(", "self", ")", ":", "with", "self", ".", "condition", ":", "self", ".", "running", "=", "False", "self", ".", "condition", ".", "notify_all", "(", ")" ]
31.25
10
def checkOptions(options, parser): """ Check options, throw parser.error() if something goes wrong """ if options.jobStore == None: parser.error("Specify --jobStore") defaultCategories = ["time", "clock", "wait", "memory"] if options.categories is None: options.categories = defaultCategories else: options.categories = [x.lower() for x in options.categories.split(",")] for c in options.categories: if c not in defaultCategories: parser.error("Unknown category %s. Must be from %s" % (c, str(defaultCategories))) extraSort = ["count", "alpha"] if options.sortCategory is not None: if (options.sortCategory not in defaultCategories and options.sortCategory not in extraSort): parser.error("Unknown --sortCategory %s. Must be from %s" % (options.sortCategory, str(defaultCategories + extraSort))) sortFields = ["min", "med", "ave", "max", "total"] if options.sortField is not None: if (options.sortField not in sortFields): parser.error("Unknown --sortField %s. Must be from %s" % (options.sortField, str(sortFields)))
[ "def", "checkOptions", "(", "options", ",", "parser", ")", ":", "if", "options", ".", "jobStore", "==", "None", ":", "parser", ".", "error", "(", "\"Specify --jobStore\"", ")", "defaultCategories", "=", "[", "\"time\"", ",", "\"clock\"", ",", "\"wait\"", ","...
45.740741
14.111111
def remove_hwpack(name): """remove hardware package. :param name: hardware package name (e.g. 'Sanguino') :rtype: None """ targ_dlib = hwpack_dir() / name log.debug('remove %s', targ_dlib) targ_dlib.rmtree()
[ "def", "remove_hwpack", "(", "name", ")", ":", "targ_dlib", "=", "hwpack_dir", "(", ")", "/", "name", "log", ".", "debug", "(", "'remove %s'", ",", "targ_dlib", ")", "targ_dlib", ".", "rmtree", "(", ")" ]
22.8
16.2
def get_coiledcoil_region(self, cc_number=0, cutoff=7.0, min_kihs=2): """ Assembly containing only assigned regions (i.e. regions with contiguous KnobsIntoHoles. """ g = self.filter_graph(self.graph, cutoff=cutoff, min_kihs=min_kihs) ccs = sorted(networkx.connected_component_subgraphs(g, copy=True), key=lambda x: len(x.nodes()), reverse=True) cc = ccs[cc_number] helices = [x for x in g.nodes() if x.number in cc.nodes()] assigned_regions = self.get_assigned_regions(helices=helices, include_alt_states=False, complementary_only=True) coiledcoil_monomers = [h.get_slice_from_res_id(*assigned_regions[h.number]) for h in helices] return Assembly(coiledcoil_monomers)
[ "def", "get_coiledcoil_region", "(", "self", ",", "cc_number", "=", "0", ",", "cutoff", "=", "7.0", ",", "min_kihs", "=", "2", ")", ":", "g", "=", "self", ".", "filter_graph", "(", "self", ".", "graph", ",", "cutoff", "=", "cutoff", ",", "min_kihs", ...
75.5
31.8
def _do_eval(self, cmd, args): """\ Evaluate python code. e <expr> Evaluate <expr>. """ code = args[0].lstrip() if not code: self.stderr.write('e: cannot evalutate empty expression\n') return try: eval(code) except: self.stderr.write('''When executing code '{}', the following error was raised:\n\n'''.format(code)) self.stderr.write(textwrap.indent(traceback.format_exc(), ' '))
[ "def", "_do_eval", "(", "self", ",", "cmd", ",", "args", ")", ":", "code", "=", "args", "[", "0", "]", ".", "lstrip", "(", ")", "if", "not", "code", ":", "self", ".", "stderr", ".", "write", "(", "'e: cannot evalutate empty expression\\n'", ")", "retur...
36
20.5
async def encoder_read(self, command): """ This is a polling method to read the last cached FirmataPlus encoder value. Normally not used. See encoder config for the asynchronous report message format. :param command: {"method": "encoder_read", "params": [PIN_A]} :returns: {"method": "encoder_read_reply", "params": [PIN_A, ENCODER_VALUE]} """ pin = int(command[0]) val = await self.core.encoder_read(pin) reply = json.dumps({"method": "encoder_read_reply", "params": [pin, val]}) await self.websocket.send(reply)
[ "async", "def", "encoder_read", "(", "self", ",", "command", ")", ":", "pin", "=", "int", "(", "command", "[", "0", "]", ")", "val", "=", "await", "self", ".", "core", ".", "encoder_read", "(", "pin", ")", "reply", "=", "json", ".", "dumps", "(", ...
48.583333
22.25
def slurp_properties(source, destination, ignore=[], srckeys=None): """Copy properties from *source* (assumed to be a module) to *destination* (assumed to be a dict). *ignore* lists properties that should not be thusly copied. *srckeys* is a list of keys to copy, if the source's __all__ is untrustworthy. """ if srckeys is None: srckeys = source.__all__ destination.update(dict([(name, getattr(source, name)) for name in srckeys if not (name.startswith('__') or name in ignore) ]))
[ "def", "slurp_properties", "(", "source", ",", "destination", ",", "ignore", "=", "[", "]", ",", "srckeys", "=", "None", ")", ":", "if", "srckeys", "is", "None", ":", "srckeys", "=", "source", ".", "__all__", "destination", ".", "update", "(", "dict", ...
42.857143
17.214286
def make_cashed(self): """ Включает кэширование запросов к descend """ self._descendance_cash = [dict() for _ in self.graph] self.descend = self._descend_cashed
[ "def", "make_cashed", "(", "self", ")", ":", "self", ".", "_descendance_cash", "=", "[", "dict", "(", ")", "for", "_", "in", "self", ".", "graph", "]", "self", ".", "descend", "=", "self", ".", "_descend_cashed" ]
32.5
8.166667
def parse_event_out(self, node): """ Parses <EventOut> @param node: Node containing the <EventOut> element @type node: xml.etree.Element """ try: port = node.lattrib['port'] except: self.raise_error('<EventOut> must be specify a port.') action = EventOut(port) self.current_event_handler.add_action(action)
[ "def", "parse_event_out", "(", "self", ",", "node", ")", ":", "try", ":", "port", "=", "node", ".", "lattrib", "[", "'port'", "]", "except", ":", "self", ".", "raise_error", "(", "'<EventOut> must be specify a port.'", ")", "action", "=", "EventOut", "(", ...
24.4375
19.1875
def _load_wordlist(name, stream): """ Loads list of words or phrases from file. Returns "words" or "phrases" dictionary, the same as used in config. Raises Exception if file is missing or invalid. """ items = [] max_length = None multiword = False multiword_start = None number_of_words = None for i, line in enumerate(stream, start=1): line = line.strip() if not line or line.startswith('#'): continue # Is it an option line, e.g. 'max_length = 10'? if '=' in line: if items: raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} ' '(options must be defined before words)' .format(name, i, line)) try: option, option_value = _parse_option(line) except ValueError as ex: raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} ' '({})' .format(name, i, line, ex)) if option == _CONF.FIELD.MAX_LENGTH: max_length = option_value elif option == _CONF.FIELD.NUMBER_OF_WORDS: number_of_words = option_value continue # pragma: no cover # Parse words if not multiword and _WORD_REGEX.match(line): if max_length is not None and len(line) > max_length: raise ConfigurationError('Word is too long at list {!r} line {}: {!r}' .format(name, i, line)) items.append(line) elif _PHRASE_REGEX.match(line): if not multiword: multiword = True multiword_start = len(items) phrase = tuple(line.split(' ')) if number_of_words is not None and len(phrase) != number_of_words: raise ConfigurationError('Phrase has {} word(s) (while number_of_words={}) ' 'at list {!r} line {}: {!r}' .format(len(phrase), number_of_words, name, i, line)) if max_length is not None and sum(len(x) for x in phrase) > max_length: raise ConfigurationError('Phrase is too long at list {!r} line {}: {!r}' .format(name, i, line)) items.append(phrase) else: raise ConfigurationError('Invalid syntax at list {!r} line {}: {!r}' .format(name, i, line)) if multiword: # If in phrase mode, convert everything to tuples for i in range(0, multiword_start): items[i] = (items[i], ) result = { _CONF.FIELD.TYPE: 
_CONF.TYPE.PHRASES, _CONF.FIELD.PHRASES: items } if number_of_words is not None: result[_CONF.FIELD.NUMBER_OF_WORDS] = number_of_words else: result = { _CONF.FIELD.TYPE: _CONF.TYPE.WORDS, _CONF.FIELD.WORDS: items } if max_length is not None: result[_CONF.FIELD.MAX_LENGTH] = max_length return result
[ "def", "_load_wordlist", "(", "name", ",", "stream", ")", ":", "items", "=", "[", "]", "max_length", "=", "None", "multiword", "=", "False", "multiword_start", "=", "None", "number_of_words", "=", "None", "for", "i", ",", "line", "in", "enumerate", "(", ...
43.60274
18.890411
def split_crawl_tasks(tasks, concurrency): """ Reorganize tasks according to the tasks max concurrency value. :param tasks: sub-tasks to execute, can be either a list of tasks of a list of list of tasks :param int concurrency: Maximum number of tasks that might be executed in parallel. :return: list of list of tasks. """ if any(tasks) and isinstance(tasks[0], list): for seq in tasks: if not isinstance(seq, list): raise Exception("Expected a list of tasks") else: if concurrency > 1: chain_size = int(ceil(float(len(tasks)) / concurrency)) tasks = [ chunk for chunk in chunks( iter(tasks), max(1, chain_size) ) ] else: tasks = [tasks] return tasks
[ "def", "split_crawl_tasks", "(", "tasks", ",", "concurrency", ")", ":", "if", "any", "(", "tasks", ")", "and", "isinstance", "(", "tasks", "[", "0", "]", ",", "list", ")", ":", "for", "seq", "in", "tasks", ":", "if", "not", "isinstance", "(", "seq", ...
29.931034
18.103448
def _other_dpss_method(N, NW, Kmax): """Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. See dpss function that is the official version. This version is indepedant of the C code and relies on Scipy function. However, it is slower by a factor 3 Tridiagonal form of DPSS calculation from: """ # here we want to set up an optimization problem to find a sequence # whose energy is maximally concentrated within band [-W,W]. # Thus, the measure lambda(T,W) is the ratio between the energy within # that band, and the total energy. This leads to the eigen-system # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest # eigenvalue is the sequence with maximally concentrated energy. The # collection of eigenvectors of this system are called Slepian sequences, # or discrete prolate spheroidal sequences (DPSS). Only the first K, # K = 2NW/dt orders of DPSS will exhibit good spectral concentration # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem] # Here I set up an alternative symmetric tri-diagonal eigenvalue problem # such that # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1) # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1] # and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1] # [see Percival and Walden, 1993] from scipy import linalg as la Kmax = int(Kmax) W = float(NW)/N ab = np.zeros((2,N), 'd') nidx = np.arange(N) ab[0,1:] = nidx[1:]*(N-nidx[1:])/2. ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W) # only calculate the highest Kmax-1 eigenvectors l,v = la.eig_banded(ab, select='i', select_range=(N-Kmax, N-1)) dpss = v.transpose()[::-1] # By convention (Percival and Walden, 1993 pg 379) # * symmetric tapers (k=0,2,4,...) should have a positive average. 
# * antisymmetric tapers should begin with a positive lobe fix_symmetric = (dpss[0::2].sum(axis=1) < 0) for i, f in enumerate(fix_symmetric): if f: dpss[2*i] *= -1 fix_skew = (dpss[1::2,1] < 0) for i, f in enumerate(fix_skew): if f: dpss[2*i+1] *= -1 # Now find the eigenvalues of the original # Use the autocovariance sequence technique from Percival and Walden, 1993 # pg 390 # XXX : why debias false? it's all messed up o.w., even with means # on the order of 1e-2 acvs = _autocov(dpss, debias=False) * N r = 4*W*np.sinc(2*W*nidx) r[0] = 2*W eigvals = np.dot(acvs, r) return dpss, eigvals
[ "def", "_other_dpss_method", "(", "N", ",", "NW", ",", "Kmax", ")", ":", "# here we want to set up an optimization problem to find a sequence", "# whose energy is maximally concentrated within band [-W,W].", "# Thus, the measure lambda(T,W) is the ratio between the energy within", "# that ...
43.366667
21.733333
def purge_all(user=None, fast=False): """ Remove all calculations of the given user """ user = user or getpass.getuser() if os.path.exists(datadir): if fast: shutil.rmtree(datadir) print('Removed %s' % datadir) else: for fname in os.listdir(datadir): mo = re.match('calc_(\d+)\.hdf5', fname) if mo is not None: calc_id = int(mo.group(1)) purge_one(calc_id, user)
[ "def", "purge_all", "(", "user", "=", "None", ",", "fast", "=", "False", ")", ":", "user", "=", "user", "or", "getpass", ".", "getuser", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "datadir", ")", ":", "if", "fast", ":", "shutil", ".",...
32.8
7.733333
def convert_flatten(builder, layer, input_names, output_names, keras_layer): """ Convert a flatten layer from keras to coreml. ---------- Parameters keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = (input_names[0], output_names[0]) # blob_order == 0 if the input blob needs not be rearranged # blob_order == 1 if the input blob needs to be rearranged blob_order = 0 # using keras_layer.input.shape have a "?" (Dimension[None] at the front), # making a 3D tensor with unknown batch size 4D try: in_shape = keras_layer.input_shape if len(in_shape) == 4: blob_order = 1 if len(in_shape) == 3 and in_shape[0] is None: # handling Keras rank-3 tensor (Batch, Sequence, Channels) permute_output_name = output_name + '__permute__' builder.add_permute(name=layer+'__permute__', dim=(2,1,0,3), input_name=input_name, output_name=permute_output_name) builder.add_flatten(name=layer, mode=1, input_name=permute_output_name, output_name=output_name) else: builder.add_flatten(name=layer, mode=blob_order, input_name=input_name, output_name=output_name) except: builder.add_flatten(name=layer, mode=1, input_name=input_name, output_name=output_name)
[ "def", "convert_flatten", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "input_name", ",", "output_name", "=", "(", "input_names", "[", "0", "]", ",", "output_names", "[", "0", "]", ")", "# blob_order ==...
39.694444
22.75
def transform_audio(self, y): '''Compute the CQT Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape = (n_frames, n_bins) The CQT magnitude data['phase']: np.ndarray, shape = mag.shape The CQT phase ''' n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) C = cqt(y=y, sr=self.sr, hop_length=self.hop_length, fmin=self.fmin, n_bins=(self.n_octaves * self.over_sample * 12), bins_per_octave=(self.over_sample * 12)) C = fix_length(C, n_frames) cqtm, phase = magphase(C) if self.log: cqtm = amplitude_to_db(cqtm, ref=np.max) return {'mag': cqtm.T.astype(np.float32)[self.idx], 'phase': np.angle(phase).T.astype(np.float32)[self.idx]}
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "n_frames", "=", "self", ".", "n_frames", "(", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ")", ")", "C", "=", "cqt", "(", "y", "=", "y", ",", "sr", "=", "s...
29
22.625
def extract_error_message(error): """ Extract a useful message from an error. Prefer the description attribute, then the message attribute, then the errors string conversion. In each case, fall back to the error class's name in the event that the attribute value was set to a uselessly empty string. """ try: return error.description or error.__class__.__name__ except AttributeError: try: return str(error.message) or error.__class__.__name__ except AttributeError: return str(error) or error.__class__.__name__
[ "def", "extract_error_message", "(", "error", ")", ":", "try", ":", "return", "error", ".", "description", "or", "error", ".", "__class__", ".", "__name__", "except", "AttributeError", ":", "try", ":", "return", "str", "(", "error", ".", "message", ")", "o...
36.1875
21.6875
def jid_to_time(jid): ''' Convert a salt job id into the time when the job was invoked ''' jid = six.text_type(jid) if len(jid) != 20 and (len(jid) <= 21 or jid[20] != '_'): return '' year = jid[:4] month = jid[4:6] day = jid[6:8] hour = jid[8:10] minute = jid[10:12] second = jid[12:14] micro = jid[14:20] ret = '{0}, {1} {2} {3}:{4}:{5}.{6}'.format(year, months[int(month)], day, hour, minute, second, micro) return ret
[ "def", "jid_to_time", "(", "jid", ")", ":", "jid", "=", "six", ".", "text_type", "(", "jid", ")", "if", "len", "(", "jid", ")", "!=", "20", "and", "(", "len", "(", "jid", ")", "<=", "21", "or", "jid", "[", "20", "]", "!=", "'_'", ")", ":", ...
32.695652
20.695652
def send_setpoint(self, roll, pitch, yaw, thrust): """ Send a new control setpoint for roll/pitch/yaw/thrust to the copter The arguments roll/pitch/yaw/trust is the new setpoints that should be sent to the copter """ if thrust > 0xFFFF or thrust < 0: raise ValueError('Thrust must be between 0 and 0xFFFF') if self._x_mode: roll, pitch = 0.707 * (roll - pitch), 0.707 * (roll + pitch) pk = CRTPPacket() pk.port = CRTPPort.COMMANDER pk.data = struct.pack('<fffH', roll, -pitch, yaw, thrust) self._cf.send_packet(pk)
[ "def", "send_setpoint", "(", "self", ",", "roll", ",", "pitch", ",", "yaw", ",", "thrust", ")", ":", "if", "thrust", ">", "0xFFFF", "or", "thrust", "<", "0", ":", "raise", "ValueError", "(", "'Thrust must be between 0 and 0xFFFF'", ")", "if", "self", ".", ...
36.058824
19.941176
def main_cli(): """ Actualiza la base de datos de PVPC/DEMANDA almacenados como dataframe en local, creando una nueva si no existe o hubiere algún problema. Los datos registrados se guardan en HDF5 """ def _get_parser_args(): p = argparse.ArgumentParser(description='Gestor de DB de PVPC/DEMANDA (esios.ree.es)') p.add_argument('-d', '--dem', action='store_true', help='Selecciona BD de demanda (BD de PVPC por defecto)') p.add_argument('-i', '--info', action='store', nargs='*', help="Muestra información de la BD seleccionada. " "* Puede usar intervalos temporales y nombres de columnas, " "como '-i gen noc 2017-01-24 2017-01-26'") p.add_argument('-fu', '-FU', '--forceupdate', action='store_true', help="Fuerza la reconstrucción total de la BD seleccionada") p.add_argument('-u', '-U', '--update', action='store_true', help="Actualiza la información de la BD seleccionada hasta el instante actual") p.add_argument('-p', '--plot', action='store_true', help="Genera plots de la información filtrada de la BD") p.add_argument('-v', '--verbose', action='store_true', help='Muestra información extra') arguments = p.parse_args() return arguments, p def _parse_date(string, columns): try: ts = pd.Timestamp(string) print_cyan('{} es timestamp: {:%c} --> {}'.format(string, ts, ts.date())) columns.remove(string) return ts.date().isoformat() except ValueError: pass args, parser = _get_parser_args() print_secc('ESIOS PVPC/DEMANDA') if args.dem: db_web = DatosREE(update=args.update, force_update=args.forceupdate, verbose=args.verbose) else: db_web = PVPC(update=args.update, force_update=args.forceupdate, verbose=args.verbose) data = db_web.data['data'] if args.info is not None: if len(args.info) > 0: cols = args.info.copy() dates = [d for d in [_parse_date(s, cols) for s in args.info] if d] if len(dates) == 2: data = data.loc[dates[0]:dates[1]] elif len(dates) == 1: data = data.loc[dates[0]] if len(cols) > 0: try: data = data[[c.upper() for c in cols]] except KeyError as e: print_red('NO SE 
PUEDE FILTRAR LA COLUMNA (Exception: {})\nLAS COLUMNAS DISPONIBLES SON:\n{}' .format(e, data.columns)) print_info(data) else: print_secc('LAST 24h in DB:') print_info(data.iloc[-24:]) print_cyan(data.columns) if args.plot: if args.dem: from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora print_red('IMPLEMENTAR PLOTS DEM') else: from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora if len(data) < 750: pvpcplot_grid_hora(data) # pvpcplot_tarifas_hora(data) else: print_red('La selección para plot es excesiva: {} samples de {} a {}\nSe hace plot de las últimas 24h'. format(len(data), data.index[0], data.index[-1])) pvpcplot_grid_hora(db_web.data['data'].iloc[-24:]) pvpcplot_tarifas_hora(db_web.data['data'].iloc[-24:])
[ "def", "main_cli", "(", ")", ":", "def", "_get_parser_args", "(", ")", ":", "p", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Gestor de DB de PVPC/DEMANDA (esios.ree.es)'", ")", "p", ".", "add_argument", "(", "'-d'", ",", "'--dem'", ",", ...
49.042254
25.43662
def get_entry_by_material_id(self, material_id, compatible_only=True, inc_structure=None, property_data=None, conventional_unit_cell=False): """ Get a ComputedEntry corresponding to a material_id. Args: material_id (str): Materials Project material_id (a string, e.g., mp-1234). compatible_only (bool): Whether to return only "compatible" entries. Compatible entries are entries that have been processed using the MaterialsProjectCompatibility class, which performs adjustments to allow mixing of GGA and GGA+U calculations for more accurate phase diagrams and reaction energies. inc_structure (str): If None, entries returned are ComputedEntries. If inc_structure="final", ComputedStructureEntries with final structures are returned. Otherwise, ComputedStructureEntries with initial structures are returned. property_data (list): Specify additional properties to include in entry.data. If None, no data. Should be a subset of supported_properties. conventional_unit_cell (bool): Whether to get the standard conventional unit cell Returns: ComputedEntry or ComputedStructureEntry object. """ data = self.get_entries(material_id, compatible_only=compatible_only, inc_structure=inc_structure, property_data=property_data, conventional_unit_cell=conventional_unit_cell) return data[0]
[ "def", "get_entry_by_material_id", "(", "self", ",", "material_id", ",", "compatible_only", "=", "True", ",", "inc_structure", "=", "None", ",", "property_data", "=", "None", ",", "conventional_unit_cell", "=", "False", ")", ":", "data", "=", "self", ".", "get...
51.411765
24.235294
def deconstruct(self): """Deconstruct operation.""" return ( self.__class__.__name__, [], { 'process': self.process, 'field': self._raw_field, 'schema': self.schema, 'default': self.default, } )
[ "def", "deconstruct", "(", "self", ")", ":", "return", "(", "self", ".", "__class__", ".", "__name__", ",", "[", "]", ",", "{", "'process'", ":", "self", ".", "process", ",", "'field'", ":", "self", ".", "_raw_field", ",", "'schema'", ":", "self", "....
26.583333
13.25
def stop_image_acquisition(self): """ Stops image acquisition. :return: None. """ if self.is_acquiring_images: # self._is_acquiring_images = False # if self.thread_image_acquisition.is_running: # TODO self.thread_image_acquisition.stop() with MutexLocker(self.thread_image_acquisition): # self.device.node_map.AcquisitionStop.execute() try: # Unlock TLParamsLocked in order to allow full device # configuration: self.device.node_map.TLParamsLocked.value = 0 except LogicalErrorException: # SFNC < 2.0 pass for data_stream in self._data_streams: # Stop image acquisition. try: data_stream.stop_acquisition( ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL ) except (ResourceInUseException, TimeoutException) as e: self._logger.error(e, exc_info=True) # Flash the queue for image acquisition process. data_stream.flush_buffer_queue( ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD ) for event_manager in self._event_new_buffer_managers: event_manager.flush_event_queue() if self._create_ds_at_connection: self._release_buffers() else: self._release_data_streams() # self._has_acquired_1st_image = False # self._chunk_adapter.detach_buffer() # self._logger.info( '{0} stopped image acquisition.'.format(self._device.id_) ) if self._profiler: self._profiler.print_diff()
[ "def", "stop_image_acquisition", "(", "self", ")", ":", "if", "self", ".", "is_acquiring_images", ":", "#", "self", ".", "_is_acquiring_images", "=", "False", "#", "if", "self", ".", "thread_image_acquisition", ".", "is_running", ":", "# TODO", "self", ".", "t...
32.327869
20.95082
def get_all_blockstack_ops_at( self, block_number, offset=None, count=None, include_history=None, restore_history=None ): """ Get all name, namespace, and account records affected at a particular block, in the state they were at the given block number. Paginate if offset, count are given. """ if include_history is not None: log.warn("DEPRECATED use of include_history") if restore_history is not None: log.warn("DEPRECATED use of restore_history") log.debug("Get all accepted operations at %s in %s" % (block_number, self.db_filename)) recs = namedb_get_all_blockstack_ops_at( self.db, block_number, offset=offset, count=count ) # include opcode for rec in recs: assert 'op' in rec rec['opcode'] = op_get_opcode_name(rec['op']) return recs
[ "def", "get_all_blockstack_ops_at", "(", "self", ",", "block_number", ",", "offset", "=", "None", ",", "count", "=", "None", ",", "include_history", "=", "None", ",", "restore_history", "=", "None", ")", ":", "if", "include_history", "is", "not", "None", ":"...
39.909091
25.818182
def process_bind_param(self, obj, dialect): """Get a flask_cloudy.Object and save it as a dict""" value = obj or {} if isinstance(obj, flask_cloudy.Object): value = {} for k in self.DEFAULT_KEYS: value[k] = getattr(obj, k) return super(self.__class__, self).process_bind_param(value, dialect)
[ "def", "process_bind_param", "(", "self", ",", "obj", ",", "dialect", ")", ":", "value", "=", "obj", "or", "{", "}", "if", "isinstance", "(", "obj", ",", "flask_cloudy", ".", "Object", ")", ":", "value", "=", "{", "}", "for", "k", "in", "self", "."...
39.666667
13.777778
def color(self, key=None): """ Returns the color for this data set. :return <QColor> """ if key is not None: return self._colorMap.get(nativestring(key), self._color) return self._color
[ "def", "color", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "not", "None", ":", "return", "self", ".", "_colorMap", ".", "get", "(", "nativestring", "(", "key", ")", ",", "self", ".", "_color", ")", "return", "self", ".", "...
28.666667
12.888889
def deflections_from_grid(self, grid): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ eta = self.grid_to_grid_radii(grid=grid) deflection = np.multiply(2. * self.einstein_radius_rescaled, np.divide( np.add(np.power(np.add(self.core_radius ** 2, np.square(eta)), (3. - self.slope) / 2.), -self.core_radius ** (3 - self.slope)), np.multiply((3. - self.slope), eta))) return self.grid_to_grid_cartesian(grid=grid, radius=deflection)
[ "def", "deflections_from_grid", "(", "self", ",", "grid", ")", ":", "eta", "=", "self", ".", "grid_to_grid_radii", "(", "grid", "=", "grid", ")", "deflection", "=", "np", ".", "multiply", "(", "2.", "*", "self", ".", "einstein_radius_rescaled", ",", "np", ...
50.142857
27.714286
def check_syntax(self, app_path=None): """Run syntax on each ".py" and ".json" file. Args: app_path (str, optional): Defaults to None. The path of Python files. """ app_path = app_path or '.' for filename in sorted(os.listdir(app_path)): error = None status = True if filename.endswith('.py'): try: with open(filename, 'rb') as f: ast.parse(f.read(), filename=filename) except SyntaxError: status = False # cleanup output e = [] for line in traceback.format_exc().split('\n')[-5:-2]: e.append(line.strip()) error = ' '.join(e) elif filename.endswith('.json'): try: with open(filename, 'r') as fh: json.load(fh) except ValueError as e: status = False error = e else: # skip unsupported file types continue if error: # update validation data errors self.validation_data['errors'].append( 'Syntax validation failed for {} ({}).'.format(filename, error) ) # store status for this file self.validation_data['fileSyntax'].append({'filename': filename, 'status': status})
[ "def", "check_syntax", "(", "self", ",", "app_path", "=", "None", ")", ":", "app_path", "=", "app_path", "or", "'.'", "for", "filename", "in", "sorted", "(", "os", ".", "listdir", "(", "app_path", ")", ")", ":", "error", "=", "None", "status", "=", "...
35.52381
16.761905
def default(self, obj): """Default object encoder function Args: obj (:obj:`Any`): Object to be serialized Returns: JSON string """ if isinstance(obj, datetime): return obj.isoformat() if issubclass(obj.__class__, Enum.__class__): return obj.value to_json = getattr(obj, 'to_json', None) if to_json: out = obj.to_json() if issubclass(obj.__class__, Model): out.update({'__type': obj.__class__.__name__}) return out return JSONEncoder.default(self, obj)
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "if", "issubclass", "(", "obj", ".", "__class__", ",", "Enum", ".", "__class__", ")", ":", ...
25.291667
19.25
def sphcyl(radius, colat, slon): """ This routine converts from spherical coordinates to cylindrical coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphcyl_c.html :param radius: Distance of point from origin. :type radius: float :param colat: Polar angle (co-latitude in radians) of point. :type colat: float :param slon: Azimuthal angle (longitude) of point (radians). :type slon: float :return: Distance of point from z axis, angle (radians) of point from XZ plane, Height of point above XY plane. :rtype: tuple """ radius = ctypes.c_double(radius) colat = ctypes.c_double(colat) slon = ctypes.c_double(slon) r = ctypes.c_double() lon = ctypes.c_double() z = ctypes.c_double() libspice.sphcyl_c(radius, colat, slon, ctypes.byref(r), ctypes.byref(lon), ctypes.byref(z)) return r.value, lon.value, z.value
[ "def", "sphcyl", "(", "radius", ",", "colat", ",", "slon", ")", ":", "radius", "=", "ctypes", ".", "c_double", "(", "radius", ")", "colat", "=", "ctypes", ".", "c_double", "(", "colat", ")", "slon", "=", "ctypes", ".", "c_double", "(", "slon", ")", ...
33.678571
16.178571
def manage_delObjects(self, ids=None, REQUEST=None): """Overrides parent function. If the ids passed in are from Attachment types, the function ignores the DeleteObjects permission. For the rest of types, it works as usual (checks the permission) """ if ids is None: ids = [] if isinstance(ids, basestring): ids = [ids] for id in ids: item = self._getOb(id) if isinstance(item, Attachment): # Ignore DeleteObjects permission check continue if not _checkPermission(permissions.DeleteObjects, item): raise Unauthorized, ( "Do not have permissions to remove this object") return PortalFolder.manage_delObjects(self, ids, REQUEST=REQUEST)
[ "def", "manage_delObjects", "(", "self", ",", "ids", "=", "None", ",", "REQUEST", "=", "None", ")", ":", "if", "ids", "is", "None", ":", "ids", "=", "[", "]", "if", "isinstance", "(", "ids", ",", "basestring", ")", ":", "ids", "=", "[", "ids", "]...
42.578947
16.631579
def countdown(template, duration=datetime.timedelta(seconds=5)):
    """
    Do a countdown for duration, printing the template (which may accept
    one positional argument). Template should be something like
    ``countdown complete in {} seconds.``
    """
    deadline = datetime.datetime.now() + duration
    left = deadline - datetime.datetime.now()
    while left:
        # Recompute and clamp to zero so the displayed value never goes
        # negative; the loop exits once the (clamped) remainder is zero.
        left = max(datetime.timedelta(), deadline - datetime.datetime.now())
        print(template.format(left.total_seconds()), end=' ' * 10)
        sys.stdout.flush()
        time.sleep(.1)
        # Backspace over the line so the next update overwrites it.
        print('\b' * 80, end='')
        sys.stdout.flush()
    print()
[ "def", "countdown", "(", "template", ",", "duration", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "5", ")", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "deadline", "=", "now", "+", "duration", "remaining", "=", ...
32.789474
14.789474
def sync_networks(self):
    """sync networks. It will retrieve networks from neutron and
    populate them in dfa database and dcnm
    """
    # Mirror every neutron network first, then every subnet.
    for network in self.neutronclient.list_networks().get("networks"):
        LOG.info("Syncing network %s", network["id"])
        self.network_create_func(network)
    for subnet in self.neutronclient.list_subnets().get("subnets"):
        LOG.info("Syncing subnet %s", subnet["id"])
        self.create_subnet(subnet)
[ "def", "sync_networks", "(", "self", ")", ":", "nets", "=", "self", ".", "neutronclient", ".", "list_networks", "(", ")", "for", "net", "in", "nets", ".", "get", "(", "\"networks\"", ")", ":", "LOG", ".", "info", "(", "\"Syncing network %s\"", ",", "net"...
37.714286
9.571429
async def RemoteApplicationInfo(self, offer_urls):
    '''
    offer_urls : typing.Sequence[str]
    Returns -> typing.Sequence[~RemoteApplicationInfoResult]
    '''
    # Build the RPC request envelope expected by the ApplicationOffers
    # facade (version 2) and forward it over the connection.
    params = {'offer-urls': offer_urls}
    msg = dict(type='ApplicationOffers',
               request='RemoteApplicationInfo',
               version=2,
               params=params)
    return await self.rpc(msg)
[ "async", "def", "RemoteApplicationInfo", "(", "self", ",", "offer_urls", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'ApplicationOffers'", ",", "request", "=", "'RemoteApplicationInfo'", ",", ...
35.142857
12.285714
def library_directories(self):
    """Return a list of directories containing any static libraries built by this IOTile."""
    # Only report the output folder when at least one library product exists.
    if self.find_products('library'):
        return [os.path.join(self.output_folder)]
    return []
[ "def", "library_directories", "(", "self", ")", ":", "libs", "=", "self", ".", "find_products", "(", "'library'", ")", "if", "len", "(", "libs", ")", ">", "0", ":", "return", "[", "os", ".", "path", ".", "join", "(", "self", ".", "output_folder", ")"...
29.444444
20.555556
def import_experience(self, states, internals, actions, terminal, reward):
    """
    Stores experiences.
    """
    # Build the feed for the import op and run it in one session call.
    feed = self.get_feed_dict(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward
    )
    self.monitored_session.run(
        fetches=self.import_experience_output,
        feed_dict=feed
    )
[ "def", "import_experience", "(", "self", ",", "states", ",", "internals", ",", "actions", ",", "terminal", ",", "reward", ")", ":", "fetches", "=", "self", ".", "import_experience_output", "feed_dict", "=", "self", ".", "get_feed_dict", "(", "states", "=", "...
28.733333
17.133333
def parse(self, text, as_html=True):
    """
    Get entity value with markup

    :param text: original text
    :param as_html: as html?
    :return: entity text with markup
    """
    if not text:
        return text
    entity_text = self.get_text(text)

    # One-argument decorations: entity type -> (html renderer, md renderer).
    simple = {
        MessageEntityType.BOLD: (markdown.hbold, markdown.bold),
        MessageEntityType.ITALIC: (markdown.hitalic, markdown.italic),
        MessageEntityType.PRE: (markdown.hpre, markdown.pre),
        MessageEntityType.CODE: (markdown.hcode, markdown.code),
    }
    if self.type in simple:
        html_fn, md_fn = simple[self.type]
        return html_fn(entity_text) if as_html else md_fn(entity_text)
    if self.type == MessageEntityType.URL:
        # Plain URLs link to themselves.
        link_fn = markdown.hlink if as_html else markdown.link
        return link_fn(entity_text, entity_text)
    if self.type == MessageEntityType.TEXT_LINK:
        link_fn = markdown.hlink if as_html else markdown.link
        return link_fn(entity_text, self.url)
    if self.type == MessageEntityType.TEXT_MENTION and self.user:
        return self.user.get_mention(entity_text)
    # Unknown entity types pass the text through unchanged.
    return entity_text
[ "def", "parse", "(", "self", ",", "text", ",", "as_html", "=", "True", ")", ":", "if", "not", "text", ":", "return", "text", "entity_text", "=", "self", ".", "get_text", "(", "text", ")", "if", "self", ".", "type", "==", "MessageEntityType", ".", "BO...
38.128205
12.692308
def find_mismatch(first, second, indent=''): """ Finds where two objects differ, iterating down into nested containers (i.e. dicts, lists and tuples) They can be nested containers any combination of primary dtypes, str, int, float, dict and lists Parameters ---------- first : dict | list | tuple | str | int | float The first object to compare second : dict | list | tuple | str | int | float The other object to compare with the first indent : str The amount newlines in the output string should be indented. Provide the actual indent, i.e. a string of spaces. Returns ------- mismatch : str Human readable output highlighting where two container differ. """ # Basic case where we are dealing with non-containers if not (isinstance(first, type(second)) or isinstance(second, type(first))): mismatch = (' types: self={} v other={}' .format(type(first).__name__, type(second).__name__)) elif not iscontainer(first, second): mismatch = ': self={} v other={}'.format(first, second) else: sub_indent = indent + ' ' mismatch = '' if isinstance(first, dict): if sorted(first.keys()) != sorted(second.keys()): mismatch += (' keys: self={} v other={}' .format(sorted(first.keys()), sorted(second.keys()))) else: mismatch += ":" for k in first: if first[k] != second[k]: mismatch += ("\n{indent}'{}' values{}" .format(k, find_mismatch(first[k], second[k], indent=sub_indent), indent=sub_indent)) else: mismatch += ":" for i, (f, s) in enumerate(zip_longest(first, second)): if f != s: mismatch += ("\n{indent}{} index{}" .format(i, find_mismatch(f, s, indent=sub_indent), indent=sub_indent)) return mismatch
[ "def", "find_mismatch", "(", "first", ",", "second", ",", "indent", "=", "''", ")", ":", "# Basic case where we are dealing with non-containers", "if", "not", "(", "isinstance", "(", "first", ",", "type", "(", "second", ")", ")", "or", "isinstance", "(", "seco...
41.75
19.285714
def league_header(self, league):
    """Prints the league header"""
    # Center the league name (padded with single spaces) inside a
    # 62-column banner of '=' characters.
    banner = "{:=^62}".format(" {0} ".format(league))
    click.secho(banner, fg=self.colors.MISC)
    click.echo()
[ "def", "league_header", "(", "self", ",", "league", ")", ":", "league_name", "=", "\" {0} \"", ".", "format", "(", "league", ")", "click", ".", "secho", "(", "\"{:=^62}\"", ".", "format", "(", "league_name", ")", ",", "fg", "=", "self", ".", "colors", ...
41
12.6
def _classify_target_compile_workflow(self, target): """Return the compile workflow to use for this target.""" if target.has_sources('.java') or target.has_sources('.scala'): return self.get_scalar_mirrored_target_option('workflow', target) return None
[ "def", "_classify_target_compile_workflow", "(", "self", ",", "target", ")", ":", "if", "target", ".", "has_sources", "(", "'.java'", ")", "or", "target", ".", "has_sources", "(", "'.scala'", ")", ":", "return", "self", ".", "get_scalar_mirrored_target_option", ...
53.2
19
def build_gui(self, container):
    """Build GUI such that image list area is maximized.

    Lays out, top to bottom: a channel selector row, an output
    path/suffix row, the image tree view (given all the stretch), a
    status line, and a Save/Close/Help button row.
    """
    vbox, sw, orientation = Widgets.get_oriented_box(container)

    # --- Channel selector + "modified only" filter row ---
    captions = (('Channel:', 'label', 'Channel Name', 'combobox',
                 'Modified only', 'checkbutton'), )
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)

    b.channel_name.set_tooltip('Channel for locating images to save')
    b.channel_name.add_callback('activated', self.select_channel_cb)

    mod_only = self.settings.get('modified_only', True)
    b.modified_only.set_state(mod_only)
    # Refresh the listing whenever the filter toggles.
    b.modified_only.add_callback('activated', lambda *args: self.redo())
    b.modified_only.set_tooltip("Show only locally modified images")

    container.add_widget(w, stretch=0)

    # --- Output directory and filename-suffix row ---
    captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'),
                ('Suffix:', 'llabel', 'Suffix', 'entry'))
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)

    b.outdir.set_text(self.outdir)
    b.outdir.set_tooltip('Output directory')
    b.outdir.add_callback('activated', lambda w: self.set_outdir())

    b.browse.set_tooltip('Browse for output directory')
    b.browse.add_callback('activated', lambda w: self.browse_outdir())

    b.suffix.set_text(self.suffix)
    b.suffix.set_tooltip('Suffix to append to filename')
    b.suffix.add_callback('activated', lambda w: self.set_suffix())

    container.add_widget(w, stretch=0)

    # --- Image list: the tree view receives all vertical stretch ---
    self.treeview = Widgets.TreeView(auto_expand=True,
                                     sortable=True,
                                     selection='multiple',
                                     use_alt_row_color=True)
    self.treeview.setup_table(self.columns, 1, 'IMAGE')
    self.treeview.add_callback('selected', self.toggle_save_cb)
    container.add_widget(self.treeview, stretch=1)

    # --- Status line ---
    captions = (('Status', 'llabel'), )
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)

    b.status.set_text('')
    b.status.set_tooltip('Status message')
    container.add_widget(w, stretch=0)

    # --- Button row: Save (disabled until a selection exists), Close, Help ---
    btns = Widgets.HBox()
    btns.set_border_width(4)
    btns.set_spacing(3)

    btn = Widgets.Button('Save')
    btn.set_tooltip('Save selected image(s)')
    btn.add_callback('activated', lambda w: self.save_images())
    btn.set_enabled(False)
    btns.add_widget(btn, stretch=0)
    self.w.save = btn

    btn = Widgets.Button('Close')
    btn.add_callback('activated', lambda w: self.close())
    btns.add_widget(btn, stretch=0)

    btn = Widgets.Button("Help")
    btn.add_callback('activated', lambda w: self.help())
    btns.add_widget(btn, stretch=0)

    btns.add_widget(Widgets.Label(''), stretch=1)
    container.add_widget(btns, stretch=0)

    self.gui_up = True

    # Initialize directory selection dialog
    self.dirsel = DirectorySelection(self.fv.w.root.get_widget())

    # Generate initial listing
    self.update_channels()
[ "def", "build_gui", "(", "self", ",", "container", ")", ":", "vbox", ",", "sw", ",", "orientation", "=", "Widgets", ".", "get_oriented_box", "(", "container", ")", "captions", "=", "(", "(", "'Channel:'", ",", "'label'", ",", "'Channel Name'", ",", "'combo...
39.1
21.1375
def printConcordance(concordance, prefix, tped, snps):
    """Print the concordance.

    :param concordance: the concordance.
    :param prefix: the prefix if the output files.
    :param tped: a representation of the ``tped`` of duplicated markers.
    :param snps: the position of the duplicated markers in the ``tped``.

    :type concordance: dict
    :type prefix: str
    :type tped: numpy.array
    :type snps: dict

    Prints the concordance in a file, in the format of a matrix. For each
    duplicated markers, the first line (starting with the `#` signs)
    contains the name of all the markers in the duplicated markers set.
    Then a :math:`N \\times N` matrix is printed to file (where :math:`N`
    is the number of markers in the duplicated marker list), containing
    the pairwise concordance.
    """
    try:
        outFile = open(prefix + ".concordance", "w")
    except IOError:
        # Fixed operator precedence: the old
        # `"%s: can't write file" % prefix + ".concordance"` appended the
        # extension to the message instead of to the file name.
        msg = "%s: can't write file" % (prefix + ".concordance")
        raise ProgramError(msg)

    try:
        for snpID in concordance.iterkeys():
            # Header line: the marker names in this duplicated set.
            print >>outFile, "#" + "\t".join(
                list(snpID) + list(tped[snps[snpID], 1])
            )

            # Doing the division
            true_concordance = np.true_divide(concordance[snpID][0],
                                              concordance[snpID][1])

            output = StringIO.StringIO()
            np.savetxt(output, true_concordance, delimiter="\t", fmt="%.8f")
            print >>outFile, output.getvalue().rstrip("\r\n")
    finally:
        # Always release the file handle, even if writing fails midway
        # (the original leaked it on any exception inside the loop).
        outFile.close()
[ "def", "printConcordance", "(", "concordance", ",", "prefix", ",", "tped", ",", "snps", ")", ":", "outFile", "=", "None", "try", ":", "outFile", "=", "open", "(", "prefix", "+", "\".concordance\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=",...
35.142857
22.904762
def dns_name(self):
    """Get the DNS name for this machine.

    This is a best guess based on the addresses available in current
    data. May return None if no suitable address is found.
    """
    all_addresses = self.safe_data['addresses'] or []
    # Prefer public addresses; fall back to local-cloud ones.
    for scope in ('public', 'local-cloud'):
        matches = [addr for addr in all_addresses
                   if addr['scope'] == scope]
        if matches:
            return matches[0]['value']
    return None
[ "def", "dns_name", "(", "self", ")", ":", "for", "scope", "in", "[", "'public'", ",", "'local-cloud'", "]", ":", "addresses", "=", "self", ".", "safe_data", "[", "'addresses'", "]", "or", "[", "]", "addresses", "=", "[", "address", "for", "address", "i...
39.384615
13.538462
def save(self, directory=None, append_timestep=True):
    """
    Save TensorFlow model. If no checkpoint directory is given, the
    model's default saver directory is used. Optionally appends current
    timestep to prevent overwriting previous checkpoint files. Turn off
    to be able to load model from the same given path argument as given
    here.

    Args:
        directory: Optional checkpoint directory.
        append_timestep: Appends the current timestep to the checkpoint
            file if true.

    Returns:
        Checkpoint path where the model was saved.
    """
    if self.flush_summarizer is not None:
        # Flush any pending summaries before writing the checkpoint.
        self.monitored_session.run(fetches=self.flush_summarizer)

    save_path = self.saver_directory if directory is None else directory
    global_step = self.global_timestep if append_timestep else None
    return self.saver.save(
        sess=self.session,
        save_path=save_path,
        global_step=global_step,
        meta_graph_suffix='meta',
        write_meta_graph=True,
        write_state=True
    )
[ "def", "save", "(", "self", ",", "directory", "=", "None", ",", "append_timestep", "=", "True", ")", ":", "if", "self", ".", "flush_summarizer", "is", "not", "None", ":", "self", ".", "monitored_session", ".", "run", "(", "fetches", "=", "self", ".", "...
42.538462
25
def populate_development(version):
    """Populates ``DEVELOPMENT.rst`` with release-specific data.

    This is because ``DEVELOPMENT.rst`` is used in the Sphinx documentation.

    Args:
        version (str): The current version.
    """
    # Read the template, substitute the release info, write the result.
    with open(DEVELOPMENT_TEMPLATE, "r") as template_file:
        template = template_file.read()
    rendered = template.format(revision=version, rtd_version=version)
    with open(DEVELOPMENT_FILE, "w") as out_file:
        out_file.write(rendered)
[ "def", "populate_development", "(", "version", ")", ":", "with", "open", "(", "DEVELOPMENT_TEMPLATE", ",", "\"r\"", ")", "as", "file_obj", ":", "template", "=", "file_obj", ".", "read", "(", ")", "contents", "=", "template", ".", "format", "(", "revision", ...
36.153846
17
def get_repo(self, name):
    """
    :calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
    :param name: string
    :rtype: :class:`github.Repository.Repository`
    """
    assert isinstance(name, (str, unicode)), name
    url = "/repos/" + self.login + "/" + name
    headers, data = self._requester.requestJsonAndCheck("GET", url)
    return github.Repository.Repository(
        self._requester, headers, data, completed=True)
[ "def", "get_repo", "(", "self", ",", "name", ")", ":", "assert", "isinstance", "(", "name", ",", "(", "str", ",", "unicode", ")", ")", ",", "name", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"GET\"", ",",...
40.5
18.833333
def auto_find_instance_path(self):
    """Tries to locate the instance path if it was not provided to the
    constructor of the application class.  It will basically calculate
    the path to a folder named ``instance`` next to your main file or
    the package.

    .. versionadded:: 0.8
    """
    prefix, package_path = find_package(self.import_name)
    if prefix is not None:
        # Installed package: use <prefix>/var/<name>-instance.
        return os.path.join(prefix, 'var', self.name + '-instance')
    # Not installed: fall back to an ``instance`` folder next to the package.
    return os.path.join(package_path, 'instance')
[ "def", "auto_find_instance_path", "(", "self", ")", ":", "prefix", ",", "package_path", "=", "find_package", "(", "self", ".", "import_name", ")", "if", "prefix", "is", "None", ":", "return", "os", ".", "path", ".", "join", "(", "package_path", ",", "'inst...
43.833333
18.583333
def _imread(self, file):
    """Proxy to skimage.io.imread with some fixes."""
    # The imageio plugin must be selected explicitly to read from a byte
    # stream until scikit-image 0.15 makes it the default; see:
    # https://github.com/scikit-image/scikit-image/issues/2889
    # https://github.com/scikit-image/scikit-image/pull/3126
    image = skimage_io.imread(file, as_gray=self.as_gray, plugin='imageio')
    needs_retry = image is not None and len(image.shape) != 2
    if needs_retry:
        # The PIL plugin somewhy fails to load some images
        image = skimage_io.imread(file, as_gray=self.as_gray,
                                  plugin='matplotlib')
    return image
[ "def", "_imread", "(", "self", ",", "file", ")", ":", "# For now, we have to select the imageio plugin to read image from byte stream", "# When ski-image v0.15 is released, imageio will be the default plugin, so this", "# code can be simplified at that time. See issue report and pull request:",...
63.166667
28.083333
def lag_matrix(blk, max_lag=None):
    """
    Finds the lag matrix for a given 1-D block sequence.

    Parameters
    ----------
    blk :
        An iterable with well-defined length. Don't use this function with
        Stream objects!
    max_lag :
        The size of the result, the lags you'd need. Defaults to
        ``len(blk) - 1``, the maximum lag that doesn't create fully zeroed
        matrices.

    Returns
    -------
    The covariance matrix as a list of lists. Each cell (i, j) contains the
    sum of ``blk[n - i] * blk[n - j]`` elements for all n that allows such
    without padding the given block.
    """
    if max_lag is None:
        max_lag = len(blk) - 1
    elif max_lag >= len(blk):
        raise ValueError("Block length should be higher than order")
    # `range` replaces the Python-2-only `xrange`: identical behavior here,
    # and the function stays importable under Python 3.
    return [[sum(blk[n - i] * blk[n - j]
                 for n in range(max_lag, len(blk)))
             for i in range(max_lag + 1)]
            for j in range(max_lag + 1)]
[ "def", "lag_matrix", "(", "blk", ",", "max_lag", "=", "None", ")", ":", "if", "max_lag", "is", "None", ":", "max_lag", "=", "len", "(", "blk", ")", "-", "1", "elif", "max_lag", ">=", "len", "(", "blk", ")", ":", "raise", "ValueError", "(", "\"Block...
30.785714
24.285714
def _recv(self, rm_colon=False, blocking=True, expected_replies=None, default_rvalue=[''], ignore_unexpected_replies=True, rm_first=True, recur_limit=10): """ Receives and processes an IRC protocol message. Optional arguments: * rm_colon=False - If True: If the message is > 3 items long: Remove the colon(if found) from the [3] item. Else: Remove the colon(if found) from the [2] item. * blocking=True - Should this call block? * expected_replies=None - If specified: If no matching reply is found: Return the default_rvalue. Else: Return the message. * default_rvalue=[''] - If no message or a matching message; is found, return default_rvalue. * ignore_unexpected_replies=True - If an, unexpected reply is encountered, should we keep going, until we get a valid reply? If False, it will just return default_rvalue(If a valid reply isn't found). * rm_first=True - If True, remove [0] from the message before returning it. """ append = False if expected_replies: if len(expected_replies) > 1: append = True if self.readable(): msg = self._raw_recv() else: if not blocking: return default_rvalue else: msg = self._raw_recv() msg = msg.split(None, 3) if msg[1] in self.error_dictionary: self.exception(msg[1]) if rm_colon: if len(msg) > 3: if msg[3][0] == ':': msg[3] = msg[3][1:] elif len(msg) > 2: if msg[2][0] == ':': msg[2] = msg[2][1:] if expected_replies: if msg[1] not in expected_replies: self.stepback(append) if ignore_unexpected_replies and recur_limit > 0: recur_limit -= 1 return self._recv(rm_colon=rm_colon, blocking=blocking, \ expected_replies=expected_replies, \ default_rvalue=default_rvalue, \ ignore_unexpected_replies=ignore_unexpected_replies, rm_first=rm_first, recur_limit=recur_limit) return default_rvalue if rm_first: return msg[1:] return msg
[ "def", "_recv", "(", "self", ",", "rm_colon", "=", "False", ",", "blocking", "=", "True", ",", "expected_replies", "=", "None", ",", "default_rvalue", "=", "[", "''", "]", ",", "ignore_unexpected_replies", "=", "True", ",", "rm_first", "=", "True", ",", ...
40.164179
15.38806
def run_helper_process(python_file, metadata_queue, quit_event, options):
    """
    :param python_file: The absolute path of a python file containing the
        helper process that should be run. It must define a class which is
        a subclass of BotHelperProcess.
    :param metadata_queue: A queue from which the helper process will read
        AgentMetadata updates.
    :param quit_event: An event which should be set when rlbot is shutting
        down.
    :param options: A dict with arbitrary options that will be passed
        through to the helper process.
    """
    wrapper = import_class_with_base(python_file, BotHelperProcess)
    helper_cls = wrapper.get_loaded_class()
    helper_cls(metadata_queue, quit_event, options).start()
[ "def", "run_helper_process", "(", "python_file", ",", "metadata_queue", ",", "quit_event", ",", "options", ")", ":", "class_wrapper", "=", "import_class_with_base", "(", "python_file", ",", "BotHelperProcess", ")", "helper_class", "=", "class_wrapper", ".", "get_loade...
61.916667
31.083333
def get_init_container(self,
                       init_command,
                       init_args,
                       env_vars,
                       context_mounts,
                       persistence_outputs,
                       persistence_data):
    """Pod init container for setting outputs path."""
    env = to_list(env_vars, check_none=True)
    outputs_path = stores.get_job_outputs_path(
        persistence=persistence_outputs,
        job_name=self.job_name)
    _, outputs_mount = get_pod_outputs_volume(
        persistence_outputs=persistence_outputs)
    mounts = outputs_mount + to_list(context_mounts, check_none=True)
    # Default to a shell so the joined args run as one command string.
    command = init_command or ["/bin/sh", "-c"]
    args = init_args or to_list(
        get_output_args(command=InitCommands.CREATE,
                        outputs_path=outputs_path))
    args += to_list(get_auth_context_args(entity='job',
                                          entity_name=self.job_name))
    return client.V1Container(
        name=self.init_container_name,
        image=self.init_docker_image,
        image_pull_policy=self.init_docker_image_pull_policy,
        command=command,
        args=[''.join(args)],
        env=env,
        volume_mounts=mounts)
[ "def", "get_init_container", "(", "self", ",", "init_command", ",", "init_args", ",", "env_vars", ",", "context_mounts", ",", "persistence_outputs", ",", "persistence_data", ")", ":", "env_vars", "=", "to_list", "(", "env_vars", ",", "check_none", "=", "True", "...
48.857143
12.071429
def _is_raising(body: typing.List) -> bool:
    """Return true if the given statement node raise an exception"""
    return any(isinstance(stmt, astroid.Raise) for stmt in body)
[ "def", "_is_raising", "(", "body", ":", "typing", ".", "List", ")", "->", "bool", ":", "for", "node", "in", "body", ":", "if", "isinstance", "(", "node", ",", "astroid", ".", "Raise", ")", ":", "return", "True", "return", "False" ]
35.666667
11
def examples(directory):
    """
    Generate example strategies to target folder
    """
    source_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              "examples")
    try:
        shutil.copytree(source_dir, os.path.join(directory, "examples"))
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Any failure other than "already exists" (permissions, bad
            # path, ...) is unexpected — the original swallowed these
            # silently; re-raise so the caller sees the real error.
            raise
        six.print_("Folder examples already exists.")
[ "def", "examples", "(", "directory", ")", ":", "source_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"examples\"", ")", "try", ":", "shu...
33
18.818182
def update_to_v24(self):
    """Convert older tags into an ID3v2.4 tag.

    This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to
    TDRC). If you intend to save tags, you must call this function
    at some point; it is called by default when loading the tag.
    """

    self.__update_common()

    # TDAT, TYER, and TIME have been turned into TDRC.
    try:
        # Each legacy frame is popped only when present and non-empty;
        # the pieces are accumulated into a single ISO-ish TDRC string.
        date = text_type(self.get("TYER", ""))
        if date.strip(u"\x00"):
            self.pop("TYER")
            dat = text_type(self.get("TDAT", ""))
            if dat.strip("\x00"):
                self.pop("TDAT")
                # TDAT stores DDMM; reorder into YYYY-MM-DD.
                date = "%s-%s-%s" % (date, dat[2:], dat[:2])
                time = text_type(self.get("TIME", ""))
                if time.strip("\x00"):
                    self.pop("TIME")
                    # TIME stores HHMM.
                    date += "T%s:%s:00" % (time[:2], time[2:])
        if "TDRC" not in self:
            self.add(TDRC(encoding=0, text=date))
    except UnicodeDecodeError:
        # Old ID3 tags have *lots* of Unicode problems, so if TYER
        # is bad, just chuck the frames.
        pass

    # TORY can be the first part of a TDOR.
    if "TORY" in self:
        f = self.pop("TORY")
        if "TDOR" not in self:
            try:
                self.add(TDOR(encoding=0, text=str(f)))
            except UnicodeDecodeError:
                pass

    # IPLS is now TIPL.
    if "IPLS" in self:
        f = self.pop("IPLS")
        if "TIPL" not in self:
            self.add(TIPL(encoding=f.encoding, people=f.people))

    # These can't be trivially translated to any ID3v2.4 tags, or
    # should have been removed already.
    for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME"]:
        if key in self:
            del(self[key])

    # Recurse into chapters
    for f in self.getall("CHAP"):
        f.sub_frames.update_to_v24()
    for f in self.getall("CTOC"):
        f.sub_frames.update_to_v24()
[ "def", "update_to_v24", "(", "self", ")", ":", "self", ".", "__update_common", "(", ")", "# TDAT, TYER, and TIME have been turned into TDRC.", "try", ":", "date", "=", "text_type", "(", "self", ".", "get", "(", "\"TYER\"", ",", "\"\"", ")", ")", "if", "date", ...
36.696429
15.928571
def is_attribute_applicable_to_object_type(self, attribute, object_type):
    """
    Check if the attribute is supported by the given object type.

    Args:
        attribute (string): The name of the attribute
            (e.g., 'Name'). Required.
        object_type (ObjectType): An ObjectType enumeration
            (e.g., ObjectType.SYMMETRIC_KEY). Required.
    Returns:
        bool: True if the attribute is applicable to the object type.
            False otherwise. Unknown attributes are reported as not
            applicable instead of raising.
    """
    # TODO (peterhamilton) Handle applicability between certificate types
    rule_set = self._attribute_rule_sets.get(attribute)
    if rule_set is None:
        # The original crashed with AttributeError on an unknown
        # attribute; an unknown attribute is simply not applicable.
        return False
    return object_type in rule_set.applies_to_object_types
[ "def", "is_attribute_applicable_to_object_type", "(", "self", ",", "attribute", ",", "object_type", ")", ":", "# TODO (peterhamilton) Handle applicability between certificate types", "rule_set", "=", "self", ".", "_attribute_rule_sets", ".", "get", "(", "attribute", ")", "i...
40.684211
22.052632
def add_to_toolbar(self, toolbar, widget):
    """Add widget actions to toolbar"""
    actions = widget.toolbar_actions
    # Widgets without toolbar actions contribute nothing.
    if actions is None:
        return
    add_actions(toolbar, actions)
[ "def", "add_to_toolbar", "(", "self", ",", "toolbar", ",", "widget", ")", ":", "actions", "=", "widget", ".", "toolbar_actions", "if", "actions", "is", "not", "None", ":", "add_actions", "(", "toolbar", ",", "actions", ")" ]
40.2
2.6
def calc_stress_tf(self, lin, lout, damped):
    """Compute the stress transfer function.

    Parameters
    ----------
    lin : :class:`~site.Location`
        Location of input
    lout : :class:`~site.Location`
        Location of output. Note that this would typically be midheight
        of the layer.
    """
    strain_tf = self.calc_strain_tf(lin, lout)
    # Using the complex shear modulus folds damping into the transfer
    # function; the real shear modulus ignores it.
    modulus = (lout.layer.comp_shear_mod if damped
               else lout.layer.shear_mod)
    return strain_tf * modulus
[ "def", "calc_stress_tf", "(", "self", ",", "lin", ",", "lout", ",", "damped", ")", ":", "tf", "=", "self", ".", "calc_strain_tf", "(", "lin", ",", "lout", ")", "if", "damped", ":", "# Scale by complex shear modulus to include the influence of", "# damping", "tf"...
28.952381
17.380952
def ParseMultiple(self, result_dicts):
    """Parse the WMI Win32_UserAccount output."""
    for entry in result_dicts:
        user = rdf_client.User()
        # Copy every mapped WMI field that is present in this row.
        for wmi_key, kb_key in iteritems(self.account_mapping):
            try:
                user.Set(kb_key, entry[wmi_key])
            except KeyError:
                pass
        # We need at least a sid or a username. If these are missing its
        # likely we retrieved just the userdomain for an AD account that
        # has a name collision with a local account that is correctly
        # populated. We drop the bogus domain account.
        if user.sid or user.username:
            yield user
[ "def", "ParseMultiple", "(", "self", ",", "result_dicts", ")", ":", "for", "result_dict", "in", "result_dicts", ":", "kb_user", "=", "rdf_client", ".", "User", "(", ")", "for", "wmi_key", ",", "kb_key", "in", "iteritems", "(", "self", ".", "account_mapping",...
42.733333
17.2
def analyze(output_dir, dataset, cloud=False, project_id=None):
    """Blocking version of analyze_async. See documentation of analyze_async."""
    # Kick off the async job, then block until it completes.
    job = analyze_async(output_dir=output_dir,
                        dataset=dataset,
                        cloud=cloud,
                        project_id=project_id)
    job.wait()
    print('Analyze: %s' % (job.state,))
[ "def", "analyze", "(", "output_dir", ",", "dataset", ",", "cloud", "=", "False", ",", "project_id", "=", "None", ")", ":", "job", "=", "analyze_async", "(", "output_dir", "=", "output_dir", ",", "dataset", "=", "dataset", ",", "cloud", "=", "cloud", ",",...
34.222222
15.111111
def _index(self, refresh_time=None):
    """Bottle callback for index.html (/) file."""
    # Fall back to the configured interval for missing or sub-second values.
    if refresh_time is None or refresh_time < 1:
        refresh_time = self.args.time
    # Refresh the stats before rendering the page.
    self.__update__()
    return template("index.html", refresh_time=refresh_time)
[ "def", "_index", "(", "self", ",", "refresh_time", "=", "None", ")", ":", "if", "refresh_time", "is", "None", "or", "refresh_time", "<", "1", ":", "refresh_time", "=", "self", ".", "args", ".", "time", "# Update the stat", "self", ".", "__update__", "(", ...
28.545455
19.454545
def repeat(self, target, sender, **kwargs):
    "will repeat whatever yo say"
    # Channel targets (names starting with '#') echo back to the channel;
    # anything else is answered privately to the sender.
    recipient = target if target.startswith("#") else sender
    self.message(recipient, kwargs["msg"])
[ "def", "repeat", "(", "self", ",", "target", ",", "sender", ",", "*", "*", "kwargs", ")", ":", "if", "target", ".", "startswith", "(", "\"#\"", ")", ":", "self", ".", "message", "(", "target", ",", "kwargs", "[", "\"msg\"", "]", ")", "else", ":", ...
36.833333
8.833333
def send_results(self):
    ''' send results

    For every configured SMTP server that has queued check results,
    build one e-mail (single result -> empty body with the result in
    the subject; multiple results -> one line per result plus a
    per-status summary in the subject) and send it.
    '''
    for server in self.servers:
        if self.servers[server]['results']:
            if len(self.servers[server]['results']) == 1:
                # Single result: everything fits in the subject line.
                msg = MIMEText('')
                msg['Subject'] = '[%(custom_fqdn)s] [%(service_description)s] %(return_status)s: %(output)s' % self.servers[server]['results'][0]
            else:
                # Multiple results: one body line per result, and a count
                # of each return code (indexed by STATUSES) in the subject.
                txt = ''
                summary = [0, 0, 0, 0]
                for results in self.servers[server]['results']:
                    txt += '[%(service_description)s] %(return_status)s: %(output)s\n' % results
                    summary[results['return_code']] += 1
                msg = MIMEText(txt)
                subject = '[%(custom_fqdn)s]' % self.servers[server]['results'][0]
                for i, status in enumerate(STATUSES):
                    subject += ' %s:%s' % (status[0], summary[i])
                msg['Subject'] = subject
            msg['From'] = self.servers[server]['from']
            msg['To'] = ', '.join(self.servers[server]['to'])
            # Open an implicit-TLS or plain SMTP connection per server config.
            if self.servers[server]['tls']:
                smtp_server = smtplib.SMTP_SSL(self.servers[server]['host'], self.servers[server]['port'])
            else:
                smtp_server = smtplib.SMTP(self.servers[server]['host'], self.servers[server]['port'])
            # Authenticate only when a non-empty login is configured.
            if self.servers[server]['login'] and len(self.servers[server]['login']) > 0:
                smtp_server.login(self.servers[server]['login'], self.servers[server]['password'])
            # NOTE(review): quit() is not in a finally, so a sendmail
            # failure leaks the connection — consider try/finally.
            smtp_server.sendmail(self.servers[server]['from'], self.servers[server]['to'], msg.as_string())
            smtp_server.quit()
            LOG.info("[email][%s]: e-mail sent from: %s to: %s", server, self.servers[server]['from'], self.servers[server]['to'])
[ "def", "send_results", "(", "self", ")", ":", "for", "server", "in", "self", ".", "servers", ":", "if", "self", ".", "servers", "[", "server", "]", "[", "'results'", "]", ":", "if", "len", "(", "self", ".", "servers", "[", "server", "]", "[", "'res...
55.529412
31.941176
def convert_type(self, type):
    """Convert type to SQL
    """
    # Generic mapping; None marks types with no portable SQL equivalent.
    sql_types = {
        'any': sa.Text,
        'array': None,
        'boolean': sa.Boolean,
        'date': sa.Date,
        'datetime': sa.DateTime,
        'duration': None,
        'geojson': None,
        'geopoint': None,
        'integer': sa.Integer,
        'number': sa.Float,
        'object': None,
        'string': sa.Text,
        'time': sa.Time,
        'year': sa.Integer,
        'yearmonth': None,
    }

    # Postgres can represent several of the generic "None" types as JSONB
    # and prefers exact numerics.
    if self.__dialect == 'postgresql':
        sql_types.update({
            'array': JSONB,
            'geojson': JSONB,
            'number': sa.Numeric,
            'object': JSONB,
        })

    # Unknown field types are a storage error, not a KeyError.
    if type not in sql_types:
        message = 'Field type "%s" is not supported'
        raise tableschema.exceptions.StorageError(message % type)

    return sql_types[type]
[ "def", "convert_type", "(", "self", ",", "type", ")", ":", "# Default dialect", "mapping", "=", "{", "'any'", ":", "sa", ".", "Text", ",", "'array'", ":", "None", ",", "'boolean'", ":", "sa", ".", "Boolean", ",", "'date'", ":", "sa", ".", "Date", ","...
27.052632
14.394737
def set_plot_CC_T_rho_new(self,fig='CC evol',linestyle=['-'],burn_limit=0.997,color=['r'],marker=['o'],markevery=500):
    '''
    Plots end_model - array, control how far in models a run is plottet, if -1 till end
    symbs_1 - set symbols of runs
    '''
    # NOTE: Python 2 code (print statements).  For each run this finds the
    # cycles with the highest peak temperature and highest peak density and
    # plots their rho(T) profiles (points with rho == 1.0 are dropped,
    # presumably placeholder/ghost zones — TODO confirm).
    if len(linestyle)==0:
        linestyle=200*['-']
    plt.figure(fig)
    for i in range(len(self.runs_H5_surf)):
        sefiles=se(self.runs_H5_out[i])
        t1_model=-1
        mini=sefiles.get('mini')
        zini=sefiles.get('zini')
        label=str(mini)+'$M_{\odot}$, Z='+str(zini)
        model=sefiles.se.cycles
        model_list=[]
        for k in range(0,len(model),1):
            model_list.append(model[k])
        print 'REad Rho,T, this might take a while...'
        rho1=sefiles.get(model_list,'rho') #[:(t1_model-t0_model)]
        T1=sefiles.get(model_list,'temperature')#[:(t1_model-t0_model)]
        print 'finished'
        rho=[]
        T=[]
        T_unit=sefiles.get('temperature_unit')
        labeldone=False
        # Scan every cycle for the global maxima of T and rho; remember the
        # cycle index where each maximum occurs.
        for k in range(len(model_list)):
            #print 'test model ',model_list[k]
            t9max=max(np.array(T1[k])*T_unit/1.e9)
            #T.append(max(t9))
            rho1max=max(rho1[k])
            print 'model',model_list[k]
            print 'maxT, maxrho'
            print t9max,rho1max
            if k==0:
                t9_prev=t9max
                rho1_prev=rho1max
                idx_T=0
                idx_rho=0
                continue
            if t9max>t9_prev:
                idx_T=k
                t9_prev=t9max
            if rho1max>rho1_prev:
                idx_rho=k
                rho1_prev=rho1max
        print 'found highest rho',idx_rho,max(np.array(T1[idx_rho])*T_unit/1.0e9),max(rho1[idx_rho]),model_list[idx_rho]
        print 'found highest T',idx_T,max(np.array(T1[idx_T])*T_unit/1.0e9),max(rho1[idx_T]),model_list[idx_T]
        if idx_T==idx_rho:
            # Same cycle holds both maxima: plot a single labelled curve.
            x=np.array(T1[idx_T])*T_unit/1e9
            y=rho1[idx_T]
            rho1=[]
            T1=[]
            for k in range(len(x)):
                if not y[k]==1.0:
                    rho1.append(y[k])
                    T1.append(x[k])
            x=T1
            y=rho1
            plt.plot(x,y,label=label,color=color[i],marker=marker[i],linestyle=linestyle[i],markevery=markevery)
            #rhoi.append(max(rho1[k]))
        else:
            # Different cycles: plot the max-T cycle (labelled) and the
            # max-rho cycle (unlabelled, same style).
            #for max T
            x=np.array(T1[idx_T])*T_unit/1e9
            y=rho1[idx_T]
            rho_temp=[]
            T_temp=[]
            for k in range(len(x)):
                if not y[k]==1.0:
                    rho_temp.append(y[k])
                    T_temp.append(x[k])
            x=T_temp
            y=rho_temp
            plt.plot(x,y,label=label,color=color[i],marker=marker[i],linestyle=linestyle[i],markevery=markevery)
            #for max rho
            x=np.array(T1[idx_rho])*T_unit/1e9
            y=rho1[idx_rho]
            rho_temp=[]
            T_temp=[]
            for k in range(len(x)):
                if not y[k]==1.0:
                    rho_temp.append(y[k])
                    T_temp.append(x[k])
            x=T_temp
            y=rho_temp
            plt.plot(x,y,color=color[i],marker=marker[i],linestyle=linestyle[i],markevery=markevery)
[ "def", "set_plot_CC_T_rho_new", "(", "self", ",", "fig", "=", "'CC evol'", ",", "linestyle", "=", "[", "'-'", "]", ",", "burn_limit", "=", "0.997", ",", "color", "=", "[", "'r'", "]", ",", "marker", "=", "[", "'o'", "]", ",", "markevery", "=", "500",...
38.62069
20.781609
def getchar(echo=False):
    """Read a single character from the terminal and return it.

    Always returns a unicode string; in rare situations (multiple characters
    buffered, stdin not a real terminal, certain non-ASCII input on Windows)
    more than one character may be returned at once.  Reads the terminal even
    when stdin is a pipe.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal.  The default is to not show it.
    """
    # _getchar is lazily resolved so the platform-specific implementation is
    # only imported when actually needed.
    reader = _getchar
    if reader is None:
        from ._termui_impl import getchar as reader
    return reader(echo)
[ "def", "getchar", "(", "echo", "=", "False", ")", ":", "f", "=", "_getchar", "if", "f", "is", "None", ":", "from", ".", "_termui_impl", "import", "getchar", "as", "f", "return", "f", "(", "echo", ")" ]
42.291667
24.208333
def create_event_handler(event_type, handler):
    """Register a comm and return a serializable object with target name"""
    # The target name is derived from the handler hash so that each
    # (handler, event_type) pair gets its own comm channel.
    target_name = '{hash}_{event_type}'.format(hash=hash(handler), event_type=event_type)

    def handle_comm_opened(comm, msg):
        # Wire incoming comm messages through the user handler; any truthy
        # return value is sent back over the comm.
        @comm.on_msg
        def _handle_msg(msg):
            event = json.loads(msg['content']['data'])
            reply = handler(event)
            if reply:
                comm.send(reply)

        comm.send('Comm target "{target_name}" registered by vdom'.format(target_name=target_name))

    # Only register when running inside an IPython kernel.
    if get_ipython():
        get_ipython().kernel.comm_manager.register_target(target_name, handle_comm_opened)

    # Return a serialized object
    return target_name
[ "def", "create_event_handler", "(", "event_type", ",", "handler", ")", ":", "target_name", "=", "'{hash}_{event_type}'", ".", "format", "(", "hash", "=", "hash", "(", "handler", ")", ",", "event_type", "=", "event_type", ")", "def", "handle_comm_opened", "(", ...
36.090909
21.318182
def set(self, key, value, expires=None, future=None):
    """Store *value* under *key* and return it.

    Updates the existing moment in place when the key is present; otherwise
    creates a fresh moment sharing this container's lock.
    """
    with self._lock:
        if key in self._dict:
            self._dict[key].set(value, expires=expires, future=future)
        else:
            self._dict[key] = moment(value, expires=expires, future=future,
                                     lock=self._lock)
        return value
[ "def", "set", "(", "self", ",", "key", ",", "value", ",", "expires", "=", "None", ",", "future", "=", "None", ")", ":", "# assert the values above", "with", "self", ".", "_lock", ":", "try", ":", "self", ".", "_dict", "[", "key", "]", ".", "set", "...
38.1
17.8
def xmlGenBinaryDataArrayList(binaryDataInfo, binaryDataDict,
                              compression='zlib', arrayTypes=None):
    """Build an mzML ``<binaryDataArrayList>`` element for one spectrum.

    :params binaryDataInfo: per-array metadata; presumably maps array type to
        a dict with at least 'params' and 'dataProcessingRef' — TODO confirm
    :params binaryDataDict: maps array type to the numpy array to encode
    :params compression: 'zlib' enables compression; any other value disables it
    :params arrayTypes: optional explicit ordering of the arrays

    :returns: lxml/etree element ready to be appended to a spectrum node
    """
    #Note: any other value for "compression" than "zlib" results in no
    # compression
    #Note: Use arrayTypes parameter to specify the order of the arrays
    if arrayTypes is None:
        arrayTypes = [_ for _ in viewkeys(binaryDataInfo)]
    numEntries = len(binaryDataInfo)
    xmlBinaryDataArrayList = ETREE.Element('binaryDataArrayList',
                                           {'count': str(numEntries)}
                                           )
    for arrayType in arrayTypes:
        # NOTE(review): dataTypeParam is computed but never used below.
        _, dataTypeParam = maspy.xml.findBinaryDataType(binaryDataInfo[arrayType]['params'])
        binaryData = binaryDataDict[arrayType]
        # 64-bit encoding for float64 arrays, 32-bit otherwise.
        bitEncoding = '64' if binaryData.dtype.str == '<f8' else '32'
        if binaryData.size > 0:
            # NOTE(review): arrayLength is assigned but never used below.
            binaryData, arrayLength = maspy.xml.encodeBinaryData(binaryData,
                                                                 bitEncoding,
                                                                 compression
                                                                 )
        else:
            binaryData = ''
            arrayLength = 0

        # --- define binaryDataArray parameters --- #
        # MS:1000523/MS:1000521 = 64/32-bit float; MS:1000574/MS:1000576 =
        # zlib / no compression (PSI-MS controlled vocabulary accessions).
        params = list()
        if bitEncoding == '64':
            params.append(('MS:1000523', None, None))
        else:
            params.append(('MS:1000521', None, None))
        if compression == 'zlib':
            params.append(('MS:1000574', None, None))
        else:
            params.append(('MS:1000576', None, None))
        # Carry over caller-supplied params, but never duplicate the
        # mandatory encoding/compression accessions set above.
        mandatoryAccessions = ['MS:1000523', 'MS:1000521', 'MS:1000574',
                               'MS:1000576'
                               ]
        for param in binaryDataInfo[arrayType]['params']:
            if param[0] not in mandatoryAccessions:
                params.append(param)

        #Note: not all attributes supported
        binaryDataArrayAttrib = {'encodedLength': str(len(binaryData))}
        for attr in ['dataProcessingRef']:
            if binaryDataInfo[arrayType][attr] is not None:
                binaryDataArrayAttrib[attr] = binaryDataInfo[arrayType][attr]
        xmlBinaryDataArray = ETREE.Element('binaryDataArray',
                                           binaryDataArrayAttrib
                                           )
        maspy.xml.xmlAddParams(xmlBinaryDataArray, params)

        xmlBinary = ETREE.Element('binary')
        xmlBinary.text = binaryData
        xmlBinaryDataArray.append(xmlBinary)
        xmlBinaryDataArrayList.append(xmlBinaryDataArray)
    return xmlBinaryDataArrayList
[ "def", "xmlGenBinaryDataArrayList", "(", "binaryDataInfo", ",", "binaryDataDict", ",", "compression", "=", "'zlib'", ",", "arrayTypes", "=", "None", ")", ":", "#Note: any other value for \"compression\" than \"zlib\" results in no", "# compression", "#Note: Use arrayTypes param...
43.8
17.784615
def dir():
    """Return the list of patched function names.

    Used for patching functions imported from the module.  The base list
    covers the cross-platform ``os.path`` API; Python-2-only and
    platform-dependent names are appended conditionally.
    """
    # Renamed the accumulator from ``dir`` so the body no longer shadows the
    # ``dir`` builtin (the public function name itself is kept for callers).
    names = [
        'abspath', 'dirname', 'exists', 'expanduser', 'getatime',
        'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
        'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath',
        'realpath', 'relpath', 'split', 'splitdrive'
    ]
    # os.path.walk only exists on Python 2.
    if IS_PY2:
        names.append('walk')
    # os.path.samefile is unavailable on Windows under Python 2.
    if sys.platform != 'win32' or not IS_PY2:
        names.append('samefile')
    return names
[ "def", "dir", "(", ")", ":", "dir", "=", "[", "'abspath'", ",", "'dirname'", ",", "'exists'", ",", "'expanduser'", ",", "'getatime'", ",", "'getctime'", ",", "'getmtime'", ",", "'getsize'", ",", "'isabs'", ",", "'isdir'", ",", "'isfile'", ",", "'islink'", ...
38.8
18.133333
def create(self, **fields):
    """Create a record in the associated app and return it.

    Keyword arguments are field names mapped to python values; both simple
    identifiers (``field_a='x'``) and complex names passed via ``**{...}``
    are accepted.  Values are validated before the create request is sent.

    Returns:
        Record: the newly created record, populated from the API response.

    Raises:
        swimlane.exceptions.UnknownField: for fields not present on the app
        swimlane.exceptions.ValidationError: if any field fails validation
    """
    record = record_factory(self._app, fields)
    record.save()
    return record
[ "def", "create", "(", "self", ",", "*", "*", "fields", ")", ":", "new_record", "=", "record_factory", "(", "self", ".", "_app", ",", "fields", ")", "new_record", ".", "save", "(", ")", "return", "new_record" ]
31.136364
27.840909
def compute_edges(edges):
    """
    Turn bin centers into bin edges.

    Interior edges are the midpoints of adjacent centers; the two outer
    edges are mirrored so each end center sits equidistant between its
    neighbouring edges.
    """
    centers = np.asarray(edges)
    # Promote integer centers so the midpoint arithmetic stays fractional.
    if centers.dtype.kind == 'i':
        centers = centers.astype('f')
    mids = (centers[1:] + centers[:-1]) / 2.0
    first = 2 * centers[0] - mids[0]
    last = 2 * centers[-1] - mids[-1]
    return np.concatenate([[first], mids, [last]])
[ "def", "compute_edges", "(", "edges", ")", ":", "edges", "=", "np", ".", "asarray", "(", "edges", ")", "if", "edges", ".", "dtype", ".", "kind", "==", "'i'", ":", "edges", "=", "edges", ".", "astype", "(", "'f'", ")", "midpoints", "=", "(", "edges"...
39.333333
15.666667
def DeserializeUnsigned(self, reader):
    """Read the unsigned transaction portion from *reader*.

    Verifies the leading type byte against this transaction's expected
    ``Type`` before delegating to the type-specific deserializer.

    Args:
        reader (neo.IO.BinaryReader): source stream.

    Raises:
        Exception: when the type byte does not match.
    """
    expected = int.from_bytes(self.Type, 'little')
    actual = reader.ReadByte()
    if actual != expected:
        raise Exception('incorrect type {}, wanted {}'.format(actual, expected))
    self.DeserializeUnsignedWithoutType(reader)
[ "def", "DeserializeUnsigned", "(", "self", ",", "reader", ")", ":", "txtype", "=", "reader", ".", "ReadByte", "(", ")", "if", "txtype", "!=", "int", ".", "from_bytes", "(", "self", ".", "Type", ",", "'little'", ")", ":", "raise", "Exception", "(", "'in...
33.214286
19.214286
def parse(self, source: str=None, entry: str=None) -> parsing.Node:
    """Parse *source* with the grammar and return the parse tree.

    When *entry* is omitted the instance's default entry rule is used;
    a missing entry rule is a ValueError.
    """
    self.from_string = True
    if source is not None:
        self.parsed_stream(source)
    rule = entry if entry is not None else self.entry
    if rule is None:
        raise ValueError("No entry rule name defined for {}".format(
            self.__class__.__name__))
    return self._do_parse(rule)
[ "def", "parse", "(", "self", ",", "source", ":", "str", "=", "None", ",", "entry", ":", "str", "=", "None", ")", "->", "parsing", ".", "Node", ":", "self", ".", "from_string", "=", "True", "if", "source", "is", "not", "None", ":", "self", ".", "p...
39.909091
11.363636
def qaoa_ansatz(gammas, betas):
    """
    Build the QAOA ansatz program for the given angle schedules.

    One cost+driver layer is emitted per (gamma, beta) pair, so
    ``len(gammas) == len(betas) == P`` yields an order-P ansatz.

    :param list(float) gammas: Angles parameterizing the cost Hamiltonian.
    :param list(float) betas: Angles parameterizing the driver Hamiltonian.
    :return: The QAOA ansatz program.
    :rtype: Program.
    """
    layers = []
    for gamma, beta in zip(gammas, betas):
        layers.append(exponentiate_commuting_pauli_sum(h_cost)(gamma) +
                      exponentiate_commuting_pauli_sum(h_driver)(beta))
    return Program(layers)
[ "def", "qaoa_ansatz", "(", "gammas", ",", "betas", ")", ":", "return", "Program", "(", "[", "exponentiate_commuting_pauli_sum", "(", "h_cost", ")", "(", "g", ")", "+", "exponentiate_commuting_pauli_sum", "(", "h_driver", ")", "(", "b", ")", "for", "g", ",", ...
46.769231
22.923077
def reflect_runtime_member(self, name):
    """Reflect *name* using ONLY runtime reflection.

    Scopes are searched innermost-first; the first successful lookup wins.
    You most likely want ScopeStack.reflect instead.

    Returns:
        Type of *name*, or protocol.AnyType when no scope resolves it.
    """
    for scope in reversed(self.scopes):
        try:
            return structured.reflect_runtime_member(scope, name)
        except (NotImplementedError, KeyError, AttributeError):
            # This scope can't resolve the name — fall through to the next.
            pass
    return protocol.AnyType
[ "def", "reflect_runtime_member", "(", "self", ",", "name", ")", ":", "for", "scope", "in", "reversed", "(", "self", ".", "scopes", ")", ":", "try", ":", "return", "structured", ".", "reflect_runtime_member", "(", "scope", ",", "name", ")", "except", "(", ...
32.2
18.933333
def writeRoot(self, root):
    """Serialize *root* as a binary plist and write it to self.file.

    Strategy is:
    - write header
    - wrap root object so everything is hashable
    - compute size of objects which will be written
      - need to do this in order to know how large the object refs
        will be in the list/dict/set reference lists
    - write objects
      - keep objects in writtenReferences
      - keep positions of object references in referencePositions
      - write object references with the length computed previously
    - computer object reference length
    - write object reference positions
    - write trailer
    """
    output = self.header
    wrapped_root = self.wrapRoot(root)
    # First pass: sizes/offsets only, no bytes emitted yet.
    self.computeOffsets(wrapped_root, asReference=True, isRoot=True)
    # objectRefSize must be fixed before any container is serialized.
    self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))})
    self.writeObjectReference(wrapped_root, output)
    output = self.writeObject(wrapped_root, output, setReferencePosition=True)

    # output size at this point is an upper bound on how big the
    # object reference offsets need to be.
    self.trailer = self.trailer._replace(**{
        'offsetSize':self.intSize(len(output)),
        'offsetCount':len(self.computedUniques),
        'offsetTableOffset':len(output),
        'topLevelObjectNumber':0
        })

    output = self.writeOffsetTable(output)
    # Trailer layout: 6 pad bytes, two size bytes, three 64-bit fields.
    output += pack('!xxxxxxBBQQQ', *self.trailer)
    self.file.write(output)
[ "def", "writeRoot", "(", "self", ",", "root", ")", ":", "output", "=", "self", ".", "header", "wrapped_root", "=", "self", ".", "wrapRoot", "(", "root", ")", "self", ".", "computeOffsets", "(", "wrapped_root", ",", "asReference", "=", "True", ",", "isRoo...
43.628571
16.771429
def python_executable_changed(self, pyexec):
    """Custom Python executable value has been changed.

    Returns True only when the custom-interpreter radio is active and
    *pyexec* is a valid, non-default interpreter; otherwise warns the user
    (when invalid) and falls back to the default interpreter choice.
    """
    # Ignore edits while the "default interpreter" option is selected.
    if not self.cus_exec_radio.isChecked():
        return False
    def_pyexec = get_python_executable()
    # pyexec may arrive as a QString; normalize to unicode text.
    if not is_text_string(pyexec):
        pyexec = to_text_string(pyexec.toUtf8(), 'utf-8')
    # Selecting the default interpreter explicitly is treated as no change.
    if pyexec == def_pyexec:
        return False
    if (not programs.is_python_interpreter(pyexec) or
        not self.warn_python_compatibility(pyexec)):
        QMessageBox.warning(self, _('Warning'),
                _("You selected an invalid Python interpreter for the "
                  "console so the previous interpreter will stay. Please "
                  "make sure to select a valid one."), QMessageBox.Ok)
        # Revert the UI to the default-interpreter option.
        self.def_exec_radio.setChecked(True)
        return False
    return True
[ "def", "python_executable_changed", "(", "self", ",", "pyexec", ")", ":", "if", "not", "self", ".", "cus_exec_radio", ".", "isChecked", "(", ")", ":", "return", "False", "def_pyexec", "=", "get_python_executable", "(", ")", "if", "not", "is_text_string", "(", ...
48.777778
15.777778
def attachIterator(self, login, tableName, setting, scopes):
    """Attach a server-side iterator to a table (synchronous RPC).

    Parameters:
     - login
     - tableName
     - setting
     - scopes
    """
    # Thrift-style call: send the request, then block on the reply.
    self.send_attachIterator(login, tableName, setting, scopes)
    self.recv_attachIterator()
[ "def", "attachIterator", "(", "self", ",", "login", ",", "tableName", ",", "setting", ",", "scopes", ")", ":", "self", ".", "send_attachIterator", "(", "login", ",", "tableName", ",", "setting", ",", "scopes", ")", "self", ".", "recv_attachIterator", "(", ...
23.7
18.3
def _kaiser(n, beta): """Independant Kaiser window For the definition of the Kaiser window, see A. V. Oppenheim & R. W. Schafer, "Discrete-Time Signal Processing". The continuous version of width n centered about x=0 is: .. note:: 2 times slower than scipy.kaiser """ from scipy.special import iv as besselI m = n - 1 k = arange(0, m) k = 2. * beta / m * sqrt (k * (m - k)) w = besselI (0, k) / besselI (0, beta) return w
[ "def", "_kaiser", "(", "n", ",", "beta", ")", ":", "from", "scipy", ".", "special", "import", "iv", "as", "besselI", "m", "=", "n", "-", "1", "k", "=", "arange", "(", "0", ",", "m", ")", "k", "=", "2.", "*", "beta", "/", "m", "*", "sqrt", "...
30.266667
21.533333
def addPattern(self, word, vector):
    """
    Register *vector* under the key *word*.

    Example: net.addPattern("tom", [0, 0, 0, 1])

    Raises NetworkError when the key is already taken; call delPattern
    first to free it.
    """
    if word not in self.patterns:
        self.patterns[word] = vector
    else:
        raise NetworkError('Pattern key already in use. Call delPattern to free key.', word)
[ "def", "addPattern", "(", "self", ",", "word", ",", "vector", ")", ":", "if", "word", "in", "self", ".", "patterns", ":", "raise", "NetworkError", "(", "'Pattern key already in use. Call delPattern to free key.'", ",", "word", ")", "else", ":", "self", ".", "p...
30.545455
16.545455
def get_packages(self, feed_id, protocol_type=None, package_name_query=None, normalized_package_name=None, include_urls=None, include_all_versions=None, is_listed=None, get_top_package_versions=None, is_release=None, include_description=None, top=None, skip=None, include_deleted=None, is_cached=None, direct_upstream_id=None):
    """GetPackages.
    [Preview API] Get details about all of the packages in the feed. Use the various filters to include or exclude information from the result set.
    :param str feed_id: Name or Id of the feed.
    :param str protocol_type: One of the supported artifact package types.
    :param str package_name_query: Filter to packages that contain the provided string. Characters in the string must conform to the package name constraints.
    :param str normalized_package_name: [Obsolete] Used for legacy scenarios and may be removed in future versions.
    :param bool include_urls: True to return REST Urls with the response. Default is True.
    :param bool include_all_versions: True to return all versions of the package in the response. Default is false (latest version only).
    :param bool is_listed: Only applicable for NuGet packages, setting it for other package types will result in a 404. If false, delisted package versions will be returned. Use this to filter the response when includeAllVersions is set to true. Default is unset (do not return delisted packages).
    :param bool get_top_package_versions: Changes the behavior of $top and $skip to return all versions of each package up to $top. Must be used in conjunction with includeAllVersions=true
    :param bool is_release: Only applicable for Nuget packages. Use this to filter the response when includeAllVersions is set to true. Default is True (only return packages without prerelease versioning).
    :param bool include_description: Return the description for every version of each package in the response. Default is False.
    :param int top: Get the top N packages (or package versions where getTopPackageVersions=true)
    :param int skip: Skip the first N packages (or package versions where getTopPackageVersions=true)
    :param bool include_deleted: Return deleted or unpublished versions of packages in the response. Default is False.
    :param bool is_cached: [Obsolete] Used for legacy scenarios and may be removed in future versions.
    :param str direct_upstream_id: Filter results to return packages from a specific upstream.
    :rtype: [Package]
    """
    route_values = {}
    if feed_id is not None:
        route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
    # The fourteen optional filters all follow the same
    # "serialize when provided" pattern, so drive them from one table:
    # (REST query name, python parameter name, value, wire type).
    optional_filters = [
        ('protocolType', 'protocol_type', protocol_type, 'str'),
        ('packageNameQuery', 'package_name_query', package_name_query, 'str'),
        ('normalizedPackageName', 'normalized_package_name', normalized_package_name, 'str'),
        ('includeUrls', 'include_urls', include_urls, 'bool'),
        ('includeAllVersions', 'include_all_versions', include_all_versions, 'bool'),
        ('isListed', 'is_listed', is_listed, 'bool'),
        ('getTopPackageVersions', 'get_top_package_versions', get_top_package_versions, 'bool'),
        ('isRelease', 'is_release', is_release, 'bool'),
        ('includeDescription', 'include_description', include_description, 'bool'),
        ('$top', 'top', top, 'int'),
        ('$skip', 'skip', skip, 'int'),
        ('includeDeleted', 'include_deleted', include_deleted, 'bool'),
        ('isCached', 'is_cached', is_cached, 'bool'),
        ('directUpstreamId', 'direct_upstream_id', direct_upstream_id, 'str'),
    ]
    query_parameters = {}
    for query_name, python_name, value, wire_type in optional_filters:
        if value is not None:
            query_parameters[query_name] = self._serialize.query(python_name, value, wire_type)
    response = self._send(http_method='GET',
                          location_id='7a20d846-c929-4acc-9ea2-0d5a7df1b197',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[Package]', self._unwrap_collection(response))
[ "def", "get_packages", "(", "self", ",", "feed_id", ",", "protocol_type", "=", "None", ",", "package_name_query", "=", "None", ",", "normalized_package_name", "=", "None", ",", "include_urls", "=", "None", ",", "include_all_versions", "=", "None", ",", "is_liste...
89.5
53.844828
def get_multiplicon_seeds(self, redundant=False):
    """
    Yield the IDs of multiplicons that are initial seeding 'pairs' in
    level 2 multiplicons.

    Arguments:
    o redundant - if true, also report redundant multiplicons
    """
    for node in self._multiplicon_graph.nodes():
        # A seed has no incoming edges in the multiplicon graph.
        if len(self._multiplicon_graph.in_edges(node)):
            continue
        if not self.is_redundant_multiplicon(node):
            yield node
        elif redundant:
            yield node
[ "def", "get_multiplicon_seeds", "(", "self", ",", "redundant", "=", "False", ")", ":", "for", "node", "in", "self", ".", "_multiplicon_graph", ".", "nodes", "(", ")", ":", "if", "not", "len", "(", "self", ".", "_multiplicon_graph", ".", "in_edges", "(", ...
34.833333
16.444444
def get_node(self): """return etree Element representing this slide""" # already added title, text frames # add animation chunks if self.animations: anim_par = el("anim:par", attrib={"presentation:node-type": "timing-root"}) self._page.append(anim_par) anim_seq = sub_el( anim_par, "anim:seq", attrib={"presentation:node-type": "main-sequence"} ) for a in self.animations: a_node = a.get_node() anim_seq.append(a_node) # add notes now (so they are last) if self.notes_frame: notes = self.notes_frame.get_node() self._page.append(notes) if self.footer: self._page.attrib[ns("presentation", "use-footer-name")] = self.footer.name return self._page
[ "def", "get_node", "(", "self", ")", ":", "# already added title, text frames", "# add animation chunks", "if", "self", ".", "animations", ":", "anim_par", "=", "el", "(", "\"anim:par\"", ",", "attrib", "=", "{", "\"presentation:node-type\"", ":", "\"timing-root\"", ...
39.714286
15.714286
def find(self, spec=None, fields=None, skip=0, limit=0,
         timeout=True, snapshot=False, tailable=False, sort=None,
         max_scan=None, slave_okay=False,
         _must_use_master=False, _is_command=False,
         hint=None, debug=False, comment=None, callback=None):
    """Query the database.

    The `spec` argument is a prototype document that all results must
    match. For example:

    >>> db.test.find({"hello": "world"}, callback=...)

    only matches documents that have a key "hello" with value "world".
    Matches can have other keys *in addition* to "hello". The `fields`
    argument is used to specify a subset of fields that should be included
    in the result documents. By limiting results to a certain subset of
    fields you can cut down on network traffic and decoding time.

    Raises :class:`TypeError` if any of the arguments are of improper type.

    :Parameters:
      - `spec` (optional): a SON object specifying elements which must be
        present for a document to be included in the result set
      - `fields` (optional): a list of field names that should be returned
        in the result set ("_id" will always be included), or a dict
        specifying the fields to return
      - `skip` (optional): the number of documents to omit (from the start
        of the result set) when returning the results
      - `limit` (optional): the maximum number of results to return
      - `timeout` (optional): if True, any returned cursor will be subject
        to the normal timeout behavior of the mongod process. Otherwise,
        the returned cursor will never timeout at the server. Care should
        be taken to ensure that cursors with timeout turned off are
        properly closed.
      - `snapshot` (optional): if True, snapshot mode will be used for this
        query. Snapshot mode assures no duplicates are returned, or objects
        missed, which were present at both the start and end of the query's
        execution. For details, see the `snapshot documentation
        <http://dochub.mongodb.org/core/snapshot>`_.
      - `tailable` (optional): the result of this find call will be a
        tailable cursor - tailable cursors aren't closed when the last data
        is retrieved but are kept open and the cursors location marks the
        final document's position. if more data is received iteration of
        the cursor will continue from the last document received. For
        details, see the `tailable cursor documentation
        <http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
      - `sort` (optional): a list of (key, direction) pairs specifying the
        sort order for this query. See
        :meth:`~pymongo.cursor.Cursor.sort` for details.
      - `max_scan` (optional): limit the number of documents examined when
        performing the query
      - `slave_okay` (optional): is it okay to connect directly to and
        perform queries on a slave instance

    .. mongodoc:: find
    """
    # Normalize and validate arguments before touching the connection.
    if spec is None:
        spec = {}
    if limit is None:
        limit = 0
    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int or None")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not callable(callback):
        raise TypeError("callback must be callable")

    # An empty-but-not-None fields value still has to project something,
    # so fall back to projecting only _id.
    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    # Stash the full query state on the cursor (name-mangled attributes)
    # so later operations (e.g. get_more) can reuse it.
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = 0

    self.__timeout = timeout
    self.__tailable = tailable
    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__slave_okay = slave_okay
    self.__explain = False
    self.__hint = hint
    self.__comment = comment
    self.__debug = debug
    # self.__as_class = as_class
    self.__tz_aware = False #collection.database.connection.tz_aware
    self.__must_use_master = _must_use_master
    self.__is_command = _is_command

    connection = self.__pool.connection()
    try:
        if self.__debug:
            logging.debug('QUERY_SPEC: %r' % self.__query_spec())
        # Fire-and-forward: the reply is handled asynchronously and
        # routed to the caller's callback via _handle_response.
        connection.send_message(
            message.query(self.__query_options(),
                          self.full_collection_name,
                          self.__skip,
                          self.__limit,
                          self.__query_spec(),
                          self.__fields),
            callback=functools.partial(self._handle_response, orig_callback=callback))
    except Exception, e:
        # Python 2 syntax; a failed send leaves the socket in an unknown
        # state, so close it before re-raising.
        logging.debug('Error sending query %s' % e)
        connection.close()
        raise
[ "def", "find", "(", "self", ",", "spec", "=", "None", ",", "fields", "=", "None", ",", "skip", "=", "0", ",", "limit", "=", "0", ",", "timeout", "=", "True", ",", "snapshot", "=", "False", ",", "tailable", "=", "False", ",", "sort", "=", "None", ...
44.625
19.023438
def p_ex_list_item_id(self, p):
    # NOTE: the string below is a PLY grammar production consumed by the
    # parser generator — it is runtime-significant and must not be reworded.
    'ex_list_item : ID'
    # Wrap the matched ID token (with its position) in an example reference node.
    p[0] = AstExampleRef(self.path, p.lineno(1), p.lexpos(1), p[1])
[ "def", "p_ex_list_item_id", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "AstExampleRef", "(", "self", ".", "path", ",", "p", ".", "lineno", "(", "1", ")", ",", "p", ".", "lexpos", "(", "1", ")", ",", "p", "[", "1", "]", ")" ]
43
17.666667
def verify_path(self, mold_id_path):
    """
    Resolve *mold_id_path* and verify it points at an existing file.

    A failed lookup or a nonexistent path is reported uniformly through
    raise_os_error(ENOENT).
    """
    try:
        path = self.lookup_path(mold_id_path)
        # Treat a dangling lookup result the same as a missing key.
        if not exists(path):
            raise KeyError
    except KeyError:
        raise_os_error(ENOENT)
    return path
[ "def", "verify_path", "(", "self", ",", "mold_id_path", ")", ":", "try", ":", "path", "=", "self", ".", "lookup_path", "(", "mold_id_path", ")", "if", "not", "exists", "(", "path", ")", ":", "raise", "KeyError", "except", "KeyError", ":", "raise_os_error",...
24.083333
12.583333
def file_strip_ext( afile, skip_version=False, only_known_extensions=False, allow_subformat=True): """ Strip in the best way the extension from a filename. >>> file_strip_ext("foo.tar.gz") 'foo' >>> file_strip_ext("foo.buz.gz") 'foo.buz' >>> file_strip_ext("foo.buz") 'foo' >>> file_strip_ext("foo.buz", only_known_extensions=True) 'foo.buz' >>> file_strip_ext("foo.buz;1", skip_version=False, ... only_known_extensions=True) 'foo.buz;1' >>> file_strip_ext("foo.gif;icon") 'foo' >>> file_strip_ext("foo.gif;icon", only_know_extensions=True, ... allow_subformat=False) 'foo.gif;icon' @param afile: the path/name of a file. @type afile: string @param skip_version: whether to skip a trailing ";version". @type skip_version: bool @param only_known_extensions: whether to strip out only known extensions or to consider as extension anything that follows a dot. @type only_known_extensions: bool @param allow_subformat: whether to consider also subformats as part of the extension. @type allow_subformat: bool @return: the name/path without the extension (and version). @rtype: string """ import os afile = afile.split(';') if len(afile) > 1 and allow_subformat and not afile[-1].isdigit(): afile = afile[0:-1] if len(afile) > 1 and skip_version and afile[-1].isdigit(): afile = afile[0:-1] afile = ';'.join(afile) nextfile = _extensions.sub('', afile) if nextfile == afile and not only_known_extensions: nextfile = os.path.splitext(afile)[0] while nextfile != afile: afile = nextfile nextfile = _extensions.sub('', afile) return nextfile
[ "def", "file_strip_ext", "(", "afile", ",", "skip_version", "=", "False", ",", "only_known_extensions", "=", "False", ",", "allow_subformat", "=", "True", ")", ":", "import", "os", "afile", "=", "afile", ".", "split", "(", "';'", ")", "if", "len", "(", "...
33.153846
17.038462
def fromkeys(cls, keys, value=None, names=None): ''' Create a new dictionary with keys from ``keys`` and values set to ``value``. fromkeys() is a class method that returns a new dictionary. ``value`` defaults to None. Length of ``keys`` must not exceed one because no duplicate values are allowed. Optional ``names`` can be provided for index names (of length 2). ''' N = len(keys) if N > 1: raise ValueError('Length of keys (%s) must not exceed one because ' 'no duplicate values are allowed' % (N,)) items = [[keys[0], value]] if N == 1 else [] return cls(items, names)
[ "def", "fromkeys", "(", "cls", ",", "keys", ",", "value", "=", "None", ",", "names", "=", "None", ")", ":", "N", "=", "len", "(", "keys", ")", "if", "N", ">", "1", ":", "raise", "ValueError", "(", "'Length of keys (%s) must not exceed one because '", "'n...
42.5
31
def group_callback(self, iocb): """Callback when a child iocb completes.""" if _debug: IOGroup._debug("group_callback %r", iocb) # check all the members for iocb in self.ioMembers: if not iocb.ioComplete.isSet(): if _debug: IOGroup._debug(" - waiting for child: %r", iocb) break else: if _debug: IOGroup._debug(" - all children complete") # everything complete self.ioState = COMPLETED self.trigger()
[ "def", "group_callback", "(", "self", ",", "iocb", ")", ":", "if", "_debug", ":", "IOGroup", ".", "_debug", "(", "\"group_callback %r\"", ",", "iocb", ")", "# check all the members", "for", "iocb", "in", "self", ".", "ioMembers", ":", "if", "not", "iocb", ...
37.571429
15.928571
async def deleteallreactions(self, ctx): """Removes a reaction""" data = self.config.get(ctx.message.server.id, {}) if data: await self.config.put(ctx.message.server.id, {}) await self.bot.responses.success(message="All reactions have been deleted.") else: await self.bot.responses.failure(message="This server has no reactions.")
[ "async", "def", "deleteallreactions", "(", "self", ",", "ctx", ")", ":", "data", "=", "self", ".", "config", ".", "get", "(", "ctx", ".", "message", ".", "server", ".", "id", ",", "{", "}", ")", "if", "data", ":", "await", "self", ".", "config", ...
48.875
22.625
def get_location(self, location): """ For an index location return a dict of the index and value. This is optimized for speed because it does not need to lookup the index location with a search. Also can accept relative indexing from the end of the SEries in standard python notation [-3, -2, -1] :param location: index location in standard python form of positive or negative number :return: dictionary """ return {self.index_name: self._index[location], self.data_name: self._data[location]}
[ "def", "get_location", "(", "self", ",", "location", ")", ":", "return", "{", "self", ".", "index_name", ":", "self", ".", "_index", "[", "location", "]", ",", "self", ".", "data_name", ":", "self", ".", "_data", "[", "location", "]", "}" ]
54.9
32.7
def logSystemInfo(self): """ A function to be called just after a logging object is instantiated to load the log up with info about the computer it is being ran on and the software version. This function utilizes the psutil and platform libraries, so they must be install for it to work. For clarity of the log, it is suggested to perform immediately after instantiation to put it at the top of the log file. The messages this prints to the log will look like: | System Information Summary: | OS type = Linux | OS Version = 3.9.10-100.fc17.x86_64 | Machine UserName = xxxxxx.astron.s.u-tokyo.ac.jp | Machine Processor Type = x86_64 | Number of cores = 8 | Total RAM [GB] = 23.5403785706, % used = 15.9 | Python Version = '2.7.3' """ t = datetime.date.today() infoStr = 'Date KMlogger object instantiated: '+t.strftime('%b %d, %Y')+'\n\n' infoStr+="\n"+"="*11+' System Information Summary '+'='*11 infoStr+="\n"+'OS type = '+platform.uname()[0] infoStr+="\n"+'OS Version = '+platform.uname()[2] infoStr+="\n"+'Machine UserName = '+platform.uname()[1] infoStr+="\n"+'Machine Processor Type = '+platform.processor() multiprocessing.cpu_count() ## Some Macs have issues installing psutil, so I will remove it as a dependency for now. #import psutil #totMem = int(round(psutil.virtual_memory()[0]/1073741824.0)) #percentMem = int(round(psutil.virtual_memory()[2])) infoStr+="\n"+'Python Version = '+repr(platform.python_version()) infoStr+="\n"+'='*50 self.fileonly(infoStr)
[ "def", "logSystemInfo", "(", "self", ")", ":", "t", "=", "datetime", ".", "date", ".", "today", "(", ")", "infoStr", "=", "'Date KMlogger object instantiated: '", "+", "t", ".", "strftime", "(", "'%b %d, %Y'", ")", "+", "'\\n\\n'", "infoStr", "+=", "\"\\n\""...
47.405405
20.297297
async def write(self, data): """ This method writes sends data to the IP device :param data: :return: None """ self.writer.write((bytes([ord(data)]))) await self.writer.drain()
[ "async", "def", "write", "(", "self", ",", "data", ")", ":", "self", ".", "writer", ".", "write", "(", "(", "bytes", "(", "[", "ord", "(", "data", ")", "]", ")", ")", ")", "await", "self", ".", "writer", ".", "drain", "(", ")" ]
25
13.222222
def idle_task(self): '''called on idle''' if self.module('console') is not None and not self.menu_added_console: self.menu_added_console = True self.module('console').add_menu(self.menu) if self.module('map') is not None and not self.menu_added_map: self.menu_added_map = True self.module('map').add_menu(self.menu)
[ "def", "idle_task", "(", "self", ")", ":", "if", "self", ".", "module", "(", "'console'", ")", "is", "not", "None", "and", "not", "self", ".", "menu_added_console", ":", "self", ".", "menu_added_console", "=", "True", "self", ".", "module", "(", "'consol...
47.5
16
def _win32_dir(path, star=''): """ Using the windows cmd shell to get information about a directory """ from ubelt import util_cmd import re wrapper = 'cmd /S /C "{}"' # the /S will preserve all inner quotes command = 'dir /-C "{}"{}'.format(path, star) wrapped = wrapper.format(command) info = util_cmd.cmd(wrapped, shell=True) if info['ret'] != 0: from ubelt import util_format print('Failed command:') print(info['command']) print(util_format.repr2(info, nl=1)) raise OSError(str(info)) # parse the output of dir to get some info # Remove header and footer lines = info['out'].split('\n')[5:-3] splitter = re.compile('( +)') for line in lines: parts = splitter.split(line) date, sep, time, sep, ampm, sep, type_or_size, sep = parts[:8] name = ''.join(parts[8:]) # if type is a junction then name will also contain the linked loc if name == '.' or name == '..': continue if type_or_size in ['<JUNCTION>', '<SYMLINKD>', '<SYMLINK>']: # colons cannot be in path names, so use that to find where # the name ends pos = name.find(':') bpos = name[:pos].rfind('[') name = name[:bpos - 1] pointed = name[bpos + 1:-1] yield type_or_size, name, pointed else: yield type_or_size, name, None
[ "def", "_win32_dir", "(", "path", ",", "star", "=", "''", ")", ":", "from", "ubelt", "import", "util_cmd", "import", "re", "wrapper", "=", "'cmd /S /C \"{}\"'", "# the /S will preserve all inner quotes", "command", "=", "'dir /-C \"{}\"{}'", ".", "format", "(", "p...
38.135135
11.648649
def in6_addrtovendor(addr): """ Extract the MAC address from a modified EUI-64 constructed IPv6 address provided and use the IANA oui.txt file to get the vendor. The database used for the conversion is the one loaded by Scapy from a Wireshark installation if discovered in a well-known location. None is returned on error, "UNKNOWN" if the vendor is unknown. """ mac = in6_addrtomac(addr) if mac is None or conf.manufdb is None: return None res = conf.manufdb._get_manuf(mac) if len(res) == 17 and res.count(':') != 5: # Mac address, i.e. unknown res = "UNKNOWN" return res
[ "def", "in6_addrtovendor", "(", "addr", ")", ":", "mac", "=", "in6_addrtomac", "(", "addr", ")", "if", "mac", "is", "None", "or", "conf", ".", "manufdb", "is", "None", ":", "return", "None", "res", "=", "conf", ".", "manufdb", ".", "_get_manuf", "(", ...
34.833333
20.5
def dictfetchone(cursor: Cursor) -> Optional[Dict[str, Any]]: """ Return the next row from a cursor as an :class:`OrderedDict`, or ``None``. """ columns = get_fieldnames_from_cursor(cursor) row = cursor.fetchone() if not row: return None return OrderedDict(zip(columns, row))
[ "def", "dictfetchone", "(", "cursor", ":", "Cursor", ")", "->", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "columns", "=", "get_fieldnames_from_cursor", "(", "cursor", ")", "row", "=", "cursor", ".", "fetchone", "(", ")", "if", "no...
33.666667
14.111111
def set_proxy(self, host = "localhost", port = 0, user = "", password = ""): """ Sets a custom HTTP proxy to use for future requests. """ self.conn.issue_command("SetProxy", host, port, user, password)
[ "def", "set_proxy", "(", "self", ",", "host", "=", "\"localhost\"", ",", "port", "=", "0", ",", "user", "=", "\"\"", ",", "password", "=", "\"\"", ")", ":", "self", ".", "conn", ".", "issue_command", "(", "\"SetProxy\"", ",", "host", ",", "port", ","...
47
7
def free_norm(self, name, free=True, **kwargs): """Free/Fix normalization of a source. Parameters ---------- name : str Source name. free : bool Choose whether to free (free=True) or fix (free=False). """ name = self.get_source_name(name) normPar = self.like.normPar(name).getName() self.free_source(name, pars=[normPar], free=free, **kwargs)
[ "def", "free_norm", "(", "self", ",", "name", ",", "free", "=", "True", ",", "*", "*", "kwargs", ")", ":", "name", "=", "self", ".", "get_source_name", "(", "name", ")", "normPar", "=", "self", ".", "like", ".", "normPar", "(", "name", ")", ".", ...
26.6875
21