text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def _process_data_var(string): """Transform datastring to key, values pair. All values are transformed to floating point values. Parameters ---------- string : str Returns ------- Tuple[Str, Str] key, values pair """ key, var = string.split("<-") if "structure" in var: var, dim = var.replace("structure(", "").replace(",", "").split(".Dim") # dtype = int if '.' not in var and 'e' not in var.lower() else float dtype = float var = var.replace("c(", "").replace(")", "").strip().split() dim = dim.replace("=", "").replace("c(", "").replace(")", "").strip().split() dim = tuple(map(int, dim)) var = np.fromiter(map(dtype, var), dtype).reshape(dim, order="F") elif "c(" in var: # dtype = int if '.' not in var and 'e' not in var.lower() else float dtype = float var = var.replace("c(", "").replace(")", "").split(",") var = np.fromiter(map(dtype, var), dtype) else: # dtype = int if '.' not in var and 'e' not in var.lower() else float dtype = float var = dtype(var) return key.strip(), var
[ "def", "_process_data_var", "(", "string", ")", ":", "key", ",", "var", "=", "string", ".", "split", "(", "\"<-\"", ")", "if", "\"structure\"", "in", "var", ":", "var", ",", "dim", "=", "var", ".", "replace", "(", "\"structure(\"", ",", "\"\"", ")", ...
34.393939
23.272727
def apply_all_link_refs( bytecode: bytes, link_refs: List[Dict[str, Any]], attr_dict: Dict[str, str] ) -> bytes: """ Applies all link references corresponding to a valid attr_dict to the bytecode. """ if link_refs is None: return bytecode link_fns = ( apply_link_ref(offset, ref["length"], attr_dict[ref["name"]]) for ref in link_refs for offset in ref["offsets"] ) linked_bytecode = pipe(bytecode, *link_fns) return linked_bytecode
[ "def", "apply_all_link_refs", "(", "bytecode", ":", "bytes", ",", "link_refs", ":", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ",", "attr_dict", ":", "Dict", "[", "str", ",", "str", "]", ")", "->", "bytes", ":", "if", "link_refs", "is", ...
32.4
18.933333
def get_l(self): """Returns the left border of the cell""" cell_left = CellBorders(self.cell_attributes, *self.cell.get_left_key_rect()) return cell_left.get_r()
[ "def", "get_l", "(", "self", ")", ":", "cell_left", "=", "CellBorders", "(", "self", ".", "cell_attributes", ",", "*", "self", ".", "cell", ".", "get_left_key_rect", "(", ")", ")", "return", "cell_left", ".", "get_r", "(", ")" ]
35.5
18
def template2regex(template, ranges=None): """Convert a URL template to a regular expression. Converts a template, such as /{name}/ to a regular expression, e.g. /(?P<name>[^/]+)/ and a list of the named parameters found in the template (e.g. ['name']). Ranges are given after a colon in a template name to indicate a restriction on the characters that can appear there. For example, in the template: "/user/{id:alpha}" The `id` must contain only characters from a-zA-Z. Other characters there will cause the pattern not to match. The ranges parameter is an optional dictionary that maps range names to regular expressions. New range names can be added, or old range names can be redefined using this parameter. Example: >>> import rhino.mapper >>> rhino.mapper.template2regex("{fred}") ('^(?P<fred>[^/]+)$', ['fred']) """ if len(template) and -1 < template.find('|') < len(template) - 1: raise InvalidTemplateError("'|' may only appear at the end, found at position %d in %s" % (template.find('|'), template)) if ranges is None: ranges = DEFAULT_RANGES anchor = True state = S_PATH if len(template) and template[-1] == '|': anchor = False params = [] bracketdepth = 0 result = ['^'] name = "" pattern = "[^/]+" rangename = None for c in template_splitter.split(template): if state == S_PATH: if len(c) > 1: result.append(re.escape(c)) elif c == '[': result.append("(") bracketdepth += 1 elif c == ']': bracketdepth -= 1 if bracketdepth < 0: raise InvalidTemplateError("Mismatched brackets in %s" % template) result.append(")?") elif c == '{': name = "" state = S_TEMPLATE elif c == '}': raise InvalidTemplateError("Mismatched braces in %s" % template) elif c == '|': pass else: result.append(re.escape(c)) else: if c == '}': if rangename and rangename in ranges: result.append("(?P<%s>%s)" % (name, ranges[rangename])) else: result.append("(?P<%s>%s)" % (name, pattern)) params.append(name) state = S_PATH rangename = None else: name = c if name.find(":") > -1: name, rangename = name.split(":") if 
bracketdepth != 0: raise InvalidTemplateError("Mismatched brackets in %s" % template) if state == S_TEMPLATE: raise InvalidTemplateError("Mismatched braces in %s" % template) if anchor: result.append('$') return "".join(result), params
[ "def", "template2regex", "(", "template", ",", "ranges", "=", "None", ")", ":", "if", "len", "(", "template", ")", "and", "-", "1", "<", "template", ".", "find", "(", "'|'", ")", "<", "len", "(", "template", ")", "-", "1", ":", "raise", "InvalidTem...
35.049383
20.098765
def SetCoreGRRKnowledgeBaseValues(kb, client_obj): """Set core values from GRR into the knowledgebase.""" client_schema = client_obj.Schema kb.fqdn = utils.SmartUnicode(client_obj.Get(client_schema.FQDN, "")) if not kb.fqdn: kb.fqdn = utils.SmartUnicode(client_obj.Get(client_schema.HOSTNAME, "")) versions = client_obj.Get(client_schema.OS_VERSION) if versions and versions.versions: try: kb.os_major_version = versions.versions[0] kb.os_minor_version = versions.versions[1] except IndexError: # Some OSs don't have a minor version. pass client_os = client_obj.Get(client_schema.SYSTEM) if client_os: kb.os = utils.SmartUnicode(client_obj.Get(client_schema.SYSTEM))
[ "def", "SetCoreGRRKnowledgeBaseValues", "(", "kb", ",", "client_obj", ")", ":", "client_schema", "=", "client_obj", ".", "Schema", "kb", ".", "fqdn", "=", "utils", ".", "SmartUnicode", "(", "client_obj", ".", "Get", "(", "client_schema", ".", "FQDN", ",", "\...
41.529412
16.705882
def snake_to_camel(snake_str): ''' Convert `snake_str` from snake_case to camelCase ''' components = snake_str.split('_') if len(components) > 1: camel = (components[0].lower() + ''.join(x.title() for x in components[1:])) return camel # Not snake_case return snake_str
[ "def", "snake_to_camel", "(", "snake_str", ")", ":", "components", "=", "snake_str", ".", "split", "(", "'_'", ")", "if", "len", "(", "components", ")", ">", "1", ":", "camel", "=", "(", "components", "[", "0", "]", ".", "lower", "(", ")", "+", "''...
29.090909
16.727273
def getDisplayIdentifier(self): """Return the display_identifier if set, else return the claimed_id. """ if self.display_identifier is not None: return self.display_identifier if self.claimed_id is None: return None else: return urlparse.urldefrag(self.claimed_id)[0]
[ "def", "getDisplayIdentifier", "(", "self", ")", ":", "if", "self", ".", "display_identifier", "is", "not", "None", ":", "return", "self", ".", "display_identifier", "if", "self", ".", "claimed_id", "is", "None", ":", "return", "None", "else", ":", "return",...
37.222222
9.333333
def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False): ''' Main function to synchronize files contents by majority vote The main job of this function is to walk through the input folders and align the files, so that we can compare every files across every folders, one by one. The whole trick here is to align files, so that we don't need to memorize all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order, this ensures the alignment of files between different folders, without memorizing the whole trees structures. ''' # (Generator) Files Synchronization Algorithm: # Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable. # Until there's no file in any of the input folders to be processed: # - curfiles <- load first file for each folder by using stable_dir_walking on each input folder. # - curfiles_grouped <- group curfiles_ordered: # * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity) # * curfiles_grouped <- empty list # * curfiles_grouped[0] = add first element in curfiles_ordered # * last_group = 0 # * for every subsequent element nextelt in curfiles_ordered: # . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped) # . 
else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group] # At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order. # - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file. # - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before. # At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder. # Init files walking generator for each inputpaths recgen = [recwalk(path, sorting=True) for path in inputpaths] curfiles = {} recgen_exhausted = {} recgen_exhausted_count = 0 nbpaths = len(inputpaths) retcode = 0 if not ptee: ptee = sys.stdout # Open report file and write header if report_file is not None: rfile = open(report_file, 'wb') r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"') r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"] r_length = len(r_header) r_writer.writerow(r_header) # Initialization: load the first batch of files, one for each folder for i in xrange(len(recgen)): recgen_exhausted[i] = False try: if curfiles.get(i, None) is None: curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1] except StopIteration: recgen_exhausted[i] = True recgen_exhausted_count += 1 # Files lists alignment loop while recgen_exhausted_count < nbpaths: errcode = 0 errmsg = None # Init a new report's row if 
report_file: r_row = ["-"] * r_length # -- Group equivalent relative filepaths together #print curfiles # debug curfiles_grouped = sort_group(curfiles, True) # -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms) # Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now to_process = curfiles_grouped[0] #print to_process # debug # -- Byte-by-byte majority vote on the first group of files # Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group) relfilepath = path2unix(os.path.join(*to_process[0][1])) if report_file: r_row[0] = relfilepath if verbose: ptee.write("- Processing file %s." % relfilepath) # Generate output path outpathfull = os.path.join(outpath, relfilepath) create_dir_if_not_exist(os.path.dirname(outpathfull)) # Initialize the list of absolute filepaths fileslist = [] for elt in to_process: i = elt[0] fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1]))) if report_file: r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file # If there's only one file, just copy it over if len(to_process) == 1: shutil.copyfile(fileslist[0], outpathfull) id = to_process[0][0] if report_file: r_row[id+1] = 'O' # Else, merge by majority vote else: # Before-merge check using rfigc database, if provided # If one of the files in the input folders is already correct, just copy it over correct_file = None if database: for id, filepath in enumerate(fileslist): if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0: correct_file = filepath correct_id = to_process[id][0] break # If one correct file was found, copy it over if correct_file: 
create_dir_if_not_exist(os.path.dirname(outpathfull)) shutil.copyfile(correct_file, outpathfull) if report_file: r_row[correct_id+1] = "O" r_row[-3] = "OK" # Else, we need to do the majority vote merge else: # Do the majority vote merge errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath) # After-merge/move check using rfigc database, if provided if database: if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1: errcode = 1 r_row[-3] = "KO" if not errmsg: errmsg = '' errmsg += " File could not be totally repaired according to rfigc database." else: if report_file: r_row[-3] = "OK" if errmsg: errmsg += " But merged file is correct according to rfigc database." # Display errors if any if errcode: if report_file: r_row[-2] = "KO" r_row[-1] = errmsg ptee.write(errmsg) retcode = 1 else: if report_file: r_row[-2] = "OK" # Save current report's row if report_file: r_writer.writerow(r_row) # -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment) for elt in to_process: # for files of the first group (the ones we processed) i = elt[0] # Walk their respective folders and load up the next file try: if not recgen_exhausted.get(i, False): curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1] # If there's no file left in this folder, mark this input folder as exhausted and continue with the others except StopIteration: curfiles[i] = None recgen_exhausted[i] = True recgen_exhausted_count += 1 if tqdm_bar: tqdm_bar.update() if tqdm_bar: tqdm_bar.close() # Closing report file if report_file: # Write list of directories and legend rfile.write("\n=> Input directories:") for id, ipath in enumerate(inputpaths): rfile.write("\n\t- dir%i = %s" % ((id+1), ipath)) rfile.write("\n=> Output directory: %s" % outpath) rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n") # 
Close the report file handle rfile.close() return retcode
[ "def", "synchronize_files", "(", "inputpaths", ",", "outpath", ",", "database", "=", "None", ",", "tqdm_bar", "=", "None", ",", "report_file", "=", "None", ",", "ptee", "=", "None", ",", "verbose", "=", "False", ")", ":", "# (Generator) Files Synchronization A...
55.202454
33.214724
def move_to_end(self, key=NOT_SET, index=NOT_SET, last=True): """Move an existing element to the end (or beginning if last==False). Runs in O(N). """ if index is NOT_SET and key is not NOT_SET: index, value = self._dict[key] elif index is not NOT_SET and key is NOT_SET: key, value = self._list[index] # Normalize index if index < 0: index += len(self._list) else: raise KEY_EQ_INDEX_ERROR if last: index_range = range(len(self._list) - 1, index - 1, -1) self._dict[key] = (len(self._list) - 1, value) else: index_range = range(index + 1) self._dict[key] = (0, value) previous = (key, value) for i in index_range: self._dict[previous[0]] = i, previous[1] previous, self._list[i] = self._list[i], previous
[ "def", "move_to_end", "(", "self", ",", "key", "=", "NOT_SET", ",", "index", "=", "NOT_SET", ",", "last", "=", "True", ")", ":", "if", "index", "is", "NOT_SET", "and", "key", "is", "not", "NOT_SET", ":", "index", ",", "value", "=", "self", ".", "_d...
27.37037
18.037037
def _load_csv_file(csv_file): """ load csv file and check file content format @param csv_file: csv file path e.g. csv file content: username,password test1,111111 test2,222222 test3,333333 @return list of parameter, each parameter is in dict format e.g. [ {'username': 'test1', 'password': '111111'}, {'username': 'test2', 'password': '222222'}, {'username': 'test3', 'password': '333333'} ] """ csv_content_list = [] with io.open(csv_file, encoding='utf-8') as csvfile: reader = csv.DictReader(csvfile) for row in reader: csv_content_list.append(row) return csv_content_list
[ "def", "_load_csv_file", "(", "csv_file", ")", ":", "csv_content_list", "=", "[", "]", "with", "io", ".", "open", "(", "csv_file", ",", "encoding", "=", "'utf-8'", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "DictReader", "(", "csvfile", ")", ...
32.961538
15.576923
def add(self, child): """ Adds a typed child object to the structure object. @param child: Child object to be added. """ if isinstance(child, With): self.add_with(child) elif isinstance(child, EventConnection): self.add_event_connection(child) elif isinstance(child, ChildInstance): self.add_child_instance(child) elif isinstance(child, MultiInstantiate): self.add_multi_instantiate(child) elif isinstance(child, ForEach): self.add_for_each(child) else: raise ModelError('Unsupported child element')
[ "def", "add", "(", "self", ",", "child", ")", ":", "if", "isinstance", "(", "child", ",", "With", ")", ":", "self", ".", "add_with", "(", "child", ")", "elif", "isinstance", "(", "child", ",", "EventConnection", ")", ":", "self", ".", "add_event_connec...
33.421053
11.526316
def record_get_field_instances(rec, tag="", ind1=" ", ind2=" "): """ Return the list of field instances for the specified tag and indications. Return empty list if not found. If tag is empty string, returns all fields Parameters (tag, ind1, ind2) can contain wildcard %. :param rec: a record structure as returned by create_record() :param tag: a 3 characters long string :param ind1: a 1 character long string :param ind2: a 1 character long string :param code: a 1 character long string :return: a list of field tuples (Subfields, ind1, ind2, value, field_position_global) where subfields is list of (code, value) """ if not rec: return [] if not tag: return rec.items() else: out = [] ind1, ind2 = _wash_indicators(ind1, ind2) if '%' in tag: # Wildcard in tag. Check all possible for field_tag in rec: if _tag_matches_pattern(field_tag, tag): for possible_field_instance in rec[field_tag]: if (ind1 in ('%', possible_field_instance[1]) and ind2 in ('%', possible_field_instance[2])): out.append(possible_field_instance) else: # Completely defined tag. Use dict for possible_field_instance in rec.get(tag, []): if (ind1 in ('%', possible_field_instance[1]) and ind2 in ('%', possible_field_instance[2])): out.append(possible_field_instance) return out
[ "def", "record_get_field_instances", "(", "rec", ",", "tag", "=", "\"\"", ",", "ind1", "=", "\" \"", ",", "ind2", "=", "\" \"", ")", ":", "if", "not", "rec", ":", "return", "[", "]", "if", "not", "tag", ":", "return", "rec", ".", "items", "(", ")",...
39.375
20.075
def _get_broadcast_shape(shape1, shape2): """Given two shapes that are not identical, find the shape that both input shapes can broadcast to.""" if shape1 == shape2: return shape1 length1 = len(shape1) length2 = len(shape2) if length1 > length2: shape = list(shape1) else: shape = list(shape2) i = max(length1, length2) - 1 for a, b in zip(shape1[::-1], shape2[::-1]): if a != 1 and b != 1 and a != b: raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2)) shape[i] = max(a, b) i -= 1 return tuple(shape)
[ "def", "_get_broadcast_shape", "(", "shape1", ",", "shape2", ")", ":", "if", "shape1", "==", "shape2", ":", "return", "shape1", "length1", "=", "len", "(", "shape1", ")", "length2", "=", "len", "(", "shape2", ")", "if", "length1", ">", "length2", ":", ...
32.368421
15.789474
def proj_to_cartopy(proj): """ Converts a pyproj.Proj to a cartopy.crs.Projection (Code copied from https://github.com/fmaussion/salem) Parameters ---------- proj: pyproj.Proj the projection to convert Returns ------- a cartopy.crs.Projection object """ import cartopy.crs as ccrs try: from osgeo import osr has_gdal = True except ImportError: has_gdal = False proj = check_crs(proj) if proj.is_latlong(): return ccrs.PlateCarree() srs = proj.srs if has_gdal: # this is more robust, as srs could be anything (espg, etc.) s1 = osr.SpatialReference() s1.ImportFromProj4(proj.srs) srs = s1.ExportToProj4() km_proj = {'lon_0': 'central_longitude', 'lat_0': 'central_latitude', 'x_0': 'false_easting', 'y_0': 'false_northing', 'k': 'scale_factor', 'zone': 'zone', } km_globe = {'a': 'semimajor_axis', 'b': 'semiminor_axis', } km_std = {'lat_1': 'lat_1', 'lat_2': 'lat_2', } kw_proj = dict() kw_globe = dict() kw_std = dict() for s in srs.split('+'): s = s.split('=') if len(s) != 2: continue k = s[0].strip() v = s[1].strip() try: v = float(v) except: pass if k == 'proj': if v == 'tmerc': cl = ccrs.TransverseMercator if v == 'lcc': cl = ccrs.LambertConformal if v == 'merc': cl = ccrs.Mercator if v == 'utm': cl = ccrs.UTM if k in km_proj: kw_proj[km_proj[k]] = v if k in km_globe: kw_globe[km_globe[k]] = v if k in km_std: kw_std[km_std[k]] = v globe = None if kw_globe: globe = ccrs.Globe(**kw_globe) if kw_std: kw_proj['standard_parallels'] = (kw_std['lat_1'], kw_std['lat_2']) # mercatoooor if cl.__name__ == 'Mercator': kw_proj.pop('false_easting', None) kw_proj.pop('false_northing', None) return cl(globe=globe, **kw_proj)
[ "def", "proj_to_cartopy", "(", "proj", ")", ":", "import", "cartopy", ".", "crs", "as", "ccrs", "try", ":", "from", "osgeo", "import", "osr", "has_gdal", "=", "True", "except", "ImportError", ":", "has_gdal", "=", "False", "proj", "=", "check_crs", "(", ...
24.852273
16.920455
def crossover(self, gene2): """ Creates a new gene randomly inheriting attributes from its parents.""" assert self.key == gene2.key # Note: we use "a if random() > 0.5 else b" instead of choice((a, b)) # here because `choice` is substantially slower. new_gene = self.__class__(self.key) for a in self._gene_attributes: if random() > 0.5: setattr(new_gene, a.name, getattr(self, a.name)) else: setattr(new_gene, a.name, getattr(gene2, a.name)) return new_gene
[ "def", "crossover", "(", "self", ",", "gene2", ")", ":", "assert", "self", ".", "key", "==", "gene2", ".", "key", "# Note: we use \"a if random() > 0.5 else b\" instead of choice((a, b))", "# here because `choice` is substantially slower.", "new_gene", "=", "self", ".", "...
39.928571
18.071429
def check_closed_streams(options): """Work around Python issue with multiprocessing forking on closed streams https://bugs.python.org/issue28326 Attempting to a fork/exec a new Python process when any of std{in,out,err} are closed or not flushable for some reason may raise an exception. Fix this by opening devnull if the handle seems to be closed. Do this globally to avoid tracking places all places that fork. Seems to be specific to multiprocessing.Process not all Python process forkers. The error actually occurs when the stream object is not flushable, but replacing an open stream object that is not flushable with /dev/null is a bad idea since it will create a silent failure. Replacing a closed handle with /dev/null seems safe. """ if sys.version_info[0:3] >= (3, 6, 4): return True # Issued fixed in Python 3.6.4+ if sys.stderr is None: sys.stderr = open(os.devnull, 'w') if sys.stdin is None: if options.input_file == '-': print("Trying to read from stdin but stdin seems closed", file=sys.stderr) return False sys.stdin = open(os.devnull, 'r') if sys.stdout is None: if options.output_file == '-': # Can't replace stdout if the user is piping # If this case can even happen, it must be some kind of weird # stream. print( textwrap.dedent( """\ Output was set to stdout '-' but the stream attached to stdout does not support the flush() system call. This will fail.""" ), file=sys.stderr, ) return False sys.stdout = open(os.devnull, 'w') return True
[ "def", "check_closed_streams", "(", "options", ")", ":", "if", "sys", ".", "version_info", "[", "0", ":", "3", "]", ">=", "(", "3", ",", "6", ",", "4", ")", ":", "return", "True", "# Issued fixed in Python 3.6.4+", "if", "sys", ".", "stderr", "is", "No...
35.22
21.54
def start_capture(self, adapter_number, output_file): """ Starts a packet capture. :param adapter_number: adapter number :param output_file: PCAP destination file for the capture """ try: adapter = self._ethernet_adapters[adapter_number] except KeyError: raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name, adapter_number=adapter_number)) nio = adapter.get_nio(0) if not nio: raise VirtualBoxError("Adapter {} is not connected".format(adapter_number)) if nio.capturing: raise VirtualBoxError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number)) nio.startPacketCapture(output_file) if self.ubridge: yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VBOX-{}-{}".format(self._id, adapter_number), output_file=output_file)) log.info("VirtualBox VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, adapter_number=adapter_number))
[ "def", "start_capture", "(", "self", ",", "adapter_number", ",", "output_file", ")", ":", "try", ":", "adapter", "=", "self", ".", "_ethernet_adapters", "[", "adapter_number", "]", "except", "KeyError", ":", "raise", "VirtualBoxError", "(", "\"Adapter {adapter_num...
51.419355
41.16129
def remove_module_docstring(app, what, name, obj, options, lines): """ Ignore the docstring of the ``clusterpolate`` module. """ if what == "module" and name == "clusterpolate": del lines[:]
[ "def", "remove_module_docstring", "(", "app", ",", "what", ",", "name", ",", "obj", ",", "options", ",", "lines", ")", ":", "if", "what", "==", "\"module\"", "and", "name", "==", "\"clusterpolate\"", ":", "del", "lines", "[", ":", "]" ]
34.833333
12.5
def del_calculation(job_id, confirmed=False): """ Delete a calculation and all associated outputs. """ if logs.dbcmd('get_job', job_id) is None: print('There is no job %d' % job_id) return if confirmed or confirm( 'Are you sure you want to (abort and) delete this calculation and ' 'all associated outputs?\nThis action cannot be undone. (y/n): '): try: abort(job_id) resp = logs.dbcmd('del_calc', job_id, getpass.getuser()) except RuntimeError as err: safeprint(err) else: if 'success' in resp: print('Removed %d' % job_id) else: print(resp['error'])
[ "def", "del_calculation", "(", "job_id", ",", "confirmed", "=", "False", ")", ":", "if", "logs", ".", "dbcmd", "(", "'get_job'", ",", "job_id", ")", "is", "None", ":", "print", "(", "'There is no job %d'", "%", "job_id", ")", "return", "if", "confirmed", ...
33.714286
16
def GetHiResImage(ID): ''' Queries the Palomar Observatory Sky Survey II catalog to obtain a higher resolution optical image of the star with EPIC number :py:obj:`ID`. ''' # Get the TPF info client = kplr.API() star = client.k2_star(ID) k2ra = star.k2_ra k2dec = star.k2_dec tpf = star.get_target_pixel_files()[0] with tpf.open() as f: k2wcs = WCS(f[2].header) shape = np.array(f[1].data.field('FLUX'), dtype='float64')[0].shape # Get the POSS URL hou = int(k2ra * 24 / 360.) min = int(60 * (k2ra * 24 / 360. - hou)) sec = 60 * (60 * (k2ra * 24 / 360. - hou) - min) ra = '%02d+%02d+%.2f' % (hou, min, sec) sgn = '' if np.sign(k2dec) >= 0 else '-' deg = int(np.abs(k2dec)) min = int(60 * (np.abs(k2dec) - deg)) sec = 3600 * (np.abs(k2dec) - deg - min / 60) dec = '%s%02d+%02d+%.1f' % (sgn, deg, min, sec) url = 'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&' + \ 'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3=' % (ra, dec) # Query the server r = urllib.request.Request(url) handler = urllib.request.urlopen(r) code = handler.getcode() if int(code) != 200: # Unavailable return None data = handler.read() # Atomically write to a temp file f = NamedTemporaryFile("wb", delete=False) f.write(data) f.flush() os.fsync(f.fileno()) f.close() # Now open the POSS fits file with pyfits.open(f.name) as ff: img = ff[0].data # Map POSS pixels onto K2 pixels xy = np.empty((img.shape[0] * img.shape[1], 2)) z = np.empty(img.shape[0] * img.shape[1]) pwcs = WCS(f.name) k = 0 for i in range(img.shape[0]): for j in range(img.shape[1]): ra, dec = pwcs.all_pix2world(float(j), float(i), 0) xy[k] = k2wcs.all_world2pix(ra, dec, 0) z[k] = img[i, j] k += 1 # Resample grid_x, grid_y = np.mgrid[-0.5:shape[1] - 0.5:0.1, -0.5:shape[0] - 0.5:0.1] resampled = griddata(xy, z, (grid_x, grid_y), method='cubic') # Rotate to align with K2 image. Not sure why, but it is necessary resampled = np.rot90(resampled) return resampled
[ "def", "GetHiResImage", "(", "ID", ")", ":", "# Get the TPF info", "client", "=", "kplr", ".", "API", "(", ")", "star", "=", "client", ".", "k2_star", "(", "ID", ")", "k2ra", "=", "star", ".", "k2_ra", "k2dec", "=", "star", ".", "k2_dec", "tpf", "=",...
30.535211
19.549296
def disable_performance_data(self): """Disable performance data processing (globally) Format of the line that triggers function call:: DISABLE_PERFORMANCE_DATA :return: None """ # todo: #783 create a dedicated brok for global parameters if self.my_conf.process_performance_data: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value self.my_conf.process_performance_data = False self.my_conf.explode_global_conf() self.daemon.update_program_status()
[ "def", "disable_performance_data", "(", "self", ")", ":", "# todo: #783 create a dedicated brok for global parameters", "if", "self", ".", "my_conf", ".", "process_performance_data", ":", "self", ".", "my_conf", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MO...
39.733333
15.466667
def ndstype(self): """NDS type integer for this channel. This property is mapped to the `Channel.type` string. """ if self.type is not None: return io_nds2.Nds2ChannelType.find(self.type).value
[ "def", "ndstype", "(", "self", ")", ":", "if", "self", ".", "type", "is", "not", "None", ":", "return", "io_nds2", ".", "Nds2ChannelType", ".", "find", "(", "self", ".", "type", ")", ".", "value" ]
33.142857
16.285714
def update_git_repository(self, folder, git_repository): """Updates git remote for the managed theme folder if has changed. :param git_repository: git url of the theme folder :param folder: path of the git managed theme folder """ # load repo object from path repo = git.Repo(folder) # keep local_head_name for to reset folder remote head later local_head_name = repo.head.ref.name # test if git repository url has changed remote = repo.remote('origin') if remote.url == git_repository: return # remove/add new remote repository origin remote.remove(repo, 'origin') origin = remote.add(repo, 'origin', git_repository) # fetch available branches origin.fetch() # get remote head according previously store local head name remote_head = getattr(origin.refs, local_head_name) # reset repository tracking branch according deduced remote head repo.create_head(local_head_name, remote_head)\ .set_tracking_branch(remote_head)
[ "def", "update_git_repository", "(", "self", ",", "folder", ",", "git_repository", ")", ":", "# load repo object from path", "repo", "=", "git", ".", "Repo", "(", "folder", ")", "# keep local_head_name for to reset folder remote head later", "local_head_name", "=", "repo"...
36.033333
18.8
def get_uncompleted_tasks(self):
    """Return a list of all uncompleted tasks in this project.

    .. warning:: Requires Todoist premium.

    :return: A list of all uncompleted tasks in this project.
    :rtype: list of :class:`pytodoist.todoist.Task`

    >>> from pytodoist import todoist
    >>> user = todoist.login('john.doe@gmail.com', 'password')
    >>> project = user.get_project('PyTodoist')
    >>> project.add_task('Install PyTodoist')
    >>> uncompleted_tasks = project.get_uncompleted_tasks()
    >>> for task in uncompleted_tasks:
    ...    task.complete()
    """
    every_task = self.get_tasks()
    finished = self.get_completed_tasks()
    # keep original ordering; membership uses Task equality semantics
    return [task for task in every_task if task not in finished]
[ "def", "get_uncompleted_tasks", "(", "self", ")", ":", "all_tasks", "=", "self", ".", "get_tasks", "(", ")", "completed_tasks", "=", "self", ".", "get_completed_tasks", "(", ")", "return", "[", "t", "for", "t", "in", "all_tasks", "if", "t", "not", "in", ...
40.526316
15.631579
def wait_for_capture(self, timeout=None):
    """Block until the capture subprocess finishes.

    See base class documentation.

    :param timeout: seconds to wait before forcing the capture to stop.
    """
    process = self._process
    if process is None:
        raise sniffer.InvalidOperationError(
            "Trying to wait on a non-started process")
    try:
        utils.wait_for_standing_subprocess(process, timeout)
        self._post_process()
    except subprocess.TimeoutExpired:
        # timed out waiting: shut the capture down instead of failing
        self.stop_capture()
[ "def", "wait_for_capture", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "_process", "is", "None", ":", "raise", "sniffer", ".", "InvalidOperationError", "(", "\"Trying to wait on a non-started process\"", ")", "try", ":", "utils", ".", ...
39.454545
11.636364
def gauge_laplacian(npts, spacing=1.0, beta=0.1):
    """Construct a Gauge Laplacian from Quantum Chromodynamics for regular
    2D grids.

    Note that this function is not written efficiently, but should be
    fine for N x N grids where N is in the low hundreds.

    Parameters
    ----------
    npts : int
        number of pts in x and y directions
    spacing : float
        grid spacing between points
    beta : float
        temperature
        Note that if beta=0, then we get the typical 5pt Laplacian stencil

    Returns
    -------
    A : csr matrix
        A is Hermitian positive definite for beta > 0.0
        A is Symmetric semi-definite for beta = 0.0

    Examples
    --------
    >>> from pyamg.gallery import gauge_laplacian
    >>> A = gauge_laplacian(10)

    References
    ----------
    .. [1] MacLachlan, S. and Oosterlee, C.,
       "Algebraic Multigrid Solvers for Complex-Valued Matrices",
       Vol. 30, SIAM J. Sci. Comp, 2008
    """
    # The gauge Laplacian has the same sparsity structure as a normal
    # Laplacian, so we start out with a Poisson Operator
    N = npts
    A = poisson((N, N), format='coo', dtype=complex)

    # alpha is a random function of a point's integer position on a 1-D
    # grid along the x or y direction, e.g. the first point at (0,0) is
    # evaluated at alpha_*[0], while the last point at
    # (N*spacing, N*spacing) is evaluated at alpha_*[-1]
    alpha_x = 1.0j * 2.0 * np.pi * beta * np.random.randn(N*N)
    alpha_y = 1.0j * 2.0 * np.pi * beta * np.random.randn(N*N)

    # Replace off diagonals of A with the complex gauge link terms
    for i in range(A.nnz):
        r = A.row[i]
        c = A.col[i]
        diff = np.abs(r - c)
        index = min(r, c)
        # sign of the exponent depends on which side of the diagonal
        if r > c:
            s = -1.0
        else:
            s = 1.0
        if diff == 1:
            # differencing in the x-direction
            A.data[i] = -1.0 * np.exp(s * alpha_x[index])
        if diff == N:
            # differencing in the y-direction
            A.data[i] = -1.0 * np.exp(s * alpha_y[index])

    # Handle periodic BCs
    # NOTE(review): fresh random phases are drawn here, so the periodic
    # wrap-around links do not reuse the interior gauge field — confirm
    # this is intentional.
    alpha_x = 1.0j * 2.0 * np.pi * beta * np.random.randn(N*N)
    alpha_y = 1.0j * 2.0 * np.pi * beta * np.random.randn(N*N)
    new_r = []
    new_c = []
    new_data = []
    new_diff = []
    # wrap-around links in the y-direction (first row <-> last row)
    for i in range(0, N):
        new_r.append(i)
        new_c.append(i + N*N - N)
        new_diff.append(N)
    for i in range(N*N - N, N*N):
        new_r.append(i)
        new_c.append(i - N*N + N)
        new_diff.append(N)
    # wrap-around links in the x-direction (first col <-> last col)
    for i in range(0, N*N-1, N):
        new_r.append(i)
        new_c.append(i + N - 1)
        new_diff.append(1)
    for i in range(N-1, N*N, N):
        new_r.append(i)
        new_c.append(i - N + 1)
        new_diff.append(1)

    # fill the data for the periodic links, same sign convention as above
    for i in range(len(new_r)):
        r = new_r[i]
        c = new_c[i]
        diff = new_diff[i]
        index = min(r, c)
        if r > c:
            s = -1.0
        else:
            s = 1.0
        if diff == 1:
            # differencing in the x-direction
            new_data.append(-1.0 * np.exp(s * alpha_x[index]))
        if diff == N:
            # differencing in the y-direction
            new_data.append(-1.0 * np.exp(s * alpha_y[index]))

    # Construct Final Matrix
    data = np.hstack((A.data, np.array(new_data)))
    row = np.hstack((A.row, np.array(new_r)))
    col = np.hstack((A.col, np.array(new_c)))
    A = sp.sparse.coo_matrix((data, (row, col)), shape=(N*N, N*N)).tocsr()
    return (1.0/spacing**2)*A
[ "def", "gauge_laplacian", "(", "npts", ",", "spacing", "=", "1.0", ",", "beta", "=", "0.1", ")", ":", "# The gauge Laplacian has the same sparsity structure as a normal", "# Laplacian, so we start out with a Poisson Operator", "N", "=", "npts", "A", "=", "poisson", "(", ...
29.156522
20.53913
def check(schema, data, trace=False):
    """Verify some json.

    Args:
        schema - the description of a general-case 'valid' json object.
        data - the json data to verify.
    Returns:
        bool: True if data matches the schema, False otherwise.
    Raises:
        TypeError: If the schema is of an unknown data type.
        ValueError: If the schema contains a string with an invalid value.
            If the schema attempts to reference a non-existent named
            schema.
    """
    # _check expects an integer trace depth or None; the `== True` test
    # is kept deliberately so truthy non-True values map to None.
    trace_depth = 1 if trace == True else None
    return _check(schema, data, trace=trace_depth)
[ "def", "check", "(", "schema", ",", "data", ",", "trace", "=", "False", ")", ":", "if", "trace", "==", "True", ":", "trace", "=", "1", "else", ":", "trace", "=", "None", "return", "_check", "(", "schema", ",", "data", ",", "trace", "=", "trace", ...
27.73913
22.217391
def get_schema(self, table_name, database=None):
    """
    Return a Schema object for the indicated table and database

    Parameters
    ----------
    table_name : string
      May be fully qualified
    database : string, default None

    Returns
    -------
    schema : ibis Schema
    """
    qualified = self._fully_qualified_name(table_name, database)
    # DESCRIBE gives column names in the first row, types in the second
    data, _, _ = self.raw_sql('DESC {0}'.format(qualified), results=True)
    names, raw_types = data[:2]
    parsed = list(map(ClickhouseDataType.parse, raw_types))
    return sch.schema(names, parsed)
[ "def", "get_schema", "(", "self", ",", "table_name", ",", "database", "=", "None", ")", ":", "qualified_name", "=", "self", ".", "_fully_qualified_name", "(", "table_name", ",", "database", ")", "query", "=", "'DESC {0}'", ".", "format", "(", "qualified_name",...
29.590909
18.681818
def write_versions(dirs, items):
    """Write data versioning for genomes present in the configuration.

    Collects the genome builds referenced by ``items``, locates each
    build's ``versions.csv`` (either attached to the sample's reference
    metadata or inside the installed genome directory) and merges them
    into one CSV with columns ``genome, resource, version``.

    :param dirs: run directory dict; ``galaxy`` is used to locate
        installed genomes when a sample carries no version file
    :param items: sample dicts with ``genome_build`` and optional
        ``reference.versions`` entries
    :return: path to the merged CSV, or None when nothing was found
    """
    # genome build -> attached version file (may be None)
    genomes = {}
    for d in items:
        genomes[d["genome_build"]] = d.get("reference", {}).get("versions")
    out_file = _get_out_file(dirs)
    found_versions = False
    if genomes and out_file:
        with open(out_file, "w") as out_handle:
            writer = csv.writer(out_handle)
            writer.writerow(["genome", "resource", "version"])
            for genome, version_file in genomes.items():
                if not version_file:
                    # fall back to versions.csv in the installed genome dir
                    genome_dir = install.get_genome_dir(genome, dirs.get("galaxy"), items[0])
                    if genome_dir:
                        version_file = os.path.join(genome_dir, "versions.csv")
                if version_file and os.path.exists(version_file):
                    found_versions = True
                    with open(version_file) as in_handle:
                        reader = csv.reader(in_handle)
                        for parts in reader:
                            if len(parts) >= 2:
                                # tolerate extra columns; keep first two
                                resource, version = parts[:2]
                                writer.writerow([genome, resource, version])
    # implicit None when no version data was found (out_file may be stale)
    if found_versions:
        return out_file
[ "def", "write_versions", "(", "dirs", ",", "items", ")", ":", "genomes", "=", "{", "}", "for", "d", "in", "items", ":", "genomes", "[", "d", "[", "\"genome_build\"", "]", "]", "=", "d", ".", "get", "(", "\"reference\"", ",", "{", "}", ")", ".", "...
46.185185
15.925926
def get_repository_admin_session(self):
    """Return the session used to create, update and delete repositories.

    return: (osid.repository.RepositoryAdminSession) - a
            ``RepositoryAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_repository_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_repository_admin()`` is ``true``.*
    """
    if self.supports_repository_admin():
        # pylint: disable=no-member
        return sessions.RepositoryAdminSession(runtime=self._runtime)
    raise errors.Unimplemented()
[ "def", "get_repository_admin_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_repository_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "RepositoryAdminSession", ...
44.9375
15.375
def get_bandstructure(self):
    """Build a LobsterBandStructureSymmLine from the parsed data.

    The returned object can be plotted with a normal BSPlotter.
    """
    band_kwargs = dict(
        kpoints=self.kpoints_array,
        eigenvals=self.eigenvals,
        lattice=self.lattice,
        efermi=self.efermi,
        labels_dict=self.label_dict,
        structure=self.structure,
        projections=self.p_eigenvals,
    )
    return LobsterBandStructureSymmLine(**band_kwargs)
[ "def", "get_bandstructure", "(", "self", ")", ":", "return", "LobsterBandStructureSymmLine", "(", "kpoints", "=", "self", ".", "kpoints_array", ",", "eigenvals", "=", "self", ".", "eigenvals", ",", "lattice", "=", "self", ".", "lattice", ",", "efermi", "=", ...
55.666667
33.666667
def remove_this_tlink(self, tlink_id):
    """
    Removes the tlink for the given tlink identifier
    @type tlink_id: string
    @param tlink_id: the tlink identifier to be removed
    """
    # remove only the first tlink whose identifier matches
    matching = (t for t in self.get_tlinks() if t.get_id() == tlink_id)
    found = next(matching, None)
    if found is not None:
        self.node.remove(found.get_node())
[ "def", "remove_this_tlink", "(", "self", ",", "tlink_id", ")", ":", "for", "tlink", "in", "self", ".", "get_tlinks", "(", ")", ":", "if", "tlink", ".", "get_id", "(", ")", "==", "tlink_id", ":", "self", ".", "node", ".", "remove", "(", "tlink", ".", ...
35.6
8
def ms_to_times(ms):
    """
    Convert milliseconds to normalized tuple (h, m, s, ms).

    Arguments:
        ms: Number of milliseconds (may be int, float or other numeric class).
            Should be non-negative.

    Returns:
        Named tuple (h, m, s, ms) of ints.
        Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
    """
    remainder = int(round(ms))
    hours, remainder = divmod(remainder, 3600000)
    minutes, remainder = divmod(remainder, 60000)
    seconds, remainder = divmod(remainder, 1000)
    return Times(hours, minutes, seconds, remainder)
[ "def", "ms_to_times", "(", "ms", ")", ":", "ms", "=", "int", "(", "round", "(", "ms", ")", ")", "h", ",", "ms", "=", "divmod", "(", "ms", ",", "3600000", ")", "m", ",", "ms", "=", "divmod", "(", "ms", ",", "60000", ")", "s", ",", "ms", "=",...
28.055556
19.166667
def print_tree(graph, tails, node_id_map): """Print out a tree of blocks starting from the common ancestor.""" # Example: # | # | 5 # * a {0, 1, 2, 3, 4} # | # | 6 # |\ # * | b {0, 1, 2, 3} # | * n {4} # | | # | | 7 # * | c {0, 1, 2, 3} # | * o {4} # | | # | | 8 # |\ \ # * | | i {2, 3} # | * | d {0, 1} # | | * p {4} # | | | # | | | 9 # * | | j {2, 3} # | * | e {0, 1} # | | * q {4} # | | | # | | | 10 # * | | k {2, 3} # | * | f {0, 1} # | | * r {4} # | | | # | | | 11 # |\ \ \ # | | |\ \ # * | | | | g {0} # | * | | | h {1} # | * | | l {2} # | | * | m {3} # | | * s {4} # | / / # | | / # | | | 12 # * | | t {0} # | * | u {2} # | | * v {4} # | | # | | 13 # * | w {0} # | * x {2} # | # | 14 # * y {0} # | 15 # * z {0} walker = graph.walk() next_block_num, next_parent, next_siblings = next(walker) prev_cliques = [] done = False while not done: cliques = {} block_num = next_block_num # Read all the cliques for this block number try: while block_num == next_block_num: cliques[next_parent] = next_siblings next_block_num, next_parent, next_siblings = next(walker) except StopIteration: # Do one last iteration after we've consumed the entire graph done = True print_cliques(prev_cliques, cliques, node_id_map) print_block_num_row(block_num, prev_cliques, cliques) print_splits(prev_cliques, cliques) print_folds(prev_cliques, cliques) prev_cliques = build_ordered_cliques(prev_cliques, cliques) print_cliques(prev_cliques, [], node_id_map)
[ "def", "print_tree", "(", "graph", ",", "tails", ",", "node_id_map", ")", ":", "# Example:", "# |", "# | 5", "# * a {0, 1, 2, 3, 4}", "# |", "# | 6", "# |\\", "# * | b {0, 1, 2, 3}", "# | * n {4}", "# | |", "# | | 7", "# * | c {0, 1, 2, 3}", "# | * o {4}", "# | |",...
21.270588
23.729412
def execute_sparql_query(query, prefix=None, endpoint='https://query.wikidata.org/sparql',
                         user_agent=config['USER_AGENT_DEFAULT'], as_dataframe=False):
    """
    Static method which can be used to execute any SPARQL query

    :param prefix: The URI prefixes required for an endpoint, default is
        the Wikidata specific prefixes
    :param query: The actual SPARQL query string
    :param endpoint: The URL string for the SPARQL endpoint. Default is
        the URL for the Wikidata SPARQL endpoint
    :param user_agent: Set a user agent string for the HTTP header to let
        the WDQS know who you are.
    :param as_dataframe: Return result as pandas dataframe
    :type user_agent: str
    :return: The results of the query are returned in JSON format
    """
    # an explicit falsy endpoint falls back to the Wikidata default
    if not endpoint:
        endpoint = 'https://query.wikidata.org/sparql'
    if prefix:
        query = prefix + '\n' + query
    response = requests.get(
        endpoint,
        params={
            'query': '#Tool: PBB_core fastrun\n' + query,
            'format': 'json',
        },
        headers={
            'Accept': 'application/sparql-results+json',
            'User-Agent': user_agent,
        },
    )
    response.raise_for_status()
    results = response.json()
    if as_dataframe:
        return WDItemEngine._sparql_query_result_to_df(results)
    return results
[ "def", "execute_sparql_query", "(", "query", ",", "prefix", "=", "None", ",", "endpoint", "=", "'https://query.wikidata.org/sparql'", ",", "user_agent", "=", "config", "[", "'USER_AGENT_DEFAULT'", "]", ",", "as_dataframe", "=", "False", ")", ":", "if", "not", "e...
40.333333
26.333333
def betweenness(self, kind='vertex', directed=None, weighted=None):
    '''Computes the betweenness centrality of a graph.

    kind : string, either 'vertex' (default) or 'edge'
    directed : bool, defaults to self.is_directed()
    weighted : bool, defaults to self.is_weighted()
    '''
    assert kind in ('vertex', 'edge'), 'Invalid kind argument: ' + kind
    use_weights = weighted is not False and self.is_weighted()
    if directed is None:
        directed = self.is_directed()
    # delegate to the module-level betweenness implementation
    scores = betweenness(self.matrix('csr'), use_weights, kind == 'vertex')
    if not directed:
        # each undirected path was counted once per direction
        scores /= 2.
    return scores
[ "def", "betweenness", "(", "self", ",", "kind", "=", "'vertex'", ",", "directed", "=", "None", ",", "weighted", "=", "None", ")", ":", "assert", "kind", "in", "(", "'vertex'", ",", "'edge'", ")", ",", "'Invalid kind argument: '", "+", "kind", "weighted", ...
42.866667
19.8
def probabilistic_collocation(order, dist, subset=.1):
    """
    Probabilistic collocation method.

    Args:
        order (int, numpy.ndarray) : Quadrature order along each axis.
        dist (Dist) : Distribution to generate samples from.
        subset (float) : Rate of which to removed samples.
    """
    abscissas, weights = chaospy.quad.collection.golub_welsch(order, dist)
    density = dist.pdf(abscissas)
    # keep a node with probability proportional to its likelihood
    keep = density > numpy.random.random(len(weights)) * subset * numpy.max(density)
    return abscissas.T[keep].T, weights[keep]
[ "def", "probabilistic_collocation", "(", "order", ",", "dist", ",", "subset", "=", ".1", ")", ":", "abscissas", ",", "weights", "=", "chaospy", ".", "quad", ".", "collection", ".", "golub_welsch", "(", "order", ",", "dist", ")", "likelihood", "=", "dist", ...
32.052632
19.210526
def append(self, data):
    """Add a data set to the next block index"""
    # n_blocks counts existing blocks, so it doubles as the next free
    # index (off by one)
    next_index = self.n_blocks
    self[next_index] = data
    self.refs.append(data)
[ "def", "append", "(", "self", ",", "data", ")", ":", "index", "=", "self", ".", "n_blocks", "# note off by one so use as index", "self", "[", "index", "]", "=", "data", "self", ".", "refs", ".", "append", "(", "data", ")" ]
38.8
12.8
def build_request(self, input_data=None, api_data=None, aux_data=None, *args, **kwargs):
    """
    Builds request

    Assembles a RequestHolder (endpoint, URL, headers, nonce and JSON
    body) for the configured operation, stores it on ``self.request``
    and returns it.

    :param input_data: client payload; stored as ``self.client_data`` and
        placed under the ``client`` body key
    :param api_data: placed under the ``apidata`` body key
    :param aux_data: extra top-level body fields; must be a dict
    :param args: unused here — presumably for subclass signatures, confirm
    :param kwargs: unused here — presumably for subclass signatures, confirm
    :return: the populated request object
    :raises ValueError: if aux_data is not a dictionary
    """
    # only overwrite stored payloads when a new value is supplied
    if input_data is not None:
        self.client_data = input_data
    if api_data is not None:
        self.api_data = api_data
    if aux_data is not None:
        self.aux_data = aux_data

    self.request = RequestHolder()
    # fresh random nonce per request — assumed replay protection, confirm
    self.request.nonce = get_random_vector(EBConsts.FRESHNESS_NONCE_LEN)
    self.request.endpoint = self.get_endpoint()
    self.request.url = self.get_endpoint().get_url() + self.url_suffix
    self.request.configuration = self.config
    self.request.api_method = self.operation
    self.request.headers = {'X-Auth-Token': 'public'}

    # Result request body
    self.request.body = {
        'nonce': to_hex(self.request.nonce),
        'version': 1,
        'function': self.operation,
        'environment': self.env if self.env is not None else ENVIRONMENT_DEVELOPMENT,
    }

    if self.client_data is not None:
        self.request.body['client'] = self.client_data

    if self.api_data is not None:
        self.request.body['apidata'] = self.api_data

    if self.aux_data is not None:
        if isinstance(self.aux_data, dict):
            # aux fields are merged into (and may override) the body
            self.request.body = EBUtils.merge(self.request.body, self.aux_data)
        else:
            raise ValueError('Aux data has to be a dictionary')

    # allow subclasses a final hook before the request is returned
    self.request = self.extend_request(self.request)
    return self.request
[ "def", "build_request", "(", "self", ",", "input_data", "=", "None", ",", "api_data", "=", "None", ",", "aux_data", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "input_data", "is", "not", "None", ":", "self", ".", "client_d...
34.583333
18.666667
def abs_area(max):
    """
    Point area palette (continuous), with area proportional to value.

    Parameters
    ----------
    max : float
        A number representing the maximum size

    Returns
    -------
    out : function
        Palette function that takes a sequence of values
        in the range ``[0, 1]`` and returns values in the range
        ``[0, max]``.

    Examples
    --------
    >>> x = np.arange(0, .8, .1)**2
    >>> palette = abs_area(5)
    >>> palette(x)
    array([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5])

    Compared to :func:`area_pal`, :func:`abs_area` will handle values
    in the range ``[-1, 0]`` without returning ``np.nan``. And values
    whose absolute value is greater than 1 will be clipped to the
    maximum.
    """
    def abs_area_palette(values):
        # area ∝ |value|, so the returned size scales with sqrt(|value|)
        return rescale(np.sqrt(np.abs(values)), to=(0, max), _from=(0, 1))
    return abs_area_palette
[ "def", "abs_area", "(", "max", ")", ":", "def", "abs_area_palette", "(", "x", ")", ":", "return", "rescale", "(", "np", ".", "sqrt", "(", "np", ".", "abs", "(", "x", ")", ")", ",", "to", "=", "(", "0", ",", "max", ")", ",", "_from", "=", "(",...
27.935484
22.645161
def _cork_one(self, s, obj):
    """
    Construct a socketpair, saving one side of it, and passing the other
    to `obj` to be written to by one of its threads.
    """
    read_side, write_side = mitogen.parent.create_socketpair(size=4096)
    for sock in (read_side, write_side):
        mitogen.core.set_cloexec(sock.fileno())
    mitogen.core.set_block(write_side)  # gevent
    self._rsocks.append(read_side)
    obj.defer(self._do_cork, s, write_side)
[ "def", "_cork_one", "(", "self", ",", "s", ",", "obj", ")", ":", "rsock", ",", "wsock", "=", "mitogen", ".", "parent", ".", "create_socketpair", "(", "size", "=", "4096", ")", "mitogen", ".", "core", ".", "set_cloexec", "(", "rsock", ".", "fileno", "...
42.454545
11
def _compose_dict_for_nginx(port_specs):
    """Return a dictionary containing the Compose spec required to run
    Dusty's nginx container used for host forwarding."""
    spec = {
        'image': constants.NGINX_IMAGE,
        'volumes': ['{}:{}'.format(constants.NGINX_CONFIG_DIR_IN_VM,
                                   constants.NGINX_CONFIG_DIR_IN_CONTAINER)],
        'command': 'nginx -g "daemon off;" -c /etc/nginx/conf.d/nginx.primary',
        'container_name': 'dusty_{}_1'.format(constants.DUSTY_NGINX_NAME),
    }
    # expose each distinct host port 1:1 into the container
    host_ports = {entry['host_port'] for entry in port_specs['nginx']}
    if host_ports:
        spec['ports'] = ['{0}:{0}'.format(port) for port in host_ports]
    return {constants.DUSTY_NGINX_NAME: spec}
[ "def", "_compose_dict_for_nginx", "(", "port_specs", ")", ":", "spec", "=", "{", "'image'", ":", "constants", ".", "NGINX_IMAGE", ",", "'volumes'", ":", "[", "'{}:{}'", ".", "format", "(", "constants", ".", "NGINX_CONFIG_DIR_IN_VM", ",", "constants", ".", "NGI...
58.307692
20.461538
def invalid_code(self, code, card_id=None):
    """
    Mark a card code as invalid (void the coupon).
    """
    payload = {'code': code}
    if card_id:
        payload['card_id'] = card_id
    return self._post('card/code/unavailable', data=payload)
[ "def", "invalid_code", "(", "self", ",", "code", ",", "card_id", "=", "None", ")", ":", "card_data", "=", "{", "'code'", ":", "code", "}", "if", "card_id", ":", "card_data", "[", "'card_id'", "]", "=", "card_id", "return", "self", ".", "_post", "(", ...
22.384615
13.923077
def is_equal_to(self, other, **kwargs):
    """Asserts that val is equal to other."""
    ignore = kwargs.get('ignore')
    include = kwargs.get('include')
    # dict-like values get a key-aware comparison with ignore/include
    both_dict_like = (
        self._check_dict_like(self.val, check_values=False, return_as_bool=True) and
        self._check_dict_like(other, check_values=False, return_as_bool=True))
    if both_dict_like:
        if self._dict_not_equal(self.val, other, ignore=ignore, include=include):
            self._dict_err(self.val, other, ignore=ignore, include=include)
    elif self.val != other:
        self._err('Expected <%s> to be equal to <%s>, but was not.' % (self.val, other))
    return self
[ "def", "is_equal_to", "(", "self", ",", "other", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_check_dict_like", "(", "self", ".", "val", ",", "check_values", "=", "False", ",", "return_as_bool", "=", "True", ")", "and", "self", ".", "_check_...
64.4
34.7
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time
    eDNA service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description
    """
    # bail out early if the tag cannot be reached at all
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # the owning service is identified by the Site.Service prefix
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    site_service = parts[0] + "." + parts[1]
    # GetPoints returns a DataFrame of point information for the service
    points = GetPoints(site_service)
    if tag_name not in points.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    description = points[points.Tag == tag_name].Description.values[0]
    return description if description else tag_name
[ "def", "GetTagDescription", "(", "tag_name", ")", ":", "# Check if the point even exists\r", "if", "not", "DoesIDExist", "(", "tag_name", ")", ":", "warnings", ".", "warn", "(", "\"WARNING- \"", "+", "tag_name", "+", "\" does not exist or \"", "+", "\"connection was d...
38.529412
20.588235
def convert_coord(coord_from, matrix_file, base_to_aligned=True):
    '''Takes an XYZ array (in DICOM coordinates) and uses the matrix file
    produced by 3dAllineate to transform it. By default, the 3dAllineate
    matrix transforms from base to aligned space; to get the inverse
    transform set ``base_to_aligned`` to ``False``

    :param coord_from: sequence of 3 floats (x, y, z) to transform
    :param matrix_file: text file with the 12 affine values written by
        3dAllineate; lines starting with "#" are treated as comments
    :param base_to_aligned: direction of the transform
    :return: numpy array of the 3 transformed coordinates, or ``False``
        when the matrix file cannot be parsed
    '''
    with open(matrix_file) as f:
        try:
            values = [float(y) for y in ' '.join(
                [x for x in f.readlines() if x.strip()[0] != '#']).strip().split()]
        except Exception:
            # was a bare "except:" — narrowed so KeyboardInterrupt and
            # SystemExit still propagate; parse errors (ValueError on
            # float(), IndexError on blank lines) are reported instead
            nl.notify('Error reading values from matrix file %s' % matrix_file,
                      level=nl.level.error)
            return False
    if len(values) != 12:
        nl.notify('Error: found %d values in matrix file %s (expecting 12)'
                  % (len(values), matrix_file), level=nl.level.error)
        return False
    # 3x4 affine rows plus a homogeneous row -> full 4x4 matrix
    matrix = np.vstack((np.array(values).reshape((3, -1)), [0, 0, 0, 1]))
    if not base_to_aligned:
        matrix = np.linalg.inv(matrix)
    # apply in homogeneous coordinates and drop the trailing 1
    return np.dot(matrix, list(coord_from) + [1])[:3]
[ "def", "convert_coord", "(", "coord_from", ",", "matrix_file", ",", "base_to_aligned", "=", "True", ")", ":", "with", "open", "(", "matrix_file", ")", "as", "f", ":", "try", ":", "values", "=", "[", "float", "(", "y", ")", "for", "y", "in", "' '", "....
60.9375
37.0625
def before_content(self):
    """Called before parsing content.

    Push the class name onto the class name stack. Used to construct the
    full name for members.
    """
    ChapelObject.before_content(self)
    if not self.names:
        return
    # record the outermost name so nested members can be fully qualified
    first_name = self.names[0][0]
    self.env.temp_data['chpl:class'] = first_name
    self.clsname_set = True
[ "def", "before_content", "(", "self", ")", ":", "ChapelObject", ".", "before_content", "(", "self", ")", "if", "self", ".", "names", ":", "self", ".", "env", ".", "temp_data", "[", "'chpl:class'", "]", "=", "self", ".", "names", "[", "0", "]", "[", "...
42.125
10.125
def get(self, type: Type[T], query: Mapping[str, Any]) -> T:
    """Gets a query from the data pipeline.

    1) Extracts the query the sequence of data sources.
    2) Inserts the result into the data sinks (if appropriate).
    3) Transforms the result into the requested type if it wasn't already.
    4) Inserts the transformed result into any data sinks.

    Args:
        type: The type of object being requested.
        query: The query being requested.

    Returns:
        The requested object.

    Raises:
        NoConversionError: if no source chain can provide ``type``.
        NotFoundError: if every handler fails to satisfy ``query``.
    """
    LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
    try:
        handlers = self._get_types[type]
    except KeyError:
        # First request for this type: build and memoize the handler
        # chain. Failures are cached as None so they aren't recomputed.
        try:
            LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
            handlers = self._get_handlers(type)
        except NoConversionError:
            handlers = None
        self._get_types[type] = handlers

    if handlers is None:
        raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))

    LOGGER.info("Creating new PipelineContext")
    context = self._new_context()

    LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
    for handler in handlers:
        try:
            # first handler that can answer the query wins
            return handler.get(query, context)
        except NotFoundError:
            pass

    raise NotFoundError("No source returned a query result!")
[ "def", "get", "(", "self", ",", "type", ":", "Type", "[", "T", "]", ",", "query", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "T", ":", "LOGGER", ".", "info", "(", "\"Getting SourceHandlers for \\\"{type}\\\"\"", ".", "format", "(", "type",...
37.925
23.075
def copy(self):
    """
    Returns a copy of the datamat.

    Implemented as a filter that keeps every fixation.
    """
    keep_all = np.ones(self._num_fix).astype(bool)
    return self.filter(keep_all)
[ "def", "copy", "(", "self", ")", ":", "return", "self", ".", "filter", "(", "np", ".", "ones", "(", "self", ".", "_num_fix", ")", ".", "astype", "(", "bool", ")", ")" ]
27.6
10
def match(self, dom, act):
    """
    Check if the given `domain` and `act` are allowed by this capability
    """
    domain_ok = self.match_domain(dom)
    # short-circuit: the action is only consulted when the domain matched
    return domain_ok and self.match_action(act)
[ "def", "match", "(", "self", ",", "dom", ",", "act", ")", ":", "return", "self", ".", "match_domain", "(", "dom", ")", "and", "self", ".", "match_action", "(", "act", ")" ]
32.5
11.5
def get_owned_by(cls, username, api=None):
    """Query ( List ) datasets by owner

    :param api: Api instance
    :param username: Owner username
    :return: Collection object
    """
    # fall back to the class-level API client when none is supplied
    effective_api = api or cls._API
    owned_url = cls._URL['owned_by'].format(username=username)
    return super(Dataset, cls)._query(url=owned_url, fields='_all',
                                      api=effective_api)
[ "def", "get_owned_by", "(", "cls", ",", "username", ",", "api", "=", "None", ")", ":", "api", "=", "api", "if", "api", "else", "cls", ".", "_API", "return", "super", "(", "Dataset", ",", "cls", ")", ".", "_query", "(", "url", "=", "cls", ".", "_U...
30.615385
11.538462
def connection_exists(self, from_obj, to_obj):
    """
    Returns ``True`` if a connection between the given objects exists,
    else ``False``.
    """
    self._validate_ctypes(from_obj, to_obj)
    matching = self.connections.filter(from_pk=from_obj.pk, to_pk=to_obj.pk)
    return matching.exists()
[ "def", "connection_exists", "(", "self", ",", "from_obj", ",", "to_obj", ")", ":", "self", ".", "_validate_ctypes", "(", "from_obj", ",", "to_obj", ")", "return", "self", ".", "connections", ".", "filter", "(", "from_pk", "=", "from_obj", ".", "pk", ",", ...
42.428571
15.571429
def add(self, name, attr=None, value=None):
    """Set values in constant."""
    # a tuple/list argument packs (name, attr, value) together
    if isinstance(name, (tuple, list)):
        name, attr, value = self.__set_iter_value(name)
    attr = name if attr is None else attr
    value = attr if value is None else value
    self.__data += (self.get_const_string(name=name, value=value),)
    # expose the new constant as a slugified attribute
    self.__dict__[s_attr(attr)] = self.__data[-1]
[ "def", "add", "(", "self", ",", "name", ",", "attr", "=", "None", ",", "value", "=", "None", ")", ":", "if", "isinstance", "(", "name", ",", "tuple", ")", "or", "isinstance", "(", "name", ",", "list", ")", ":", "name", ",", "attr", ",", "value", ...
29.933333
21.666667
def params(self):
    """Parameters used in the url of the API call and for authentication.

    :return: parameters used in the url.
    :rtype: dict
    """
    return {
        "access_token": self.access_token,
        "account_id": self.account_id,
    }
[ "def", "params", "(", "self", ")", ":", "params", "=", "{", "}", "params", "[", "\"access_token\"", "]", "=", "self", ".", "access_token", "params", "[", "\"account_id\"", "]", "=", "self", ".", "account_id", "return", "params" ]
30.5
14.3
def _get_day_of_month(other, day_option): """Find the day in `other`'s month that satisfies a BaseCFTimeOffset's onOffset policy, as described by the `day_option` argument. Parameters ---------- other : cftime.datetime day_option : 'start', 'end' 'start': returns 1 'end': returns last day of the month Returns ------- day_of_month : int """ if day_option == 'start': return 1 elif day_option == 'end': days_in_month = _days_in_month(other) return days_in_month elif day_option is None: # Note: unlike `_shift_month`, _get_day_of_month does not # allow day_option = None raise NotImplementedError else: raise ValueError(day_option)
[ "def", "_get_day_of_month", "(", "other", ",", "day_option", ")", ":", "if", "day_option", "==", "'start'", ":", "return", "1", "elif", "day_option", "==", "'end'", ":", "days_in_month", "=", "_days_in_month", "(", "other", ")", "return", "days_in_month", "eli...
26.285714
17.892857
def set_referencepixel(self, pix):
    """Set the reference pixel of the given axis in this coordinate.

    ``pix`` is stored reversed under the internal ``crpix`` key.
    """
    current = self._coord["crpix"]
    assert len(pix) == len(current)
    self._coord["crpix"] = pix[::-1]
[ "def", "set_referencepixel", "(", "self", ",", "pix", ")", ":", "assert", "len", "(", "pix", ")", "==", "len", "(", "self", ".", "_coord", "[", "\"crpix\"", "]", ")", "self", ".", "_coord", "[", "\"crpix\"", "]", "=", "pix", "[", ":", ":", "-", "...
50.25
4.5
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
    """Which predicted label explains which reference label?

    A predicted label explains the reference label which maximizes the
    minimum of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.

    Compare this with ``compute_association_matrix_of_groups``.

    Returns
    -------
    A dictionary of length ``len(np.unique(ref_labels))`` that stores for
    each reference label the predicted labels that overlap it, best match
    first.

    If ``return_overlaps`` is ``True``, this will in addition return the
    overlap of the reference group with the predicted group; normalized
    with respect to the reference group size and the predicted group size,
    respectively.
    """
    # group sizes for both label sets
    ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
    ref_dict = dict(zip(ref_unique, ref_counts))
    pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
    pred_dict = dict(zip(pred_unique, pred_counts))
    associated_predictions = {}
    associated_overlaps = {}
    for ref_label in ref_unique:
        # predicted labels occurring inside this reference group
        sub_pred_unique, sub_pred_counts = np.unique(pred_labels[ref_label == ref_labels], return_counts=True)
        # overlap normalized by predicted group size ...
        relative_overlaps_pred = [sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)]
        # ... and by reference group size
        relative_overlaps_ref = [sub_pred_counts[i] / ref_dict[ref_label] for i, n in enumerate(sub_pred_unique)]
        relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
        relative_overlaps_min = np.min(relative_overlaps, axis=1)
        # rank predicted labels by descending min-overlap (best first)
        pred_best_index = np.argsort(relative_overlaps_min)[::-1]
        associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
        associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
    if return_overlaps:
        return associated_predictions, associated_overlaps
    else:
        return associated_predictions
[ "def", "identify_groups", "(", "ref_labels", ",", "pred_labels", ",", "return_overlaps", "=", "False", ")", ":", "ref_unique", ",", "ref_counts", "=", "np", ".", "unique", "(", "ref_labels", ",", "return_counts", "=", "True", ")", "ref_dict", "=", "dict", "(...
55
29.970588
def daily_from_hourly(df):
    """Aggregate hourly data to daily values.

    Each variable is aggregated according to its meaning: mean for
    temperature, radiation, humidity and wind; sum for precipitation and
    sunshine duration; additionally daily min/max for temperature and
    humidity.

    Args:
        df: DataFrame with a DatetimeIndex at hourly resolution.
            Recognized columns: temp, precip, glob, hum, wind, ssd.

    Returns:
        DataFrame with daily values (one row per day).
    """
    df_daily = pd.DataFrame()
    if 'temp' in df:
        df_daily['temp'] = df['temp'].resample('D').mean()
        # Fix: use resample for min/max as well. The previous
        # groupby(index.date) produced an index of datetime.date objects,
        # which does not align with the DatetimeIndex that resample('D')
        # creates, leaving the tmin/tmax columns unaligned (NaN).
        df_daily['tmin'] = df['temp'].resample('D').min()
        df_daily['tmax'] = df['temp'].resample('D').max()
    if 'precip' in df:
        df_daily['precip'] = df['precip'].resample('D').sum()
    if 'glob' in df:
        df_daily['glob'] = df['glob'].resample('D').mean()
    if 'hum' in df:
        df_daily['hum'] = df['hum'].resample('D').mean()
        df_daily['hum_min'] = df['hum'].resample('D').min()
        df_daily['hum_max'] = df['hum'].resample('D').max()
    if 'wind' in df:
        df_daily['wind'] = df['wind'].resample('D').mean()
    if 'ssd' in df:
        df_daily['ssd'] = df['ssd'].resample('D').sum() / 60  # minutes to hours
    df_daily.index.name = None
    return df_daily
[ "def", "daily_from_hourly", "(", "df", ")", ":", "df_daily", "=", "pd", ".", "DataFrame", "(", ")", "if", "'temp'", "in", "df", ":", "df_daily", "[", "'temp'", "]", "=", "df", ".", "temp", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", ...
28.414634
25.902439
def may_be_null_is_nullable():
    """If may_be_null returns nullable or if NULL can be passed in.

    Probes the installed GLib typelib to tell whether ``may_be_null``
    reflects the (nullable) annotation only, or also (allow-none).

    This can still be wrong if the specific typelib is older than the
    linked libgirepository.

    https://bugzilla.gnome.org/show_bug.cgi?id=660879#c47
    """
    repo = GIRepository()
    repo.require("GLib", "2.0", 0)
    # g_spawn_sync serves as a known probe target.
    info = repo.find_by_name("GLib", "spawn_sync")
    # this argument is (allow-none) and can never be (nullable),
    # so may_be_null being True means the flag includes (allow-none).
    return not info.get_arg(8).may_be_null
[ "def", "may_be_null_is_nullable", "(", ")", ":", "repo", "=", "GIRepository", "(", ")", "repo", ".", "require", "(", "\"GLib\"", ",", "\"2.0\"", ",", "0", ")", "info", "=", "repo", ".", "find_by_name", "(", "\"GLib\"", ",", "\"spawn_sync\"", ")", "# this a...
33.642857
18.5
def ecg_wave_detector(ecg, rpeaks):
    """
    Locate P, Q, S and T waves (plus Q/T onsets and T offsets) around
    each R peak. This function needs massive help!

    For each R peak, the half of the preceding R-R interval closest to the
    peak is scanned (in reverse) for the Q and P waves; the half of the
    following R-R interval is scanned for the S and T waves. Onsets are
    estimated from peaks of the segment derivative. Beats for which a
    landmark cannot be found are silently skipped.

    Parameters
    ----------
    ecg : list or ndarray
        ECG signal (preferably filtered).
    rpeaks : list or ndarray
        R peaks localization.

    Returns
    -------
    ecg_waves : dict
        Wave peak location indices under the keys ``P_Waves``,
        ``Q_Waves``, ``Q_Waves_Onsets``, ``S_Waves``, ``T_Waves``,
        ``T_Waves_Onsets`` and ``T_Waves_Ends``.

    Notes
    -----
    A typical cardiac cycle consists of a P wave (atrial depolarization),
    a QRS complex (ventricular depolarization) and a T wave (ventricular
    repolarization).

    *Authors*

    - `Dominique Makowski <https://dominiquemakowski.github.io/>`_
    """
    p_peaks, q_peaks, q_onsets = [], [], []
    s_peaks, t_peaks, t_onsets, t_ends = [], [], [], []
    signal = np.array(ecg)

    for beat, rpeak in enumerate(rpeaks[:-3]):
        # --- P/Q side: scan the half interval preceding the R peak,
        # reversed so that offsets count backwards from the peak.
        try:
            before = signal[int(rpeaks[beat - 1]):int(rpeak)]
            before = before[int(len(before) / 2):len(before)]
            before = list(reversed(before))
            q_offset = np.min(find_peaks(before))
            q_peak = rpeak - q_offset
            p_offset = q_offset + np.argmax(before[q_offset:])
            p_peak = rpeak - p_offset
            pq_segment = before[q_offset:p_offset]
            pq_slope = np.gradient(pq_segment, 2)
            onset_offset = find_closest_in_list(
                len(pq_slope) / 2, find_peaks(pq_slope))
            q_onset = q_peak - onset_offset
            q_peaks.append(q_peak)
            p_peaks.append(p_peak)
            q_onsets.append(q_onset)
        except (ValueError, IndexError):
            pass

        # --- S/T side: scan the half interval following the R peak.
        try:
            after = signal[int(rpeak):int(rpeaks[beat + 1])]
            after = after[0:int(len(after) / 2)]
            s_offset = np.min(find_peaks(after))
            s_peak = rpeak + s_offset
            t_offset = s_offset + np.argmax(after[s_offset:])
            t_peak = rpeak + t_offset
            st_segment = after[s_offset:t_offset]
            st_slope = np.gradient(st_segment, 2)
            onset_offset = find_closest_in_list(
                len(st_slope) / 2, find_peaks(st_slope))
            t_onset = s_peak + onset_offset
            t_end = t_peak + np.min(find_peaks(after[t_offset:]))
            s_peaks.append(s_peak)
            t_peaks.append(t_peak)
            t_onsets.append(t_onset)
            t_ends.append(t_end)
        except (ValueError, IndexError):
            pass

    # TODO: find the beginning of the Q and the end of the T wave so the
    # QT interval can be extracted.
    return {"T_Waves": t_peaks,
            "P_Waves": p_peaks,
            "Q_Waves": q_peaks,
            "S_Waves": s_peaks,
            "Q_Waves_Onsets": q_onsets,
            "T_Waves_Onsets": t_onsets,
            "T_Waves_Ends": t_ends}
[ "def", "ecg_wave_detector", "(", "ecg", ",", "rpeaks", ")", ":", "q_waves", "=", "[", "]", "p_waves", "=", "[", "]", "q_waves_starts", "=", "[", "]", "s_waves", "=", "[", "]", "t_waves", "=", "[", "]", "t_waves_starts", "=", "[", "]", "t_waves_ends", ...
36.695946
27.763514
def init_pkg(pkg, repo_dest):
    """
    Initializes a custom named package module.

    This works by replacing all instances of 'project' with a custom
    module name: a patch rewrites the settings-module references in
    manage.py, settings/base.py and setup.py, then the module directory
    is renamed with ``git mv`` and the result is committed.
    """
    # Keys available for %-interpolation inside the patch text below.
    vars = {'pkg': pkg}
    with dir_path(repo_dest):
        # NOTE(review): the context lines of this diff must keep matching
        # the template files being patched — verify when templates change.
        patch("""\
diff --git a/manage.py b/manage.py
index 40ebb0a..cdfe363 100755
--- a/manage.py
+++ b/manage.py
@@ -3,7 +3,7 @@ import os
 import sys

 # Edit this if necessary or override the variable in your environment.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', '%(pkg)s.settings')

 try:
     # For local development in a virtualenv:
diff --git a/project/settings/base.py b/project/settings/base.py
index 312f280..c75e673 100644
--- a/project/settings/base.py
+++ b/project/settings/base.py
@@ -7,7 +7,7 @@ from funfactory.settings_base import *
 # If you did not install Playdoh with the funfactory installer script
 # you may need to edit this value. See the docs about installing from a
 # clone.
-PROJECT_MODULE = 'project'
+PROJECT_MODULE = '%(pkg)s'

 # Bundles is a dictionary of two dictionaries, css and js, which list css files
 # and js files that can be bundled together by the minify app.
diff --git a/setup.py b/setup.py
index 58dbd93..9a38628 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ import os

 from setuptools import setup, find_packages

-setup(name='project',
+setup(name='%(pkg)s',
       version='1.0',
       description='Django application.',
       long_description='',
""" % vars)
        # Rename the module directory itself, then record everything.
        git(['mv', 'project', pkg])
        git(['commit', '-a', '-m', 'Renamed project module to %s' % pkg])
[ "def", "init_pkg", "(", "pkg", ",", "repo_dest", ")", ":", "vars", "=", "{", "'pkg'", ":", "pkg", "}", "with", "dir_path", "(", "repo_dest", ")", ":", "patch", "(", "\"\"\"\\\n diff --git a/manage.py b/manage.py\n index 40ebb0a..cdfe363 100755\n ---...
35.358491
18.886792
def remember(self, request, username, **kw):
    """
    Returns 'WWW-Authenticate' header with a value that should be
    used in 'Authorization' header.

    Returns ``None`` when no ``credentials_callback`` is configured.
    """
    if not self.credentials_callback:
        return None
    token = self.credentials_callback(username, request)
    header_value = 'ApiKey {}:{}'.format(username, token)
    return [('WWW-Authenticate', header_value)]
[ "def", "remember", "(", "self", ",", "request", ",", "username", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "credentials_callback", ":", "token", "=", "self", ".", "credentials_callback", "(", "username", ",", "request", ")", "api_key", "=", "'Api...
47.25
8.375
def execute(cmd):
    """Execute a command and return its (rc, output) pair.

    The cmd argument can be a string or a list composed of the command
    name and each of its arguments, e.g. ['/usr/bin/cp', '-r', 'a', 'b'].
    Stderr is folded into the captured output. A non-zero exit status is
    reported through rc rather than raised; any other failure is wrapped
    in SDKInternalError.
    """
    # Normalize a shell-style string into an argv list.
    if not isinstance(cmd, list):
        cmd = shlex.split(cmd)

    rc, output = 0, ""
    try:
        output = subprocess.check_output(cmd, close_fds=True,
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        rc, output = err.returncode, err.output
    except Exception as err:
        err_msg = ('Command "%s" Error: %s' % (' '.join(cmd), str(err)))
        raise exception.SDKInternalError(msg=err_msg)

    return (rc, bytes.decode(output))
[ "def", "execute", "(", "cmd", ")", ":", "# Parse cmd string to a list", "if", "not", "isinstance", "(", "cmd", ",", "list", ")", ":", "cmd", "=", "shlex", ".", "split", "(", "cmd", ")", "# Execute command", "rc", "=", "0", "output", "=", "\"\"", "try", ...
33.666667
17.5
def memoize(fnc):
    """Cache results of *fnc* keyed on the repr of its arguments.

    >>> import random
    >>> imax = 100
    >>> def fnc1(arg=True):
    ...     return arg and random.choice((True, False))
    >>> fnc2 = memoize(fnc1)
    >>> (ret1, ret2) = (fnc1(), fnc2())
    >>> assert any(fnc1() != ret1 for i in range(imax))
    >>> assert all(fnc2() == ret2 for i in range(imax))
    """
    cache = {}

    @functools.wraps(fnc)
    def wrapped(*args, **kwargs):
        """Decorated one"""
        # repr-based key: works for unhashable args, relies on stable reprs.
        key = repr(args) + repr(kwargs)
        try:
            return cache[key]
        except KeyError:
            cache[key] = fnc(*args, **kwargs)
            return cache[key]

    return wrapped
[ "def", "memoize", "(", "fnc", ")", ":", "cache", "=", "dict", "(", ")", "@", "functools", ".", "wraps", "(", "fnc", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Decorated one\"\"\"", "key", "=", "repr", "(", "a...
25.333333
16.416667
def as_iframe(self, html_data):
    """Build the HTML iframe representation for the mapviz."""
    # Double quotes would terminate the srcdoc="..." attribute early.
    safe_doc = html_data.replace('"', "'")
    template = ('<iframe id="{div_id}", srcdoc="{srcdoc}" style="width: {width}; '
                'height: {height};"></iframe>')
    return template.format(div_id=self.div_id,
                           srcdoc=safe_doc,
                           width=self.width,
                           height=self.height)
[ "def", "as_iframe", "(", "self", ",", "html_data", ")", ":", "srcdoc", "=", "html_data", ".", "replace", "(", "'\"'", ",", "\"'\"", ")", "return", "(", "'<iframe id=\"{div_id}\", srcdoc=\"{srcdoc}\" style=\"width: {width}; '", "'height: {height};\"></iframe>'", ".", "fo...
42
11.9
def _command_list(self): """ build the command list """ ## base args cmd = [self.params.binary, "-i", OPJ(self.workdir, self.name+".treemix.in.gz"), "-o", OPJ(self.workdir, self.name), ] ## addon params args = [] for key, val in self.params: if key not in ["minmap", "binary"]: if key == "g": if val[0]: args += ["-"+key, str(val[0]), str(val[1])] elif key == "global_": if val: args += ["-"+key[:-1]] elif key in ["se", "global", "noss"]: if val: args += ["-"+key] else: if val: args += ["-"+key, str(val)] return cmd+args
[ "def", "_command_list", "(", "self", ")", ":", "## base args", "cmd", "=", "[", "self", ".", "params", ".", "binary", ",", "\"-i\"", ",", "OPJ", "(", "self", ".", "workdir", ",", "self", ".", "name", "+", "\".treemix.in.gz\"", ")", ",", "\"-o\"", ",", ...
31.777778
15.925926
def unhandled_keys(self, size, key):
    """
    Override this method to intercept keystrokes in subclasses.

    Default behavior: Toggle flagged on space, ignore other keys
    (unhandled keys are returned unchanged).
    """
    if key != " ":
        return key
    value = self.get_node().get_value()
    # Keep the selection list in sync with the flag being toggled.
    if self.flagged:
        self.display.new_files.remove(value)
    else:
        self.display.new_files.append(value)
    self.flagged = not self.flagged
    self.update_w()
    self.display.update_status()
[ "def", "unhandled_keys", "(", "self", ",", "size", ",", "key", ")", ":", "if", "key", "==", "\" \"", ":", "if", "not", "self", ".", "flagged", ":", "self", ".", "display", ".", "new_files", ".", "append", "(", "self", ".", "get_node", "(", ")", "."...
37.2
15.866667
def from_string(cls, s):
    """
    Init a new object from a raw email string.

    Args:
        s (string): raw email

    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from string")
    return cls(email.message_from_string(s))
[ "def", "from_string", "(", "cls", ",", "s", ")", ":", "log", ".", "debug", "(", "\"Parsing email from string\"", ")", "message", "=", "email", ".", "message_from_string", "(", "s", ")", "return", "cls", "(", "message", ")" ]
21.5
16.071429
def build_sector_fundamentals(sector):
    '''
    For the given sector, fetch the data we need for each stock in the
    sector from IEX.

    Once we have the data, check that the earnings reports meet our
    criteria with `eps_good()`, then put stocks that pass (and the data
    about them we'll need) into a dataframe.

    Args:
        sector: sector name accepted by `get_sector()`.

    Returns:
        pandas DataFrame with one row per passing symbol.

    Raises:
        ValueError: if the sector name yields no stocks.
    '''
    stocks = get_sector(sector)
    if len(stocks) == 0:
        raise ValueError("Invalid sector name: {}".format(sector))

    # If we can't see its PE here, we're probably not interested in a stock.
    # Omit it from batch queries.
    stocks = [s for s in stocks if s['peRatio'] is not None]

    # IEX doesn't like batch queries for more than 100 symbols at a time.
    # We need to build our fundamentals info iteratively.
    batch_idx = 0
    batch_size = 99
    fundamentals_dict = {}
    while batch_idx < len(stocks):
        symbol_batch = [s['symbol'] for s in stocks[batch_idx:batch_idx + batch_size]]
        stock_batch = Stock(symbol_batch)

        # Pull all the data we'll need from IEX.
        financials_json = stock_batch.get_financials()
        quote_json = stock_batch.get_quote()
        stats_json = stock_batch.get_key_stats()
        earnings_json = stock_batch.get_earnings()

        for symbol in symbol_batch:
            # We'll filter based on earnings first to keep our fundamentals
            # info a bit cleaner.
            if not eps_good(earnings_json[symbol]):
                continue

            # Make sure we have all the data we'll need for our filters for
            # this stock.
            if not data_quality_good(
                    symbol, financials_json, quote_json, stats_json):
                continue

            fundamentals_dict[symbol] = get_fundamental_data_for_symbol(
                symbol,
                financials_json,
                quote_json,
                stats_json
            )

        batch_idx += batch_size

    # Transform all our data into a more filterable form - a dataframe - with
    # a bit of pandas magic: symbols become rows via the transpose.
    return pd.DataFrame.from_dict(fundamentals_dict).T
[ "def", "build_sector_fundamentals", "(", "sector", ")", ":", "stocks", "=", "get_sector", "(", "sector", ")", "if", "len", "(", "stocks", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Invalid sector name: {}\"", ".", "format", "(", "sector", ")", ")", ...
38.140351
20.77193
def modExp(a, d, n):
    """Return ``a ** d (mod n)``.

    Delegates to Python's built-in three-argument :func:`pow`, which
    performs modular exponentiation in C (square-and-multiply) — this
    replaces the hand-rolled binary-exponentiation loop and its
    dependency on ``int2baseTwo``.

    Args:
        a: base (any integer).
        d: non-negative exponent.
        n: positive modulus.

    Returns:
        int: ``a ** d % n``.
    """
    # Preserve the original precondition checks (note: asserts are
    # stripped under ``python -O``).
    assert d >= 0
    assert n >= 0
    return pow(a, d, n)
[ "def", "modExp", "(", "a", ",", "d", ",", "n", ")", ":", "assert", "d", ">=", "0", "assert", "n", ">=", "0", "base2D", "=", "int2baseTwo", "(", "d", ")", "base2DLength", "=", "len", "(", "base2D", ")", "modArray", "=", "[", "]", "result", "=", ...
27.176471
14.705882
def handle_close(self, header, payload):
    """
    Called when a close frame has been decoded from the stream.

    Validates the frame per the WebSocket close-frame layout (a 16-bit
    big-endian status code optionally followed by a UTF-8 reason) and
    echoes the close back to the peer.

    :param header: The decoded `Header`.
    :param payload: The bytestring payload associated with the close frame.

    :raises WebSocketError: on a 1-byte payload or an invalid close code.
    :raises UnicodeError: when the reason text is not valid UTF-8.
    """
    if not payload:
        # Empty close frame: reply with the normal-closure code 1000.
        self.close(1000, None)
        return

    if len(payload) < 2:
        # A close payload must carry at least the 2-byte status code.
        raise WebSocketError('Invalid close frame: {0} {1}'.format(header, payload))

    rv = payload[:2]

    if six.PY2:
        code = struct.unpack('!H', str(rv))[0]
    else:
        code = struct.unpack('!H', bytes(rv))[0]

    payload = payload[2:]

    if payload:
        # Remaining bytes are the close reason; it must be valid UTF-8.
        validator = Utf8Validator()
        val = validator.validate(payload)

        if not val[0]:
            raise UnicodeError

    if not self._is_valid_close_code(code):
        raise WebSocketError('Invalid close code {0}'.format(code))

    self.close(code, payload)
[ "def", "handle_close", "(", "self", ",", "header", ",", "payload", ")", ":", "if", "not", "payload", ":", "self", ".", "close", "(", "1000", ",", "None", ")", "return", "if", "len", "(", "payload", ")", "<", "2", ":", "raise", "WebSocketError", "(", ...
36.307692
15.538462
def send_video_note(self, *args, **kwargs):
    """See :func:`send_video`.

    Delegates to the module-level ``send_video_note`` function (the
    method name shadows it only inside this class), merging this
    instance's override defaults into ``kwargs`` before running the
    resulting request.
    """
    return send_video_note(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "send_video_note", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "send_video_note", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run", "(", ")" ]
52.333333
13.666667
def _get_socketpair(self):
    """ Return an unused socketpair, creating one if none exist.

    Idle pairs are recycled from the class-level pool
    ``_cls_idle_socketpairs``; freshly created sockets are marked
    close-on-exec and recorded in ``_cls_all_sockets``.
    """
    try:
        # EAFP: racing threads may both see a non-empty pool, so rely on
        # the single atomic pop() rather than a length check.
        return self._cls_idle_socketpairs.pop()  # pop() must be atomic
    except IndexError:
        rsock, wsock = socket.socketpair()
        # Prevent the descriptors from leaking into forked children.
        set_cloexec(rsock.fileno())
        set_cloexec(wsock.fileno())
        self._cls_all_sockets.extend((rsock, wsock))
        return rsock, wsock
[ "def", "_get_socketpair", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_cls_idle_socketpairs", ".", "pop", "(", ")", "# pop() must be atomic", "except", "IndexError", ":", "rsock", ",", "wsock", "=", "socket", ".", "socketpair", "(", ")", "set_...
36.333333
12.333333
def _basic_cancel(self, frame_in):
    """Handle a Basic Cancel frame.

    Logs the server-initiated cancellation and drops the consumer tag
    from this channel's bookkeeping.

    :param specification.Basic.Cancel frame_in: Amqp frame.

    :return:
    """
    LOGGER.warning(
        'Received Basic.Cancel on consumer_tag: %s',
        try_utf8_decode(frame_in.consumer_tag)
    )
    self.remove_consumer_tag(frame_in.consumer_tag)
[ "def", "_basic_cancel", "(", "self", ",", "frame_in", ")", ":", "LOGGER", ".", "warning", "(", "'Received Basic.Cancel on consumer_tag: %s'", ",", "try_utf8_decode", "(", "frame_in", ".", "consumer_tag", ")", ")", "self", ".", "remove_consumer_tag", "(", "frame_in",...
29.666667
18.5
def from_records(cls, records, name=None, **kwargs):
    """Creates a new instance of self from the given (list of) record(s).

    A "record" is a tuple in which each element is the value of one
    field in the resulting record array. This is done by calling
    `numpy.rec.fromrecords` on the given records with the given kwargs.
    The type of the returned array is cast to this class, and the name
    (if provided) is set.

    Parameters
    ----------
    records : (list of) tuple(s)
        A list of the tuples to create the FieldArray from.
    name : {None|str}
        What the output array should be named.

    Other Parameters
    ----------------
    For other keyword parameters, see the `numpy.rec.fromrecords` help.

    Returns
    -------
    array : instance of this class
        An array that is an instance of this class in which the field
        data is from the given record(s).
    """
    # Build a plain record array first, then re-view it as this subclass
    # so subclass attributes (like ``name``) can be attached.
    obj = numpy.rec.fromrecords(records, **kwargs).view(
        type=cls)
    obj.name = name
    return obj
[ "def", "from_records", "(", "cls", ",", "records", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "numpy", ".", "rec", ".", "fromrecords", "(", "records", ",", "*", "*", "kwargs", ")", ".", "view", "(", "type", "=", "cls...
36.6
21.4
def cauldron_extras(self):
    """
    Yield extra tuples containing a field name and a callable that
    takes a row.
    """
    for extra in super(Dimension, self).cauldron_extras:
        yield extra

    # Formatted dimensions expose their unformatted value under
    # '<id>_raw'; otherwise fall back to the plain id property.
    lookup = self.id + '_raw' if self.formatters else self.id_prop
    yield self.id + '_id', lambda row: getattr(row, lookup)
[ "def", "cauldron_extras", "(", "self", ")", ":", "for", "extra", "in", "super", "(", "Dimension", ",", "self", ")", ".", "cauldron_extras", ":", "yield", "extra", "if", "self", ".", "formatters", ":", "prop", "=", "self", ".", "id", "+", "'_raw'", "els...
29.230769
17.923077
def fit(self, X, y=None):
    """
    Fits the RFECV with the wrapped model to the specified data and draws
    the rfecv curve with the optimal number of features found.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression.

    Returns
    -------
    self : instance
        Returns the instance of the RFECV visualizer.
    """
    X, y = check_X_y(X, y, "csr")
    n_features = X.shape[1]

    # This check is kind of unnecessary since RFE will do it, but it's
    # nice to get it out of the way ASAP and raise a meaningful error.
    if 0.0 < self.step < 1.0:
        # A fractional step means "this fraction of features per round".
        step = int(max(1, self.step * n_features))
    else:
        step = int(self.step)

    if step < 0:
        raise YellowbrickValueError("step must be >0")

    # Create the RFE model
    rfe = RFE(self.estimator, step=step)
    # Candidate subset sizes: 1, 1+step, 1+2*step, ... up to n_features.
    self.n_feature_subsets_ = np.arange(1, n_features+step, step)

    # Create the cross validation params
    # TODO: handle random state
    cv_params = {
        key: self.get_params()[key]
        for key in ('groups', 'cv', 'scoring')
    }

    # Perform cross-validation for each feature subset
    scores = []
    for n_features_to_select in self.n_feature_subsets_:
        rfe.set_params(n_features_to_select=n_features_to_select)
        scores.append(cross_val_score(rfe, X, y, **cv_params))

    # Convert scores to array
    self.cv_scores_ = np.array(scores)

    # Find the best RFE model: subset size with the highest mean CV score.
    bestidx = self.cv_scores_.mean(axis=1).argmax()
    self.n_features_ = self.n_feature_subsets_[bestidx]

    # Fit the final RFE model for the number of features
    self.rfe_estimator_ = rfe
    self.rfe_estimator_.set_params(n_features_to_select=self.n_features_)
    self.rfe_estimator_.fit(X, y)

    # Rewrap the visualizer to use the rfe estimator
    self._wrapped = self.rfe_estimator_

    # Hoist the RFE params to the visualizer
    self.support_ = self.rfe_estimator_.support_
    self.ranking_ = self.rfe_estimator_.ranking_

    self.draw()
    return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "X", ",", "y", "=", "check_X_y", "(", "X", ",", "y", ",", "\"csr\"", ")", "n_features", "=", "X", ".", "shape", "[", "1", "]", "# This check is kind of unnecessary since RFE will do ...
34.471429
21.357143
def _get_reference(self): """:return: Reference Object we point to :raise TypeError: If this symbolic reference is detached, hence it doesn't point to a reference, but to a commit""" sha, target_ref_path = self._get_ref_info(self.repo, self.path) if target_ref_path is None: raise TypeError("%s is a detached symbolic reference as it points to %r" % (self, sha)) return self.from_path(self.repo, target_ref_path)
[ "def", "_get_reference", "(", "self", ")", ":", "sha", ",", "target_ref_path", "=", "self", ".", "_get_ref_info", "(", "self", ".", "repo", ",", "self", ".", "path", ")", "if", "target_ref_path", "is", "None", ":", "raise", "TypeError", "(", "\"%s is a det...
58.625
21.875
def parse_package(self, inputstring, addhash=True):
    """Parse package code.

    Optionally computes a package hash for the source before delegating
    to the generic parser with package-mode settings.
    """
    use_hash = self.genhash(True, inputstring) if addhash else None
    return self.parse(
        inputstring,
        self.file_parser,
        {"nl_at_eof_check": True},
        {"header": "package", "use_hash": use_hash},
    )
[ "def", "parse_package", "(", "self", ",", "inputstring", ",", "addhash", "=", "True", ")", ":", "if", "addhash", ":", "use_hash", "=", "self", ".", "genhash", "(", "True", ",", "inputstring", ")", "else", ":", "use_hash", "=", "None", "return", "self", ...
46.428571
24.857143
def calc_containment(self):
    """Calculate PSF containment.

    For each event selection, derives the separation angles containing
    34/68/90/95% of events (via ``calc_quantiles``) both per
    cos(theta) bin and summed over the cos(theta) axis, storing the
    quantile values and errors in ``self._hists_eff``.
    """
    hists = self.hists
    hists_out = self._hists_eff
    quantiles = [0.34, 0.68, 0.90, 0.95]
    # Index of the cos(theta) axis in the on/off histograms.
    # NOTE(review): assumes evclass arrays carry cth on axis 2 and
    # evtype arrays on axis 3 — confirm against the histogram builder.
    cth_axis_idx = dict(evclass=2, evtype=3)

    # evtype processing appears disabled; only evclass is computed.
    for k in ['evclass']:  # ,'evtype']:

        # NOTE(review): leftover debug output — consider removing.
        print(k)

        non = hists['%s_psf_on' % k]
        noff = hists['%s_psf_off' % k]
        # Broadcast alpha against the trailing separation axis.
        alpha = hists['%s_alpha' % k][..., None]

        if k == 'evclass':
            sep = self._sep_bins[None, :, None, 1:]
        else:
            sep = self._sep_bins[None, None, :, None, 1:]

        # Quantiles resolved per cos(theta) bin.
        qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles)
        for i, q in enumerate(quantiles):
            hists_out['%s_cth_q%2i' % (k, q * 100)] = qval[i]
            hists_out['%s_cth_q%2i_err' % (k, q * 100)] = qerr[i]

        # Collapse the cos(theta) axis and recompute inclusive quantiles.
        non = np.sum(non, axis=cth_axis_idx[k])
        noff = np.sum(noff, axis=cth_axis_idx[k])
        alpha = np.squeeze(alpha, axis=cth_axis_idx[k])
        sep = np.squeeze(sep, axis=cth_axis_idx[k])

        qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles)
        for i, q in enumerate(quantiles):
            hists_out['%s_q%2i' % (k, q * 100)] = qval[i]
            hists_out['%s_q%2i_err' % (k, q * 100)] = qerr[i]
[ "def", "calc_containment", "(", "self", ")", ":", "hists", "=", "self", ".", "hists", "hists_out", "=", "self", ".", "_hists_eff", "quantiles", "=", "[", "0.34", ",", "0.68", ",", "0.90", ",", "0.95", "]", "cth_axis_idx", "=", "dict", "(", "evclass", "...
39.242424
18.939394
def array(self) -> numpy.ndarray:
    """The series data of all logged |IOSequence| objects, flattened
    into one single |numpy.ndarray| object.

    The layout follows |NetCDFVariableAgg.shape|: depending on the
    configured time axis, one axis enumerates time steps and the other
    enumerates the flattened entries of all logged sequences (the
    individual time series of higher-dimensional sequences — e.g. of
    different hydrological response units — occupy consecutive slots).
    Entries start as |fillvalue| and are filled slot by slot.
    """
    out = numpy.full(self.shape, fillvalue, dtype=float)
    position = 0
    lead: List[Any] = [slice(None)]
    for sequence, series in zip(self.sequences.values(), self.arrays.values()):
        # One flat slot per entry of the (possibly multi-dimensional)
        # sequence; the leading slice keeps the full time dimension.
        for indices in self._product(sequence.shape):
            out[self.get_timeplaceslice(position)] = (
                series[tuple(lead + list(indices))])
            position += 1
    return out
[ "def", "array", "(", "self", ")", "->", "numpy", ".", "ndarray", ":", "array", "=", "numpy", ".", "full", "(", "self", ".", "shape", ",", "fillvalue", ",", "dtype", "=", "float", ")", "idx0", "=", "0", "idxs", ":", "List", "[", "Any", "]", "=", ...
44
18.15
def commatize(leafs):
    """
    Accepts/turns: (Name, Name, ..., Name, Name)
    Returns/into: (Name, Comma, Name, Comma, ..., Name, Comma, Name)

    An empty input yields an empty list (previously this raised
    IndexError when deleting the trailing comma from an empty result).
    """
    if not leafs:
        return []
    new_leafs = []
    for leaf in leafs:
        new_leafs.append(leaf)
        new_leafs.append(Comma())
    # Drop the comma appended after the final leaf.
    del new_leafs[-1]
    return new_leafs
[ "def", "commatize", "(", "leafs", ")", ":", "new_leafs", "=", "[", "]", "for", "leaf", "in", "leafs", ":", "new_leafs", ".", "append", "(", "leaf", ")", "new_leafs", ".", "append", "(", "Comma", "(", ")", ")", "del", "new_leafs", "[", "-", "1", "]"...
26.818182
13.727273
def execute(self, command, term='xterm'):
    """
    Execute a command on the remote server

    This method will forward traffic from the websocket to the SSH
    server and the other way around.

    You must connect to a SSH server using ssh_connect()
    prior to starting the session.

    :param command: command line to run on the remote host.
    :param term: terminal type requested for the PTY (default 'xterm').
    """
    transport = self._ssh.get_transport()
    channel = transport.open_session()
    # Request a pseudo-terminal so interactive programs behave normally.
    channel.get_pty(term)
    channel.exec_command(command)
    # Shuttle data between the websocket and the SSH channel until done.
    self._bridge(channel)
    channel.close()
[ "def", "execute", "(", "self", ",", "command", ",", "term", "=", "'xterm'", ")", ":", "transport", "=", "self", ".", "_ssh", ".", "get_transport", "(", ")", "channel", "=", "transport", ".", "open_session", "(", ")", "channel", ".", "get_pty", "(", "te...
34.333333
13.066667
def warnExtractFromRegexpGroups(self, line, match):
    """
    Extract file name, line number, and warning text as groups (1,2,3)
    of warningPattern match.
    """
    lineNo = match.group(2)
    # Group 2 may be absent; only convert when a number was captured.
    return (match.group(1),
            int(lineNo) if lineNo is not None else None,
            match.group(3))
[ "def", "warnExtractFromRegexpGroups", "(", "self", ",", "line", ",", "match", ")", ":", "file", "=", "match", ".", "group", "(", "1", ")", "lineNo", "=", "match", ".", "group", "(", "2", ")", "if", "lineNo", "is", "not", "None", ":", "lineNo", "=", ...
35.7
9.9
def serial_udb_extra_f5_send(self, sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD, sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST, force_mavlink1=False):
    '''
    Backwards compatible version of SERIAL_UDB_EXTRA F5: format

    sue_YAWKP_AILERON              : Serial UDB YAWKP_AILERON Gain for Proporional control of navigation (float)
    sue_YAWKD_AILERON              : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float)
    sue_ROLLKP                     : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float)
    sue_ROLLKD                     : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float)
    sue_YAW_STABILIZATION_AILERON  : YAW_STABILIZATION_AILERON Proportional control (float)
    sue_AILERON_BOOST              : Gain For Boosting Manual Aileron control When Plane Stabilized (float)
    '''
    # Encode first, then hand the finished message to the transport.
    message = self.serial_udb_extra_f5_encode(
        sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD,
        sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST)
    return self.send(message, force_mavlink1=force_mavlink1)
[ "def", "serial_udb_extra_f5_send", "(", "self", ",", "sue_YAWKP_AILERON", ",", "sue_YAWKD_AILERON", ",", "sue_ROLLKP", ",", "sue_ROLLKD", ",", "sue_YAW_STABILIZATION_AILERON", ",", "sue_AILERON_BOOST", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", "...
91.538462
70.307692
def clone(self, data=None, shared_data=True, new_type=None, link=True, *args, **overrides): """Clones the object, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to link (bool, optional): Whether clone should be linked Determines whether Streams and Links attached to original object will be inherited. *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned object """ if 'link_inputs' in overrides and util.config.future_deprecations: self.param.warning( 'link_inputs argument to the clone method is deprecated, ' 'use the more general link argument instead.') link = link and overrides.pop('link_inputs', True) callback = overrides.pop('callback', self.callback) if data is None and shared_data: data = self.data if link and callback is self.callback: overrides['plot_id'] = self._plot_id clone = super(UniformNdMapping, self).clone( callback, shared_data, new_type, link, *(data,) + args, **overrides) # Ensure the clone references this object to ensure # stream sources are inherited if clone.callback is self.callback: with util.disable_constant(clone): clone.callback = clone.callback.clone(inputs=[self], link_inputs=link) return clone
[ "def", "clone", "(", "self", ",", "data", "=", "None", ",", "shared_data", "=", "True", ",", "new_type", "=", "None", ",", "link", "=", "True", ",", "*", "args", ",", "*", "*", "overrides", ")", ":", "if", "'link_inputs'", "in", "overrides", "and", ...
45.657895
18.236842
def getAceTypeText(self, t): ''' returns the textual representation of a acetype bit ''' try: return self.validAceTypes[t]['TEXT'] except KeyError: raise CommandExecutionError(( 'No ACE type "{0}". It should be one of the following: {1}' ).format(t, ', '.join(self.validAceTypes)))
[ "def", "getAceTypeText", "(", "self", ",", "t", ")", ":", "try", ":", "return", "self", ".", "validAceTypes", "[", "t", "]", "[", "'TEXT'", "]", "except", "KeyError", ":", "raise", "CommandExecutionError", "(", "(", "'No ACE type \"{0}\". It should be one of th...
37
19.8
def match_filtered_identities(self, fa, fb): """Determine if two filtered identities are the same. The method compares the email addresses or the names of each filtered identity to check if they are the same. When the given filtered identities are the same object or share the same UUID, this will also produce a positive match. Identities which their email addresses or names are in the blacklist will be ignored and the result of the comparison will be false. :param fa: filtered identity to match :param fb: filtered identity to match :returns: True when both filtered identities are likely to be the same. Otherwise, returns False. :raises ValueError: when any of the given filtered identities is not an instance of EmailNameIdentity class. """ if not isinstance(fa, EmailNameIdentity): raise ValueError("<fa> is not an instance of UniqueIdentity") if not isinstance(fb, EmailNameIdentity): raise ValueError("<fb> is not an instance of EmailNameIdentity") if fa.uuid and fb.uuid and fa.uuid == fb.uuid: return True if self._check_blacklist(fa): return False # Compare email addresses first if fa.email and fa.email == fb.email: return True # No match yet, so compare names if fa.name and fa.name == fb.name: return True return False
[ "def", "match_filtered_identities", "(", "self", ",", "fa", ",", "fb", ")", ":", "if", "not", "isinstance", "(", "fa", ",", "EmailNameIdentity", ")", ":", "raise", "ValueError", "(", "\"<fa> is not an instance of UniqueIdentity\"", ")", "if", "not", "isinstance", ...
36.775
21.675
def proximal(self): """Return the ``proximal factory`` of the functional. See Also -------- odl.solvers.nonsmooth.proximal_operators.proximal_l1 : `proximal factory` for the L1-norm. """ if self.pointwise_norm.exponent == 1: return proximal_l1(space=self.domain) elif self.pointwise_norm.exponent == 2: return proximal_l1_l2(space=self.domain) else: raise NotImplementedError('`proximal` only implemented for p = 1 ' 'or 2')
[ "def", "proximal", "(", "self", ")", ":", "if", "self", ".", "pointwise_norm", ".", "exponent", "==", "1", ":", "return", "proximal_l1", "(", "space", "=", "self", ".", "domain", ")", "elif", "self", ".", "pointwise_norm", ".", "exponent", "==", "2", "...
37.4
16.066667
def default(self): """Default the NTP source entry from the node. Returns: True if the operation succeeds, otherwise False. """ cmd = self.command_builder('ntp source', default=True) return self.configure(cmd)
[ "def", "default", "(", "self", ")", ":", "cmd", "=", "self", ".", "command_builder", "(", "'ntp source'", ",", "default", "=", "True", ")", "return", "self", ".", "configure", "(", "cmd", ")" ]
31.875
16.75
def failed_login_limit_reached(self): """ A boolean method to check for failed login limit being reached""" login_limit = 10 if self.failed_logins and self.failed_logins >= login_limit: return True else: return False
[ "def", "failed_login_limit_reached", "(", "self", ")", ":", "login_limit", "=", "10", "if", "self", ".", "failed_logins", "and", "self", ".", "failed_logins", ">=", "login_limit", ":", "return", "True", "else", ":", "return", "False" ]
38
15.285714
def switch_to_datetime_instants(df, start_year, eplus_frequency): """ works inplace """ # timestep -> monthly if eplus_frequency in (TIMESTEP, DAILY, HOURLY, MONTHLY): # prepare year switch if eplus_frequency in (TIMESTEP, HOURLY, DAILY): # print((df[["month", "day"]] - df[["month", "day"]].shift()) == pd.Series([-12, -31])) year_counter = ( (df[["month", "day"]] - df[["month", "day"]].shift()) == pd.Series(dict(month=12, day=-31)) ).all(axis=1).cumsum() else: year_counter = ((df["month"] - df["month"].shift()) == -12).cumsum() # add year columns df["year"] = year_counter + start_year # create index columns = { TIMESTEP: ("year", "month", "day", "hour", "minute"), HOURLY: ("year", "month", "day", "hour"), DAILY: ("year", "month", "day"), MONTHLY: ("year", "month") }[eplus_frequency] if eplus_frequency == MONTHLY: df.index = df.apply( # apply transforms ints to floats, we need to re-cast lambda x: dt.datetime(*(tuple(int(x[k]) for k in columns) + (1,))), axis=1 ) else: df.index = df.apply(lambda x: dt.datetime(*(int(x[k]) for k in columns)), axis=1) # drop old columns df.drop(columns=list(columns), inplace=True) # force frequency if eplus_frequency == TIMESTEP: # find freq ts = df.index[1] - df.index[0] # force forced_df = df.asfreq(ts) else: forced_df = df.asfreq({ HOURLY: "H", DAILY: "D", MONTHLY: "MS" }[eplus_frequency]) # if timestep, hourly or daily, check did not change (only those can suffer from leap year problems) if eplus_frequency in (TIMESTEP, HOURLY, DAILY): try: assert_index_equal(df.index, forced_df.index) except AssertionError: raise ValueError( f"Couldn't convert to datetime instants (frequency: {eplus_frequency}). Probable cause : " f"given start year ({start_year}) is incorrect and data can't match because of leap year issues." 
) from None return forced_df # annual if eplus_frequency == ANNUAL: # check first year if df["year"].iloc[0] != start_year: raise ValueError( f"Given start year ({start_year}) differs from annual output data first year ({df['year'].iloc[0]})," f"can't switch to datetime instants.") df.index = df["year"].map(lambda x: dt.datetime(x, 1, 1)) del df["year"] # force freq df = df.asfreq("YS") return df # run period if eplus_frequency == RUN_PERIOD: return df raise AssertionError("should not be here")
[ "def", "switch_to_datetime_instants", "(", "df", ",", "start_year", ",", "eplus_frequency", ")", ":", "# timestep -> monthly", "if", "eplus_frequency", "in", "(", "TIMESTEP", ",", "DAILY", ",", "HOURLY", ",", "MONTHLY", ")", ":", "# prepare year switch", "if", "ep...
35.780488
22.780488
def to_source(node, indent_with=' ' * 4, add_line_information=False, pretty_string=pretty_string, pretty_source=pretty_source): """This function can convert a node tree back into python sourcecode. This is useful for debugging purposes, especially if you're dealing with custom asts not generated by python itself. It could be that the sourcecode is evaluable when the AST itself is not compilable / evaluable. The reason for this is that the AST contains some more data than regular sourcecode does, which is dropped during conversion. Each level of indentation is replaced with `indent_with`. Per default this parameter is equal to four spaces as suggested by PEP 8, but it might be adjusted to match the application's styleguide. If `add_line_information` is set to `True` comments for the line numbers of the nodes are added to the output. This can be used to spot wrong line number information of statement nodes. """ generator = SourceGenerator(indent_with, add_line_information, pretty_string) generator.visit(node) generator.result.append('\n') if set(generator.result[0]) == set('\n'): generator.result[0] = '' return pretty_source(generator.result)
[ "def", "to_source", "(", "node", ",", "indent_with", "=", "' '", "*", "4", ",", "add_line_information", "=", "False", ",", "pretty_string", "=", "pretty_string", ",", "pretty_source", "=", "pretty_source", ")", ":", "generator", "=", "SourceGenerator", "(", "i...
47
22.925926
def get_annotation_data_between_times(self, id_tier, start, end): """Gives the annotations within the times. When the tier contains reference annotations this will be returned, check :func:`get_ref_annotation_data_between_times` for the format. :param str id_tier: Name of the tier. :param int start: Start time of the annotation. :param int end: End time of the annotation. :returns: List of annotations within that time. :raises KeyError: If the tier is non existent. """ if self.tiers[id_tier][1]: return self.get_ref_annotation_data_between_times( id_tier, start, end) anns = ((self.timeslots[a[0]], self.timeslots[a[1]], a[2]) for a in self.tiers[id_tier][0].values()) return sorted(a for a in anns if a[1] >= start and a[0] <= end)
[ "def", "get_annotation_data_between_times", "(", "self", ",", "id_tier", ",", "start", ",", "end", ")", ":", "if", "self", ".", "tiers", "[", "id_tier", "]", "[", "1", "]", ":", "return", "self", ".", "get_ref_annotation_data_between_times", "(", "id_tier", ...
50.705882
17.705882
def write(filename, mesh, write_binary=False): """Writes mdpa files, cf. <https://github.com/KratosMultiphysics/Kratos/wiki/Input-data>. """ assert not write_binary if mesh.points.shape[1] == 2: logging.warning( "mdpa requires 3D points, but 2D points given. " "Appending 0 third component." ) mesh.points = numpy.column_stack( [mesh.points[:, 0], mesh.points[:, 1], numpy.zeros(mesh.points.shape[0])] ) # Kratos cells are mostly ordered like VTK, with a few exceptions: cells = mesh.cells.copy() if "hexahedron20" in cells: cells["hexahedron20"] = cells["hexahedron20"][ :, [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 10, 9, 16, 17, 18, 19, 12, 15, 14, 13] ] if "hexahedron27" in cells: cells["hexahedron27"] = cells["hexahedron27"][ :, [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 10, 9, 16, 17, 18, 19, 12, 15, 14, 13, 22, 24, 21, 23, 20, 25, 26, ], ] with open(filename, "wb") as fh: # Write some additional info fh.write(("Begin ModelPartData\n").encode("utf-8")) fh.write(("// VARIABLE_NAME value\n").encode("utf-8")) fh.write(("End ModelPartData\n\n").encode("utf-8")) fh.write(("Begin Properties 0\n").encode("utf-8")) fh.write(("End Properties\n\n").encode("utf-8")) # Split the cell data: gmsh:physical and gmsh:geometrical are tags, the # rest is actual cell data. 
tag_data = {} other_data = {} for cell_type, a in mesh.cell_data.items(): tag_data[cell_type] = {} other_data[cell_type] = {} for key, data in a.items(): if key in ["gmsh:physical", "gmsh:geometrical"]: tag_data[cell_type][key] = data.astype(numpy.int32) else: other_data[cell_type][key] = data # We identity which dimension are we dimension = 2 for cell_type in cells.keys(): name_elem = _meshio_to_mdpa_type[cell_type] if local_dimension_types[name_elem] == 3: dimension = 3 break # We identify the entities _write_nodes(fh, mesh.points, write_binary) _write_elements_and_conditions(fh, cells, tag_data, write_binary, dimension) for name, dat in mesh.point_data.items(): _write_data(fh, "NodalData", name, dat, write_binary) cell_data_raw = raw_from_cell_data(other_data) for ( name, dat, ) in ( cell_data_raw.items() ): # NOTE: We will assume always when writing that the components are elements (for now) _write_data(fh, "ElementalData", name, dat, write_binary) return
[ "def", "write", "(", "filename", ",", "mesh", ",", "write_binary", "=", "False", ")", ":", "assert", "not", "write_binary", "if", "mesh", ".", "points", ".", "shape", "[", "1", "]", "==", "2", ":", "logging", ".", "warning", "(", "\"mdpa requires 3D poin...
32.244898
20.326531
def get(self, request, *args, **kwargs): """Handle get request""" try: kwargs = self.load_object(kwargs) except Exception as e: return self.render_te_response({ 'title': str(e), }) if not self.has_permission(request): return self.render_te_response({ 'title': 'No access', }) return self.render_te_response(self.display_dialog(*args, **kwargs))
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "kwargs", "=", "self", ".", "load_object", "(", "kwargs", ")", "except", "Exception", "as", "e", ":", "return", "self", ".", "render_te_respon...
33.142857
13.857143
def _build_indices(X, flann_args): "Builds FLANN indices for each bag." # TODO: should probably multithread this logger.info("Building indices...") indices = [None] * len(X) for i, bag in enumerate(plog(X, name="index building")): indices[i] = idx = FLANNIndex(**flann_args) idx.build_index(bag) return indices
[ "def", "_build_indices", "(", "X", ",", "flann_args", ")", ":", "# TODO: should probably multithread this", "logger", ".", "info", "(", "\"Building indices...\"", ")", "indices", "=", "[", "None", "]", "*", "len", "(", "X", ")", "for", "i", ",", "bag", "in",...
38
9.777778
def make_id(name): """ Create a random id combined with the creditor name. @return string consisting of name (truncated at 22 chars), -, 12 char rand hex string. """ name = re.sub(r'[^a-zA-Z0-9]', '', name) r = get_rand_string(12) if len(name) > 22: name = name[:22] return name + "-" + r
[ "def", "make_id", "(", "name", ")", ":", "name", "=", "re", ".", "sub", "(", "r'[^a-zA-Z0-9]'", ",", "''", ",", "name", ")", "r", "=", "get_rand_string", "(", "12", ")", "if", "len", "(", "name", ")", ">", "22", ":", "name", "=", "name", "[", "...
29.272727
12.727273
def python2_round(number, ndigits=0): """Python 2 round function, see: http://python3porting.com/differences.html#rounding-behavior The behavior of round has changed in Python 3. In Python 2, rounding of halfway cases was away from zero, and round() would always return a float. Round a number to a given precision in decimal digits (default 0 digits). This returns an int when called with one argument, otherwise the same type as the number. ndigits may be negative. :param number: float value to round :type number: type, float :param ndigits: integer decimal places :param ndigits: type, integer """ p = 10 ** ndigits return float(math.floor((number * p) + math.copysign(0.5, number))) / p
[ "def", "python2_round", "(", "number", ",", "ndigits", "=", "0", ")", ":", "p", "=", "10", "**", "ndigits", "return", "float", "(", "math", ".", "floor", "(", "(", "number", "*", "p", ")", "+", "math", ".", "copysign", "(", "0.5", ",", "number", ...
36.7
22.9
def resolve_secrets(self): """Retrieve handles for all basic:secret: fields on input. The process must have the ``secrets`` resource requirement specified in order to access any secrets. Otherwise this method will raise a ``PermissionDenied`` exception. :return: A dictionary of secrets where key is the secret handle and value is the secret value. """ secrets = {} for field_schema, fields in iterate_fields(self.input, self.process.input_schema): # pylint: disable=no-member if not field_schema.get('type', '').startswith('basic:secret:'): continue name = field_schema['name'] value = fields[name] try: handle = value['handle'] except KeyError: continue try: secrets[handle] = Secret.objects.get_secret( handle, contributor=self.contributor ) except Secret.DoesNotExist: raise PermissionDenied("Access to secret not allowed or secret does not exist") # If the process does not not have the right requirements it is not # allowed to access any secrets. allowed = self.process.requirements.get('resources', {}).get('secrets', False) # pylint: disable=no-member if secrets and not allowed: raise PermissionDenied( "Process '{}' has secret inputs, but no permission to see secrets".format( self.process.slug # pylint: disable=no-member ) ) return secrets
[ "def", "resolve_secrets", "(", "self", ")", ":", "secrets", "=", "{", "}", "for", "field_schema", ",", "fields", "in", "iterate_fields", "(", "self", ".", "input", ",", "self", ".", "process", ".", "input_schema", ")", ":", "# pylint: disable=no-member", "if...
39.829268
23.804878