text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def first_image(self):
    """Read-only attribute providing the first non-none image field value.

    Scans the model's fields for ``ImageField`` instances, skipping the
    ``thumbnail_override`` field, and returns the first one whose stored
    value has a non-None ``id``.

    Returns:
        The first populated image field value, or None if every image
        field (other than the thumbnail override) is empty.
    """
    # Loop through image fields and grab the first non-none one.
    for model_field in self._meta.fields:
        if isinstance(model_field, ImageField):
            # BUG FIX: the original used `is not` for a string comparison,
            # which tests object identity and is unreliable; use `!=`.
            if model_field.name != 'thumbnail_override':
                field_value = getattr(self, model_field.name)
                if field_value.id is not None:
                    return field_value
    # No non-none images, return None.
    return None
[ "def", "first_image", "(", "self", ")", ":", "# loop through image fields and grab the first non-none one", "for", "model_field", "in", "self", ".", "_meta", ".", "fields", ":", "if", "isinstance", "(", "model_field", ",", "ImageField", ")", ":", "if", "model_field"...
43.428571
13.214286
def set(self, tclass, tnum, tlvt=0, tdata=''):
    """Store the tag class, number, LVT and payload on this tag."""
    # Reject non-string payloads before mutating any state.
    if not isinstance(tdata, str):
        raise TypeError("tag data must be str")
    self.tagClass, self.tagNumber = tclass, tnum
    self.tagLVT, self.tagData = tlvt, tdata
[ "def", "set", "(", "self", ",", "tclass", ",", "tnum", ",", "tlvt", "=", "0", ",", "tdata", "=", "''", ")", ":", "if", "not", "isinstance", "(", "tdata", ",", "str", ")", ":", "raise", "TypeError", "(", "\"tag data must be str\"", ")", "self", ".", ...
32
11.777778
def warning(self, message, print_location=True):
    """Display a warning message.

    Uses ``exshared`` for the current location of parsing; when a location
    is available (and ``print_location`` is True) the line/column and the
    offending source line are appended to the message.

    Args:
        message: warning text to display.
        print_location: include location information when known.
    """
    msg = "Warning"
    # Idiom fix: compare to None with `is not`, and compute the
    # "location is available" flag once instead of evaluating it twice.
    have_location = print_location and exshared.location is not None
    if have_location:
        wline = lineno(exshared.location, exshared.text)
        wcol = col(exshared.location, exshared.text)
        wtext = line(exshared.location, exshared.text)
        msg += " at line %d, col %d" % (wline, wcol)
    msg += ": %s" % message
    if have_location:
        msg += "\n%s" % wtext
    print(msg)
[ "def", "warning", "(", "self", ",", "message", ",", "print_location", "=", "True", ")", ":", "msg", "=", "\"Warning\"", "if", "print_location", "and", "(", "exshared", ".", "location", "!=", "None", ")", ":", "wline", "=", "lineno", "(", "exshared", ".",...
49.583333
14.416667
def main():
    """Boots up the command line tool."""
    logging.captureWarnings(True)
    args = build_parser().parse_args()
    # Configure logging
    args.setup_logging(args)
    # Dispatch into the appropriate subcommand function.
    try:
        return args.func(args)
    except SystemExit:
        raise
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt; catch only Exception so Ctrl-C propagates.
        logging.exception('Problem when running command. Sorry!')
        sys.exit(1)
[ "def", "main", "(", ")", ":", "logging", ".", "captureWarnings", "(", "True", ")", "args", "=", "build_parser", "(", ")", ".", "parse_args", "(", ")", "# Configure logging", "args", ".", "setup_logging", "(", "args", ")", "# Dispatch into the appropriate subcomm...
28.071429
17.785714
def stubs_clustering(network, use_reduced_coordinates=True, line_length_factor=1.0):
    """Cluster network by reducing stubs and stubby trees
    (i.e. sequentially reducing dead-ends).

    Parameters
    ----------
    network : pypsa.Network
    use_reduced_coordinates : boolean
        If True, do not average clusters, but take from busmap.
    line_length_factor : float
        Factor to multiply the crow-flies distance between new buses in order
        to get new line lengths.

    Returns
    -------
    Clustering : named tuple
        A named tuple containing network, busmap and linemap
    """
    busmap = busmap_by_stubs(network)

    if use_reduced_coordinates:
        # Reset coordinates to the new reduced buses rather than averaging.
        # TODO: FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS,
        # i.e. network is changed in place!!
        coords = ['x', 'y']
        network.buses.loc[busmap.index, coords] = network.buses.loc[busmap, coords].values

    return get_clustering_from_busmap(
        network, busmap, line_length_factor=line_length_factor)
[ "def", "stubs_clustering", "(", "network", ",", "use_reduced_coordinates", "=", "True", ",", "line_length_factor", "=", "1.0", ")", ":", "busmap", "=", "busmap_by_stubs", "(", "network", ")", "#reset coordinates to the new reduced guys, rather than taking an average", "if",...
36.928571
24.571429
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)):
    """Limit CNVkit input to calls with support from another caller.

    get_coords is a function that return chrom, start, end from a line of
    the input_file, allowing handling of multiple input file types.
    """
    # Collect (caller, vcf) pairs for the configured support callers,
    # keeping only files that exist and actually contain variants.
    support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name))
                     for c in convert.SUBSET_BY_SUPPORT["cnvkit"]]
    support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)]
    if len(support_files) == 0:
        # No usable support files -- nothing to subset against.
        return input_file
    else:
        out_file = os.path.join(work_dir, "%s-havesupport%s" %
                                utils.splitext_plus(os.path.basename(input_file)))
        if not utils.file_uptodate(out_file, input_file):
            input_bed = _input_to_bed(input_file, work_dir, get_coords, headers)
            pass_coords = set([])
            with file_transaction(data, out_file) as tx_out_file:
                support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files])
                tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0]
                # Require 50% reciprocal overlap (-f 0.5 -r) with any support BED.
                cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}"
                do.run(cmd.format(**locals()), "Intersect CNVs with support files")
                # Record the coordinates of every call with support.
                for r in pybedtools.BedTool(tmp_cmp_bed):
                    pass_coords.add((str(r.chrom), str(r.start), str(r.stop)))
                # Copy through header lines plus calls whose coordinates passed.
                with open(input_file) as in_handle:
                    with open(tx_out_file, "w") as out_handle:
                        for line in in_handle:
                            passes = True
                            if not line.startswith(headers):
                                passes = get_coords(line) in pass_coords
                            if passes:
                                out_handle.write(line)
        return out_file
[ "def", "subset_by_supported", "(", "input_file", ",", "get_coords", ",", "calls_by_name", ",", "work_dir", ",", "data", ",", "headers", "=", "(", "\"#\"", ",", ")", ")", ":", "support_files", "=", "[", "(", "c", ",", "tz", ".", "get_in", "(", "[", "c",...
57.735294
25
def datasets(self) -> tuple:
    """Return a tuple of all values in this mapping that are h5py datasets."""
    found = []
    for _, value in self.items():
        if isinstance(value, h5py.Dataset):
            found.append(value)
    return tuple(found)
[ "def", "datasets", "(", "self", ")", "->", "tuple", ":", "return", "tuple", "(", "v", "for", "_", ",", "v", "in", "self", ".", "items", "(", ")", "if", "isinstance", "(", "v", ",", "h5py", ".", "Dataset", ")", ")" ]
43.333333
17
def MAU(self):
    '''Result of preconditioned operator to deflation space, i.e.,
    :math:`MM_lAM_rU`.'''
    # Lazily compute and cache the product on first access.
    if self._MAU is not None:
        return self._MAU
    self._MAU = self.linear_system.M * self.AU
    return self._MAU
[ "def", "MAU", "(", "self", ")", ":", "if", "self", ".", "_MAU", "is", "None", ":", "self", ".", "_MAU", "=", "self", ".", "linear_system", ".", "M", "*", "self", ".", "AU", "return", "self", ".", "_MAU" ]
36.666667
18
def dump_state(self):
    """Dump the current state of this emulated tile as a dictionary.

    This function just dumps the status of the config variables. It is
    designed to be called in a chained fashion to serialize the complete
    state of a tile subclass.

    Returns:
        dict: The current state of the object that could be passed to
        load_state.
    """
    config_vars = {}
    for index, var in self._config_variables.items():
        # Store raw bytes as base64 text so the result is JSON friendly.
        config_vars[index] = base64.b64encode(var.current_value).decode('utf-8')
    return {"config_variables": config_vars}
[ "def", "dump_state", "(", "self", ")", ":", "return", "{", "\"config_variables\"", ":", "{", "x", ":", "base64", ".", "b64encode", "(", "y", ".", "current_value", ")", ".", "decode", "(", "'utf-8'", ")", "for", "x", ",", "y", "in", "self", ".", "_con...
41.307692
34.615385
def _extract_shifted_mean_gauss(image, mask=slice(None), offset=None, sigma=1, voxelspacing=None):
    """
    Internal, single-image version of `shifted_mean_gauss`.

    Smooths ``image`` with a Gaussian, shifts the result by ``offset``
    voxels per axis and extracts the masked intensities.
    """
    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim
    # set offset
    if offset is None:
        offset = [0] * image.ndim

    # determine gaussian kernel size in voxel units
    sigma = _create_structure_array(sigma, voxelspacing)

    # compute smoothed version of image
    smoothed = gaussian_filter(image, sigma)

    shifted = numpy.zeros_like(smoothed)
    in_slicer = []
    out_slicer = []
    for o in offset:
        # BUG FIX: a zero offset previously produced slice(None, 0), which
        # selects nothing (so the default offset yielded an all-zero
        # result); use a full slice for unshifted axes instead.
        if o:
            in_slicer.append(slice(o, None))
            out_slicer.append(slice(None, -o))
        else:
            in_slicer.append(slice(None))
            out_slicer.append(slice(None))
    # BUG FIX: index with tuples -- indexing an ndarray with a *list* of
    # slices was deprecated and is an error on modern NumPy.
    shifted[tuple(out_slicer)] = smoothed[tuple(in_slicer)]

    return _extract_intensities(shifted, mask)
[ "def", "_extract_shifted_mean_gauss", "(", "image", ",", "mask", "=", "slice", "(", "None", ")", ",", "offset", "=", "None", ",", "sigma", "=", "1", ",", "voxelspacing", "=", "None", ")", ":", "# set voxel spacing", "if", "voxelspacing", "is", "None", ":",...
31.730769
16.115385
def get_smokedetector_by_name(self, name):
    """Retrieves a smokedetector object by its name

    :param name: The name of the smokedetector to return
    :return: A smokedetector object
    """
    wanted = name.lower()
    for detector in self.smokedetectors:
        # Case-insensitive match on the detector's configured name.
        if detector.name.lower() == wanted:
            return detector
    return None
[ "def", "get_smokedetector_by_name", "(", "self", ",", "name", ")", ":", "return", "next", "(", "(", "smokedetector", "for", "smokedetector", "in", "self", ".", "smokedetectors", "if", "smokedetector", ".", "name", ".", "lower", "(", ")", "==", "name", ".", ...
44.5
16.5
def _delete_os_nwk(self, tenant_id, tenant_name, direc, is_fw_virt=False):
    """Delete the network created in Openstack.

    Function to delete Openstack network, It also releases the
    associated segmentation, VLAN and subnets.

    :param tenant_id: ID of the tenant whose firewall network is deleted.
    :param tenant_name: tenant name, used only for error logging.
    :param direc: 'in' selects the firewall's in-network; any other value
        selects the out-network.
    :param is_fw_virt: when True, skip releasing the segment/VLAN/subnet.
    :return: True on success, False when firewall data cannot be fetched
        or the network deletion fails.
    """
    serv_obj = self.get_service_obj(tenant_id)
    fw_dict = serv_obj.get_fw_dict()
    fw_id = fw_dict.get('fw_id')
    fw_data, fw_data_dict = self.get_fw(fw_id)
    if fw_data is None:
        LOG.error("Unable to get fw_data for tenant %s", tenant_name)
        return False
    # Pick the network/segment/subnet bundle for the requested direction.
    if direc == 'in':
        net_id = fw_data.in_network_id
        seg, vlan = self.get_in_seg_vlan(tenant_id)
        subnet_dict = self.get_in_ip_addr(tenant_id)
    else:
        net_id = fw_data.out_network_id
        seg, vlan = self.get_out_seg_vlan(tenant_id)
        subnet_dict = self.get_out_ip_addr(tenant_id)
    # Delete the Openstack Network
    sub = subnet_dict.get('subnet')
    try:
        ret = self.os_helper.delete_network_all_subnets(net_id)
        if not ret:
            LOG.error("Delete network for ID %(net)s direct %(dir)s "
                      "failed", {'net': net_id, 'dir': direc})
            return False
    except Exception as exc:
        LOG.error("Delete network for ID %(net)s direct %(dir)s failed"
                  " Exc %(exc)s", {'net': net_id, 'dir': direc,
                                   'exc': exc})
        return False
    # Release the segment, VLAN and subnet allocated
    if not is_fw_virt:
        self.service_vlans.release_segmentation_id(vlan)
        self.service_segs.release_segmentation_id(seg)
        self.release_subnet(sub, direc)
    # Release the network DB entry
    self.delete_network_db(net_id)
    return True
[ "def", "_delete_os_nwk", "(", "self", ",", "tenant_id", ",", "tenant_name", ",", "direc", ",", "is_fw_virt", "=", "False", ")", ":", "serv_obj", "=", "self", ".", "get_service_obj", "(", "tenant_id", ")", "fw_dict", "=", "serv_obj", ".", "get_fw_dict", "(", ...
42.023256
15.604651
def agents(status, all):
    '''
    List and manage agents.
    (admin privilege required)
    '''
    # Display columns: (header label, response field name).
    fields = [
        ('ID', 'id'),
        ('Status', 'status'),
        ('Region', 'region'),
        ('First Contact', 'first_contact'),
        ('CPU Usage (%)', 'cpu_cur_pct'),
        ('Used Memory (MiB)', 'mem_cur_bytes'),
        ('Total slots', 'available_slots'),
        ('Occupied slots', 'occupied_slots'),
    ]
    if is_legacy_server():
        # NOTE(review): indices 9 and 6 are deleted for legacy servers, but
        # the list above only has 8 entries (max index 7), so fields[9]
        # would raise IndexError -- confirm whether entries are missing.
        del fields[9]
        del fields[6]

    def execute_paginated_query(limit, offset):
        # Fetch one page of the agent listing; exits the CLI on error.
        try:
            resp_agents = session.Agent.list_with_limit(
                limit, offset, status,
                fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        return resp_agents

    def round_mem(results):
        # Convert raw byte counts to MiB with one decimal for display.
        for item in results:
            if 'mem_cur_bytes' in item and item['mem_cur_bytes'] is not None:
                item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
        return results

    def _generate_paginated_results(interval):
        # Yield one formatted table chunk per page for the pager.
        offset = 0
        is_first = True
        total_count = -1
        while True:
            limit = (interval if is_first
                     else min(interval, total_count - offset))
            try:
                result = execute_paginated_query(limit, offset)
            except Exception as e:
                print_error(e)
                sys.exit(1)
            offset += interval
            total_count = result['total_count']
            items = result['items']
            items = round_mem(items)
            table = tabulate((item.values() for item in items),
                             headers=(item[0] for item in fields))
            if is_first:
                is_first = False
            else:
                # Strip the repeated header rows on subsequent pages.
                table_rows = table.split('\n')
                table = '\n'.join(table_rows[2:])
            yield table + '\n'
            if not offset < total_count:
                break

    with Session() as session:
        paginating_interval = 10
        if all:
            click.echo_via_pager(_generate_paginated_results(paginating_interval))
        else:
            result = execute_paginated_query(paginating_interval, offset=0)
            total_count = result['total_count']
            if total_count == 0:
                print('There are no matching agents.')
                return
            items = result['items']
            items = round_mem(items)
            # Drop any fields the server response does not include.
            fields = [field for field in fields if field[1] in items[0]]
            print(tabulate((item.values() for item in items),
                           headers=(item[0] for item in fields)))
            if total_count > paginating_interval:
                print("More agents can be displayed by using --all option.")
[ "def", "agents", "(", "status", ",", "all", ")", ":", "fields", "=", "[", "(", "'ID'", ",", "'id'", ")", ",", "(", "'Status'", ",", "'status'", ")", ",", "(", "'Region'", ",", "'region'", ")", ",", "(", "'First Contact'", ",", "'first_contact'", ")",...
34.78481
17.924051
def save(d, output_file, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False, **kwargs):
    """
    Write a dictionary to an output Mapfile on disk

    Parameters
    ----------

    d: dict
        A Python dictionary based on the the mappyfile schema
    output_file: string
        The output filename
    indent: int
        The number of ``spacer`` characters to indent structures in the Mapfile
    spacer: string
        The character to use for indenting structures in the Mapfile. Typically
        spaces or tab characters (``\\t``)
    quote: string
        The quote character to use in the Mapfile (double or single quotes)
    newlinechar: string
        The character used to insert newlines in the Mapfile
    end_comment: bool
        Add a comment with the block type at each closing END statement e.g.
        END # MAP

    Returns
    -------

    string
        The output_file passed into the function

    Example
    -------

    To open a Mapfile from a string, and then save it to a file::

        s = '''MAP NAME "TEST" END'''
        d = mappyfile.loads(s)
        fn = "C:/Data/mymap.map"
        mappyfile.save(d, fn)
    """
    # Render the dictionary to Mapfile text, then persist it.
    rendered = _pprint(d, indent, spacer, quote, newlinechar, end_comment)
    _save(output_file, rendered)
    return output_file
[ "def", "save", "(", "d", ",", "output_file", ",", "indent", "=", "4", ",", "spacer", "=", "\" \"", ",", "quote", "=", "'\"'", ",", "newlinechar", "=", "\"\\n\"", ",", "end_comment", "=", "False", ",", "*", "*", "kwargs", ")", ":", "map_string", "=", ...
28.886364
24.340909
def compactor(conf): """ The compactor daemon. This fuction watches the sorted set containing bucket keys that need to be compacted, performing the necessary compaction. :param conf: A turnstile.config.Config instance containing the configuration for the compactor daemon. Note that a ControlDaemon is also started, so appropriate configuration for that must also be present, as must appropriate Redis connection information. """ # Get the database handle db = conf.get_database('compactor') # Get the limits container limit_map = LimitContainer(conf, db) # Get the compactor configuration config = conf['compactor'] # Make sure compaction is enabled if get_int(config, 'max_updates', 0) <= 0: # We'll just warn about it, since they could be running # the compactor with a different configuration file LOG.warning("Compaction is not enabled. Enable it by " "setting a positive integer value for " "'compactor.max_updates' in the configuration.") # Select the bucket key getter key_getter = GetBucketKey.factory(config, db) LOG.info("Compactor initialized") # Now enter our loop while True: # Get a bucket key to compact try: buck_key = limits.BucketKey.decode(key_getter()) except ValueError as exc: # Warn about invalid bucket keys LOG.warning("Error interpreting bucket key: %s" % exc) continue # Ignore version 1 keys--they can't be compacted if buck_key.version < 2: continue # Get the corresponding limit class try: limit = limit_map[buck_key.uuid] except KeyError: # Warn about missing limits LOG.warning("Unable to compact bucket for limit %s" % buck_key.uuid) continue LOG.debug("Compacting bucket %s" % buck_key) # OK, we now have the limit (which we really only need for # the bucket class); let's compact the bucket try: compact_bucket(db, buck_key, limit) except Exception: LOG.exception("Failed to compact bucket %s" % buck_key) else: LOG.debug("Finished compacting bucket %s" % buck_key)
[ "def", "compactor", "(", "conf", ")", ":", "# Get the database handle", "db", "=", "conf", ".", "get_database", "(", "'compactor'", ")", "# Get the limits container", "limit_map", "=", "LimitContainer", "(", "conf", ",", "db", ")", "# Get the compactor configuration",...
34.367647
19.632353
def mutex_opts(dict, ex_op):
    """Check for presence of mutually exclusive keys in a dict.

    Call: mutex_opts(dict,[[op1a,op1b],[op2a,op2b]...]

    :raises ValueError: when both options of any pair are present.
    """
    for op1, op2 in ex_op:
        if op1 in dict and op2 in dict:
            # BUG FIX: converted the Python 2 `raise ValueError, msg`
            # statement form (a syntax error on Python 3) to a call.
            raise ValueError('\n*** ERROR in Arguments *** '
                             'Options ' + op1 + ' and ' + op2 +
                             ' are mutually exclusive.')
[ "def", "mutex_opts", "(", "dict", ",", "ex_op", ")", ":", "for", "op1", ",", "op2", "in", "ex_op", ":", "if", "op1", "in", "dict", "and", "op2", "in", "dict", ":", "raise", "ValueError", ",", "'\\n*** ERROR in Arguments *** '", "'Options '", "+", "op1", ...
42.875
15.125
def Collect(self, top_frame): """Collects call stack, local variables and objects. Starts collection from the specified frame. We don't start from the top frame to exclude the frames due to debugger. Updates the content of self.breakpoint. Args: top_frame: top frame to start data collection. """ # Evaluate call stack. frame = top_frame top_line = self.breakpoint['location']['line'] breakpoint_frames = self.breakpoint['stackFrames'] try: # Evaluate watched expressions. if 'expressions' in self.breakpoint: self.breakpoint['evaluatedExpressions'] = [ self._CaptureExpression(top_frame, expression) for expression in self.breakpoint['expressions']] while frame and (len(breakpoint_frames) < self.max_frames): line = top_line if frame == top_frame else frame.f_lineno code = frame.f_code if len(breakpoint_frames) < self.max_expand_frames: frame_arguments, frame_locals = self.CaptureFrameLocals(frame) else: frame_arguments = [] frame_locals = [] breakpoint_frames.append({ 'function': _GetFrameCodeObjectName(frame), 'location': { 'path': NormalizePath(code.co_filename), 'line': line }, 'arguments': frame_arguments, 'locals': frame_locals }) frame = frame.f_back except BaseException as e: # pylint: disable=broad-except # The variable table will get serialized even though there was a failure. # The results can be useful for diagnosing the internal error. self.breakpoint['status'] = { 'isError': True, 'description': { 'format': ('INTERNAL ERROR: Failed while capturing locals ' 'of frame $0: $1'), 'parameters': [str(len(breakpoint_frames)), str(e)]}} # Number of entries in _var_table. Starts at 1 (index 0 is the 'buffer full' # status value). num_vars = 1 # Explore variables table in BFS fashion. The variables table will grow # inside CaptureVariable as we encounter new references. 
while (num_vars < len(self._var_table)) and ( self._total_size < self.max_size): self._var_table[num_vars] = self.CaptureVariable( self._var_table[num_vars], 0, self.default_capture_limits, can_enqueue=False) # Move on to the next entry in the variable table. num_vars += 1 # Trim variables table and change make all references to variables that # didn't make it point to var_index of 0 ("buffer full") self.TrimVariableTable(num_vars) self._CaptureEnvironmentLabels() self._CaptureRequestLogId() self._CaptureUserId()
[ "def", "Collect", "(", "self", ",", "top_frame", ")", ":", "# Evaluate call stack.", "frame", "=", "top_frame", "top_line", "=", "self", ".", "breakpoint", "[", "'location'", "]", "[", "'line'", "]", "breakpoint_frames", "=", "self", ".", "breakpoint", "[", ...
37.273973
20.575342
def allow_rwe(self, name):
    """Allow all privileges for a particular name group (user, group, other)."""
    # Validate the group name before touching the filesystem.
    assert name in PERMISSIONS
    os.chmod(self.file_path, PERMISSIONS[name]['all'])
[ "def", "allow_rwe", "(", "self", ",", "name", ")", ":", "assert", "name", "in", "PERMISSIONS", ".", "keys", "(", ")", "os", ".", "chmod", "(", "self", ".", "file_path", ",", "PERMISSIONS", "[", "name", "]", "[", "'all'", "]", ")" ]
52.25
8.25
def stop(self, message):
    """
    Manually stops timer with the message.

    :param message: The display message.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; use the monotonic
    # high-resolution perf_counter instead.
    # NOTE(review): the matching start() must also record self._start with
    # time.perf_counter() so the elapsed difference is meaningful -- confirm.
    self._stop = time.perf_counter()
    VSGLogger.info("{0:<20} - Finished [{1}s]".format(
        message, self.pprint(self._stop - self._start)))
[ "def", "stop", "(", "self", ",", "message", ")", ":", "self", ".", "_stop", "=", "time", ".", "clock", "(", ")", "VSGLogger", ".", "info", "(", "\"{0:<20} - Finished [{1}s]\"", ".", "format", "(", "message", ",", "self", ".", "pprint", "(", "self", "."...
34.5
17.5
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUG FIX: the default of None crashed on .update(); start from an
    # empty dict so the call works when no extra args are supplied.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
[ "def", "update_linode", "(", "linode_id", ",", "update_args", "=", "None", ")", ":", "update_args", ".", "update", "(", "{", "'LinodeID'", ":", "linode_id", "}", ")", "result", "=", "_query", "(", "'linode'", ",", "'update'", ",", "args", "=", "update_args...
25.266667
24.466667
def getCandScoresMap(self, profile): """ Returns a dictionary that associates integer representations of each candidate with their Copeland score. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported election type") exit() # Initialize each Copeland score as 0.0. copelandScores = dict() for cand in profile.candMap.keys(): copelandScores[cand] = 0.0 preferenceCounts = profile.getPreferenceCounts() # For each pair of candidates, calculate the number of votes in which one beat the other. wmgMap = profile.getWmg() for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2): if cand2 in wmgMap[cand1].keys(): if wmgMap[cand1][cand2] > 0: copelandScores[cand1] += 1.0 elif wmgMap[cand1][cand2] < 0: copelandScores[cand2] += 1.0 # If a pair of candidates is tied, we add alpha to their score for each vote. else: copelandScores[cand1] += self.alpha copelandScores[cand2] += self.alpha return copelandScores
[ "def", "getCandScoresMap", "(", "self", ",", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates. Ties are", "# allowed however.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", ...
39.081081
21.459459
def onpress(self, event):
    """
    Reacts to key commands

    :param event: a keyboard event
    :return: if 'c' is pressed, clear all region patches
    """
    key = event.key
    if key == 'c':
        # Remove every drawn contour patch and refresh the canvas.
        for patch in list(self.region_patches):
            patch.remove()
        self.region_patches = []
        self.fig.canvas.draw_idle()
    elif key == 'u':
        # Undo the most recent label.
        self.undobutton_action()
[ "def", "onpress", "(", "self", ",", "event", ")", ":", "if", "event", ".", "key", "==", "'c'", ":", "# clears all the contours", "for", "patch", "in", "self", ".", "region_patches", ":", "patch", ".", "remove", "(", ")", "self", ".", "region_patches", "=...
35.538462
7.076923
def add_tokens_for_group(self, with_pass=False):
    """Add the tokens for the group signature"""
    group = self.groups
    kls, name = group.super_kls, group.kls_name

    # Emit the describe line for this group at zero indentation.
    self.reset_indentation('')
    self.result.extend(self.tokens.make_describe(kls, name))

    if with_pass:
        # Bodyless groups need an explicit `pass` statement.
        self.add_tokens_for_pass()

    group.finish_signature()
[ "def", "add_tokens_for_group", "(", "self", ",", "with_pass", "=", "False", ")", ":", "kls", "=", "self", ".", "groups", ".", "super_kls", "name", "=", "self", ".", "groups", ".", "kls_name", "# Reset indentation to beginning and add signature", "self", ".", "re...
32.428571
15.571429
def get_items_batch(self, item_request_data, project=None):
    """GetItemsBatch.
    Post for retrieving a set of items given a list of paths or a long path. Allows for specifying the recursionLevel and version descriptors for each path.
    :param :class:`<TfvcItemRequestData> <azure.devops.v5_0.tfvc.models.TfvcItemRequestData>` item_request_data:
    :param str project: Project ID or project name
    :rtype: [[TfvcItem]]
    """
    # Only include the project route value when a project was given.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    payload = self._serialize.body(item_request_data, 'TfvcItemRequestData')
    response = self._send(
        http_method='POST',
        location_id='fe6f827b-5f64-480f-b8af-1eca3b80e833',
        version='5.0',
        route_values=route_values,
        content=payload)
    return self._deserialize('[[TfvcItem]]', self._unwrap_collection(response))
[ "def", "get_items_batch", "(", "self", ",", "item_request_data", ",", "project", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", ...
60.705882
27.529412
def _parse_view_results(self, rows, factory, options): ''' rows here should be a list of tuples: - (key, value) for reduce views - (key, value, id) for nonreduce views without include docs - (key, value, id, doc) for nonreduce with with include docs ''' kwargs = dict() kwargs['reduced'] = factory.use_reduce and options.get('reduce', True) kwargs['include_docs'] = options.get('include_docs', False) # Lines below pass extra arguments to the parsing function if they # are expected. These arguments are bound method unserialize() and # unserialize_list(). They methods perform the magic of parsing and # upgrading if necessary the loaded documents. spec = inspect.getargspec(factory.parse_view_result) if 'unserialize' in spec.args: kwargs['unserialize'] = self.unserialize_document if 'unserialize_list' in spec.args: kwargs['unserialize_list'] = self.unserialize_list_of_documents return factory.parse_view_result(rows, **kwargs)
[ "def", "_parse_view_results", "(", "self", ",", "rows", ",", "factory", ",", "options", ")", ":", "kwargs", "=", "dict", "(", ")", "kwargs", "[", "'reduced'", "]", "=", "factory", ".", "use_reduce", "and", "options", ".", "get", "(", "'reduce'", ",", "...
51.238095
22.380952
def _MergeSameId(self):
    """Tries to merge entities based on their ids.

    This tries to merge only the entities from the old and new schedules
    which have the same id. These are added into the merged schedule.
    Entities which do not merge or do not have the same id as another
    entity in the other schedule are simply migrated into the merged
    schedule.

    This method is less flexible than _MergeDifferentId since it only
    tries to merge entities which have the same id while _MergeDifferentId
    tries to merge everything. However, it is faster and so should be used
    whenever possible.

    This method makes use of various methods like _Merge and _Migrate
    which are not implemented in the abstract DataSetMerger class. These
    method should be overwritten in a subclass to allow _MergeSameId to
    work with different entity types.

    Returns:
      The number of merged entities.
    """
    a_not_merged = []
    b_not_merged = []
    # Pass 1: try to pair every A entity with a same-id B entity and merge.
    for a in self._GetIter(self.feed_merger.a_schedule):
        try:
            b = self._GetById(self.feed_merger.b_schedule, self._GetId(a))
        except KeyError:
            # there was no entity in B with the same id as a
            a_not_merged.append(a)
            continue
        try:
            self._Add(a, b, self._MergeEntities(a, b))
            self._num_merged += 1
        except MergeError as merge_error:
            a_not_merged.append(a)
            b_not_merged.append(b)
            self._ReportSameIdButNotMerged(self._GetId(a), merge_error)
    # Pass 2: any B entity with no same-id A counterpart is unmerged;
    # B entities that *did* have a counterpart were handled in pass 1.
    for b in self._GetIter(self.feed_merger.b_schedule):
        try:
            a = self._GetById(self.feed_merger.a_schedule, self._GetId(b))
        except KeyError:
            # there was no entity in A with the same id as b
            b_not_merged.append(b)
    # migrate the remaining entities
    for a in a_not_merged:
        newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
        self._Add(a, None,
                  self._Migrate(a, self.feed_merger.a_schedule, newid))
    for b in b_not_merged:
        newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
        self._Add(None, b,
                  self._Migrate(b, self.feed_merger.b_schedule, newid))
    self._num_not_merged_a = len(a_not_merged)
    self._num_not_merged_b = len(b_not_merged)
    return self._num_merged
[ "def", "_MergeSameId", "(", "self", ")", ":", "a_not_merged", "=", "[", "]", "b_not_merged", "=", "[", "]", "for", "a", "in", "self", ".", "_GetIter", "(", "self", ".", "feed_merger", ".", "a_schedule", ")", ":", "try", ":", "b", "=", "self", ".", ...
38.736842
23.491228
def from_dict(cls, d, ignore=()):
    """Create an instance from a serialized version of cls

    Args:
        d(dict): Endpoints of cls to set
        ignore(tuple): Keys to ignore

    Returns:
        Instance of this class
    """
    # The serialized typeid (if present) must match this class's typeid.
    if "typeid" in d:
        assert d["typeid"] == cls.typeid, \
            "Dict has typeid %s but %s has typeid %s" % \
            (d["typeid"], cls, cls.typeid)
    # Everything except typeid and the ignored keys becomes a kwarg.
    filtered = {k: v for k, v in d.items()
                if k != "typeid" and k not in ignore}
    try:
        inst = cls(**filtered)
    except TypeError as e:
        raise TypeError("%s raised error: %s" % (cls.typeid, str(e)))
    return inst
[ "def", "from_dict", "(", "cls", ",", "d", ",", "ignore", "=", "(", ")", ")", ":", "filtered", "=", "{", "}", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "k", "==", "\"typeid\"", ":", "assert", "v", "==", "cls", ".", ...
30.956522
14.304348
def FromString(self, string):
    """Parse a bool from a string."""
    # Lowercase once, then test against the accepted spellings.
    lowered = string.lower()
    if lowered in ("true", "yes", "y"):
        return True
    if lowered in ("false", "no", "n"):
        return False
    raise TypeValueError("%s is not recognized as a boolean value." % string)
[ "def", "FromString", "(", "self", ",", "string", ")", ":", "if", "string", ".", "lower", "(", ")", "in", "(", "\"false\"", ",", "\"no\"", ",", "\"n\"", ")", ":", "return", "False", "if", "string", ".", "lower", "(", ")", "in", "(", "\"true\"", ",",...
30
20.555556
def tox_addoption(parser):
    """Add arguments and needed monkeypatches."""
    # Register the --travis-after flag on the tox argument parser.
    parser.add_argument(
        '--travis-after',
        dest='travis_after',
        action='store_true',
        help='Exit successfully after all Travis jobs complete successfully.')
    if 'TRAVIS' not in os.environ:
        return
    # Only apply the patches when actually running under Travis CI.
    pypy_version_monkeypatch()
    subcommand_test_monkeypatch(tox_subcommand_test_post)
[ "def", "tox_addoption", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'--travis-after'", ",", "dest", "=", "'travis_after'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Exit successfully after all Travis jobs complete successfully.'", ")", ...
41
19.111111
def reference(self, t, i, in_group=False): """Handle references.""" current = [] if not in_group and t == "m": current.append(self._re_start_wb) elif not in_group and t == "M": current.append(self._re_end_wb) elif not in_group and t == "R": current.append(self._re_line_break) elif not in_group and t == "X": no_mark = self.unicode_props("^m", None, in_group=False)[0] mark = self.unicode_props("m", None, in_group=False)[0] current.extend(self._grapheme_cluster % (no_mark, mark, mark)) elif t == "e": current.append(self._re_escape) elif t == "l": current.extend(self.letter_case_props(_LOWER, in_group)) self.found_property = True elif t == "L": current.extend(self.letter_case_props(_LOWER, in_group, negate=True)) self.found_property = True elif t == "c": current.extend(self.letter_case_props(_UPPER, in_group)) self.found_property = True elif t == "C": current.extend(self.letter_case_props(_UPPER, in_group, negate=True)) self.found_property = True elif t == 'p': prop = self.get_unicode_property(i) current.extend(self.unicode_props(prop[0], prop[1], in_group=in_group)) if in_group: self.found_property = True elif t == 'P': prop = self.get_unicode_property(i) current.extend(self.unicode_props(prop[0], prop[1], in_group=in_group, negate=True)) if in_group: self.found_property = True elif t == "N": text = self.get_named_unicode(i) current.extend(self.unicode_name(text, in_group)) if in_group: self.found_named_unicode = True else: current.extend(["\\", t]) return current
[ "def", "reference", "(", "self", ",", "t", ",", "i", ",", "in_group", "=", "False", ")", ":", "current", "=", "[", "]", "if", "not", "in_group", "and", "t", "==", "\"m\"", ":", "current", ".", "append", "(", "self", ".", "_re_start_wb", ")", "elif"...
40.787234
16.021277
def _segment(cls, segment): """ Returns a property capable of setting and getting a segment. """ return property( fget=lambda x: cls._get_segment(x, segment), fset=lambda x, v: cls._set_segment(x, segment, v), )
[ "def", "_segment", "(", "cls", ",", "segment", ")", ":", "return", "property", "(", "fget", "=", "lambda", "x", ":", "cls", ".", "_get_segment", "(", "x", ",", "segment", ")", ",", "fset", "=", "lambda", "x", ",", "v", ":", "cls", ".", "_set_segmen...
33.5
15.75
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]: """Generate all combinations of commands given a variable matrix. Processes the commands to be sequences of strings. """ assert command is not None if isinstance(command, str): command_list = [Command(command, variables=variables) for variables in matrix] elif isinstance(command, list): command_list = [Command(command, variables=variables) for variables in matrix] else: if command.get("command") is not None: cmd = command.get("command") else: cmd = command.get("cmd") creates = str(command.get("creates", "")) requires = str(command.get("requires", "")) assert isinstance(cmd, (list, str)) command_list = [ Command(cmd, variables, creates, requires) for variables in matrix ] return uniqueify(command_list)
[ "def", "process_command", "(", "command", ":", "CommandInput", ",", "matrix", ":", "VarMatrix", ")", "->", "List", "[", "Command", "]", ":", "assert", "command", "is", "not", "None", "if", "isinstance", "(", "command", ",", "str", ")", ":", "command_list",...
37.916667
19.583333
def cofold(self, strand1, strand2, temp=37.0, dangles=2, nolp=False, nogu=False, noclosinggu=False, constraints=None, canonicalbponly=False, partition=-1, pfscale=None, gquad=False): '''Run the RNAcofold command and retrieve the result in a dictionary. :param strand1: Strand 1 for running RNAcofold. :type strand1: coral.DNA or coral.RNA :param strand1: Strand 2 for running RNAcofold. :type strand2: coral.DNA or coral.RNA :param temp: Temperature at which to run the calculations. :type temp: float :param dangles: How to treat dangling end energies. Set to 0 to ignore dangling ends. Set to 1 to limit unpaired bases to at most one dangling end (default for MFE calc). Set to 2 (the default) to remove the limit in 1. Set to 3 to allow coaxial stacking of adjacent helices in .multi-loops :type dangles: int :param nolp: Produce structures without lonely pairs (isolated single base pairs). :type nolp: bool :param nogu: Do not allow GU pairs. :type nogu: bool :param noclosinggu: Do not allow GU pairs at the end of helices. :type noclosinggu: bool :param constraints: Any structural constraints to use. Format is defined at http://www.tbi.univie.ac.at/RNA/RNAfold.1.html :type constraints: str :param canonicalbponly: Remove non-canonical base pairs from the structure constraint (if applicable). :type canonicalbponly: bool :param partition: Calculates the partition function for the sequence. :type partition: int :param pfscale: Scaling factor for the partition function. :type pfScale: float :param gquad: Incorporate G-Quadruplex formation into the structure prediction. :type gquad: bool :returns: Dictionary of calculated values, defaulting to values of MFE ('mfe': float) and dotbracket structure ('dotbracket': str). More keys are added depending on keyword arguments. 
:rtype: dict ''' cmd_args = [] cmd_kwargs = {'--temp=': str(temp)} cmd_kwargs['--dangles='] = dangles if nolp: cmd_args.append('--noLP') if nogu: cmd_args.append('--noGU') if noclosinggu: cmd_args.append('--noClosingGU') if constraints is not None: cmd_args.append('--constraint') if canonicalbponly: cmd_args.append('--canonicalBPonly') if partition: cmd_args.append('--partfunc') if pfscale is not None: cmd_kwargs['pfScale'] = float(pfscale) if gquad: cmd_args.append('--gquad') inputs = ['>strands\n{}&{}'.format(str(strand1), str(strand2))] if constraints is not None: inputs.append(constraints) rnafold_output = self._run('RNAcofold', inputs, cmd_args, cmd_kwargs) # Process the output output = {} lines = rnafold_output.splitlines() # Line 1 is the name of the sequence input, line 2 is the sequence lines.pop(0) lines.pop(0) # Line 3 is the dotbracket + mfe for strand1 line3 = lines.pop(0) output['dotbracket'] = self._lparse(line3, '^(.*) \(') output['mfe'] = float(self._lparse(line3, ' \((.*)\)$')) # Optional outputs if partition: # Line 4 is 'a coarse representation of the pair probabilities' and # the ensemble free energy line4 = lines.pop(0) output['coarse'] = self._lparse(line4, '^(.*) \[') output['ensemble'] = float(self._lparse(line4, ' \[(.*)\]$')) # Line 5 is the centroid structure, its free energy, and distance # to the ensemble line5 = lines.pop(0) 'ensemble (.*),' output['frequency'] = float(self._lparse(line5, 'ensemble (.*),')) output['deltaG'] = float(self._lparse(line5, 'binding=(.*)$')) # Parse the postscript file (the only place the probability matrix # is) with open(os.path.join(self._tempdir, 'strands_dp.ps')) as f: pattern = 'start of base pair probability data\n(.*)\nshowpage' dotplot_file = f.read() dotplot_data = re.search(pattern, dotplot_file, flags=re.DOTALL).group(1).split('\n') # Dimension of the dotplot - compares seq1, seq2 to self and # to each other (concatenation of seq1 and seq2 = axis) dim = len(strand1) + len(strand2) ensemble_probs 
= np.zeros((dim, dim)) optimal_probs = np.zeros((dim, dim)) for point in dotplot_data: point_split = point.split(' ') # Use zero indexing i = int(point_split[0]) - 1 j = int(point_split[1]) - 1 sqprob = float(point_split[2]) probtype = point_split[3] if probtype == 'ubox': ensemble_probs[i][j] = sqprob**2 else: optimal_probs[i][j] = sqprob**2 output['ensemble_matrix'] = ensemble_probs output['optimal_matrix'] = optimal_probs return output
[ "def", "cofold", "(", "self", ",", "strand1", ",", "strand2", ",", "temp", "=", "37.0", ",", "dangles", "=", "2", ",", "nolp", "=", "False", ",", "nogu", "=", "False", ",", "noclosinggu", "=", "False", ",", "constraints", "=", "None", ",", "canonical...
45.349593
19.902439
def reffinder(sectionObj): """ add reference indeces to sectionobj['references'] :param sectionObj :return: a section obj w references: field """ text = sectionObj['text'] reftags = [x for x in refTagRegEx.finditer(text)] if reftags: references = [] for tag in reftags: references.append(int(tag.group(1))) sectionObj['references'] = references text = refTagRegEx.sub('', text) sectionObj['text'] = text return sectionObj
[ "def", "reffinder", "(", "sectionObj", ")", ":", "text", "=", "sectionObj", "[", "'text'", "]", "reftags", "=", "[", "x", "for", "x", "in", "refTagRegEx", ".", "finditer", "(", "text", ")", "]", "if", "reftags", ":", "references", "=", "[", "]", "for...
26
15.263158
def drop_first(count): """ Assumes an iterable on the input, returns an iterable with identical items except for the first `count`. >>> range(10) > drop_first(5) | tuple (5, 6, 7, 8, 9) """ def _drop_first(iterable): g = (x for x in range(1, count + 1)) return dropwhile( lambda i: unless(StopIteration, lambda: next(g))(), iterable) return pipe | set_name('drop_first(%s)' % count, _drop_first)
[ "def", "drop_first", "(", "count", ")", ":", "def", "_drop_first", "(", "iterable", ")", ":", "g", "=", "(", "x", "for", "x", "in", "range", "(", "1", ",", "count", "+", "1", ")", ")", "return", "dropwhile", "(", "lambda", "i", ":", "unless", "("...
34.153846
16.307692
def _parse_publisher(details): """ Parse publisher of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: str/None: Publisher's name as string or None if not found. """ publisher = _get_td_or_none( details, "ctl00_ContentPlaceHolder1_tblRowNakladatel" ) # publisher is not specified if not publisher: return None publisher = dhtmlparser.removeTags(publisher).strip() # return None instead of blank string if not publisher: return None return publisher
[ "def", "_parse_publisher", "(", "details", ")", ":", "publisher", "=", "_get_td_or_none", "(", "details", ",", "\"ctl00_ContentPlaceHolder1_tblRowNakladatel\"", ")", "# publisher is not specified", "if", "not", "publisher", ":", "return", "None", "publisher", "=", "dhtm...
22.115385
22.5
def set_background(self, background): """ Set the background color of the widget. """ scene = self.scene scene.setBackgroundBrush(QColor.fromRgba(background.argb))
[ "def", "set_background", "(", "self", ",", "background", ")", ":", "scene", "=", "self", ".", "scene", "scene", ".", "setBackgroundBrush", "(", "QColor", ".", "fromRgba", "(", "background", ".", "argb", ")", ")" ]
31.833333
13.833333
def diffplot(self, f, delay=1, lfilter=None, **kargs): """diffplot(f, delay=1, lfilter=None) Applies a function to couples (l[i],l[i+delay]) A list of matplotlib.lines.Line2D is returned. """ # Get the list of packets if lfilter is None: lst_pkts = [f(self.res[i], self.res[i + 1]) for i in range(len(self.res) - delay)] else: lst_pkts = [f(self.res[i], self.res[i + 1]) for i in range(len(self.res) - delay) if lfilter(self.res[i])] # Mimic the default gnuplot output if kargs == {}: kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS lines = plt.plot(lst_pkts, **kargs) # Call show() if matplotlib is not inlined if not MATPLOTLIB_INLINED: plt.show() return lines
[ "def", "diffplot", "(", "self", ",", "f", ",", "delay", "=", "1", ",", "lfilter", "=", "None", ",", "*", "*", "kargs", ")", ":", "# Get the list of packets", "if", "lfilter", "is", "None", ":", "lst_pkts", "=", "[", "f", "(", "self", ".", "res", "[...
32.923077
17.538462
def _get_button_attrs(self, tool): """ Get the HTML attributes associated with a tool. There are some standard attributes (class and title) that the template will always want. Any number of additional attributes can be specified and passed on. This is kinda awkward and due for a refactor for readability. """ attrs = getattr(tool, 'attrs', {}) # href is not allowed to be set. should an exception be raised instead? if 'href' in attrs: attrs.pop('href') # title is not allowed to be set. should an exception be raised instead? # `short_description` should be set instead to parallel django admin # actions if 'title' in attrs: attrs.pop('title') default_attrs = { 'class': attrs.get('class', ''), 'title': getattr(tool, 'short_description', ''), } standard_attrs = {} custom_attrs = {} for k, v in dict(default_attrs, **attrs).items(): if k in default_attrs: standard_attrs[k] = v else: custom_attrs[k] = v return standard_attrs, custom_attrs
[ "def", "_get_button_attrs", "(", "self", ",", "tool", ")", ":", "attrs", "=", "getattr", "(", "tool", ",", "'attrs'", ",", "{", "}", ")", "# href is not allowed to be set. should an exception be raised instead?", "if", "'href'", "in", "attrs", ":", "attrs", ".", ...
39.3
17.633333
def build_parameter(field: Field) -> Mapping[str, Any]: """ Build JSON parameter from a marshmallow field. """ builder = Parameters() return builder.build(field)
[ "def", "build_parameter", "(", "field", ":", "Field", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "builder", "=", "Parameters", "(", ")", "return", "builder", ".", "build", "(", "field", ")" ]
25.142857
12.571429
def calc_registration(preregistration_map, anchors, max_steps=2000, max_step_size=0.05, method='random'): ''' calc_registration is a calculator that creates the registration coordinates. ''' # if max steps is a tuple (max, stride) then a trajectory is saved into # the registered_map meta-data pmap = preregistration_map if is_tuple(max_steps) or is_list(max_steps): (max_steps, stride) = max_steps traj = [preregistration_map.coordinates] x = preregistration_map.coordinates for s in np.arange(0, max_steps, stride): x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], initial_coordinates=x, method=method, max_steps=stride, max_step_size=max_step_size) traj.append(x) pmap = pmap.with_meta(trajectory=np.asarray(traj)) else: x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], method=method, max_steps=max_steps, max_step_size=max_step_size) return pmap.copy(coordinates=x)
[ "def", "calc_registration", "(", "preregistration_map", ",", "anchors", ",", "max_steps", "=", "2000", ",", "max_step_size", "=", "0.05", ",", "method", "=", "'random'", ")", ":", "# if max steps is a tuple (max, stride) then a trajectory is saved into", "# the registered_m...
40.333333
13.833333
def peripheral_didDiscoverCharacteristicsForService_error_(self, peripheral, service, error): """Called when characteristics are discovered for a service.""" logger.debug('peripheral_didDiscoverCharacteristicsForService_error called') # Stop if there was some kind of error. if error is not None: return # Make sure the discovered characteristics are added to the list of known # characteristics, and kick off descriptor discovery for each char. for char in service.characteristics(): # Add to list of known characteristics. if characteristic_list().get(char) is None: characteristic_list().add(char, CoreBluetoothGattCharacteristic(char)) # Start descriptor discovery. peripheral.discoverDescriptorsForCharacteristic_(char) # Notify the device about the discovered characteristics. device = device_list().get(peripheral) if device is not None: device._characteristics_discovered(service)
[ "def", "peripheral_didDiscoverCharacteristicsForService_error_", "(", "self", ",", "peripheral", ",", "service", ",", "error", ")", ":", "logger", ".", "debug", "(", "'peripheral_didDiscoverCharacteristicsForService_error called'", ")", "# Stop if there was some kind of error.", ...
57.722222
20.777778
def clone(self, config, **kwargs): """Make a clone of this analysis instance.""" gta = GTAnalysis(config, **kwargs) gta._roi = copy.deepcopy(self.roi) return gta
[ "def", "clone", "(", "self", ",", "config", ",", "*", "*", "kwargs", ")", ":", "gta", "=", "GTAnalysis", "(", "config", ",", "*", "*", "kwargs", ")", "gta", ".", "_roi", "=", "copy", ".", "deepcopy", "(", "self", ".", "roi", ")", "return", "gta" ...
37.8
6.4
def serialize(ty, *values, **kwargs): """ Serialize value using type specification in ty. ABI.serialize('int256', 1000) ABI.serialize('(int, int256)', 1000, 2000) """ try: parsed_ty = abitypes.parse(ty) except Exception as e: # Catch and rebrand parsing errors raise EthereumError(str(e)) if parsed_ty[0] != 'tuple': if len(values) > 1: raise ValueError('too many values passed for non-tuple') values = values[0] if isinstance(values, str): values = values.encode() else: # implement type forgiveness for bytesM/string types # allow python strs also to be used for Solidity bytesM/string types values = tuple(val.encode() if isinstance(val, str) else val for val in values) result, dyn_result = ABI._serialize(parsed_ty, values) return result + dyn_result
[ "def", "serialize", "(", "ty", ",", "*", "values", ",", "*", "*", "kwargs", ")", ":", "try", ":", "parsed_ty", "=", "abitypes", ".", "parse", "(", "ty", ")", "except", "Exception", "as", "e", ":", "# Catch and rebrand parsing errors", "raise", "EthereumErr...
38.44
15.4
def _c3_merge(sequences): # noqa """Merges MROs in *sequences* to a single MRO using the C3 algorithm. Adapted from http://www.python.org/download/releases/2.3/mro/. """ result = [] while True: sequences = [s for s in sequences if s] # purge empty sequences if not sequences: return result for s1 in sequences: # find merge candidates among seq heads candidate = s1[0] for s2 in sequences: if candidate in s2[1:]: candidate = None break # reject the current head, it appears later else: break if not candidate: raise RuntimeError("Inconsistent hierarchy") result.append(candidate) # remove the chosen candidate for seq in sequences: if seq[0] == candidate: del seq[0]
[ "def", "_c3_merge", "(", "sequences", ")", ":", "# noqa", "result", "=", "[", "]", "while", "True", ":", "sequences", "=", "[", "s", "for", "s", "in", "sequences", "if", "s", "]", "# purge empty sequences", "if", "not", "sequences", ":", "return", "resul...
34
16.230769
def list_certs(self, filters=None): """Retrieve loaded certificates. :param filters: retrieve only matching certificates (optional) :type filters: dict :return: list of installed trap, drop and bypass policies :rtype: list """ _, cert_list = self.handler.streamed_request("list-certs", "list-cert", filters) return cert_list
[ "def", "list_certs", "(", "self", ",", "filters", "=", "None", ")", ":", "_", ",", "cert_list", "=", "self", ".", "handler", ".", "streamed_request", "(", "\"list-certs\"", ",", "\"list-cert\"", ",", "filters", ")", "return", "cert_list" ]
39.272727
19
def formatter(self, value): ''' Format a enumerate value to enumerate names if possible. Used to generate human readable dump result. ''' if not self._bitwise: n = self.getName(value) if n is None: return value else: return n else: names = [] for k,v in sorted(self._values.items(), key=lambda x: x[1], reverse=True): if (v & value) == v: names.append(k) value = value ^ v names.reverse() if value != 0: names.append(hex(value)) if not names: return 0 return ' '.join(names)
[ "def", "formatter", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "_bitwise", ":", "n", "=", "self", ".", "getName", "(", "value", ")", "if", "n", "is", "None", ":", "return", "value", "else", ":", "return", "n", "else", ":", "na...
31.695652
17.173913
def is_valid_index(self, code): """ returns: True | Flase , based on whether code is valid """ index_list = self.get_index_list() return True if code.upper() in index_list else False
[ "def", "is_valid_index", "(", "self", ",", "code", ")", ":", "index_list", "=", "self", ".", "get_index_list", "(", ")", "return", "True", "if", "code", ".", "upper", "(", ")", "in", "index_list", "else", "False" ]
36.166667
8.833333
def make_linear_workflow(*tasks, **kwargs): """Factory method for creating linear workflows. :param tasks: EOTask's t1,t2,...,tk with dependencies t1->t2->...->tk :param kwargs: Optional keyword arguments (such as workflow name) forwarded to the constructor :return: A new EO workflow instance :rtype: EOWorkflow """ warnings.warn("Method 'make_linear_workflow' will soon be removed. Use LinearWorkflow class instead", DeprecationWarning, stacklevel=2) return LinearWorkflow(*tasks, **kwargs)
[ "def", "make_linear_workflow", "(", "*", "tasks", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"Method 'make_linear_workflow' will soon be removed. Use LinearWorkflow class instead\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "r...
48.333333
24.416667
def _step(vertices, d_limit, temperature, placements, l2v, v2n, vertices_resources, fixed_vertices, machine, has_wrap_around_links, random): """Attempt a single swap operation: the kernel of the Simulated Annealing algorithm. Parameters ---------- vertices : [vertex, ...] The set of *movable* vertices. d_limit : int The maximum distance over-which swaps are allowed. temperature : float > 0.0 or None The temperature (i.e. likelihood of accepting a non-advantageous swap). Higher temperatures mean higher chances of accepting a swap. placements : {vertex: (x, y), ...} The positions of all vertices, will be updated if a swap is made. l2v : {(x, y): [vertex, ...], ...} Lookup from chip to vertices, will be updated if a swap is made. v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...} Lookup from vertex to all nets that vertex is in. vertices_resources : {vertex: {resource: value, ...}, ...} fixed_vertices : {vertex, ...} The set of vertices which must not be moved. machine : :py:class:`rig.place_and_route.Machine` Describes the state of the machine including the resources actually available on each chip given the current placements. Updated if a swap is made. has_wrap_around_links : bool Should the placements attempt to make use of wrap-around links? random : :py:class:`random.Random` The random number generator to use. Returns ------- (swapped, delta) swapped is a boolean indicating if a swap was made. delta is a float indicating the change in cost resulting from the swap (or 0.0 when no swap is made). """ # Special case: If the machine is a singleton, no swaps can be made so just # terminate. if machine.width == 1 and machine.height == 1: return (False, 0.0) # Select a vertex to swap at random src_vertex = random.choice(vertices) # Select a random (nearby) location to swap the vertex with. Note: this is # guaranteed to be different from the selected vertex, otherwise the swap # cannot change the cost of the placements. 
# XXX: Does not consider hexagonal properties of the system! src_location = placements[src_vertex] dst_location = src_location while dst_location == src_location: if has_wrap_around_links: dst_location = tuple(random.randint(v - d_limit, v + d_limit) % limit for v, limit in [(src_location[0], machine.width), (src_location[1], machine.height)]) else: dst_location = tuple(random.randint(max(v - d_limit, 0), min(v + d_limit, limit-1)) for v, limit in [(src_location[0], machine.width), (src_location[1], machine.height)]) # If we've inadvertently selected a dead chip to swap to, abort the swap. if dst_location not in machine: return (False, 0.0) # Find out which vertices (if any) must be swapped out of the destination # to make room for the vertex we're moving. src_resources = vertices_resources[src_vertex] dst_vertices = _get_candidate_swap(src_resources, dst_location, l2v, vertices_resources, fixed_vertices, machine) # The destination simply isn't big enough (no matter how many vertices at # the destination are moved), abort the swap. if dst_vertices is None: return (False, 0.0) # Make sure that any vertices moved out of the destination will fit in the # space left in the source location. If there isn't enough space, abort the # swap. 
resources = machine[src_location] resources = add_resources(resources, src_resources) for dst_vertex in dst_vertices: resources = subtract_resources(resources, vertices_resources[dst_vertex]) if overallocated(resources): return (False, 0.0) # Work out the cost of the nets involved *before* swapping cost_before = _vertex_net_cost(src_vertex, v2n, placements, has_wrap_around_links, machine) for dst_vertex in dst_vertices: cost_before += _vertex_net_cost(dst_vertex, v2n, placements, has_wrap_around_links, machine) # Swap the vertices _swap([src_vertex], src_location, dst_vertices, dst_location, l2v, vertices_resources, placements, machine) # Work out the new cost cost_after = _vertex_net_cost(src_vertex, v2n, placements, has_wrap_around_links, machine) for dst_vertex in dst_vertices: cost_after += _vertex_net_cost(dst_vertex, v2n, placements, has_wrap_around_links, machine) # If the swap was beneficial, keep it, otherwise keep it with a probability # related to just how bad the cost change is is and the temperature. delta = cost_after - cost_before if delta <= 0.0 or random.random() < math.exp(-delta/temperature): # Keep the swap! return (True, delta) else: # Revert the swap _swap([src_vertex], dst_location, dst_vertices, src_location, l2v, vertices_resources, placements, machine) return (False, 0.0)
[ "def", "_step", "(", "vertices", ",", "d_limit", ",", "temperature", ",", "placements", ",", "l2v", ",", "v2n", ",", "vertices_resources", ",", "fixed_vertices", ",", "machine", ",", "has_wrap_around_links", ",", "random", ")", ":", "# Special case: If the machine...
43.692913
21.047244
def extract_common_fields(self, data): """Extract fields from a basic user query.""" email = None for curr_email in data.get("emails", []): email = email or curr_email.get("email") if curr_email.get("verified", False) and \ curr_email.get("primary", False): email = curr_email.get("email") return dict( email=email, id=data.get('id'), name=data.get('name'), first_name=data.get('first_name'), last_name=data.get('last_name'), image_url=data.get('image_url') )
[ "def", "extract_common_fields", "(", "self", ",", "data", ")", ":", "email", "=", "None", "for", "curr_email", "in", "data", ".", "get", "(", "\"emails\"", ",", "[", "]", ")", ":", "email", "=", "email", "or", "curr_email", ".", "get", "(", "\"email\""...
36.235294
12.529412
def operator_complexity(self): """Operator complexity of this multigrid hierarchy. Defined as: Number of nonzeros in the matrix on all levels / Number of nonzeros in the matrix on the finest level """ return sum([level.A.nnz for level in self.levels]) /\ float(self.levels[0].A.nnz)
[ "def", "operator_complexity", "(", "self", ")", ":", "return", "sum", "(", "[", "level", ".", "A", ".", "nnz", "for", "level", "in", "self", ".", "levels", "]", ")", "/", "float", "(", "self", ".", "levels", "[", "0", "]", ".", "A", ".", "nnz", ...
34.3
17.7
def sample_top(a=None, top_k=10): """Sample from ``top_k`` probabilities. Parameters ---------- a : list of float List of probabilities. top_k : int Number of candidates to be considered. """ if a is None: a = [] idx = np.argpartition(a, -top_k)[-top_k:] probs = a[idx] # tl.logging.info("new %f" % probs) probs = probs / np.sum(probs) choice = np.random.choice(idx, p=probs) return choice
[ "def", "sample_top", "(", "a", "=", "None", ",", "top_k", "=", "10", ")", ":", "if", "a", "is", "None", ":", "a", "=", "[", "]", "idx", "=", "np", ".", "argpartition", "(", "a", ",", "-", "top_k", ")", "[", "-", "top_k", ":", "]", "probs", ...
22.45
17.45
def transition_retry(self, pipeline_key, retry_message): """Marks the given pipeline as requiring another retry. Does nothing if all attempts have been exceeded. Args: pipeline_key: db.Key of the _PipelineRecord that needs to be retried. retry_message: User-supplied message indicating the reason for the retry. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to retry pipeline ID "%s" but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to retry pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() params = pipeline_record.params offset_seconds = ( params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt)) pipeline_record.next_retry_time = ( self._gettime() + datetime.timedelta(seconds=offset_seconds)) pipeline_record.current_attempt += 1 pipeline_record.retry_message = retry_message pipeline_record.status = _PipelineRecord.WAITING if pipeline_record.current_attempt >= pipeline_record.max_attempts: root_pipeline_key = ( _PipelineRecord.root_pipeline.get_value_for_datastore( pipeline_record)) logging.warning( 'Giving up on pipeline ID "%s" after %d attempt(s); causing abort ' 'all the way to the root pipeline ID "%s"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name()) # NOTE: We do *not* set the status to aborted here to ensure that # this pipeline will be finalized before it has been marked as aborted. 
pipeline_record.abort_message = ( 'Aborting after %d attempts' % pipeline_record.current_attempt) task = taskqueue.Task( url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) else: task = taskqueue.Task( url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target']) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() db.run_in_transaction(txn)
[ "def", "transition_retry", "(", "self", ",", "pipeline_key", ",", "retry_message", ")", ":", "def", "txn", "(", ")", ":", "pipeline_record", "=", "db", ".", "get", "(", "pipeline_key", ")", "if", "pipeline_record", "is", "None", ":", "logging", ".", "warni...
43.301587
19.333333
def _erc_weights_ccd(x0, cov, b, maximum_iterations, tolerance): """ Calculates the equal risk contribution / risk parity weights given a DataFrame of returns. Args: * x0 (np.array): Starting asset weights. * cov (np.array): covariance matrix. * b (np.array): Risk target weights. * maximum_iterations (int): Maximum iterations in iterative solutions. * tolerance (float): Tolerance level in iterative solutions. Returns: np.array {weight} Reference: Griveau-Billion, Theophile and Richard, Jean-Charles and Roncalli, Thierry, A Fast Algorithm for Computing High-Dimensional Risk Parity Portfolios (2013). Available at SSRN: https://ssrn.com/abstract=2325255 """ n = len(x0) x = x0.copy() var = np.diagonal(cov) ctr = cov.dot(x) sigma_x = np.sqrt(x.T.dot(ctr)) for iteration in range(maximum_iterations): for i in range(n): alpha = var[i] beta = ctr[i] - x[i] * alpha gamma = -b[i] * sigma_x x_tilde = (-beta + np.sqrt( beta * beta - 4 * alpha * gamma)) / (2 * alpha) x_i = x[i] ctr = ctr - cov[i] * x_i + cov[i] * x_tilde sigma_x = sigma_x * sigma_x - 2 * x_i * cov[i].dot( x) + x_i * x_i * var[i] x[i] = x_tilde sigma_x = np.sqrt(sigma_x + 2 * x_tilde * cov[i].dot( x) - x_tilde * x_tilde * var[i]) # check convergence if np.power((x - x0) / x.sum(), 2).sum() < tolerance: return x / x.sum() x0 = x.copy() # no solution found raise ValueError('No solution found after {0} iterations.'.format( maximum_iterations))
[ "def", "_erc_weights_ccd", "(", "x0", ",", "cov", ",", "b", ",", "maximum_iterations", ",", "tolerance", ")", ":", "n", "=", "len", "(", "x0", ")", "x", "=", "x0", ".", "copy", "(", ")", "var", "=", "np", ".", "diagonal", "(", "cov", ")", "ctr", ...
30.508475
20.372881
def www_authenticate(self): """The `WWW-Authenticate` header in a parsed form.""" def on_update(www_auth): if not www_auth and 'www-authenticate' in self.headers: del self.headers['www-authenticate'] elif www_auth: self.headers['WWW-Authenticate'] = www_auth.to_header() header = self.headers.get('www-authenticate') return parse_www_authenticate_header(header, on_update)
[ "def", "www_authenticate", "(", "self", ")", ":", "def", "on_update", "(", "www_auth", ")", ":", "if", "not", "www_auth", "and", "'www-authenticate'", "in", "self", ".", "headers", ":", "del", "self", ".", "headers", "[", "'www-authenticate'", "]", "elif", ...
50.222222
15.666667
def match_in_kwargs(self, match_args, kwargs): """Matches against kwargs.""" for match, default in match_args: names = get_match_names(match) if names: tempvar = self.get_temp_var() self.add_def( tempvar + " = " + "".join( kwargs + '.pop("' + name + '") if "' + name + '" in ' + kwargs + " else " for name in names ) + default, ) with self.down_a_level(): self.match(match, tempvar) else: raise CoconutDeferredSyntaxError("keyword-only pattern-matching function arguments must have names", self.loc)
[ "def", "match_in_kwargs", "(", "self", ",", "match_args", ",", "kwargs", ")", ":", "for", "match", ",", "default", "in", "match_args", ":", "names", "=", "get_match_names", "(", "match", ")", "if", "names", ":", "tempvar", "=", "self", ".", "get_temp_var",...
42.333333
15.833333
def response_class(self, cls): """ Override the default wrapper used for the response. """ s = self._clone() s._response_class = cls return s
[ "def", "response_class", "(", "self", ",", "cls", ")", ":", "s", "=", "self", ".", "_clone", "(", ")", "s", ".", "_response_class", "=", "cls", "return", "s" ]
26.142857
11
def make_all_uppercase( lst: Union[list, tuple, str, set] ) -> Union[list, tuple, str, set]: """Make all characters uppercase. It supports characters in a (mix of) list, tuple, set or string. The return value is of the same type of the input value. """ if not isinstance(lst, (list, tuple, str, set)): raise TypeError('lst must be a list, a tuple, a set or a string') if isinstance(lst, str): return lst.upper() arr = list(lst) # enumerate is 70% slower than range # for i in range(len(lst)): # if isinstance(arr[i], (list, tuple, str, set)): # arr[i] = Aux.make_all_uppercase(arr[i]) arr[:] = [ Aux.make_all_uppercase(element) if ( isinstance(element, (list, tuple, str, set)) ) else element for element in arr ] if isinstance(lst, set): return set(arr) elif isinstance(lst, tuple): return tuple(arr) return arr
[ "def", "make_all_uppercase", "(", "lst", ":", "Union", "[", "list", ",", "tuple", ",", "str", ",", "set", "]", ")", "->", "Union", "[", "list", ",", "tuple", ",", "str", ",", "set", "]", ":", "if", "not", "isinstance", "(", "lst", ",", "(", "list...
31.242424
19.363636
def reset_window_layout(self): """Reset window layout to default""" answer = QMessageBox.warning(self, _("Warning"), _("Window layout will be reset to default settings: " "this affects window position, size and dockwidgets.\n" "Do you want to continue?"), QMessageBox.Yes | QMessageBox.No) if answer == QMessageBox.Yes: self.setup_layout(default=True)
[ "def", "reset_window_layout", "(", "self", ")", ":", "answer", "=", "QMessageBox", ".", "warning", "(", "self", ",", "_", "(", "\"Warning\"", ")", ",", "_", "(", "\"Window layout will be reset to default settings: \"", "\"this affects window position, size and dockwidgets...
52.777778
14.666667
def meraculous_runner(self): """ Check to make sure that the allAssembliesDir has been created, if not, make it. This will only execute for the first time an assembly has been run in this directory. Run the directory from allAssembliesDir. The self.callString instance attribute tells Meraculous to name the assembly directory self.runName. After the run is complete, create the meraculous report, passing the directory containing the run (aka self.thisAssemblyDir). """ #set the dir to temp assembly dir os.chdir(self.allAssembliesDir) print(self.callString) p = subprocess.run(self.callString, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) output = str(p.stdout) err = str(p.stderr) #generate the report for the run self._generate_report() #exit, returning the output and err return (output, err)
[ "def", "meraculous_runner", "(", "self", ")", ":", "#set the dir to temp assembly dir", "os", ".", "chdir", "(", "self", ".", "allAssembliesDir", ")", "print", "(", "self", ".", "callString", ")", "p", "=", "subprocess", ".", "run", "(", "self", ".", "callSt...
37.851852
20.518519
def is_complex_floating_dtype(dtype): """Return ``True`` if ``dtype`` is a complex floating point type.""" dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.complexfloating)
[ "def", "is_complex_floating_dtype", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "return", "np", ".", "issubsctype", "(", "getattr", "(", "dtype", ",", "'base'", ",", "None", ")", ",", "np", ".", "complexfloating", ")" ]
52.75
12.75
def team_present( name, description=None, repo_names=None, privacy='secret', permission='pull', members=None, enforce_mfa=False, no_mfa_grace_seconds=0, profile="github", **kwargs): ''' Ensure a team is present name This is the name of the team in the organization. description The description of the team. repo_names The names of repositories to add the team to. privacy The level of privacy for the team, can be 'secret' or 'closed'. Defaults to secret. permission The default permission for new repositories added to the team, can be 'pull', 'push' or 'admin'. Defaults to pull. members The members belonging to the team, specified as a dict of member name to optional configuration. Options include 'enforce_mfa_from' and 'mfa_exempt'. enforce_mfa Whether to enforce MFA requirements on members of the team. If True then all members without `mfa_exempt: True` configured will be removed from the team. Note that `no_mfa_grace_seconds` may be set to allow members a grace period. no_mfa_grace_seconds The number of seconds of grace time that a member will have to enable MFA before being removed from the team. The grace period will begin from `enforce_mfa_from` on the member configuration, which defaults to 1970/01/01. Example: .. code-block:: yaml Ensure team test is present in github: github.team_present: - name: 'test' - members: user1: {} user2: {} Ensure team test_mfa is present in github: github.team_present: - name: 'test_mfa' - members: user1: enforce_mfa_from: 2016/06/15 - enforce_mfa: True .. 
versionadded:: 2016.11.0 ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': '' } target = __salt__['github.get_team'](name, profile=profile, **kwargs) test_comments = [] if target: # Team already exists parameters = {} if description is not None and target['description'] != description: parameters['description'] = description if permission is not None and target['permission'] != permission: parameters['permission'] = permission if privacy is not None and target['privacy'] != privacy: parameters['privacy'] = privacy if parameters: if __opts__['test']: test_comments.append('Team properties are set to be edited: {0}' .format(parameters)) ret['result'] = None else: result = __salt__['github.edit_team'](name, profile=profile, **parameters) if result: ret['changes']['team'] = { 'old': 'Team properties were {0}'.format(target), 'new': 'Team properties (that changed) are {0}'.format(parameters) } else: ret['result'] = False ret['comment'] = 'Failed to update team properties.' return ret manage_repos = repo_names is not None current_repos = set(__salt__['github.list_team_repos'](name, profile=profile) .keys()) repo_names = set(repo_names or []) repos_to_add = repo_names - current_repos repos_to_remove = current_repos - repo_names if repo_names else [] if repos_to_add: if __opts__['test']: test_comments.append('Team {0} will have the following repos ' 'added: {1}.'.format(name, list(repos_to_add))) ret['result'] = None else: for repo_name in repos_to_add: result = (__salt__['github.add_team_repo'] (repo_name, name, profile=profile, **kwargs)) if result: ret['changes'][repo_name] = { 'old': 'Repo {0} is not in team {1}'.format(repo_name, name), 'new': 'Repo {0} is in team {1}'.format(repo_name, name) } else: ret['result'] = False ret['comment'] = ('Failed to add repo {0} to team {1}.' 
.format(repo_name, name)) return ret if repos_to_remove: if __opts__['test']: test_comments.append('Team {0} will have the following repos ' 'removed: {1}.'.format(name, list(repos_to_remove))) ret['result'] = None else: for repo_name in repos_to_remove: result = (__salt__['github.remove_team_repo'] (repo_name, name, profile=profile, **kwargs)) if result: ret['changes'][repo_name] = { 'old': 'Repo {0} is in team {1}'.format(repo_name, name), 'new': 'Repo {0} is not in team {1}'.format(repo_name, name) } else: ret['result'] = False ret['comment'] = ('Failed to remove repo {0} from team {1}.' .format(repo_name, name)) return ret else: # Team does not exist - it will be created. if __opts__['test']: ret['comment'] = 'Team {0} is set to be created.'.format(name) ret['result'] = None return ret result = __salt__['github.add_team']( name, description=description, repo_names=repo_names, permission=permission, privacy=privacy, profile=profile, **kwargs ) if result: ret['changes']['team'] = {} ret['changes']['team']['old'] = None ret['changes']['team']['new'] = 'Team {0} has been created'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create team {0}.'.format(name) return ret manage_members = members is not None mfa_deadline = datetime.datetime.utcnow() - datetime.timedelta(seconds=no_mfa_grace_seconds) members_no_mfa = __salt__['github.list_members_without_mfa'](profile=profile) members_lower = {} for member_name, info in six.iteritems(members or {}): members_lower[member_name.lower()] = info member_change = False current_members = __salt__['github.list_team_members'](name, profile=profile) for member, member_info in six.iteritems(members or {}): log.info('Checking member %s in team %s', member, name) if member.lower() not in current_members: if (enforce_mfa and _member_violates_mfa(member, member_info, mfa_deadline, members_no_mfa)): if __opts__['test']: test_comments.append('User {0} will not be added to the ' 'team because they do not have MFA.' 
''.format(member)) else: # Add to team member_change = True if __opts__['test']: test_comments.append('User {0} set to be added to the ' 'team.'.format(member)) ret['result'] = None else: result = (__salt__['github.add_team_member'] (member, name, profile=profile, **kwargs)) if result: ret['changes'][member] = {} ret['changes'][member]['old'] = ( 'User {0} is not in team {1}'.format(member, name)) ret['changes'][member]['new'] = ( 'User {0} is in team {1}'.format(member, name)) else: ret['result'] = False ret['comment'] = ('Failed to add user {0} to team ' '{1}.'.format(member, name)) return ret for member in current_members: mfa_violation = False if member in members_lower: mfa_violation = _member_violates_mfa(member, members_lower[member], mfa_deadline, members_no_mfa) if (manage_members and member not in members_lower or (enforce_mfa and mfa_violation)): # Remove from team member_change = True if __opts__['test']: if mfa_violation: test_comments.append('User {0} set to be removed from the ' 'team because they do not have MFA.' .format(member)) else: test_comments.append('User {0} set to be removed from ' 'the team.'.format(member)) ret['result'] = None else: result = (__salt__['github.remove_team_member'] (member, name, profile=profile, **kwargs)) if result: extra_changes = ' due to MFA violation' if mfa_violation else '' ret['changes'][member] = { 'old': 'User {0} is in team {1}'.format(member, name), 'new': 'User {0} is not in team {1}{2}'.format(member, name, extra_changes) } else: ret['result'] = False ret['comment'] = ('Failed to remove user {0} from team {1}.' .format(member, name)) return ret if member_change: # Refresh team cache __salt__['github.list_team_members'](name, profile=profile, ignore_cache=False, **kwargs) if test_comments: ret['comment'] = '\n'.join(test_comments) return ret
[ "def", "team_present", "(", "name", ",", "description", "=", "None", ",", "repo_names", "=", "None", ",", "privacy", "=", "'secret'", ",", "permission", "=", "'pull'", ",", "members", "=", "None", ",", "enforce_mfa", "=", "False", ",", "no_mfa_grace_seconds"...
40.246154
22.984615
def plot_all(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, kurtosis=True, **kwargs): """ Plot waterfall of data as well as spectrum; also, placeholder to make even more complicated plots in the future. Args: f_start (float): start frequency, in MHz f_stop (float): stop frequency, in MHz logged (bool): Plot in linear (False) or dB units (True), t (int): integration number to plot (0 -> len(data)) logged (bool): Plot in linear (False) or dB units (True) if_id (int): IF identification (if multiple IF signals in file) kwargs: keyword args to be passed to matplotlib plot() and imshow() """ if self.header[b'nbits'] <=2: logged = False nullfmt = NullFormatter() # no labels # definitions for the axes left, width = 0.35, 0.5 bottom, height = 0.45, 0.5 width2, height2 = 0.1125, 0.15 bottom2, left2 = bottom - height2 - .025, left - width2 - .02 bottom3, left3 = bottom2 - height2 - .025, 0.075 rect_waterfall = [left, bottom, width, height] rect_colorbar = [left + width, bottom, .025, height] rect_spectrum = [left, bottom2, width, height2] rect_min_max = [left, bottom3, width, height2] rect_timeseries = [left + width, bottom, width2, height] rect_kurtosis = [left3, bottom3, 0.25, height2] rect_header = [left3 - .05, bottom, 0.2, height] # -------- # axColorbar = plt.axes(rect_colorbar) # print 'Ploting Colorbar' # print plot_data.max() # print plot_data.min() # # plot_colorbar = range(plot_data.min(),plot_data.max(),int((plot_data.max()-plot_data.min())/plot_data.shape[0])) # plot_colorbar = np.array([[plot_colorbar],[plot_colorbar]]) # # plt.imshow(plot_colorbar,aspect='auto', rasterized=True, interpolation='nearest',) # axColorbar.xaxis.set_major_formatter(nullfmt) # axColorbar.yaxis.set_major_formatter(nullfmt) # heatmap = axColorbar.pcolor(plot_data, edgecolors = 'none', picker=True) # plt.colorbar(heatmap, cax = axColorbar) # -------- axMinMax = plt.axes(rect_min_max) print('Plotting Min Max') self.plot_spectrum_min_max(logged=logged, f_start=f_start, 
f_stop=f_stop, t=t) plt.title('') axMinMax.yaxis.tick_right() axMinMax.yaxis.set_label_position("right") # -------- axSpectrum = plt.axes(rect_spectrum,sharex=axMinMax) print('Plotting Spectrum') self.plot_spectrum(logged=logged, f_start=f_start, f_stop=f_stop, t=t) plt.title('') axSpectrum.yaxis.tick_right() axSpectrum.yaxis.set_label_position("right") plt.xlabel('') # axSpectrum.xaxis.set_major_formatter(nullfmt) plt.setp(axSpectrum.get_xticklabels(), visible=False) # -------- axWaterfall = plt.axes(rect_waterfall,sharex=axMinMax) print('Plotting Waterfall') self.plot_waterfall(f_start=f_start, f_stop=f_stop, logged=logged, cb=False) plt.xlabel('') # no labels # axWaterfall.xaxis.set_major_formatter(nullfmt) plt.setp(axWaterfall.get_xticklabels(), visible=False) # -------- axTimeseries = plt.axes(rect_timeseries) print('Plotting Timeseries') self.plot_time_series(f_start=f_start, f_stop=f_stop, orientation='v') axTimeseries.yaxis.set_major_formatter(nullfmt) # axTimeseries.xaxis.set_major_formatter(nullfmt) # -------- # Could exclude since it takes much longer to run than the other plots. 
if kurtosis: axKurtosis = plt.axes(rect_kurtosis) print('Plotting Kurtosis') self.plot_kurtosis(f_start=f_start, f_stop=f_stop) # -------- axHeader = plt.axes(rect_header) print('Plotting Header') # Generate nicer header telescopes = {0: 'Fake data', 1: 'Arecibo', 2: 'Ooty', 3: 'Nancay', 4: 'Parkes', 5: 'Jodrell', 6: 'GBT', 8: 'Effelsberg', 10: 'SRT', 64: 'MeerKAT', 65: 'KAT7' } telescope = telescopes.get(self.header[b"telescope_id"], self.header[b"telescope_id"]) plot_header = "%14s: %s\n" % ("TELESCOPE_ID", telescope) for key in (b'SRC_RAJ', b'SRC_DEJ', b'TSTART', b'NCHANS', b'NBEAMS', b'NIFS', b'NBITS'): try: plot_header += "%14s: %s\n" % (key, self.header[key.lower()]) except KeyError: pass fch1 = "%6.6f MHz" % self.header[b'fch1'] foff = (self.header[b'foff'] * 1e6 * u.Hz) if np.abs(foff) > 1e6 * u.Hz: foff = str(foff.to('MHz')) elif np.abs(foff) > 1e3 * u.Hz: foff = str(foff.to('kHz')) else: foff = str(foff.to('Hz')) plot_header += "%14s: %s\n" % ("FCH1", fch1) plot_header += "%14s: %s\n" % ("FOFF", foff) plt.text(0.05, .95, plot_header, ha='left', va='top', wrap=True) axHeader.set_facecolor('white') axHeader.xaxis.set_major_formatter(nullfmt) axHeader.yaxis.set_major_formatter(nullfmt)
[ "def", "plot_all", "(", "self", ",", "t", "=", "0", ",", "f_start", "=", "None", ",", "f_stop", "=", "None", ",", "logged", "=", "False", ",", "if_id", "=", "0", ",", "kurtosis", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "self", "."...
39.635036
21.20438
def add_constraints(self): """ Set the base constraints on the relation query. :rtype: None """ if self._constraints: foreign_key = getattr(self._parent, self._foreign_key, None) if foreign_key is None: self._query = None else: table = self._related.get_table() self._query.where( "{}.{}".format(table, self._other_key), "=", foreign_key )
[ "def", "add_constraints", "(", "self", ")", ":", "if", "self", ".", "_constraints", ":", "foreign_key", "=", "getattr", "(", "self", ".", "_parent", ",", "self", ".", "_foreign_key", ",", "None", ")", "if", "foreign_key", "is", "None", ":", "self", ".", ...
30.375
17.5
def run_model(self, times=None, weather=None): """ Run the model. Parameters ---------- times : None or DatetimeIndex, default None Times at which to evaluate the model. Can be None if attribute `times` is already set. weather : None or DataFrame, default None If None, assumes air temperature is 20 C, wind speed is 0 m/s and irradiation calculated from clear sky data. Column names must be 'wind_speed', 'temp_air', 'dni', 'ghi', 'dhi'. Do not pass incomplete irradiation data. Use method :py:meth:`~pvlib.modelchain.ModelChain.complete_irradiance` instead. Returns ------- self Assigns attributes: times, solar_position, airmass, irradiance, total_irrad, effective_irradiance, weather, temps, aoi, aoi_modifier, spectral_modifier, dc, ac, losses. """ self.prepare_inputs(times, weather) self.aoi_model() self.spectral_model() self.effective_irradiance_model() self.temp_model() self.dc_model() self.ac_model() self.losses_model() return self
[ "def", "run_model", "(", "self", ",", "times", "=", "None", ",", "weather", "=", "None", ")", ":", "self", ".", "prepare_inputs", "(", "times", ",", "weather", ")", "self", ".", "aoi_model", "(", ")", "self", ".", "spectral_model", "(", ")", "self", ...
32.944444
20.666667
def generate_confirmation_key(identity_secret, tag, timestamp): """Generate confirmation key for trades. Can only be used once. :param identity_secret: authenticator identity secret :type identity_secret: bytes :param tag: tag identifies what the request, see list below :type tag: str :param timestamp: timestamp to use for generating key :type timestamp: int :return: confirmation key :rtype: bytes Tag choices: * ``conf`` to load the confirmations page * ``details`` to load details about a trade * ``allow`` to confirm a trade * ``cancel`` to cancel a trade """ data = struct.pack('>Q', int(timestamp)) + tag.encode('ascii') # this will NOT stop working in 2038 return hmac_sha1(bytes(identity_secret), data)
[ "def", "generate_confirmation_key", "(", "identity_secret", ",", "tag", ",", "timestamp", ")", ":", "data", "=", "struct", ".", "pack", "(", "'>Q'", ",", "int", "(", "timestamp", ")", ")", "+", "tag", ".", "encode", "(", "'ascii'", ")", "# this will NOT st...
35.409091
20.045455
def nested_dict_to_list(path, dic, exclusion=None): """ Transform nested dict to list """ result = [] exclusion = ['__self'] if exclusion is None else exclusion for key, value in dic.items(): if not any([exclude in key for exclude in exclusion]): if isinstance(value, dict): aux = path + key + "/" result.extend(nested_dict_to_list(aux, value)) else: if path.endswith("/"): path = path[:-1] result.append([path, key, value]) return result
[ "def", "nested_dict_to_list", "(", "path", ",", "dic", ",", "exclusion", "=", "None", ")", ":", "result", "=", "[", "]", "exclusion", "=", "[", "'__self'", "]", "if", "exclusion", "is", "None", "else", "exclusion", "for", "key", ",", "value", "in", "di...
28.35
16.95
def get_plot_data(self): """ Generates the JSON report to plot the gene boxes Following the convention of the reports platform, this method returns a list of JSON/dict objects with the information about each entry in the abricate file. The information contained in this JSON is:: {contig_id: <str>, seqRange: [<int>, <int>], gene: <str>, accession: <str>, coverage: <float>, identity: <float> } Note that the `seqRange` entry contains the position in the corresponding contig, not the absolute position in the whole assembly. Returns ------- json_dic : list List of JSON/dict objects with the report data. """ json_dic = {"plotData": []} sample_dic = {} sample_assembly_map = {} for entry in self.storage.values(): sample_id = re.match("(.*)_abr", entry["log_file"]).groups()[0] if sample_id not in sample_dic: sample_dic[sample_id] = {} # Get contig ID using the same regex as in `assembly_report.py` # template contig_id = self._get_contig_id(entry["reference"]) # Get database database = entry["database"] if database not in sample_dic[sample_id]: sample_dic[sample_id][database] = [] # Update the sample-assembly correspondence dict if sample_id not in sample_assembly_map: sample_assembly_map[sample_id] = entry["infile"] sample_dic[sample_id][database].append( {"contig": contig_id, "seqRange": entry["seq_range"], "gene": entry["gene"].replace("'", ""), "accession": entry["accession"], "coverage": entry["coverage"], "identity": entry["identity"], }, ) for sample, data in sample_dic.items(): json_dic["plotData"].append( { "sample": sample, "data": {"abricateXrange": data}, "assemblyFile": sample_assembly_map[sample] } ) return json_dic
[ "def", "get_plot_data", "(", "self", ")", ":", "json_dic", "=", "{", "\"plotData\"", ":", "[", "]", "}", "sample_dic", "=", "{", "}", "sample_assembly_map", "=", "{", "}", "for", "entry", "in", "self", ".", "storage", ".", "values", "(", ")", ":", "s...
34.090909
20.015152
def get_user(self, username): """Gest a specific user""" ret = {} tmp = self._get_user(self._byte_p2(username), ALL_ATTRS) if tmp is None: raise UserDoesntExist(username, self.backend_name) attrs_tmp = tmp[1] for attr in attrs_tmp: value_tmp = attrs_tmp[attr] if len(value_tmp) == 1: ret[attr] = value_tmp[0] else: ret[attr] = value_tmp return ret
[ "def", "get_user", "(", "self", ",", "username", ")", ":", "ret", "=", "{", "}", "tmp", "=", "self", ".", "_get_user", "(", "self", ".", "_byte_p2", "(", "username", ")", ",", "ALL_ATTRS", ")", "if", "tmp", "is", "None", ":", "raise", "UserDoesntExis...
33.571429
12.571429
def set_rgbmap(self, rgbmap): """Set RGB map object used by this instance. It controls how the values in the image are mapped to color. Parameters ---------- rgbmap : `~ginga.RGBMap.RGBMapper` RGB map. """ self.rgbmap = rgbmap t_ = rgbmap.get_settings() t_.share_settings(self.t_, keylist=rgbmap.settings_keys) rgbmap.add_callback('changed', self.rgbmap_cb) self.redraw(whence=2)
[ "def", "set_rgbmap", "(", "self", ",", "rgbmap", ")", ":", "self", ".", "rgbmap", "=", "rgbmap", "t_", "=", "rgbmap", ".", "get_settings", "(", ")", "t_", ".", "share_settings", "(", "self", ".", "t_", ",", "keylist", "=", "rgbmap", ".", "settings_keys...
31.133333
16.8
def unlock(name, zk_hosts=None, # in case you need to unlock without having run lock (failed execution for example) identifier=None, max_concurrency=1, ephemeral_lease=False, profile=None, scheme=None, username=None, password=None, default_acl=None): ''' Remove lease from semaphore. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} conn_kwargs = {'profile': profile, 'scheme': scheme, 'username': username, 'password': password, 'default_acl': default_acl} if __opts__['test']: ret['result'] = None ret['comment'] = 'Released lock if it is here' return ret if identifier is None: identifier = __grains__['id'] unlocked = __salt__['zk_concurrency.unlock'](name, zk_hosts=zk_hosts, identifier=identifier, max_concurrency=max_concurrency, ephemeral_lease=ephemeral_lease, **conn_kwargs) if unlocked: ret['result'] = True else: ret['comment'] = 'Unable to find lease for path {0}'.format(name) return ret
[ "def", "unlock", "(", "name", ",", "zk_hosts", "=", "None", ",", "# in case you need to unlock without having run lock (failed execution for example)", "identifier", "=", "None", ",", "max_concurrency", "=", "1", ",", "ephemeral_lease", "=", "False", ",", "profile", "="...
33.609756
23.95122
def set_package_name(self, package_name): ''' Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True ''' # It's also possible to imagine caching the module parsing here self._package_name = package_name root_module = self._import(package_name) self.root_path = root_module.__path__[-1] self.written_modules = None
[ "def", "set_package_name", "(", "self", ",", "package_name", ")", ":", "# It's also possible to imagine caching the module parsing here", "self", ".", "_package_name", "=", "package_name", "root_module", "=", "self", ".", "_import", "(", "package_name", ")", "self", "."...
35.352941
15.352941
def build_and_train(iterations, log_stride, test=False): """Construct the data, model, loss and optimizer then train.""" # Test mode settings. batch_size = 2 if test else FLAGS.batch_size num_mems = 2 if test else FLAGS.num_mems num_heads = 1 if test else FLAGS.num_mems num_blocks = 1 if test else FLAGS.num_mems head_size = 4 if test else FLAGS.head_size max_length = 3 if test else FLAGS.max_length max_nest = 2 if test else FLAGS.max_nest mlp_size = (20,) if test else (256, 256, 256, 256) with tf.Graph().as_default(): t0 = time.time() # Initialize the dataset. lte_train = learn_to_execute.LearnToExecute( batch_size, max_length, max_nest) lte_test = learn_to_execute.LearnToExecute( batch_size, max_length, max_nest, mode=learn_to_execute.Mode.TEST) train_data_iter = lte_train.make_one_shot_iterator().get_next() test_data_iter = lte_test.make_one_shot_iterator().get_next() output_size = lte_train.state.vocab_size # Create the model. core = snt.RelationalMemory( mem_slots=num_mems, head_size=head_size, num_heads=num_heads, num_blocks=num_blocks, gate_style=FLAGS.gate_style) final_mlp = snt.nets.MLP( output_sizes=mlp_size, activate_final=True) model = SequenceModel( core=core, target_size=output_size, final_mlp=final_mlp) tf.logging.info("Instantiated models ({:3f})".format(time.time() - t0)) # Define the loss & accuracy. def loss_fn(inputs, targets, input_sequence_length, output_sequence_length): """Creates the loss and the exports.""" logits = model( inputs, targets, input_sequence_length, output_sequence_length) targets = tf.cast(targets, tf.int32) sq_sz_out_max = targets.shape[0].value # Create a mask to ignore accuracy on buffer characters. sequence_sizes = tf.cast(output_sequence_length, tf.float32) lengths_transposed = tf.expand_dims(sequence_sizes, 1) range_row = tf.expand_dims( tf.range(0, sq_sz_out_max, 1, dtype=tf.float32), 0) mask = tf.cast(tf.transpose(tf.less(range_row, lengths_transposed)), tf.float32) # Compute token accuracy and solved. 
correct = tf.equal(tf.argmax(logits, 2), tf.argmax(targets, 2)) solved = tf.reduce_all(tf.boolean_mask(correct, tf.squeeze(mask)), axis=0) token_acc = tf.reduce_sum(tf.cast(correct, tf.float32) * mask) token_acc /= tf.reduce_sum(sequence_sizes) # Compute Loss. mask = tf.cast(tf.tile(tf.expand_dims(mask, 2), (1, 1, logits.shape[2])), tf.float32) masked_logits = logits * mask masked_target = tf.cast(targets, tf.float32) * mask logits_flat = tf.reshape(masked_logits, [sq_sz_out_max * batch_size, -1]) target_flat = tf.reshape(masked_target, [sq_sz_out_max * batch_size, -1]) xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits_flat, labels=target_flat) loss = tf.reduce_mean(xent) return loss, token_acc, solved # Get training step counter. global_step = tf.train.get_or_create_global_step() # Create the optimizer. learning_rate_op = tf.reduce_max([ tf.train.exponential_decay( FLAGS.learning_rate, global_step, decay_steps=FLAGS.epochs // 100, decay_rate=0.9, staircase=False), FLAGS.min_learning_rate ]) optimizer = tf.train.AdamOptimizer(learning_rate_op) # Compute loss, accuracy & the step op. inputs, targets, _, input_lengths, output_lengths = train_data_iter train_loss, train_acc, train_sol = loss_fn( inputs, targets, input_lengths, output_lengths) step_op = optimizer.minimize(train_loss, global_step=global_step) inputs, targets, _, input_lengths, output_lengths = test_data_iter _, test_acc, test_sol = loss_fn( inputs, targets, input_lengths, output_lengths) tf.logging.info("Created losses and optimizers ({:3f})".format( time.time() - t0)) # Begin Training. 
t0 = time.time() tf.logging.info("Starting training ({:3f})".format(time.time() - t0)) with tf.train.SingularMonitoredSession() as sess: for it in six.moves.range(iterations): sess.run([step_op, learning_rate_op]) if it % log_stride == 0: loss_v, train_acc_v, test_acc_v, train_sol_v, test_sol_v = sess.run([ train_loss, train_acc, test_acc, train_sol, test_sol]) elapsed = time.time() - t0 tf.logging.info( "iter: {:2d}, train loss {:3f}; train acc {:3f}; test acc {:3f};" " train solved {:3f}; test solved {:3f}; ({:3f})".format( it, loss_v, train_acc_v, test_acc_v, train_sol_v, test_sol_v, elapsed))
[ "def", "build_and_train", "(", "iterations", ",", "log_stride", ",", "test", "=", "False", ")", ":", "# Test mode settings.", "batch_size", "=", "2", "if", "test", "else", "FLAGS", ".", "batch_size", "num_mems", "=", "2", "if", "test", "else", "FLAGS", ".", ...
41.135593
17.847458
def image_single_point_source(self, image_model_class, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps): """ return model without including the point source contributions as a list (for each point source individually) :param image_model_class: ImageModel class instance :param kwargs_lens: lens model kwargs list :param kwargs_source: source model kwargs list :param kwargs_lens_light: lens light model kwargs list :param kwargs_ps: point source model kwargs list :return: list of images with point source isolated """ # reconstructed model with given psf model, error_map, cov_param, param = image_model_class.image_linear_solve(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) #model = image_model_class.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) data = image_model_class.Data.data mask = image_model_class.ImageNumerics.mask point_source_list = image_model_class.point_sources_list(kwargs_ps, kwargs_lens) n = len(point_source_list) model_single_source_list = [] for i in range(n): model_single_source = (data - model + point_source_list[i]) * mask model_single_source_list.append(model_single_source) return model_single_source_list
[ "def", "image_single_point_source", "(", "self", ",", "image_model_class", ",", "kwargs_lens", ",", "kwargs_source", ",", "kwargs_lens_light", ",", "kwargs_ps", ")", ":", "# reconstructed model with given psf", "model", ",", "error_map", ",", "cov_param", ",", "param", ...
60
24.416667
def op_right(op): """ Returns a type instance method for the given operator, applied when the instance appears on the right side of the expression. """ def method(self, other): return op(value_left(self, other), value_right(self, other)) return method
[ "def", "op_right", "(", "op", ")", ":", "def", "method", "(", "self", ",", "other", ")", ":", "return", "op", "(", "value_left", "(", "self", ",", "other", ")", ",", "value_right", "(", "self", ",", "other", ")", ")", "return", "method" ]
34.5
17.25
def read_lines_from_file(file_path: str) -> List[str]: """ Read text lines from a file """ # check if the file exists? with open(file_path) as csv_file: content = csv_file.readlines() return content
[ "def", "read_lines_from_file", "(", "file_path", ":", "str", ")", "->", "List", "[", "str", "]", ":", "# check if the file exists?", "with", "open", "(", "file_path", ")", "as", "csv_file", ":", "content", "=", "csv_file", ".", "readlines", "(", ")", "return...
36.166667
8.333333
def get_bank_hierarchy_design_session(self, proxy): """Gets the session designing bank hierarchies. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.BankHierarchyDesignSession) - a ``BankHierarchySession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_bank_hierarchy_design() is false`` *compliance: optional -- This method must be implemented if ``supports_bank_hierarchy_design()`` is true.* """ if not self.supports_bank_hierarchy_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.BankHierarchyDesignSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_bank_hierarchy_design_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_bank_hierarchy_design", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", ...
44.944444
17.055556
def hierarchyLookup(self, record): """ Looks up additional hierarchy information for the inputed record. :param record | <orb.Table> :return (<subclass of orb.Table> || None, <str> column) """ def _get_lookup(cls): if cls in self._hierarchyLookup: return self._hierarchyLookup[cls] for base in cls.__bases__: results = _get_lookup(base) if results: return results return (None, None) tableType, column = _get_lookup(type(record)) if tableType and column: return (tableType, column) default = self._hierarchyLookup.get(None) if default: return default return (None, None)
[ "def", "hierarchyLookup", "(", "self", ",", "record", ")", ":", "def", "_get_lookup", "(", "cls", ")", ":", "if", "cls", "in", "self", ".", "_hierarchyLookup", ":", "return", "self", ".", "_hierarchyLookup", "[", "cls", "]", "for", "base", "in", "cls", ...
30.928571
14.642857
def _delete_state(self, activity, agent, state_id=None, registration=None, etag=None): """Private method to delete a specified state from the LRS :param activity: Activity object of state to be deleted :type activity: :class:`tincan.activity.Activity` :param agent: Agent object of state to be deleted :type agent: :class:`tincan.agent.Agent` :param state_id: UUID of state to be deleted :type state_id: str | unicode :param registration: registration UUID of state to be deleted :type registration: str | unicode :param etag: etag of state to be deleted :type etag: str | unicode :return: LRS Response object with deleted state as content :rtype: :class:`tincan.lrs_response.LRSResponse` """ if not isinstance(activity, Activity): activity = Activity(activity) if not isinstance(agent, Agent): agent = Agent(agent) request = HTTPRequest( method="DELETE", resource="activities/state" ) if etag is not None: request.headers["If-Match"] = etag request.query_params = { "activityId": activity.id, "agent": agent.to_json(self.version) } if state_id is not None: request.query_params["stateId"] = state_id if registration is not None: request.query_params["registration"] = registration lrs_response = self._send_request(request) return lrs_response
[ "def", "_delete_state", "(", "self", ",", "activity", ",", "agent", ",", "state_id", "=", "None", ",", "registration", "=", "None", ",", "etag", "=", "None", ")", ":", "if", "not", "isinstance", "(", "activity", ",", "Activity", ")", ":", "activity", "...
35.302326
17.232558
def check_stripe_api_host(app_configs=None, **kwargs): """ Check that STRIPE_API_HOST is not being used in production. """ from django.conf import settings messages = [] if not settings.DEBUG and hasattr(settings, "STRIPE_API_HOST"): messages.append( checks.Warning( "STRIPE_API_HOST should not be set in production! This is most likely unintended.", hint="Remove STRIPE_API_HOST from your Django settings.", id="djstripe.W002", ) ) return messages
[ "def", "check_stripe_api_host", "(", "app_configs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "django", ".", "conf", "import", "settings", "messages", "=", "[", "]", "if", "not", "settings", ".", "DEBUG", "and", "hasattr", "(", "settings", ...
25.722222
24.277778
def get_projection(self, axis): """ Return the projection of this vector onto the given axis. The axis does not need to be normalized. """ scale = axis.dot(self) / axis.dot(axis) return axis * scale
[ "def", "get_projection", "(", "self", ",", "axis", ")", ":", "scale", "=", "axis", ".", "dot", "(", "self", ")", "/", "axis", ".", "dot", "(", "axis", ")", "return", "axis", "*", "scale" ]
45.6
5.8
def _validate_markdown(self, expfile): '''ensure that fields are present in markdown file''' try: import yaml except: bot.error('Python yaml is required for testing yml/markdown files.') sys.exit(1) self.metadata = {} uid = os.path.basename(expfile).strip('.md') if os.path.exists(expfile): with open(expfile, "r") as stream: docs = yaml.load_all(stream) for doc in docs: if isinstance(doc,dict): for k,v in doc.items(): print('%s: %s' %(k,v)) self.metadata[k] = v self.metadata['uid'] = uid fields = ['github', 'preview', 'name', 'layout', 'tags', 'uid', 'maintainer'] # Tests for all fields for field in fields: if field not in self.metadata: return False if self.metadata[field] in ['',None]: return False if 'github' not in self.metadata['github']: return notvalid('%s: not a valid github repository' % name) if not isinstance(self.metadata['tags'],list): return notvalid('%s: tags must be a list' % name) if not re.search("(\w+://)(.+@)*([\w\d\.]+)(:[\d]+){0,1}/*(.*)", self.metadata['github']): return notvalid('%s is not a valid URL.' %(self.metadata['github'])) return True
[ "def", "_validate_markdown", "(", "self", ",", "expfile", ")", ":", "try", ":", "import", "yaml", "except", ":", "bot", ".", "error", "(", "'Python yaml is required for testing yml/markdown files.'", ")", "sys", ".", "exit", "(", "1", ")", "self", ".", "metada...
38.075
19.925
def pmll(self,*args,**kwargs): """ NAME: pmll PURPOSE: return proper motion in Galactic longitude (in mas/yr) INPUT: t - (optional) time at which to get pmll (can be Quantity) v obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantities) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) OUTPUT: pm_l(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU) """ out= self._orb.pmll(*args,**kwargs) if len(out) == 1: return out[0] else: return out
[ "def", "pmll", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "out", "=", "self", ".", "_orb", ".", "pmll", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "len", "(", "out", ")", "==", "1", ":", "return", "out", "[...
28.837838
28.783784
def generate_surface_vectors(self, film_millers, substrate_millers): """ Generates the film/substrate slab combinations for a set of given miller indicies Args: film_millers(array): all miller indices to generate slabs for film substrate_millers(array): all miller indicies to generate slabs for substrate """ vector_sets = [] for f in film_millers: film_slab = SlabGenerator(self.film, f, 20, 15, primitive=False).get_slab() film_vectors = reduce_vectors(film_slab.lattice.matrix[0], film_slab.lattice.matrix[1]) for s in substrate_millers: substrate_slab = SlabGenerator(self.substrate, s, 20, 15, primitive=False).get_slab() substrate_vectors = reduce_vectors( substrate_slab.lattice.matrix[0], substrate_slab.lattice.matrix[1]) vector_sets.append((film_vectors, substrate_vectors, f, s)) return vector_sets
[ "def", "generate_surface_vectors", "(", "self", ",", "film_millers", ",", "substrate_millers", ")", ":", "vector_sets", "=", "[", "]", "for", "f", "in", "film_millers", ":", "film_slab", "=", "SlabGenerator", "(", "self", ".", "film", ",", "f", ",", "20", ...
39.931034
23.724138
def set_config_files_(self, *config_files): """Set the list of config files. Args: config_files (pathlike): path of config files, given in the order of reading. """ self._config_files = tuple(pathlib.Path(path) for path in config_files)
[ "def", "set_config_files_", "(", "self", ",", "*", "config_files", ")", ":", "self", ".", "_config_files", "=", "tuple", "(", "pathlib", ".", "Path", "(", "path", ")", "for", "path", "in", "config_files", ")" ]
36.25
19.875
def mapping_get(index, doc_type, hosts=None, profile=None): ''' Retrieve mapping definition of index or index/type index Index for the mapping doc_type Name of the document type CLI example:: salt myminion elasticsearch.mapping_get testindex user ''' es = _get_instance(hosts, profile) try: return es.indices.get_mapping(index=index, doc_type=doc_type) except elasticsearch.exceptions.NotFoundError: return None except elasticsearch.TransportError as e: raise CommandExecutionError("Cannot retrieve mapping {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error))
[ "def", "mapping_get", "(", "index", ",", "doc_type", ",", "hosts", "=", "None", ",", "profile", "=", "None", ")", ":", "es", "=", "_get_instance", "(", "hosts", ",", "profile", ")", "try", ":", "return", "es", ".", "indices", ".", "get_mapping", "(", ...
31.666667
27.952381
def reload_context(self, es_based, **kwargs): """ Reload `self.context` object into a DB or ES object. A reload is performed by getting the object ID from :kwargs: and then getting a context key item from the new instance of `self._factory` which is an ACL class used by the current view. Arguments: :es_based: Boolean. Whether to init ACL ac es-based or not. This affects the backend which will be queried - either DB or ES :kwargs: Kwargs that contain value for current resource 'id_name' key """ from .acl import BaseACL key = self._get_context_key(**kwargs) kwargs = {'request': self.request} if issubclass(self._factory, BaseACL): kwargs['es_based'] = es_based acl = self._factory(**kwargs) if acl.item_model is None: acl.item_model = self.Model self.context = acl[key]
[ "def", "reload_context", "(", "self", ",", "es_based", ",", "*", "*", "kwargs", ")", ":", "from", ".", "acl", "import", "BaseACL", "key", "=", "self", ".", "_get_context_key", "(", "*", "*", "kwargs", ")", "kwargs", "=", "{", "'request'", ":", "self", ...
39.125
18.5
def initialize(self, conf, ctx): """Initialization steps: 1. Get :func:`~birding.search.search_manager_from_config`. 2. Prepare to track searched terms as to avoid redundant searches. """ self.manager = get_search_manager() config = get_config()['TwitterSearchBolt'] self.term_shelf = shelf_from_config(config)
[ "def", "initialize", "(", "self", ",", "conf", ",", "ctx", ")", ":", "self", ".", "manager", "=", "get_search_manager", "(", ")", "config", "=", "get_config", "(", ")", "[", "'TwitterSearchBolt'", "]", "self", ".", "term_shelf", "=", "shelf_from_config", "...
39.888889
14.666667
def _handle_compound(self, node, scope, ctxt, stream): """Handle Compound nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ self._dlog("handling compound statement") #scope.push() try: for child in node.children(): self._handle_node(child, scope, ctxt, stream) # in case a return occurs, be sure to pop the scope # (returns are implemented by raising an exception) finally: #scope.pop() pass
[ "def", "_handle_compound", "(", "self", ",", "node", ",", "scope", ",", "ctxt", ",", "stream", ")", ":", "self", ".", "_dlog", "(", "\"handling compound statement\"", ")", "#scope.push()", "try", ":", "for", "child", "in", "node", ".", "children", "(", ")"...
25.272727
20.590909
def include_feature(self, name): """Request inclusion of feature named 'name'""" if self.feature_is_included(name) == 0: descr = self.features[name].description raise DistutilsOptionError( descr + " is required, but was excluded or is not available" ) self.features[name].include_in(self) self._set_feature(name, 1)
[ "def", "include_feature", "(", "self", ",", "name", ")", ":", "if", "self", ".", "feature_is_included", "(", "name", ")", "==", "0", ":", "descr", "=", "self", ".", "features", "[", "name", "]", ".", "description", "raise", "DistutilsOptionError", "(", "...
39.1
14
def transloadsForPeer(self, peer): """ Returns an iterator of transloads that apply to a particular peer. """ for tl in self.transloads.itervalues(): if peer in tl.peers: yield tl
[ "def", "transloadsForPeer", "(", "self", ",", "peer", ")", ":", "for", "tl", "in", "self", ".", "transloads", ".", "itervalues", "(", ")", ":", "if", "peer", "in", "tl", ".", "peers", ":", "yield", "tl" ]
33.285714
10.142857
def getslide(self,slide_num): """ Return the triggers with a specific slide number. @param slide_num: the slide number to recover (contained in the event_id) """ slideTrigs = self.copy() slideTrigs.extend(row for row in self if row.get_slide_number() == slide_num) return slideTrigs
[ "def", "getslide", "(", "self", ",", "slide_num", ")", ":", "slideTrigs", "=", "self", ".", "copy", "(", ")", "slideTrigs", ".", "extend", "(", "row", "for", "row", "in", "self", "if", "row", ".", "get_slide_number", "(", ")", "==", "slide_num", ")", ...
36.125
16.375
def crypto_scalarmult_base(n): """ Computes and returns the scalar product of a standard group element and an integer ``n``. :param n: bytes :rtype: bytes """ q = ffi.new("unsigned char[]", crypto_scalarmult_BYTES) rc = lib.crypto_scalarmult_base(q, n) ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError) return ffi.buffer(q, crypto_scalarmult_SCALARBYTES)[:]
[ "def", "crypto_scalarmult_base", "(", "n", ")", ":", "q", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "crypto_scalarmult_BYTES", ")", "rc", "=", "lib", ".", "crypto_scalarmult_base", "(", "q", ",", "n", ")", "ensure", "(", "rc", "==", "0", "...
26.6875
18.6875
def parse_reports(self): """ Find bamtools stats reports and parse their data """ # Set up vars self.bamtools_stats_data = dict() regexes = { 'total_reads': r"Total reads:\s*(\d+)", 'mapped_reads': r"Mapped reads:\s*(\d+)", 'mapped_reads_pct': r"Mapped reads:\s*\d+\s+\(([\d\.]+)%\)", 'forward_strand': r"Forward strand:\s*(\d+)", 'forward_strand_pct': r"Forward strand:\s*\d+\s+\(([\d\.]+)%\)", 'reverse_strand': r"Reverse strand:\s*(\d+)", 'reverse_strand_pct': r"Reverse strand:\s*\d+\s+\(([\d\.]+)%\)", 'failed_qc': r"Failed QC:\s*(\d+)", 'failed_qc_pct': r"Failed QC:\s*\d+\s+\(([\d\.]+)%\)", 'duplicates': r"Duplicates:\s*(\d+)", 'duplicates_pct': r"Duplicates:\s*\d+\s+\(([\d\.]+)%\)", 'paired_end': r"Paired-end reads:\s*(\d+)", 'paired_end_pct': r"Paired-end reads:\s*\d+\s+\(([\d\.]+)%\)", 'proper_pairs': r"'Proper-pairs'\s*(\d+)", 'proper_pairs_pct': r"'Proper-pairs'\s*\d+\s+\(([\d\.]+)%\)", 'both_mapped': r"Both pairs mapped:\s*(\d+)", 'both_mapped_pct': r"Both pairs mapped:\s*\d+\s+\(([\d\.]+)%\)", 'read_1': r"Read 1:\s*(\d+)", 'read_2': r"Read 2:\s*(\d+)", 'singletons': r"Singletons:\s*(\d+)", 'singletons_pct': r"Singletons:\s*\d+\s+\(([\d\.]+)%\)", } # Go through files and parse data using regexes for f in self.find_log_files('bamtools/stats'): d = dict() for k, r in regexes.items(): r_search = re.search(r, f['f'], re.MULTILINE) if r_search: d[k] = float(r_search.group(1)) if len(d) > 0: if f['s_name'] in self.bamtools_stats_data: log.debug("Duplicate sample name found! 
Overwriting: {}".format(f['s_name'])) self.add_data_source(f, section='stats') self.bamtools_stats_data[f['s_name']] = d # Filter to strip out ignored sample names self.bamtools_stats_data = self.ignore_samples(self.bamtools_stats_data) if len(self.bamtools_stats_data) > 0: # Write to file self.write_data_file(self.bamtools_stats_data, 'multiqc_bamtools_stats') # Add to general stats table self.general_stats_headers['duplicates_pct'] = { 'title': '% Duplicates', 'description': '% Duplicate Reads', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'OrRd' } self.general_stats_headers['mapped_reads_pct'] = { 'title': '% Mapped', 'description': '% Mapped Reads', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'RdYlGn' } for s_name in self.bamtools_stats_data: if s_name not in self.general_stats_data: self.general_stats_data[s_name] = dict() self.general_stats_data[s_name].update( self.bamtools_stats_data[s_name] ) # Make dot plot of counts keys = OrderedDict() defaults = { 'min': 0, 'max': 100, 'decimalPlaces': 2, 'suffix': '%' } num_defaults = { 'min': 0, 'modify': lambda x: float(x) / 1000000.0, 'decimalPlaces': 2 } keys['total_reads'] = dict(num_defaults, **{'title': 'Total reads', 'description': 'Total reads (millions)' }); keys['mapped_reads_pct'] = dict(defaults, **{'title': 'Mapped reads' }) keys['forward_strand_pct'] = dict(defaults, **{'title': 'Forward strand' }) keys['reverse_strand_pct'] = dict(defaults, **{'title': 'Reverse strand' }) keys['failed_qc_pct'] = dict(defaults, **{'title': 'Failed QC' }) keys['duplicates_pct'] = dict(defaults, **{'title': 'Duplicates' }) keys['paired_end_pct'] = dict(defaults, **{'title': 'Paired-end', 'description': 'Paired-end reads' }) keys['proper_pairs_pct'] = dict(defaults, **{'title': 'Proper-pairs' }) keys['both_mapped_pct'] = dict(defaults, **{'title': 'Both mapped', 'description': 'Both pairs mapped' }) keys['bt_read_1'] = dict(num_defaults, **{'title': 'Read 1', 'description': 'Read 1 (millions)' }); keys['bt_read_2'] = 
dict(num_defaults, **{'title': 'Read 2', 'description': 'Read 2 (millions)' }); keys['singletons_pct'] = dict(defaults, **{'title': 'Singletons' }) self.add_section ( name = 'Bamtools Stats', anchor = 'bamtools-stats', plot = beeswarm.plot(self.bamtools_stats_data, keys) ) # Return number of samples found return len(self.bamtools_stats_data)
[ "def", "parse_reports", "(", "self", ")", ":", "# Set up vars", "self", ".", "bamtools_stats_data", "=", "dict", "(", ")", "regexes", "=", "{", "'total_reads'", ":", "r\"Total reads:\\s*(\\d+)\"", ",", "'mapped_reads'", ":", "r\"Mapped reads:\\s*(\\d+)\"", ",", "'ma...
42.62037
22.583333
def _create_session(self): """ Creates a fresh session with no/default headers and proxies """ logger.debug("Create new phantomjs web driver") self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap, **self.driver_args) self.set_cookies(self.current_cookies) self.driver.set_window_size(1920, 1080)
[ "def", "_create_session", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Create new phantomjs web driver\"", ")", "self", ".", "driver", "=", "webdriver", ".", "PhantomJS", "(", "desired_capabilities", "=", "self", ".", "dcap", ",", "*", "*", "self", ...
44.111111
13.666667
def add(self, *args, **kwargs): """ add(other, rho=0, inplace=True) Adds an *other* number instance. The correlation coefficient *rho* can be configured per uncertainty when passed as a dict. When *inplace* is *False*, a new instance is returned. """ return self._apply(operator.add, *args, **kwargs)
[ "def", "add", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_apply", "(", "operator", ".", "add", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
55.833333
23.166667
def get_fragment(self, list_of_indextuples, give_only_index=False, use_lookup=None): """Get the indices of the atoms in a fragment. The list_of_indextuples contains all bondings from the molecule to the fragment. ``[(1,3), (2,4)]`` means for example that the fragment is connected over two bonds. The first bond is from atom 1 in the molecule to atom 3 in the fragment. The second bond is from atom 2 in the molecule to atom 4 in the fragment. Args: list_of_indextuples (list): give_only_index (bool): If ``True`` a set of indices is returned. Otherwise a new Cartesian instance. use_lookup (bool): Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: A set of indices or a new Cartesian instance. """ if use_lookup is None: use_lookup = settings['defaults']['use_lookup'] exclude = [tuple[0] for tuple in list_of_indextuples] index_of_atom = list_of_indextuples[0][1] fragment_index = self.get_coordination_sphere( index_of_atom, exclude=set(exclude), n_sphere=float('inf'), only_surface=False, give_only_index=True, use_lookup=use_lookup) if give_only_index: return fragment_index else: return self.loc[fragment_index, :]
[ "def", "get_fragment", "(", "self", ",", "list_of_indextuples", ",", "give_only_index", "=", "False", ",", "use_lookup", "=", "None", ")", ":", "if", "use_lookup", "is", "None", ":", "use_lookup", "=", "settings", "[", "'defaults'", "]", "[", "'use_lookup'", ...
44.878788
21.757576
def _set_response_handlers(self, stanza, res_handler, err_handler, timeout_handler = None, timeout = None): """Same as `set_response_handlers` but assume `self.lock` is acquired.""" # pylint: disable-msg=R0913 self.fix_out_stanza(stanza) to_jid = stanza.to_jid if to_jid: to_jid = unicode(to_jid) if timeout_handler: def callback(dummy1, dummy2): """Wrapper for the timeout handler to make it compatible with the `ExpiringDictionary` """ timeout_handler() self._iq_response_handlers.set_item( (stanza.stanza_id, to_jid), (res_handler,err_handler), timeout, callback) else: self._iq_response_handlers.set_item( (stanza.stanza_id, to_jid), (res_handler, err_handler), timeout)
[ "def", "_set_response_handlers", "(", "self", ",", "stanza", ",", "res_handler", ",", "err_handler", ",", "timeout_handler", "=", "None", ",", "timeout", "=", "None", ")", ":", "# pylint: disable-msg=R0913", "self", ".", "fix_out_stanza", "(", "stanza", ")", "to...
46.086957
12.043478
def asr_breaking(self, tol_eigendisplacements=1e-5): """ Returns the breaking of the acoustic sum rule for the three acoustic modes, if Gamma is present. None otherwise. If eigendisplacements are available they are used to determine the acoustic modes: selects the bands corresponding to the eigendisplacements that represent to a translation within tol_eigendisplacements. If these are not identified or eigendisplacements are missing the first 3 modes will be used (indices [0:3]). """ for i in range(self.nb_qpoints): if np.allclose(self.qpoints[i].frac_coords, (0, 0, 0)): if self.has_eigendisplacements: acoustic_modes_index = [] for j in range(self.nb_bands): eig = self.eigendisplacements[j][i] if np.max(np.abs(eig[1:] - eig[:1])) < tol_eigendisplacements: acoustic_modes_index.append(j) # if acoustic modes are not correctly identified return use # the first three modes if len(acoustic_modes_index) != 3: acoustic_modes_index = [0, 1, 2] return self.bands[acoustic_modes_index, i] else: return self.bands[:3, i] return None
[ "def", "asr_breaking", "(", "self", ",", "tol_eigendisplacements", "=", "1e-5", ")", ":", "for", "i", "in", "range", "(", "self", ".", "nb_qpoints", ")", ":", "if", "np", ".", "allclose", "(", "self", ".", "qpoints", "[", "i", "]", ".", "frac_coords", ...
47.62069
21.758621