def _check_for_eltorito_boot_info_table(self, ino):
    # type: (inode.Inode) -> None
    '''
    An internal method to check a boot directory record to see if it has
    an El Torito Boot Info Table embedded inside of it.

    Parameters:
     ino - The Inode to check for a Boot Info Table.
    Returns:
     Nothing.
    '''
    orig = self._cdfp.tell()
    with inode.InodeOpenData(ino, self.pvd.logical_block_size()) as (data_fp, data_len):
        data_fp.seek(8, os.SEEK_CUR)
        bi_table = eltorito.EltoritoBootInfoTable()
        if bi_table.parse(self.pvd, data_fp.read(eltorito.EltoritoBootInfoTable.header_length()), ino):
            data_fp.seek(-24, os.SEEK_CUR)
            # OK, the rest of the stuff checks out; do a final
            # check to make sure the checksum is reasonable.
            csum = self._calculate_eltorito_boot_info_table_csum(data_fp, data_len)
            if csum == bi_table.csum:
                ino.add_boot_info_table(bi_table)
    self._cdfp.seek(orig)
def launch_slurm(jobname: str,
                 cmd: str,
                 memory_mb: int,
                 project: str,
                 qos: str,
                 email: str,
                 duration: timedelta,
                 tasks_per_node: int,
                 cpus_per_task: int,
                 partition: str = "",
                 modules: List[str] = None,
                 directory: str = os.getcwd(),
                 encoding: str = "ascii") -> None:
    """
    Launch a job into the SLURM environment.

    Args:
        jobname: name of the job
        cmd: command to be executed
        memory_mb: maximum memory requirement per process (Mb)
        project: project name
        qos: quality-of-service name
        email: user's e-mail address
        duration: maximum duration per job
        tasks_per_node: tasks per (cluster) node
        cpus_per_task: CPUs per task
        partition: cluster partition name
        modules: SLURM modules to load
        directory: directory to change to
        encoding: encoding to apply to launch script as sent to ``sbatch``
    """
    if partition:
        partition_cmd = "#SBATCH -p {}".format(partition)
    else:
        partition_cmd = ""
    if modules is None:
        modules = ["default-wbic"]
    log.info("Launching SLURM job: {}", jobname)
    script = """#!/bin/bash

#! Name of the job:
#SBATCH -J {jobname}

#! Which project should jobs run under:
#SBATCH -A {project}

#! What QoS [Quality of Service] should the job run in?
#SBATCH --qos={qos}

#! How much resource should be allocated?
#SBATCH --tasks-per-node={tasks_per_node}
#SBATCH --cpus-per-task={cpus_per_task}

#! Memory requirements
#SBATCH --mem={memory_mb}

#! How much wall-clock time will be required?
#SBATCH --time={duration}

#! What e-mail address to use for notifications?
#SBATCH --mail-user={email}

#! What types of email messages do you wish to receive?
#SBATCH --mail-type=ALL

#! Uncomment this to prevent the job from being requeued (e.g. if
#! interrupted by node failure or system downtime):
#! SBATCH --no-requeue

#! Partition
{partition_cmd}

#! sbatch directives end here (put any additional directives above this line)

#! ############################################################
#! Modify the settings below to specify the application's environment, location
#! and launch method:

#! Optionally modify the environment seen by the application
#! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):
. /etc/profile.d/modules.sh     # Leave this line (enables the module command)
module purge                    # Removes all modules still loaded
module load {modules}           # Basic one, e.g. default-wbic, is REQUIRED - loads the basic environment

#! Insert additional module load commands after this line if needed:

#! Full path to your application executable:
application="hostname"

#! Run options for the application:
options=""

#! Work directory (i.e. where the job will run):
workdir="$SLURM_SUBMIT_DIR"  # The value of SLURM_SUBMIT_DIR sets workdir to the directory
                             # in which sbatch is run.

#! Are you using OpenMP (NB this is **unrelated to OpenMPI**)? If so increase this
#! safe value to no more than 24:
export OMP_NUM_THREADS=24

# Command line to be submitted by SLURM:
CMD="{cmd}"

###############################################################
### You should not have to change anything below this line ####
###############################################################

cd $workdir
echo -e "Changed directory to `pwd`.\n"

JOBID=$SLURM_JOB_ID

echo -e "JobID: $JOBID\n======"
echo "Time: `date`"
echo "Running on master node: `hostname`"
echo "Current directory: `pwd`"

if [ "$SLURM_JOB_NODELIST" ]; then
        #! Create a machine file:
        export NODEFILE=`/usr/bin/generate_pbs_nodefile`
        cat $NODEFILE | uniq > machine.file.$JOBID
        echo -e "\nNodes allocated:\n================"
        echo `cat machine.file.$JOBID | sed -e 's/\..*$//g'`
fi

echo -e "\nExecuting command:\n==================\n$CMD\n"

eval $CMD
""".format(  # noqa
        cmd=cmd,
        cpus_per_task=cpus_per_task,
        duration=strfdelta(duration, SLURM_TIMEDELTA_FMT),
        email=email,
        jobname=jobname,
        memory_mb=memory_mb,
        modules=" ".join(modules),
        partition_cmd=partition_cmd,
        project=project,
        qos=qos,
        tasks_per_node=tasks_per_node,
    )
    cmdargs = ["sbatch"]
    with pushd(directory):
        p = Popen(cmdargs, stdin=PIPE)
        p.communicate(input=script.encode(encoding))
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly

    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.

    Parameters
    ----------
    a : array or sparse matrix
    b : array or sparse matrix
    dense_output : boolean, default False
        When False, either ``a`` or ``b`` being sparse will yield sparse
        output. When True, output will always be an array.

    Returns
    -------
    dot_product : array or sparse matrix
        sparse if ``a`` or ``b`` is sparse and ``dense_output=False``.
    """
    if issparse(a) or issparse(b):
        ret = a * b
        if dense_output and hasattr(ret, "toarray"):
            ret = ret.toarray()
        return ret
    else:
        return np.dot(a, b)
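A minimal usage sketch, assuming scipy and numpy are installed (the function's module already imports issparse and np):

    import numpy as np
    from scipy.sparse import csr_matrix

    a = csr_matrix([[1.0, 0.0], [0.0, 2.0]])
    b = csr_matrix([[0.0, 3.0], [4.0, 0.0]])

    print(safe_sparse_dot(a, b))                     # stays sparse: both inputs are sparse
    print(safe_sparse_dot(a, b, dense_output=True))  # dense ndarray: [[0. 3.] [8. 0.]]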
def lookup_values_from_error_table(scores, err_df):
    """Find the nearest error-table cutoff for each score in 'scores' and
    return the corresponding p-values, s-values, PEPs and q-values."""
    ix = find_nearest_matches(np.float32(err_df.cutoff.values), np.float32(scores))
    return (err_df.pvalue.iloc[ix].values,
            err_df.svalue.iloc[ix].values,
            err_df.pep.iloc[ix].values,
            err_df.qvalue.iloc[ix].values)
def addGaussNoise(self, sigma):
    """
    Add Gaussian noise.

    :param float sigma: sigma, expressed in percent of the diagonal size of the actor.

    :Example:
        .. code-block:: python

            from vtkplotter import Sphere

            Sphere().addGaussNoise(1.0).show()
    """
    sz = self.diagonalSize()
    pts = self.coordinates()
    n = len(pts)
    ns = np.random.randn(n, 3) * sigma * sz / 100
    vpts = vtk.vtkPoints()
    vpts.SetNumberOfPoints(n)
    vpts.SetData(numpy_to_vtk(pts + ns, deep=True))
    self.poly.SetPoints(vpts)
    self.poly.GetPoints().Modified()
    return self
def get_dump(self, fmap='', with_stats=False):
    """
    Return the model dump as a list of strings.
    """
    length = ctypes.c_ulong()
    sarr = ctypes.POINTER(ctypes.c_char_p)()
    if self.feature_names is not None and fmap == '':
        flen = int(len(self.feature_names))
        fname = from_pystr_to_cstr(self.feature_names)

        if self.feature_types is None:
            # use quantitative as default
            # {'q': quantitative, 'i': indicator}
            ftype = from_pystr_to_cstr(['q'] * flen)
        else:
            ftype = from_pystr_to_cstr(self.feature_types)
        _check_call(_LIB.XGBoosterDumpModelWithFeatures(self.handle,
                                                        flen,
                                                        fname,
                                                        ftype,
                                                        int(with_stats),
                                                        ctypes.byref(length),
                                                        ctypes.byref(sarr)))
    else:
        if fmap != '' and not os.path.exists(fmap):
            raise ValueError("No such file: {0}".format(fmap))
        _check_call(_LIB.XGBoosterDumpModel(self.handle,
                                            c_str(fmap),
                                            int(with_stats),
                                            ctypes.byref(length),
                                            ctypes.byref(sarr)))
    res = from_cstr_to_pystr(sarr, length)
    return res
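This is the internal side of xgboost's Booster.get_dump; a usage sketch through the public API (assuming xgboost is installed), which prints one text dump per tree:

    import numpy as np
    import xgboost as xgb

    X = np.random.rand(100, 4)
    y = np.random.randint(0, 2, size=100)
    bst = xgb.train({'max_depth': 2, 'objective': 'binary:logistic'},
                    xgb.DMatrix(X, label=y), num_boost_round=2)

    for i, tree in enumerate(bst.get_dump(with_stats=True)):
        print('booster[{}]:\n{}'.format(i, tree))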
def to_grey(self, on: bool = False):
    """
    Change the LED to grey.

    :param on: Unused, here for API consistency with the other states
    :return: None
    """
    self._on = False
    self._load_new(led_grey)
def fit(self, blocks, y=None):
    """
    Fit a k-means clustering model using an ordered sequence of blocks.
    """
    self.kmeans.fit(make_weninger_features(blocks))
    # set the cluster center closest to the origin to exactly (0.0, 0.0)
    self.kmeans.cluster_centers_.sort(axis=0)
    self.kmeans.cluster_centers_[0, :] = np.zeros(2)
    return self
def ParseMultiple(self, stats, file_objs, kb):
    """Process files together."""
    fileset = {stat.pathspec.path: obj for stat, obj in zip(stats, file_objs)}
    return self.ParseFileset(fileset)
def zero_datetime(dt, tz=None):
    """
    Return the given datetime with hour/minutes/seconds/ms zeroed and the
    timezone coerced to the given ``tz`` (or UTC if none is given).
    """
    if tz is None:
        tz = get_current_timezone()
    # NOTE: `tz` is computed but never applied below; the documented timezone
    # coercion appears to be missing from this implementation.
    return coerce_naive(dt).replace(hour=0, minute=0, second=0, microsecond=0)
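The truncation itself is plain datetime.replace; a standalone illustration (get_current_timezone and coerce_naive are Django-style helpers from the surrounding module):

    from datetime import datetime

    dt = datetime(2024, 5, 17, 13, 45, 30, 123456)
    print(dt.replace(hour=0, minute=0, second=0, microsecond=0))
    # 2024-05-17 00:00:00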
def pandas(self):
    """Return a Pandas dataframe."""
    if self._pandas is None:
        self._pandas = pd.DataFrame().from_records(self.list_of_dicts)
    return self._pandas
def hs_join(ls_hsi, hso):
    """ [Many-to-one] Synchronizes (joins) a list of input handshake interfaces:
    output is ready when ALL inputs are ready

        ls_hsi - (i) list of input handshake tuples (ready, valid)
        hso    - (o) an output handshake tuple (ready, valid)
    """
    N = len(ls_hsi)
    ls_hsi_rdy, ls_hsi_vld = zip(*ls_hsi)
    ls_hsi_rdy, ls_hsi_vld = list(ls_hsi_rdy), list(ls_hsi_vld)
    hso_rdy, hso_vld = hso

    @always_comb
    def _hsjoin():
        all_vld = True
        for i in range(N):
            all_vld = all_vld and ls_hsi_vld[i]
        hso_vld.next = all_vld
        for i in range(N):
            ls_hsi_rdy[i].next = all_vld and hso_rdy

    return _hsjoin
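The @always_comb decorator suggests MyHDL; a minimal instantiation sketch under that assumption:

    from myhdl import Signal

    # two input interfaces and one output interface, each a (ready, valid) pair
    ls_hsi = [(Signal(bool(0)), Signal(bool(0))) for _ in range(2)]
    hso = (Signal(bool(0)), Signal(bool(0)))

    join_logic = hs_join(ls_hsi, hso)   # combinational block, ready for simulation or conversion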
def _add_styles(self, add_paragraph=True, add_text=True):
    """Adds paragraph and span wrappers if necessary based on style"""
    p_styles = self.get_para_styles()
    t_styles = self.get_span_styles()
    for s in self.slide.pending_styles:
        if isinstance(s, ParagraphStyle):
            p_styles.update(s.styles)
        elif isinstance(s, TextStyle):
            t_styles.update(s.styles)
    para = ParagraphStyle(**p_styles)
    if add_paragraph or self.slide.paragraph_attribs:
        p_attrib = {ns("text", "style-name"): para.name}
        p_attrib.update(self.slide.paragraph_attribs)
        if not self._in_tag(ns("text", "p"), p_attrib):
            self.parent_of(ns("text", "p"))
            # Create paragraph style first
            self.slide._preso.add_style(para)
            self.add_node("text:p", attrib=p_attrib)
    # span is only necessary if style changes
    if add_text and t_styles:
        text = TextStyle(**t_styles)
        children = self.cur_node.getchildren()
        if children:
            # if we already are using this text style, reuse the last one
            last = children[-1]
            if (last.tag == ns("text", "span")
                    and last.attrib[ns("text", "style-name")] == text.name
                    and last.tail is None):
                # if we have a tail, we can't reuse
                self.cur_node = children[-1]
                return
        if not self._is_node(ns("text", "span"),
                             {ns("text", "style-name"): text.name}):
            # Create text style
            self.slide._preso.add_style(text)
            self.add_node("text:span", attrib={"text:style-name": text.name})
def make_data(n, m):
    """make_data: prepare matrix of m times n random processing times"""
    p = {}
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            p[i, j] = random.randint(1, 10)
    return p
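Usage is straightforward; seeding the random module makes the generated processing times reproducible:

    import random

    random.seed(42)
    p = make_data(n=3, m=2)     # 2 machines x 3 jobs
    print(p[1, 1], p[2, 3])     # processing time of job 1 on machine 1, job 3 on machine 2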
def basic_stats(G, area=None, clean_intersects=False, tolerance=15,
                circuity_dist='gc'):
    """
    Calculate basic descriptive metric and topological stats for a graph.

    For an unprojected lat-lng graph, tolerance and graph units should be in
    degrees, and circuity_dist should be 'gc'. For a projected graph,
    tolerance and graph units should be in meters (or similar) and
    circuity_dist should be 'euclidean'.

    Parameters
    ----------
    G : networkx multidigraph
    area : numeric
        the area covered by the street network, in square meters (typically
        land area); if none, will skip all density-based metrics
    clean_intersects : bool
        if True, calculate clean intersections count (and density, if area is
        provided)
    tolerance : numeric
        tolerance value passed along if clean_intersects=True, see
        clean_intersections() function documentation for details and usage
    circuity_dist : str
        'gc' or 'euclidean', how to calculate straight-line distances for
        circuity measurement; use former for lat-lng networks and latter for
        projected networks

    Returns
    -------
    stats : dict
        dictionary of network measures containing the following elements (some
        keys may not be present, based on the arguments passed into the
        function):

          - n = number of nodes in the graph
          - m = number of edges in the graph
          - k_avg = average node degree of the graph
          - intersection_count = number of intersections in graph, that is,
            nodes with >1 street emanating from them
          - streets_per_node_avg = how many streets (edges in the undirected
            representation of the graph) emanate from each node (ie,
            intersection or dead-end) on average (mean)
          - streets_per_node_counts = dict, with keys of number of streets
            emanating from the node, and values of number of nodes with this
            count
          - streets_per_node_proportion = dict, same as previous, but as a
            proportion of the total, rather than counts
          - edge_length_total = sum of all edge lengths in the graph, in
            meters
          - edge_length_avg = mean edge length in the graph, in meters
          - street_length_total = sum of all edges in the undirected
            representation of the graph
          - street_length_avg = mean edge length in the undirected
            representation of the graph, in meters
          - street_segments_count = number of edges in the undirected
            representation of the graph
          - node_density_km = n divided by area in square kilometers
          - intersection_density_km = intersection_count divided by area in
            square kilometers
          - edge_density_km = edge_length_total divided by area in square
            kilometers
          - street_density_km = street_length_total divided by area in square
            kilometers
          - circuity_avg = edge_length_total divided by the sum of the great
            circle distances between the nodes of each edge
          - self_loop_proportion = proportion of edges that have a single node
            as its two endpoints (ie, the edge links nodes u and v, and u==v)
          - clean_intersection_count = number of intersections in street
            network, merging complex ones into single points
          - clean_intersection_density_km = clean_intersection_count divided
            by area in square kilometers
    """
    sq_m_in_sq_km = 1e6  # there are 1 million sq meters in 1 sq km
    G_undirected = None

    # calculate the number of nodes, n, and the number of edges, m, in the graph
    n = len(list(G.nodes()))
    m = len(list(G.edges()))

    # calculate the average degree of the graph
    k_avg = 2 * m / n

    if 'streets_per_node' in G.graph:
        # get the degrees saved as a graph attribute (from an undirected
        # representation of the graph). this is not the degree of the nodes in
        # the directed graph, but rather represents the number of streets
        # (unidirected edges) emanating from each node. see
        # count_streets_per_node function.
        streets_per_node = G.graph['streets_per_node']
    else:
        # count how many street segments emanate from each node in this graph
        streets_per_node = count_streets_per_node(G)

    # count number of intersections in graph, as nodes with >1 street emanating
    # from them
    node_ids = set(G.nodes())
    intersection_count = len([True for node, count in streets_per_node.items()
                              if (count > 1) and (node in node_ids)])

    # calculate the average number of streets (unidirected edges) incident to
    # each node
    streets_per_node_avg = sum(streets_per_node.values()) / n

    # create a dict where key = number of streets (unidirected edges) incident
    # to each node, and value = how many nodes are of this number in the graph
    streets_per_node_counts = {num: list(streets_per_node.values()).count(num)
                               for num in range(max(streets_per_node.values()) + 1)}

    # degree proportions: dict where key = each degree and value = what
    # proportion of nodes are of this degree in the graph
    streets_per_node_proportion = {num: count / n
                                   for num, count in streets_per_node_counts.items()}

    # calculate the total and average edge lengths
    edge_length_total = sum([d['length'] for u, v, d in G.edges(data=True)])
    edge_length_avg = edge_length_total / m

    # calculate the total and average street segment lengths (so, edges without
    # double-counting two-way streets)
    if G_undirected is None:
        G_undirected = G.to_undirected(reciprocal=False)
    street_length_total = sum([d['length'] for u, v, d in G_undirected.edges(data=True)])
    street_segments_count = len(list(G_undirected.edges(keys=True)))
    street_length_avg = street_length_total / street_segments_count

    # calculate clean intersection counts
    if clean_intersects:
        clean_intersection_points = clean_intersections(G, tolerance=tolerance,
                                                        dead_ends=False)
        clean_intersection_count = len(clean_intersection_points)
    else:
        clean_intersection_count = None

    # we can calculate density metrics only if area is not null
    if area is not None:
        area_km = area / sq_m_in_sq_km

        # calculate node density as nodes per sq km
        node_density_km = n / area_km

        # calculate intersection density as nodes with >1 street emanating from
        # them, per sq km
        intersection_density_km = intersection_count / area_km

        # calculate edge density as linear meters per sq km
        edge_density_km = edge_length_total / area_km

        # calculate street density as linear meters per sq km
        street_density_km = street_length_total / area_km

        if clean_intersects:
            clean_intersection_density_km = clean_intersection_count / area_km
        else:
            clean_intersection_density_km = None
    else:
        # if area is None, then we cannot calculate density
        node_density_km = None
        intersection_density_km = None
        edge_density_km = None
        street_density_km = None
        clean_intersection_density_km = None

    # average circuity: sum of edge lengths divided by sum of straight-line
    # distance between edge endpoints. first load all the edges origin and
    # destination coordinates as a dataframe, then calculate the straight-line
    # distance
    coords = np.array([[G.nodes[u]['y'], G.nodes[u]['x'],
                        G.nodes[v]['y'], G.nodes[v]['x']]
                       for u, v, k in G.edges(keys=True)])
    df_coords = pd.DataFrame(coords, columns=['u_y', 'u_x', 'v_y', 'v_x'])

    if circuity_dist == 'gc':
        gc_distances = great_circle_vec(lat1=df_coords['u_y'],
                                        lng1=df_coords['u_x'],
                                        lat2=df_coords['v_y'],
                                        lng2=df_coords['v_x'])
    elif circuity_dist == 'euclidean':
        gc_distances = euclidean_dist_vec(y1=df_coords['u_y'],
                                          x1=df_coords['u_x'],
                                          y2=df_coords['v_y'],
                                          x2=df_coords['v_x'])
    else:
        raise ValueError('circuity_dist must be "gc" or "euclidean"')

    gc_distances = gc_distances.fillna(value=0)
    try:
        circuity_avg = edge_length_total / gc_distances.sum()
    except ZeroDivisionError:
        circuity_avg = np.nan

    # percent of edges that are self-loops, ie both endpoints are the same node
    self_loops = [True for u, v, k in G.edges(keys=True) if u == v]
    self_loops_count = len(self_loops)
    self_loop_proportion = self_loops_count / m

    # assemble the results
    stats = {'n': n,
             'm': m,
             'k_avg': k_avg,
             'intersection_count': intersection_count,
             'streets_per_node_avg': streets_per_node_avg,
             'streets_per_node_counts': streets_per_node_counts,
             'streets_per_node_proportion': streets_per_node_proportion,
             'edge_length_total': edge_length_total,
             'edge_length_avg': edge_length_avg,
             'street_length_total': street_length_total,
             'street_length_avg': street_length_avg,
             'street_segments_count': street_segments_count,
             'node_density_km': node_density_km,
             'intersection_density_km': intersection_density_km,
             'edge_density_km': edge_density_km,
             'street_density_km': street_density_km,
             'circuity_avg': circuity_avg,
             'self_loop_proportion': self_loop_proportion,
             'clean_intersection_count': clean_intersection_count,
             'clean_intersection_density_km': clean_intersection_density_km}

    # return the results
    return stats
def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):
    """
    Create Bloomberg connection

    Returns:
        (Bloomberg connection, if connection is new)
    """
    if _CON_SYM_ in globals():
        if not isinstance(globals()[_CON_SYM_], pdblp.BCon):
            del globals()[_CON_SYM_]

    if (_CON_SYM_ in globals()) and (not restart):
        con = globals()[_CON_SYM_]
        if getattr(con, '_session').start():
            con.start()
        return con, False
    else:
        con = pdblp.BCon(port=port, timeout=timeout)
        globals()[_CON_SYM_] = con
        con.start()
        return con, True
def stop(self, timeout=None):
    """Stop the thread."""
    logger.debug("docker plugin - Close thread for container {}".format(self._container.name))
    self._stopper.set()
def on_message(self, con, event):
    """Handles message stanzas"""
    msg_type = event.getType()
    nick = event.getFrom().getResource()
    from_jid = event.getFrom().getStripped()
    body = event.getBody()

    if msg_type == 'chat' and body is None:
        return

    logger.debug('msg_type[%s] from[%s] nick[%s] body[%s]' % (msg_type, from_jid, nick, body,))

    # NOTE: relies on Python 2 semantics, where filter() returns a list;
    # under Python 3 this would need list(filter(...)) for len() to work.
    sender = filter(lambda m: m['JID'] == from_jid, self.params['MEMBERS'])

    should_process = msg_type in ['message', 'chat', None] and body is not None and len(sender) == 1
    if not should_process:
        return
    sender = sender[0]

    try:
        for p in self.command_patterns:
            reg, cmd = p
            m = reg.match(body)
            if m:
                logger.info('pattern matched for bot command \'%s\'' % (cmd,))
                function = getattr(self, str(cmd), None)
                if function:
                    return function(sender, body, m)

        words = body.split(' ')
        cmd, args = words[0], words[1:]
        if cmd and cmd[0] == '/':
            cmd = cmd[1:]
            command_handler = getattr(self, 'do_' + cmd, None)
            if command_handler:
                return command_handler(sender, body, args)

        broadcast_body = '[%s] %s' % (sender['NICK'], body,)
        return self.broadcast(broadcast_body, exclude=(sender,))
    except:
        logger.exception('Error handling message [%s] from [%s]' % (body, sender['JID']))
def GetStructFormatString(self):
    """Retrieves the Python struct format string.

    Returns:
      str: format string as used by Python struct or None if format string
          cannot be determined.
    """
    if not self._element_data_type_map:
        return None

    number_of_elements = None
    if self._data_type_definition.elements_data_size:
        element_byte_size = self._element_data_type_definition.GetByteSize()
        if element_byte_size is None:
            return None

        number_of_elements, _ = divmod(
            self._data_type_definition.elements_data_size, element_byte_size)

    elif self._data_type_definition.number_of_elements:
        number_of_elements = self._data_type_definition.number_of_elements

    format_string = self._element_data_type_map.GetStructFormatString()
    if not number_of_elements or not format_string:
        return None

    return '{0:d}{1:s}'.format(number_of_elements, format_string)
def create_schema(self, model, waiting_models):
    """
    Creates search schemas.

    Args:
        model: model to execute
        waiting_models: if Riak can't return a response immediately, the
            model is queued. After the first pass, the method is re-run with
            the waiting models to ensure every given model is processed.

    Returns:

    """
    bucket_name = model._get_bucket_name()
    index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
    ins = model(fake_context)
    fields = self.get_schema_fields(ins._collect_index_fields())
    new_schema = self.compile_schema(fields)
    schema = get_schema_from_solr(index_name)
    if not (schema == new_schema):
        try:
            client.create_search_schema(index_name, new_schema)
            print("+ %s (%s) search schema is created." % (model.__name__, index_name))
        except:
            print("+ %s (%s) search schema checking operation is taken to queue." % (
                model.__name__, index_name))
            waiting_models.append(model)
def generate_local_url(self, js_name):
    """
    Generate the local url for a js file.

    :param js_name:
    :return:
    """
    host = self._settings['local_host'].format(**self._host_context).rstrip('/')
    return '{}/{}.js'.format(host, js_name)
def toxml(self):
    """
    Exports this object into a LEMS XML object
    """
    return '<ComponentRequirement name="{0}"'.format(self.name) + \
           (' description = "{0}"'.format(self.description) if self.description else '') + \
           '/>'
def run(self):
    """
    Index the document. Since ids are predictable, we won't index anything
    twice.
    """
    with self.input().open() as handle:
        body = json.loads(handle.read())

    es = elasticsearch.Elasticsearch()
    id = body.get('_id')
    es.index(index='frontpage', doc_type='html', id=id, body=body)
def _add_relations(self, relations):
    """Add all of the relations for the services."""
    for k, v in six.iteritems(relations):
        self.d.relate(k, v)
def process_streamers(self):
    """Check if any streamers should be handed to the stream manager."""
    # Check for any triggered streamers and pass them to stream manager
    in_progress = self._stream_manager.in_progress()
    triggered = self.graph.check_streamers(blacklist=in_progress)

    for streamer in triggered:
        self._stream_manager.process_streamer(streamer, callback=self._handle_streamer_finished)
def setbridgeprio(self, prio):
    """Set bridge priority value."""
    _runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
              "Could not set bridge priority in %s." % self.name)
def generate_tensor_filename(self, field_name, file_num, compressed=True):
    """Generate a filename for a tensor."""
    file_ext = TENSOR_EXT
    if compressed:
        file_ext = COMPRESSED_TENSOR_EXT
    filename = os.path.join(self.filename, 'tensors',
                            '%s_%05d%s' % (field_name, file_num, file_ext))
    return filename
def start(self, use_atexit=True):
    '''Start the executable.

    Args:
        use_atexit (bool): If True, the process will automatically be
            terminated at exit.
    '''
    assert not self._process
    _logger.debug('Starting process %s', self._proc_args)

    process_future = asyncio.create_subprocess_exec(
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        *self._proc_args
    )
    self._process = yield from process_future

    # asyncio.async() is the legacy (pre-3.4.4) spelling of
    # asyncio.ensure_future(); this code targets old Python versions.
    self._stderr_reader = asyncio.async(self._read_stderr())
    self._stdout_reader = asyncio.async(self._read_stdout())

    if use_atexit:
        atexit.register(self.close)
def _item_keys_match(crypto_config, item1, item2):
    # type: (CryptoConfig, Dict, Dict) -> bool
    """Determines whether the values in the primary and sort keys (if they exist) are the same

    :param CryptoConfig crypto_config: CryptoConfig used in encrypting the given items
    :param dict item1: The first item to compare
    :param dict item2: The second item to compare
    :return: Bool response, True if the key attributes match
    :rtype: bool
    """
    partition_key_name = crypto_config.encryption_context.partition_key_name
    sort_key_name = crypto_config.encryption_context.sort_key_name

    partition_keys_match = item1[partition_key_name] == item2[partition_key_name]
    if sort_key_name is None:
        return partition_keys_match

    return partition_keys_match and item1[sort_key_name] == item2[sort_key_name]
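A sketch with hypothetical namedtuple stand-ins for the real CryptoConfig/EncryptionContext objects (only the two attributes the function reads are needed):

    from collections import namedtuple

    Context = namedtuple('Context', 'partition_key_name sort_key_name')
    Config = namedtuple('Config', 'encryption_context')

    cfg = Config(Context(partition_key_name='pk', sort_key_name=None))
    print(_item_keys_match(cfg, {'pk': {'S': 'a'}}, {'pk': {'S': 'a'}}))   # True
    print(_item_keys_match(cfg, {'pk': {'S': 'a'}}, {'pk': {'S': 'b'}}))   # False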
def refresh_address_presence(self, address):
    """
    Update synthesized address presence state from cached user presence states.

    Triggers callback (if any) in case the state has changed.

    This method is only provided to cover an edge case in our use of the Matrix
    protocol and should **not** generally be used.
    """
    composite_presence = {
        self._fetch_user_presence(uid)
        for uid in self._address_to_userids[address]
    }

    # Iterate over UserPresence in definition order (most to least online) and
    # pick first matching state
    new_presence = UserPresence.UNKNOWN
    for presence in UserPresence.__members__.values():
        if presence in composite_presence:
            new_presence = presence
            break

    new_address_reachability = USER_PRESENCE_TO_ADDRESS_REACHABILITY[new_presence]

    if new_address_reachability == self._address_to_reachability.get(address):
        # Cached address reachability matches new state, do nothing
        return

    log.debug(
        'Changing address presence state',
        current_user=self._user_id,
        address=to_normalized_address(address),
        prev_state=self._address_to_reachability.get(address),
        state=new_address_reachability,
    )
    self._address_to_reachability[address] = new_address_reachability
    self._address_reachability_changed_callback(address, new_address_reachability)
def close(self):
    """Release libpci resources."""
    if self._access is not None:
        _logger.debug("Cleaning up")
        pci_cleanup(self._access)
        self._access = None
def create_address(cls, address, **kwargs):
    """Create Address

    Create a new Address
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_address(address, async=True)
    >>> result = thread.get()

    :param async bool
    :param Address address: Attributes of address to create (required)
    :return: Address
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_address_with_http_info(address, **kwargs)
    else:
        (data) = cls._create_address_with_http_info(address, **kwargs)
        return data
def _lowfreq_linear_filter(tumor_index, is_paired):
    """Linear classifier for removing low frequency false positives.

    Uses a logistic classifier based on 0.5% tumor only variants from the
    smcounter2 paper:

    https://github.com/bcbio/bcbio_validations/tree/master/somatic-lowfreq

    The classifier uses strand bias (SBF) and read mismatches (NM) and applies
    only for low frequency (<2%) and low depth (<30) variants.
    """
    if is_paired:
        sbf = "FORMAT/SBF[%s]" % tumor_index
        nm = "FORMAT/NM[%s]" % tumor_index
    else:
        sbf = "INFO/SBF"
        nm = "INFO/NM"
    cmd = ("""bcftools filter --soft-filter 'LowFreqBias' --mode '+' """
           """-e 'FORMAT/AF[{tumor_index}] < 0.02 && FORMAT/VD[{tumor_index}] < 30 """
           """&& {sbf} < 0.1 && {nm} >= 2.0'""")
    return cmd.format(**locals())
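Since the function just renders a bcftools filter expression, printing it shows exactly what would be run:

    print(_lowfreq_linear_filter(tumor_index=1, is_paired=True))
    # bcftools filter --soft-filter 'LowFreqBias' --mode '+' -e 'FORMAT/AF[1] < 0.02
    # && FORMAT/VD[1] < 30 && FORMAT/SBF[1] < 0.1 && FORMAT/NM[1] >= 2.0'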
def selected(self, new):
    """Set selected from list or instance of object or name.

    Over-writes existing selection
    """
    def preprocess(item):
        if isinstance(item, str):
            return self.options[item]
        return item

    items = coerce_to_list(new, preprocess)
    self.widget.value = items
def get_first_recipient_with_address(self):
    """Returns the first recipient found with a non blank address

    :return: First Recipient
    :rtype: Recipient
    """
    recipients_with_address = [recipient for recipient in self._recipients
                               if recipient.address]
    if recipients_with_address:
        return recipients_with_address[0]
    else:
        return None
def safe_listdir(path):
    """
    Attempt to list contents of path, but suppress some exceptions.
    """
    try:
        return os.listdir(path)
    except (PermissionError, NotADirectoryError):
        pass
    except OSError as e:
        # Ignore the directory if does not exist, not a directory or
        # permission denied
        ignorable = (
            e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
            # Python 2 on Windows needs to be handled this way :(
            or getattr(e, "winerror", None) == 267
        )
        if not ignorable:
            raise
    return ()
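A quick check of the fallback behaviour (os and errno are the function's only dependencies):

    import errno
    import os

    print(safe_listdir('/definitely/not/there'))   # () instead of an exception
    print(safe_listdir('.')[:3])                   # normal listing otherwise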
def on_canvas_slave__electrode_pair_selected(self, slave, data):
    '''
    Process pair of selected electrodes.

    For now, this consists of finding the shortest path between the two
    electrodes and appending it to the list of droplet routes for the
    current step.

    Note that the droplet routes for a step are stored in a frame/table in
    the `DmfDeviceController` step options.

    .. versionchanged:: 0.11
        Clear any temporary routes (drawn while mouse is down) from routes
        list.

    .. versionchanged:: 0.11.3
        Clear temporary routes by setting ``df_routes`` property of
        :attr:`canvas_slave`.
    '''
    import networkx as nx

    source_id = data['source_id']
    target_id = data['target_id']

    if self.canvas_slave.device is None or self.plugin is None:
        return

    # XXX Negative `route_i` corresponds to temporary route being drawn.
    # Since electrode pair selection terminates route drawing, clear any
    # rows corresponding to negative `route_i` values from the routes table.
    slave.df_routes = slave.df_routes.loc[slave.df_routes.route_i >= 0].copy()

    try:
        shortest_path = self.canvas_slave.device.find_path(source_id, target_id)
        self.plugin.execute_async('droplet_planning_plugin', 'add_route',
                                  drop_route=shortest_path)
    except nx.NetworkXNoPath:
        logger.error('No path found between %s and %s.', source_id, target_id)
def det_4x3(a, b, c, d):
    '''
    det_4x3(a,b,c,d) yields the determinant of the matrix formed from the
    given rows, which may have more than 1 dimension, in which case the later
    dimensions are multiplied and added point-wise. The points must be 3D
    points; the matrix is given a fourth column of 1s and the resulting
    determinant is of this matrix.
    '''
    # I just solved this in Mathematica, copy-pasted, and replaced the string '] m' with ']*m':
    # Mathematica code: Det@Table[If[j == 3, 1, i[j]], {i, {a, b, c, d}}, {j, 0, 3}]
    return (a[1]*b[2]*c[0] + a[2]*b[0]*c[1] - a[2]*b[1]*c[0] - a[0]*b[2]*c[1] -
            a[1]*b[0]*c[2] + a[0]*b[1]*c[2] + a[2]*b[1]*d[0] - a[1]*b[2]*d[0] -
            a[2]*c[1]*d[0] + b[2]*c[1]*d[0] + a[1]*c[2]*d[0] - b[1]*c[2]*d[0] -
            a[2]*b[0]*d[1] + a[0]*b[2]*d[1] + a[2]*c[0]*d[1] - b[2]*c[0]*d[1] -
            a[0]*c[2]*d[1] + b[0]*c[2]*d[1] + a[1]*b[0]*d[2] - a[0]*b[1]*d[2] -
            a[1]*c[0]*d[2] + b[1]*c[0]*d[2] + a[0]*c[1]*d[2] - b[0]*c[1]*d[2])
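A quick numerical cross-check against numpy's general determinant, using the corners of the unit simplex:

    import numpy as np

    a, b, c, d = (np.array(p) for p in [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)])
    print(det_4x3(a, b, c, d))                                   # -1

    M = np.column_stack([np.vstack([a, b, c, d]), np.ones(4)])   # append the column of 1s
    print(np.linalg.det(M))                                      # -1.0 (matches)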
def update(self, read, write, manage):
    """
    Update the SyncListPermissionInstance

    :param bool read: Read access.
    :param bool write: Write access.
    :param bool manage: Manage access.

    :returns: Updated SyncListPermissionInstance
    :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
    """
    data = values.of({'Read': read, 'Write': write, 'Manage': manage, })

    payload = self._version.update(
        'POST',
        self._uri,
        data=data,
    )

    return SyncListPermissionInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        list_sid=self._solution['list_sid'],
        identity=self._solution['identity'],
    )
def get_vlan_brief_input_request_type_get_next_request_last_rcvd_vlan_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vlan_brief = ET.Element("get_vlan_brief")
    config = get_vlan_brief
    input = ET.SubElement(get_vlan_brief, "input")
    request_type = ET.SubElement(input, "request-type")
    get_next_request = ET.SubElement(request_type, "get-next-request")
    last_rcvd_vlan_id = ET.SubElement(get_next_request, "last-rcvd-vlan-id")
    last_rcvd_vlan_id.text = kwargs.pop('last_rcvd_vlan_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_sparse_matrix_keys(session, key_table):
    """Return a list of keys for the sparse matrix."""
    return session.query(key_table).order_by(key_table.name).all()
def get_most_severe_consequence(transcripts):
    """Get the most severe consequence

    Go through all transcripts and get the most severe consequence

    Args:
        transcripts (list): A list of transcripts to evaluate

    Returns:
        most_severe_consequence (str): The most severe consequence
    """
    most_severe_consequence = None
    most_severe_score = None

    for transcript in transcripts:
        for consequence in transcript['consequence'].split('&'):
            logger.debug("Checking severity score for consequence: {0}".format(consequence))
            severity_score = SEVERITY_DICT.get(consequence)
            logger.debug("Severity score found: {0}".format(severity_score))
            # explicit None checks: a severity score of 0 is valid and must
            # not be treated as falsy
            if severity_score is not None:
                if most_severe_score is None or severity_score < most_severe_score:
                    most_severe_consequence = consequence
                    most_severe_score = severity_score

    return most_severe_consequence
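A small sketch with a hypothetical SEVERITY_DICT subset (lower score = more severe), assuming the module-level logger is configured:

    import logging

    logger = logging.getLogger(__name__)
    SEVERITY_DICT = {'transcript_ablation': 0, 'missense_variant': 10,
                     'synonymous_variant': 20}  # hypothetical subset

    transcripts = [
        {'consequence': 'missense_variant&synonymous_variant'},
        {'consequence': 'transcript_ablation'},
    ]
    print(get_most_severe_consequence(transcripts))   # transcript_ablation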
def magic_timeit(setup, stmt, ncalls=None, repeat=3, force_ms=False):
    """Time execution of a Python statement or expression

    Usage:\\
      %timeit [-n<N> -r<R> [-t|-c]] statement

    Time execution of a Python statement or expression using the timeit
    module.

    Options:
    -n<N>: execute the given statement <N> times in a loop. If this value
    is not given, a fitting value is chosen.

    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 3

    -t: use time.time to measure the time, which is the default on Unix.
    This function measures wall time.

    -c: use time.clock to measure the time, which is the default on
    Windows and measures wall time. On Unix, resource.getrusage is used
    instead and returns the CPU user time.

    -p<P>: use a precision of <P> digits to display the timing result.
    Default: 3

    Examples:

      In [1]: %timeit pass
      10000000 loops, best of 3: 53.3 ns per loop

      In [2]: u = None

      In [3]: %timeit u is None
      10000000 loops, best of 3: 184 ns per loop

      In [4]: %timeit -r 4 u == None
      1000000 loops, best of 4: 242 ns per loop

      In [5]: import time

      In [6]: %timeit -n1 time.sleep(2)
      1 loops, best of 3: 2 s per loop

    The times reported by %timeit will be slightly higher than those
    reported by the timeit.py script when variables are accessed. This is
    due to the fact that %timeit executes the statement in the namespace
    of the shell, compared with timeit.py, which uses a single setup
    statement to import function or create variables. Generally, the bias
    does not matter as long as results from timeit.py are not mixed with
    those from %timeit."""
    import timeit
    import math

    units = ["s", "ms", 'us', "ns"]
    scaling = [1, 1e3, 1e6, 1e9]

    timer = timeit.Timer(stmt, setup)

    if ncalls is None:
        # determine number so that 0.2 <= total time < 2.0
        number = 1
        for _ in range(1, 10):
            if timer.timeit(number) >= 0.1:
                break
            number *= 10
    else:
        number = ncalls

    best = min(timer.repeat(repeat, number)) / number

    if force_ms:
        order = 1
    else:
        if best > 0.0 and best < 1000.0:
            order = min(-int(math.floor(math.log10(best)) // 3), 3)
        elif best >= 1000.0:
            order = 0
        else:
            order = 3

    return {'loops': number,
            'repeat': repeat,
            'timing': best * scaling[order],
            'units': units[order]}
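Called directly (outside IPython), it returns a dict rather than printing:

    res = magic_timeit(setup='import math', stmt='math.sqrt(2.0)')
    print(res)   # e.g. {'loops': 10000000, 'repeat': 3, 'timing': 35.2, 'units': 'ns'}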
def deploy_config(model, initial_instance_count, instance_type, endpoint_name=None, tags=None):
    """Export Airflow deploy config from a SageMaker model

    Args:
        model (sagemaker.model.Model): The SageMaker model to export the Airflow config from.
        initial_instance_count (int): The initial number of instances to run in the
            ``Endpoint`` created from this ``Model``.
        instance_type (str): The EC2 instance type to deploy this Model to. For example,
            'ml.p2.xlarge'.
        endpoint_name (str): The name of the endpoint to create (default: None).
            If not specified, a unique endpoint name will be created.
        tags (list[dict]): List of tags for labeling a training job. For more, see
            https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.

    Returns:
        dict: Deploy config that can be directly used by SageMakerEndpointOperator in Airflow.
    """
    model_base_config = model_config(instance_type, model)

    production_variant = sagemaker.production_variant(model.name, instance_type,
                                                      initial_instance_count)
    name = model.name
    config_options = {'EndpointConfigName': name, 'ProductionVariants': [production_variant]}
    if tags is not None:
        config_options['Tags'] = tags

    endpoint_name = endpoint_name or name
    endpoint_base_config = {
        'EndpointName': endpoint_name,
        'EndpointConfigName': name
    }

    config = {
        'Model': model_base_config,
        'EndpointConfig': config_options,
        'Endpoint': endpoint_base_config
    }

    # if there is s3 operations needed for model, move it to root level of config
    s3_operations = model_base_config.pop('S3Operations', None)
    if s3_operations is not None:
        config['S3Operations'] = s3_operations

    return config
def consolidate(self, args):
    """
    Consolidate the provided arguments.

    If the provided arguments have matching options, this performs a type
    conversion. For any option that has a default value and is not present
    in the provided arguments, the default value is added.

    Args:
        args (dict): A dictionary of the provided arguments.

    Returns:
        dict: A dictionary with the type converted and with default options
            enriched arguments.
    """
    result = dict(args)
    for opt in self:
        if opt.name in result:
            result[opt.name] = opt.convert(result[opt.name])
        else:
            if opt.default is not None:
                result[opt.name] = opt.convert(opt.default)
    return result
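A hypothetical minimal harness to exercise consolidate(); the real container class iterates over Option-like objects exposing name, convert, and default:

    class Option:
        def __init__(self, name, convert=str, default=None):
            self.name, self.convert, self.default = name, convert, default

    class OptionList(list):
        consolidate = consolidate   # reuse the function above as a method

    opts = OptionList([Option('port', int, '8080'), Option('host', str, 'localhost')])
    print(opts.consolidate({'port': '9000'}))   # {'port': 9000, 'host': 'localhost'}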
async def close(self) -> None:
    """
    Explicit exit. Closes pool.

    For use when keeping pool open across multiple calls.
    """
    LOGGER.debug('NodePool.close >>>')

    if not self.handle:
        LOGGER.warning('Abstaining from closing pool %s: already closed', self.name)
    else:
        await pool.close_pool_ledger(self.handle)
    self._handle = None

    LOGGER.debug('NodePool.close <<<')
def entry_point() -> None:
    """**cxflow** entry point."""
    # make sure the path contains the current working directory
    sys.path.insert(0, os.getcwd())

    parser = get_cxflow_arg_parser(True)

    # parse CLI arguments
    known_args, unknown_args = parser.parse_known_args()

    # show help if no subcommand was specified.
    if not hasattr(known_args, 'subcommand'):
        parser.print_help()
        quit(1)

    # set up global logger
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG if known_args.verbose else logging.INFO)
    logger.handlers = []  # remove default handlers

    # set up STDERR handler
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
    logger.addHandler(stderr_handler)

    if known_args.subcommand == 'train':
        train(config_path=known_args.config_file,
              cl_arguments=unknown_args,
              output_root=known_args.output_root)

    elif known_args.subcommand == 'resume':
        resume(config_path=known_args.config_path,
               restore_from=known_args.restore_from,
               cl_arguments=unknown_args,
               output_root=known_args.output_root)

    elif known_args.subcommand == 'predict':
        logging.warning('Predict command is deprecated and will be removed, use ``cxflow eval predict ...`` instead')
        predict(config_path=known_args.config_path,
                restore_from=known_args.restore_from,
                cl_arguments=unknown_args,
                output_root=known_args.output_root)

    elif known_args.subcommand == 'eval':
        evaluate(model_path=known_args.model_path,
                 stream_name=known_args.stream_name,
                 config_path=known_args.config,
                 cl_arguments=unknown_args,
                 output_root=known_args.output_root)

    elif known_args.subcommand == 'dataset':
        invoke_dataset_method(config_path=known_args.config_file,
                              method_name=known_args.method,
                              cl_arguments=unknown_args,
                              output_root=known_args.output_root)

    elif known_args.subcommand == 'gridsearch':
        grid_search(script=known_args.script, params=known_args.params,
                    dry_run=known_args.dry_run)

    elif known_args.subcommand == 'ls':
        list_train_dirs(known_args.dir, known_args.recursive, known_args.all,
                        known_args.long, known_args.verbose)

    elif known_args.subcommand == 'prune':
        prune_train_dirs(known_args.dir, known_args.epochs, known_args.subdirs)
def parse_reports(self):
    """ Find Picard HsMetrics reports and parse their data """

    # Set up vars
    self.picard_HsMetrics_data = dict()

    # Go through logs and find Metrics
    for f in self.find_log_files('picard/hsmetrics', filehandles=True):
        parsed_data = dict()
        s_name = None
        keys = None
        commadecimal = None
        for l in f['f']:
            # New log starting
            # NOTE: `and` binds tighter than `or`, so this reads as
            # CalculateHsMetrics OR (CollectHsMetrics AND INPUT)
            if 'CalculateHsMetrics' in l or 'CollectHsMetrics' in l and 'INPUT' in l:
                s_name = None
                keys = None

                # Pull sample name from input
                fn_search = re.search(r"INPUT(?:=|\s+)(\[?[^\s]+\]?)", l, flags=re.IGNORECASE)
                if fn_search:
                    s_name = os.path.basename(fn_search.group(1).strip('[]'))
                    s_name = self.clean_s_name(s_name, f['root'])
                    parsed_data[s_name] = dict()

            if s_name is not None:
                if 'HsMetrics' in l and '## METRICS CLASS' in l:
                    keys = f['f'].readline().strip("\n").split("\t")
                elif keys:
                    vals = l.strip("\n").split("\t")
                    if len(vals) == len(keys):
                        j = 'NA'
                        if keys[0] == 'BAIT_SET':
                            j = vals[0]
                        parsed_data[s_name][j] = dict()
                        # Check that we're not using commas for decimal places
                        if commadecimal is None:
                            for i, k in enumerate(keys):
                                if k.startswith('PCT_'):
                                    if ',' in vals[i]:
                                        commadecimal = True
                                    else:
                                        commadecimal = False
                        for i, k in enumerate(keys):
                            try:
                                if commadecimal:
                                    vals[i] = vals[i].replace('.', '')
                                    vals[i] = vals[i].replace(',', '.')
                                parsed_data[s_name][j][k] = float(vals[i])
                            except ValueError:
                                parsed_data[s_name][j][k] = vals[i]
                    else:
                        s_name = None
                        keys = None

        # Remove empty dictionaries
        for s_name in list(parsed_data.keys()):
            for j in parsed_data[s_name].keys():
                if len(parsed_data[s_name][j]) == 0:
                    parsed_data[s_name].pop(j, None)
            if len(parsed_data[s_name]) == 0:
                parsed_data.pop(s_name, None)

        # Manipulate sample names if multiple baits found
        for s_name in parsed_data.keys():
            for j in parsed_data[s_name].keys():
                this_s_name = s_name
                if len(parsed_data[s_name]) > 1:
                    this_s_name = "{}: {}".format(s_name, j)
                if this_s_name in self.picard_HsMetrics_data:
                    log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], this_s_name))
                self.add_data_source(f, this_s_name, section='HsMetrics')
                self.picard_HsMetrics_data[this_s_name] = parsed_data[s_name][j]

    # Filter to strip out ignored sample names
    self.picard_HsMetrics_data = self.ignore_samples(self.picard_HsMetrics_data)

    if len(self.picard_HsMetrics_data) > 0:

        # Write parsed data to a file
        self.write_data_file(self.picard_HsMetrics_data, 'multiqc_picard_HsMetrics')

        # Add to general stats table
        # Swap question marks with -1
        data = self.picard_HsMetrics_data
        for s_name in data:
            if data[s_name]['FOLD_ENRICHMENT'] == '?':
                data[s_name]['FOLD_ENRICHMENT'] = -1

        self.general_stats_headers['FOLD_ENRICHMENT'] = {
            'title': 'Fold Enrichment',
            'min': 0,
            'format': '{:,.0f}',
            'scale': 'Blues',
        }
        try:
            covs = config.picard_config['general_stats_target_coverage']
            assert type(covs) == list
            assert len(covs) > 0
            covs = [str(i) for i in covs]
            log.debug("Custom Picard coverage thresholds: {}".format(", ".join([i for i in covs])))
        except (AttributeError, TypeError, AssertionError):
            covs = ['30']
        for c in covs:
            self.general_stats_headers['PCT_TARGET_BASES_{}X'.format(c)] = {
                'id': 'picard_target_bases_{}X'.format(c),
                'title': 'Target Bases {}X'.format(c),
                'description': 'Percent of target bases with coverage &ge; {}X'.format(c),
                'max': 100,
                'min': 0,
                'suffix': '%',
                'format': '{:,.0f}',
                'scale': 'RdYlGn',
                'modify': lambda x: self.multiply_hundred(x)
            }
        for s_name in data:
            if s_name not in self.general_stats_data:
                self.general_stats_data[s_name] = dict()
            self.general_stats_data[s_name].update(data[s_name])

        data_table = _clean_table(data)
        self.add_section(
            name='HSMetrics',
            anchor='picard_hsmetrics',
            plot=table.plot(data_table, _get_headers(data_table))
        )
        tbases = _add_target_bases(data)
        self.add_section(
            name=tbases['name'],
            anchor=tbases['anchor'],
            description=tbases['description'],
            plot=tbases['plot']
        )
        hs_pen = _add_hs_penalty(data)
        if hs_pen is not None:
            self.add_section(
                name=hs_pen['name'],
                anchor=hs_pen['anchor'],
                description=hs_pen['description'],
                plot=hs_pen['plot']
            )

    # Return the number of detected samples to the parent module
    return len(self.picard_HsMetrics_data)
def action(self):
    """This class overrides this method"""
    self.return_value = self.function(*self.args, **self.kwargs)
def add_unique(self, attr, item):
    """
    Append the given object to the end of the array field, but only if it is
    not already contained in the array.

    :param attr: the field name
    :param item: the object to append
    :return: the current object
    """
    return self.set(attr, operation.AddUnique([item]))
Append the given object to the end of the array field, but only if it is not already contained in the array. :param attr: the field name :param item: the object to append :return: the current object
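A minimal standalone sketch of the AddUnique semantics above, using plain Python lists rather than the real leancloud operation object:

# Standalone sketch of AddUnique semantics (not the real leancloud SDK):
def add_unique(array, items):
    """Append each item only if it is not already in the array."""
    for item in items:
        if item not in array:
            array.append(item)
    return array

tags = ['home']
add_unique(tags, ['urgent'])
add_unique(tags, ['urgent'])  # second call is a no-op
print(tags)  # ['home', 'urgent'] -- duplicates are not added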
def Call(method, url, payload, silent=False, hide_errors=None, session=None, recursion_cnt=0, debug=False):
    """Execute v1 API call.

    :param url: URL paths associated with the API call
    :param payload: dict containing all parameters to submit with POST call
    :param hide_errors: list of API error codes to ignore.  These are not http error codes
                        but returned from the API itself
    :param recursion_cnt: recursion counter.  This call is recursed if we experience a transient error

    :returns: decoded API json result
    """
    if hide_errors is None:
        hide_errors = []
    if not clc._LOGIN_COOKIE_V1:
        API._Login()
    if session is None:
        session = clc._REQUESTS_SESSION
    session.headers.update({'content-type': 'application/json'})
    r = session.request(method, "%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1, url),
                        params=payload,
                        cookies=clc._LOGIN_COOKIE_V1,
                        verify=API._ResourcePath('clc/cacert.pem'))
    if debug:
        API._DebugRequest(request=requests.Request(method, "%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1, url),
                                                   data=payload, headers=session.headers).prepare(),
                          response=r)
    try:
        if int(r.json()['StatusCode']) == 0:
            if clc.args and not silent:
                clc.v1.output.Status('SUCCESS', 2, '%s' % (r.json()['Message']))
            return r.json()
        elif int(r.json()['StatusCode']) in hide_errors:
            return r.json()
        elif int(r.json()['StatusCode']) == 2:
            # Account is deleted
            #raise clc.v1.AccountDeletedException(r.json()['Message'])
            if clc.args and not silent:
                clc.v1.output.Status('ERROR', 3, '%s' % (r.json()['Message']))
            raise Exception(r.json()['Message'])
        elif int(r.json()['StatusCode']) == 5:
            # Account or datacenter does not exist
            raise clc.v1.AccountDoesNotExistException(r.json()['Message'])
        elif int(r.json()['StatusCode']) == 100 and recursion_cnt < 2:
            # Not logged in - this is a transient failure; retry with a fresh login.
            # Keyword arguments are required here: passing recursion_cnt+1
            # positionally would land in the `session` parameter slot.
            clc._LOGIN_COOKIE_V1 = False
            return clc.v1.API.Call(method, url, payload, silent, hide_errors,
                                   session=session, recursion_cnt=recursion_cnt + 1)
        elif int(r.json()['StatusCode']) == 100:
            # Not logged in - this keeps recurring - bail
            raise clc.v1.AccountLoginException(r.json()['Message'])
        else:
            if clc.args and (not hide_errors or not silent):
                clc.v1.output.Status('ERROR', 3, 'Error calling %s. Status code %s. %s' %
                                     (url, r.json()['StatusCode'], r.json()['Message']))
            raise Exception('Error calling %s. Status code %s. %s' %
                            (url, r.json()['StatusCode'], r.json()['Message']))
    #except (clc.v1.AccountDeletedException, clc.v1.AccountLoginException):
    except clc.CLCException:
        raise
    except Exception:
        if clc.args and (not hide_errors or not silent):
            clc.v1.output.Status('ERROR', 3, 'Error calling %s. Server response %s' % (url, r.status_code))
        raise Exception('Error calling %s. Server response %s' % (url, r.status_code))
Execute v1 API call. :param url: URL paths associated with the API call :param payload: dict containing all parameters to submit with POST call :param hide_errors: list of API error codes to ignore. These are not http error codes but returned from the API itself :param recursion_cnt: recursion counter. This call is recursed if we experience a transient error :returns: decoded API json result
def post(arguments):
    '''Post text to a given twitter account.'''
    twitter = api.API(arguments)
    params = {}
    if arguments.update == '-':
        params['status'] = sys.stdin.read()
    else:
        params['status'] = arguments.update
    if arguments.media_file:
        medias = [twitter.media_upload(m) for m in arguments.media_file]
        params['media_ids'] = [m.media_id for m in medias]
    try:
        logging.getLogger(arguments.screen_name).info('status: %s', params['status'])
        if not arguments.dry_run:
            twitter.update_status(**params)
    except tweepy.TweepError as e:
        # exceptions have no `.message` attribute on Python 3; log the
        # exception object itself instead
        logging.getLogger(arguments.screen_name).error(e)
Post text to a given twitter account.
def close(self, code=3000, message='Go away!'): """ Close session or endpoint connection. @param code: Closing code @param message: Close message """ if self.state != SESSION_STATE.CLOSED: try: self.conn.connectionLost() except Exception as e: log.msg("Failed to call connectionLost(): %r." % e) finally: self.state = SESSION_STATE.CLOSED self.close_reason = (code, message) # Bump stats self.stats.sessionClosed(self.transport_name) # If we have active handler, notify that session was closed if self.handler is not None: self.handler.session_closed()
Close session or endpoint connection. @param code: Closing code @param message: Close message
def tweets_for(query_type, args, per_user=None): """ Retrieve tweets for a user, list or search term. The optional ``per_user`` arg limits the number of tweets per user, for example to allow a fair spread of tweets per user for a list. """ lookup = {"query_type": query_type, "value": args[0]} try: tweets = Tweet.objects.get_for(**lookup) except TwitterQueryException: return [] if per_user is not None: _tweets = defaultdict(list) for tweet in tweets: if len(_tweets[tweet.user_name]) < per_user: _tweets[tweet.user_name].append(tweet) tweets = sum(_tweets.values(), []) tweets.sort(key=lambda t: t.created_at, reverse=True) if len(args) > 1 and str(args[-1]).isdigit(): tweets = tweets[:int(args[-1])] return tweets
Retrieve tweets for a user, list or search term. The optional ``per_user`` arg limits the number of tweets per user, for example to allow a fair spread of tweets per user for a list.
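A self-contained sketch of the ``per_user`` fair-spread step described above, using (user_name, created_at) tuples as stand-ins for Tweet objects:

from collections import defaultdict

def spread_per_user(tweets, per_user):
    # Cap each user's bucket at per_user tweets, then flatten and re-sort
    # newest-first, mirroring the logic in tweets_for.
    buckets = defaultdict(list)
    for user_name, created_at in tweets:
        if len(buckets[user_name]) < per_user:
            buckets[user_name].append((user_name, created_at))
    flat = sum(buckets.values(), [])
    flat.sort(key=lambda t: t[1], reverse=True)
    return flat

tweets = [('a', 3), ('a', 2), ('a', 1), ('b', 4)]
print(spread_per_user(tweets, per_user=1))  # [('b', 4), ('a', 3)]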
def widen(self, other): """ Widen current range. """ if self.low < other.low: low = -float("inf") else: low = self.low if self.high > other.high: high = float("inf") else: high = self.high return Interval(low, high)
Widen current range.
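The widening rule in isolation, as a sketch on plain tuples: any bound of the current interval lying outside the other interval jumps straight to infinity, which is what guarantees termination in abstract-interpretation fixpoint loops.

def widen(low1, high1, low2, high2):
    # Mirror of the method above: a bound that moved outward is widened
    # all the way to infinity rather than grown incrementally.
    low = -float('inf') if low1 < low2 else low1
    high = float('inf') if high1 > high2 else high1
    return (low, high)

print(widen(0, 10, 0, 5))   # (0, inf)   -- upper bound grew beyond the other
print(widen(0, 10, 2, 10))  # (-inf, 10) -- lower bound extended downward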
def convert_data_iterable(data_iterable, filter_func=None, converter_func=None): # TODO: add concatenate parameter '''Convert raw data in data iterable. Parameters ---------- data_iterable : iterable Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status). filter_func : function Function that takes array and returns true or false for each item in array. converter_func : function Function that takes array and returns an array or tuple of arrays. Returns ------- data_list : list Data list of the form [(converted data, timestamp_start, timestamp_stop, status), (...), ...] ''' data_list = [] for item in data_iterable: data_list.append((convert_data_array(item[0], filter_func=filter_func, converter_func=converter_func), item[1], item[2], item[3])) return data_list
Convert raw data in data iterable. Parameters ---------- data_iterable : iterable Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status). filter_func : function Function that takes array and returns true or false for each item in array. converter_func : function Function that takes array and returns an array or tuple of arrays. Returns ------- data_list : list Data list of the form [(converted data, timestamp_start, timestamp_stop, status), (...), ...]
def inside_try(func, options={}):
    """
    Decorator to silence exceptions; for logging purposes we want a "safe"
    failure of the wrapped functions.
    """
    if six.PY2:
        name = func.func_name
    else:
        name = func.__name__

    @wraps(func)
    def silenceit(*args, **kwargs):
        """
        The function func to be silenced is wrapped inside a try/except and
        returned; exceptions are logged and returned in an error dict.
        Takes all kinds of arguments and passes them to the original func.
        """
        try:
            return func(*args, **kwargs)
        # pylint: disable=W0703
        # inside_try.silenceit: Catching too general exception Exception
        # that's the idea!
        except Exception as excpt:
            # first tell the object in charge
            if 'ctx' in kwargs:
                ctx = kwargs['ctx']
            else:
                # otherwise tell object defined in options
                # if we can be sure there is a context
                ctx = get_try_option(None, 'ctx')
            if not ctx:
                # tell a new object
                ctx = Bubble('Inside Try')
                # ctx.set_verbose(100)  # todo: move to magic

            head = name + ': silenced function inside_try:Error:'
            if get_try_option(ctx, 'count_it'):
                ctx.gbc.cry(head + 'counting')
            if get_try_option(ctx, 'print_it'):
                ctx.gbc.cry(head + 'printing:' + str(excpt))
            if get_try_option(ctx, 'print_args'):
                ctx.gbc.cry(head + 'printing ak:' + str(excpt))
                ctx.gbc.cry('args', stuff=args)
                ctx.gbc.cry('kwargs', stuff=kwargs)
            if get_try_option(ctx, 'inspect_it'):
                ctx.gbc.cry(head + 'inspecting:', stuff=excpt)
                for s in inspect.stack():
                    ctx.gbc.cry(head + ':stack:', stuff=s)
            if get_try_option(ctx, 'log_it'):
                ctx.gbc.cry(head + 'logging')
                for s in inspect.stack():
                    ctx.gbc.cry(head + ':stack:', stuff=s)
            if get_try_option(ctx, 'reraise_it'):
                ctx.gbc.cry(head + 'reraising')
                raise  # re-raise preserving the original traceback
            # Always return the error dict. This must stay inside the except
            # block: Python 3 unbinds the `excpt` name once the block ends.
            return {'error': str(excpt),
                    'silenced': name,
                    'args': args,
                    'kwargs': kwargs}
    return silenceit
Decorator to silence exceptions; for logging purposes we want a "safe" failure of the wrapped functions.
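A stripped-down, runnable sketch of the same silencing pattern, without the Bubble context machinery: failures come back as an error dict instead of propagating.

from functools import wraps

def inside_try_minimal(func):
    # Minimal version of the decorator above: swallow the exception and
    # return a structured error dict in its place.
    @wraps(func)
    def silenceit(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as excpt:
            return {'error': str(excpt), 'silenced': func.__name__,
                    'args': args, 'kwargs': kwargs}
    return silenceit

@inside_try_minimal
def divide(a, b):
    return a / b

print(divide(6, 2))  # 3.0
print(divide(1, 0))  # {'error': 'division by zero', 'silenced': 'divide', ...}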
def process_records(records): """Converts queue entries into object changes. :param records: an iterable containing `LiveSyncQueueEntry` objects :return: a dict mapping object references to `SimpleChange` bitsets """ changes = defaultdict(int) cascaded_update_records = set() cascaded_delete_records = set() for record in records: if record.change != ChangeType.deleted and record.object is None: # Skip entries which are not deletions but have no corresponding objects. # Probably they are updates for objects that got deleted afterwards. continue if record.change == ChangeType.created: assert record.type != EntryType.category changes[record.object] |= SimpleChange.created elif record.change == ChangeType.deleted: assert record.type != EntryType.category cascaded_delete_records.add(record) elif record.change in {ChangeType.moved, ChangeType.protection_changed}: cascaded_update_records.add(record) elif record.change == ChangeType.data_changed: assert record.type != EntryType.category changes[record.object] |= SimpleChange.updated for obj in _process_cascaded_category_contents(cascaded_update_records): changes[obj] |= SimpleChange.updated for obj in _process_cascaded_event_contents(cascaded_delete_records): changes[obj] |= SimpleChange.deleted return changes
Converts queue entries into object changes. :param records: an iterable containing `LiveSyncQueueEntry` objects :return: a dict mapping object references to `SimpleChange` bitsets
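A small demonstration of the SimpleChange bitset accumulation used above, assuming SimpleChange is an IntFlag-style enum (which the `|=` usage implies; the flag values below are illustrative, not from the source):

from enum import IntFlag
from collections import defaultdict

class SimpleChange(IntFlag):  # illustrative values, not the real plugin's
    created = 1
    updated = 2
    deleted = 4

changes = defaultdict(int)
obj = 'event-42'  # stand-in for a real object reference
changes[obj] |= SimpleChange.created
changes[obj] |= SimpleChange.updated
print(SimpleChange(changes[obj]))  # e.g. SimpleChange.created|updated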
def delete_files(): """ Delete one or more files from the server """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] body_data = request.get_json() def with_exclusive_lock(): if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg) try: data_store = versioned_storage(repository_path) if not data_store.have_active_commit(): return fail(no_active_commit_msg) #------------- for fle in json.loads(body_data['files']): data_store.fs_delete(fle) # updates the user lock expiry update_user_lock(repository_path, session_token) return success() except Exception: return fail() # pylint: disable=broad-except return lock_access(repository_path, with_exclusive_lock)
Delete one or more files from the server
def makeOuputDir(outputDir, force): """ Create or check for an output directory. @param outputDir: A C{str} output directory name, or C{None}. @param force: If C{True}, allow overwriting of pre-existing files. @return: The C{str} output directory name. """ if outputDir: if exists(outputDir): if not force: print('Will not overwrite pre-existing files. Use --force to ' 'make me.', file=sys.stderr) sys.exit(1) else: mkdir(outputDir) else: outputDir = mkdtemp() print('Writing output files to %s' % outputDir) return outputDir
Create or check for an output directory. @param outputDir: A C{str} output directory name, or C{None}. @param force: If C{True}, allow overwriting of pre-existing files. @return: The C{str} output directory name.
def __deserialize_primitive(self, data, klass): """Deserializes string to primitive type. :param data: str. :param klass: class literal. :return: int, long, float, str, bool. """ try: return klass(data) except UnicodeEncodeError: return six.text_type(data) except TypeError: return data
Deserializes string to primitive type. :param data: str. :param klass: class literal. :return: int, long, float, str, bool.
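The fallback chain above, exercised directly as a standalone function (six mirrors the original's dependency): coercion first, the unicode fallback on encoding errors, and the value unchanged when coercion cannot apply.

import six

def deserialize_primitive(data, klass):
    try:
        return klass(data)
    except UnicodeEncodeError:
        return six.text_type(data)
    except TypeError:
        return data

print(deserialize_primitive('42', int))     # 42
print(deserialize_primitive('1.5', float))  # 1.5
print(deserialize_primitive(None, int))     # None (TypeError -> data unchanged)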
def saltenviron(environ): ''' Make Salt's opts dict and the APIClient available in the WSGI environ ''' if '__opts__' not in locals(): import salt.config __opts__ = salt.config.client_config( os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master')) environ['SALT_OPTS'] = __opts__ environ['SALT_APIClient'] = salt.netapi.NetapiClient(__opts__)
Make Salt's opts dict and the APIClient available in the WSGI environ
def userToId(url): """ Extract the username from a contact URL. Matches addresses containing ``users/<user>`` or ``users/ME/contacts/<user>``. Args: url (str): Skype API URL Returns: str: extracted identifier """ match = re.search(r"users(/ME/contacts)?/[0-9]+:([^/]+)", url) return match.group(2) if match else None
Extract the username from a contact URL. Matches addresses containing ``users/<user>`` or ``users/ME/contacts/<user>``. Args: url (str): Skype API URL Returns: str: extracted identifier
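The extraction above run against sample Skype-style URLs (the URLs are illustrative, not from the source):

import re

def user_to_id(url):
    match = re.search(r"users(/ME/contacts)?/[0-9]+:([^/]+)", url)
    return match.group(2) if match else None

print(user_to_id("https://example.com/v1/users/8:alice/profile"))         # alice
print(user_to_id("https://example.com/v1/users/ME/contacts/8:live:bob"))  # live:bob
print(user_to_id("https://example.com/v1/threads/19:xyz"))                # None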
def create_html(self, fname, title="ClassTracker Statistics"): """ Create HTML page `fname` and additional files in a directory derived from `fname`. """ # Create a folder to store the charts and additional HTML files. self.basedir = os.path.dirname(os.path.abspath(fname)) self.filesdir = os.path.splitext(fname)[0] + '_files' if not os.path.isdir(self.filesdir): os.mkdir(self.filesdir) self.filesdir = os.path.abspath(self.filesdir) self.links = {} # Annotate all snapshots in advance self.annotate() # Create charts. The tags to show the images are returned and stored in # the self.charts dictionary. This allows to return alternative text if # the chart creation framework is not available. self.charts = {} fn = os.path.join(self.filesdir, 'timespace.png') self.charts['snapshots'] = self.create_snapshot_chart(fn) for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))): fn = os.path.join(self.filesdir, 'fp%d.png' % (idx)) self.charts[fp] = self.create_pie_chart(fp, fn) for cn in list(self.index.keys()): fn = os.path.join(self.filesdir, cn.replace('.', '_')+'-lt.png') self.charts[cn] = self.create_lifetime_chart(cn, fn) # Create HTML pages first for each class and then the index page. for cn in list(self.index.keys()): fn = os.path.join(self.filesdir, cn.replace('.', '_')+'.html') self.links[cn] = fn self.print_class_details(fn, cn) self.create_title_page(fname, title=title)
Create HTML page `fname` and additional files in a directory derived from `fname`.
def select_balanced_subset(items, select_count, categories, select_count_values=None, seed=None): """ Select items so the summed category weights are balanced. Each item has a dictionary containing the category weights. Items are selected until ``select_count`` is reached. The value that is added to ``select_count`` for an item can be defined in the dictionary ``select_count_values``. If this is not defined it is assumed to be 1, which means `select_count` items are selected. Args: items (dict): Dictionary containing items with category weights. select_count (float): Value to reach for selected items. categories (list): List of all categories. select_count_values (dict): The select_count values to be used. For example an utterance with multiple labels: The category weights (label-lengths) are used for balance, but the utterance-duration is used for reaching the select_count. Returns: list: List of item ids, containing ``number_of_items`` (or ``len(items)`` if smaller). Example: >>> items = { >>> 'utt-1' : {'m': 1, 's': 0, 'n': 0}, >>> 'utt-2' : {'m': 0, 's': 2, 'n': 1}, >>> ... >>> } >>> select_balanced_subset(items, 5) >>> ['utt-1', 'utt-3', 'utt-9', 'utt-33', 'utt-34'] """ rand = random.Random() rand.seed(seed) if select_count_values is None: select_count_values = {item_id: 1 for item_id in items.keys()} if sum(select_count_values.values()) < select_count: return list(items.keys()) available_item_ids = sorted(list(items.keys())) weight_per_category = np.zeros(len(categories)) selected_item_ids = [] available_item_weights = [] current_select_count = 0 rand.shuffle(available_item_ids) # Create dict with weights as vectors for item_id in available_item_ids: weights = items[item_id] all_weights = np.zeros(len(categories)) for category, weight in weights.items(): all_weights[categories.index(category)] = float(weight) available_item_weights.append(all_weights) # Always add best next item while current_select_count < select_count: best_item_index = 0 best_item_id = None best_item_dist = float('inf') current_item_index = 0 while current_item_index < len(available_item_ids) and best_item_dist > 0: item_id = available_item_ids[current_item_index] item_weights = available_item_weights[current_item_index] temp_total_weights = weight_per_category + item_weights dist = temp_total_weights.var() if dist < best_item_dist: best_item_index = current_item_index best_item_dist = dist best_item_id = item_id current_item_index += 1 weight_per_category += available_item_weights[best_item_index] selected_item_ids.append(best_item_id) del available_item_ids[best_item_index] del available_item_weights[best_item_index] current_select_count += select_count_values[best_item_id] return selected_item_ids
Select items so the summed category weights are balanced. Each item has a dictionary containing the category weights. Items are selected until ``select_count`` is reached. The value that is added to ``select_count`` for an item can be defined in the dictionary ``select_count_values``. If this is not defined it is assumed to be 1, which means `select_count` items are selected. Args: items (dict): Dictionary containing items with category weights. select_count (float): Value to reach for selected items. categories (list): List of all categories. select_count_values (dict): The select_count values to be used. For example an utterance with multiple labels: The category weights (label-lengths) are used for balance, but the utterance-duration is used for reaching the select_count. Returns: list: List of item ids, containing ``number_of_items`` (or ``len(items)`` if smaller). Example: >>> items = { >>> 'utt-1' : {'m': 1, 's': 0, 'n': 0}, >>> 'utt-2' : {'m': 0, 's': 2, 'n': 1}, >>> ... >>> } >>> select_balanced_subset(items, 5) >>> ['utt-1', 'utt-3', 'utt-9', 'utt-33', 'utt-34']
def _init_from_csc(self, csc): """ Initialize data from a CSC matrix. """ if len(csc.indices) != len(csc.data): raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data))) handle = ctypes.c_void_p() _check_call(_LIB.XGDMatrixCreateFromCSCEx(c_array(ctypes.c_size_t, csc.indptr), c_array(ctypes.c_uint, csc.indices), c_array(ctypes.c_float, csc.data), ctypes.c_size_t(len(csc.indptr)), ctypes.c_size_t(len(csc.data)), ctypes.c_size_t(csc.shape[0]), ctypes.byref(handle))) self.handle = handle
Initialize data from a CSC matrix.
def voxelwise_diff(img_spec1=None,
                   img_spec2=None,
                   abs_value=True,
                   cmap='gray',
                   overlay_image=False,
                   overlay_alpha=0.8,
                   num_rows=2,
                   num_cols=6,
                   rescale_method='global',
                   background_threshold=0.05,
                   annot=None,
                   padding=5,
                   output_path=None,
                   figsize=None):
    """
    Voxel-wise difference map.

    Parameters
    ----------
    img_spec1 : str or nibabel image-like object
        MR image (or path to one) to be visualized

    img_spec2 : str or nibabel image-like object
        MR image (or path to one) to be visualized

    abs_value : bool
        Flag indicating whether to take the absolute value of the difference or not.
        Default: True, display absolute differences only (so order of images does not matter)

    cmap : str
        Colormap to show the difference values.

    overlay_image : bool
        Flag to specify whether to overlay the difference values on the original image.
        .. note: This feature is not reliable and supported well yet.

    num_rows : int
        number of rows (top to bottom) per each of 3 dimensions

    num_cols : int
        number of panels (left to right) per row of each dimension.

    rescale_method : bool or str or list or None
        Range to rescale the intensity values to.
        Default: 'global', min and max values computed based on ranges from both images.
        If False or None, no rescaling is done (does not work yet).

    background_threshold : float or str
        A threshold value below which all the background voxels will be set to zero.
        Default: 0.05. Other option is a string specifying a percentile: '5%', '10%'.
        Specify None if you don't want any thresholding.

    annot : str
        Text to display to annotate the visualization

    padding : int
        number of voxels to pad around each panel.

    output_path : str
        path to save the generated collage to.

    figsize : list
        Size of figure in inches to be passed on to plt.figure() e.g. [12, 12] or [20, 20]

    Returns
    -------
    fig : figure handle
        handle to the collage figure generated.
    """

    if not isinstance(abs_value, bool):
        abs_value = bool(abs_value)

    mixer_params = dict(abs_value=abs_value,
                        cmap=cmap,
                        overlay_image=overlay_image,
                        overlay_alpha=overlay_alpha)
    fig = _compare(img_spec1,
                   img_spec2,
                   num_rows=num_rows,
                   num_cols=num_cols,
                   mixer='voxelwise_diff',
                   annot=annot,
                   padding=padding,
                   rescale_method=rescale_method,
                   bkground_thresh=background_threshold,
                   output_path=output_path,
                   figsize=figsize,
                   **mixer_params)

    return fig
Voxel-wise difference map. Parameters ---------- img_spec1 : str or nibabel image-like object MR image (or path to one) to be visualized img_spec2 : str or nibabel image-like object MR image (or path to one) to be visualized abs_value : bool Flag indicating whether to take the absolute value of the difference or not. Default: True, display absolute differences only (so order of images does not matter) cmap : str Colormap to show the difference values. overlay_image : bool Flag to specify whether to overlay the difference values on the original image. .. note: This feature is not reliable and supported well yet. num_rows : int number of rows (top to bottom) per each of 3 dimensions num_cols : int number of panels (left to right) per row of each dimension. rescale_method : bool or str or list or None Range to rescale the intensity values to. Default: 'global', min and max values computed based on ranges from both images. If False or None, no rescaling is done (does not work yet). background_threshold : float or str A threshold value below which all the background voxels will be set to zero. Default: 0.05. Other option is a string specifying a percentile: '5%', '10%'. Specify None if you don't want any thresholding. annot : str Text to display to annotate the visualization padding : int number of voxels to pad around each panel. output_path : str path to save the generated collage to. figsize : list Size of figure in inches to be passed on to plt.figure() e.g. [12, 12] or [20, 20] Returns ------- fig : figure handle handle to the collage figure generated.
def options(argv=[]):
    """ A helper function that returns a dictionary of the default key-value pairs """
    parser = HendrixOptionParser
    parsed_args = parser.parse_args(argv)
    return vars(parsed_args[0])
A helper function that returns a dictionary of the default key-value pairs
def _set_vrrpv3e(self, v, load=False): """ Setter method for vrrpv3e, mapped from YANG variable /routing_system/interface/ve/ipv6/vrrpv3e (list) If this variable is read-only (config: false) in the source YANG file, then _set_vrrpv3e is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vrrpv3e() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("vrid",vrrpv3e.vrrpv3e, yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrid', extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}), is_container='list', yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vrrpv3e must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("vrid",vrrpv3e.vrrpv3e, yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrid', extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}), is_container='list', yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Start VRRPE configuration', u'cli-no-key-completion': None, u'alt-name': u'vrrp-extended-group', u'sort-priority': u'143', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'vrrpv3eSessionVlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='list', is_config=True)""", }) self.__vrrpv3e = t if hasattr(self, '_set'): self._set()
Setter method for vrrpv3e, mapped from YANG variable /routing_system/interface/ve/ipv6/vrrpv3e (list) If this variable is read-only (config: false) in the source YANG file, then _set_vrrpv3e is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vrrpv3e() directly.
def create_option_vip(self):
    """Get an instance of the option_vip services facade."""
    return OptionVIP(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
Get an instance of the option_vip services facade.
def _results_accumulator(self, filename): """ :type filename: str :param filename: name of file, used as a key to store in self.data :yields: (dict, detect_secrets.plugins.base.BasePlugin) Caller is responsible for updating the dictionary with results of plugin analysis. """ file_results = {} for plugin in self.plugins: yield file_results, plugin if not file_results: return if filename not in self.data: self.data[filename] = file_results else: self.data[filename].update(file_results)
:type filename: str :param filename: name of file, used as a key to store in self.data :yields: (dict, detect_secrets.plugins.base.BasePlugin) Caller is responsible for updating the dictionary with results of plugin analysis.
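A toy version of the accumulator protocol above: the generator yields a shared dict plus each plugin, the caller fills the dict in place, and the generator stores whatever accumulated once iteration finishes (names below are illustrative).

def results_accumulator(store, filename, plugins):
    file_results = {}
    for plugin in plugins:
        yield file_results, plugin
    if not file_results:
        return
    store.setdefault(filename, {}).update(file_results)

store = {}
plugins = ['hex_scanner', 'keyword_scanner']  # stand-ins for plugin objects
for results, plugin in results_accumulator(store, 'app.py', plugins):
    results[plugin] = 'findings-from-' + plugin  # caller updates in place
print(store)
# {'app.py': {'hex_scanner': 'findings-from-hex_scanner',
#             'keyword_scanner': 'findings-from-keyword_scanner'}}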
def color_args(args, *indexes):
    """
    Color a list of arguments on particular indexes

    >>> c = color_args([None, 'blue'], 1)
    >>> next(c)
    >>> next(c)
    '0000FF'
    """
    for i, arg in enumerate(args):
        if i in indexes:
            yield lookup_color(arg)
        else:
            yield arg
Color a list of arguments on particular indexes >>> c = color_args([None,'blue'], 1) >>> next(c) >>> next(c) '0000FF'
def get_discrete_task_agent(generators, market, nStates, nOffer, markups, withholds, maxSteps, learner, Pd0=None, Pd_min=0.0): """ Returns a tuple of task and agent for the given learner. """ env = pyreto.discrete.MarketEnvironment(generators, market, numStates=nStates, numOffbids=nOffer, markups=markups, withholds=withholds, Pd0=Pd0, Pd_min=Pd_min) task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps) nActions = len(env._allActions) module = ActionValueTable(numStates=nStates, numActions=nActions) agent = LearningAgent(module, learner) return task, agent
Returns a tuple of task and agent for the given learner.
def from_code(cls, code: int) -> 'ColorCode': """ Return a ColorCode from a terminal code. """ c = cls() c._init_code(code) return c
Return a ColorCode from a terminal code.
def get_datetext(year, month, day):
    """year=2005, month=11, day=16 => '2005-11-16 00:00:00'"""
    input_format = "%Y-%m-%d"
    try:
        datestruct = time.strptime("%i-%i-%i" % (year, month, day), input_format)
        return strftime(datetext_format, datestruct)
    except Exception:
        return datetext_default
year=2005, month=11, day=16 => '2005-11-16 00:00:00'
def add(self, value): """ Add a value to the buffer. """ ind = int(self._ind % self.shape) self._pos = self._ind % self.shape self._values[ind] = value if self._ind < self.shape: self._ind += 1 # fast fill else: self._ind += self._splitValue self._splitPos += self._splitValue self._cached = False
Add a value to the buffer.
def _worker_thread_disk(self):
    # type: (Downloader) -> None
    """Worker thread for disk
    :param Downloader self: this
    """
    while not self.termination_check:
        try:
            # block with a short timeout so the termination flag is
            # re-checked every 0.1 s; with block=False the timeout would
            # be ignored and the loop would busy-wait
            dd, offsets, data = self._disk_queue.get(
                block=True, timeout=0.1)
        except queue.Empty:
            continue
        try:
            self._process_data(dd, offsets, data)
        except Exception as e:
            with self._transfer_lock:
                self._exceptions.append(e)
Worker thread for disk :param Downloader self: this
def update(self, resource, rid, updates): """ Updates the resource with id 'rid' with the given updates dictionary. """ if resource[-1] != '/': resource += '/' resource += str(rid) return self.put(resource, data=updates)
Updates the resource with id 'rid' with the given updates dictionary.
def dls(self)->List[DeviceDataLoader]: "Returns a list of all DeviceDataLoaders. If you need a specific DeviceDataLoader, access via the relevant property (`train_dl`, `valid_dl`, etc) as the index of DLs in this list is not guaranteed to remain constant." res = [self.train_dl, self.fix_dl, self.single_dl] # Preserve the original ordering of Train, Valid, Fix, Single, Test Data Loaders # (Unknown/not verified as of 1.0.47 whether there are other methods explicitly using DLs their list index) if self.valid_dl: res.insert(1, self.valid_dl) return res if not self.test_dl else res + [self.test_dl]
Returns a list of all DeviceDataLoaders. If you need a specific DeviceDataLoader, access via the relevant property (`train_dl`, `valid_dl`, etc) as the index of DLs in this list is not guaranteed to remain constant.
def format(self, vertices):
    """Format instance to dump.

    `vertices` is a dict mapping name to Vertex.
    """
    buf = io.StringIO()

    buf.write(self.name + '\n')
    buf.write('{\n')
    buf.write(' type {};\n'.format(self.type_))
    buf.write(' faces\n')
    buf.write(' (\n')
    for f in self.faces:
        s = f.format(vertices)
        buf.write(' {}\n'.format(s))
    buf.write(' );\n')
    buf.write('}')
    return buf.getvalue()
Format instance to dump. `vertices` is a dict mapping name to Vertex.
def dump( self, stream, progress=None, lower=None, upper=None, incremental=False, deltas=False ): """Dump the repository to a dumpfile stream. :param stream: A file stream to which the dumpfile is written :param progress: A file stream to which progress is written :param lower: Must be a numeric version number :param upper: Must be a numeric version number See ``svnadmin help dump`` for details on the other arguments. """ cmd = [SVNADMIN, 'dump', '.'] if progress is None: cmd.append('-q') if lower is not None: cmd.append('-r') if upper is None: cmd.append(str(int(lower))) else: cmd.append('%d:%d' % (int(lower), int(upper))) if incremental: cmd.append('--incremental') if deltas: cmd.append('--deltas') p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress) p.wait() if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, cmd)
Dump the repository to a dumpfile stream. :param stream: A file stream to which the dumpfile is written :param progress: A file stream to which progress is written :param lower: Must be a numeric version number :param upper: Must be a numeric version number See ``svnadmin help dump`` for details on the other arguments.
def decimate(self, fraction=0.5, N=None, boundaries=False, verbose=True): """ Downsample the number of vertices in a mesh. :param float fraction: the desired target of reduction. :param int N: the desired number of final points (**fraction** is recalculated based on it). :param bool boundaries: (True), decide whether to leave boundaries untouched or not. .. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices. .. hint:: |skeletonize| |skeletonize.py|_ """ poly = self.polydata(True) if N: # N = desired number of points Np = poly.GetNumberOfPoints() fraction = float(N) / Np if fraction >= 1: return self decimate = vtk.vtkDecimatePro() decimate.SetInputData(poly) decimate.SetTargetReduction(1 - fraction) decimate.PreserveTopologyOff() if boundaries: decimate.BoundaryVertexDeletionOff() else: decimate.BoundaryVertexDeletionOn() decimate.Update() if verbose: print("Nr. of pts, input:", poly.GetNumberOfPoints(), end="") print(" output:", decimate.GetOutput().GetNumberOfPoints()) return self.updateMesh(decimate.GetOutput())
Downsample the number of vertices in a mesh. :param float fraction: the desired target of reduction. :param int N: the desired number of final points (**fraction** is recalculated based on it). :param bool boundaries: (True), decide whether to leave boundaries untouched or not. .. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices. .. hint:: |skeletonize| |skeletonize.py|_
def do_forget(self, repo): ''' Drop definition of a repo. forget REPO ''' self.abort_on_nonexisting_repo(repo, 'forget') self.network.forget(repo)
Drop definition of a repo. forget REPO
def ParseNumericOption(self, options, name, base=10, default_value=None): """Parses a numeric option. If the option is not set the default value is returned. Args: options (argparse.Namespace): command line arguments. name (str): name of the numeric option. base (Optional[int]): base of the numeric value. default_value (Optional[object]): default value. Returns: int: numeric value. Raises: BadConfigOption: if the options are invalid. """ numeric_value = getattr(options, name, None) if not numeric_value: return default_value try: return int(numeric_value, base) except (TypeError, ValueError): name = name.replace('_', ' ') raise errors.BadConfigOption( 'Unsupported numeric value {0:s}: {1!s}.'.format( name, numeric_value))
Parses a numeric option. If the option is not set the default value is returned. Args: options (argparse.Namespace): command line arguments. name (str): name of the numeric option. base (Optional[int]): base of the numeric value. default_value (Optional[object]): default value. Returns: int: numeric value. Raises: BadConfigOption: if the options are invalid.
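A standalone re-creation of the parsing logic above (no plaso involved; option names are illustrative), showing why the ``base`` argument exists:

import argparse

def parse_numeric_option(options, name, base=10, default_value=None):
    # Mirror of ParseNumericOption: missing/empty -> default, otherwise
    # parse as an integer in the requested base.
    value = getattr(options, name, None)
    if not value:
        return default_value
    try:
        return int(value, base)
    except (TypeError, ValueError):
        raise ValueError('Unsupported numeric value {0!s}'.format(value))

opts = argparse.Namespace(buffer_size='0x400', workers=None)
print(parse_numeric_option(opts, 'buffer_size', base=16))      # 1024
print(parse_numeric_option(opts, 'workers', default_value=4))  # 4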
def per_from_id(flavors=chat_flavors+inline_flavors): """ :param flavors: ``all`` or a list of flavors :return: a seeder function that returns the from id only if the message flavor is in ``flavors``. """ return _wrap_none(lambda msg: msg['from']['id'] if flavors == 'all' or flavor(msg) in flavors else None)
:param flavors: ``all`` or a list of flavors :return: a seeder function that returns the from id only if the message flavor is in ``flavors``.
def collapse_nodes(graph, survivor_mapping: Mapping[BaseEntity, Set[BaseEntity]]) -> None:
    """Collapse all nodes in values to the key nodes, in place.

    :param pybel.BELGraph graph: A BEL graph
    :param survivor_mapping: A dictionary with survivors as keys and iterables of
     the corresponding victims as values.
    """
    inconsistencies = surviors_are_inconsistent(survivor_mapping)
    if inconsistencies:
        raise ValueError('survivor mapping is inconsistent: {}'.format(inconsistencies))

    for survivor, victims in survivor_mapping.items():
        for victim in victims:
            collapse_pair(graph, survivor=survivor, victim=victim)

    _remove_self_edges(graph)
Collapse all nodes in values to the key nodes, in place. :param pybel.BELGraph graph: A BEL graph :param survivor_mapping: A dictionary with survivors as keys and iterables of the corresponding victims as values.
def get_body_text(self): """ Parse the body html and returns the body text using bs4 :return: body text :rtype: str """ if self.body_type != 'HTML': return self.body try: soup = bs(self.body, 'html.parser') except RuntimeError: return self.body else: return soup.body.text
Parse the body html and returns the body text using bs4 :return: body text :rtype: str
def get_branches(self, local=True, remote_branches=True): """Returns a list of local and remote branches.""" if not self.repo.remotes: remote_branches = False branches = [] if remote_branches: # Remote refs. try: for b in self.remote.refs: name = '/'.join(b.name.split('/')[1:]) if name not in legit_settings.forbidden_branches: branches.append(Branch(name, is_published=True)) except (IndexError, AssertionError): pass if local: # Local refs. for b in [h.name for h in self.repo.heads]: if (not remote_branches) or (b not in [br.name for br in branches]): if b not in legit_settings.forbidden_branches: branches.append(Branch(b, is_published=False)) return sorted(branches, key=attrgetter('name'))
Returns a list of local and remote branches.
def decorate(self, function_or_name): '''Decorate a function to time the execution The method can be called with or without a name. If no name is given the function defaults to the name of the function. :keyword function_or_name: The name to post to or the function to wrap >>> from statsd import Timer >>> timer = Timer('application_name') >>> >>> @timer.decorate ... def some_function(): ... # resulting timer name: application_name.some_function ... pass >>> >>> @timer.decorate('my_timer') ... def some_other_function(): ... # resulting timer name: application_name.my_timer ... pass ''' if callable(function_or_name): return self._decorate(function_or_name.__name__, function_or_name) else: return partial(self._decorate, function_or_name)
Decorate a function to time the execution The method can be called with or without a name. If no name is given the function defaults to the name of the function. :keyword function_or_name: The name to post to or the function to wrap >>> from statsd import Timer >>> timer = Timer('application_name') >>> >>> @timer.decorate ... def some_function(): ... # resulting timer name: application_name.some_function ... pass >>> >>> @timer.decorate('my_timer') ... def some_other_function(): ... # resulting timer name: application_name.my_timer ... pass
def connect(self): ''' activates the connection object ''' if not HAVE_ZMQ: raise errors.AnsibleError("zmq is not installed") # this is rough/temporary and will likely be optimized later ... self.context = zmq.Context() socket = self.context.socket(zmq.REQ) addr = "tcp://%s:%s" % (self.host, self.port) socket.connect(addr) self.socket = socket return self
activates the connection object
def normalize_weight(self, samples):
    """Normalize weights so that each sample's weights sum to 1.

    Parameters
    ----------
    samples : list
        a collection of samples; a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
        representing {{w11, w12, ..., w1k}, {w21, w22, ..., w2k}, ..., {wk1, wk2, ..., wkk}}

    Returns
    -------
    list
        samples after weight normalization
    """
    for i in range(NUM_OF_INSTANCE):
        total = 0
        for j in range(self.effective_model_num):
            total += samples[i][j]
        for j in range(self.effective_model_num):
            samples[i][j] /= total
    return samples
Normalize weights so that each sample's weights sum to 1. Parameters ---------- samples : list a collection of samples; a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix, representing {{w11, w12, ..., w1k}, {w21, w22, ..., w2k}, ..., {wk1, wk2, ..., wkk}} Returns ------- list samples after weight normalization
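The nested loop above is row normalization; a vectorized NumPy equivalent, assuming samples is a plain 2-D array of positive weights:

import numpy as np

samples = np.array([[2.0, 2.0], [1.0, 3.0]])
# Divide each row by its own sum so every row sums to 1
normalized = samples / samples.sum(axis=1, keepdims=True)
print(normalized)  # [[0.5  0.5 ], [0.25 0.75]]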
async def api_call(self, verb, action, params=None, add_authorization_token=True, retry=False): """Send api call.""" if add_authorization_token and not self.token: await self.refresh_token() try: return await self._api_call_impl(verb, action, params, add_authorization_token) except InvalidToken: if not retry and add_authorization_token: await self.refresh_token() # Recursive call of api_call return await self.api_call(verb, action, params, add_authorization_token, True) raise
Send api call.
def body(self) -> Union[bytes, str, List[Any], Dict[Any, Any], RawIOBase, None]:
    """
    Get the body.
    """
    return self._body
Get the body.
def get_eci_assignment_number(encoding):
    """\
    Returns the ECI number for the provided encoding.

    :param str encoding: An encoding name.
    :return: The ECI assignment number.
    """
    try:
        return consts.ECI_ASSIGNMENT_NUM[codecs.lookup(encoding).name]
    except KeyError:
        raise QRCodeError('Unknown ECI assignment number for encoding "{0}".'
                          .format(encoding))
Returns the ECI number for the provided encoding. :param str encoding: An encoding name. :return: The ECI assignment number.
def MSTORE8(self, address, value): """Save byte to memory""" if istainted(self.pc): for taint in get_taints(self.pc): value = taint_with(value, taint) self._allocate(address, 1) self._store(address, Operators.EXTRACT(value, 0, 8), 1)
Save byte to memory
def fix_nls(self, in_, out_):
    """Fixes submitted translations by filtering carriage returns and pairing
    newlines at the beginning and end of the translated string with the
    original
    """
    if 0 == len(in_) or 0 == len(out_):
        return out_

    if "\r" in out_ and "\r" not in in_:
        out_ = out_.replace("\r", '')

    if "\n" == in_[0] and "\n" != out_[0]:
        out_ = "\n" + out_
    elif "\n" != in_[0] and "\n" == out_[0]:
        out_ = out_.lstrip()
    if 0 == len(out_):
        pass
    elif "\n" == in_[-1] and "\n" != out_[-1]:
        out_ = out_ + "\n"
    elif "\n" != in_[-1] and "\n" == out_[-1]:
        out_ = out_.rstrip()
    return out_
Fixes submitted translations by filtering carriage returns and pairing newlines at the beginning and end of the translated string with the original
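The newline-pairing rules exercised on small strings; the method is copied into a minimal class here only so the examples run standalone:

class NLFixer:
    # standalone copy of the method above, unchanged
    def fix_nls(self, in_, out_):
        if 0 == len(in_) or 0 == len(out_):
            return out_
        if "\r" in out_ and "\r" not in in_:
            out_ = out_.replace("\r", '')
        if "\n" == in_[0] and "\n" != out_[0]:
            out_ = "\n" + out_
        elif "\n" != in_[0] and "\n" == out_[0]:
            out_ = out_.lstrip()
        if 0 == len(out_):
            pass
        elif "\n" == in_[-1] and "\n" != out_[-1]:
            out_ = out_ + "\n"
        elif "\n" != in_[-1] and "\n" == out_[-1]:
            out_ = out_.rstrip()
        return out_

f = NLFixer()
print(repr(f.fix_nls("\nHello\n", "Bonjour")))  # '\nBonjour\n' -- newlines restored
print(repr(f.fix_nls("Hello", "\nBonjour\n")))  # 'Bonjour'     -- stray newlines dropped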
def parse(self): """ Parse the options. """ # Run the parser opt, arg = self.parser.parse_known_args(self.arguments) self.opt = opt self.arg = arg self.check() # Enable --all if no particular stat or group selected opt.all = not any([ getattr(opt, stat.dest) or getattr(opt, group.dest) for group in self.sample_stats.stats for stat in group.stats]) # Time period handling if opt.since is None and opt.until is None: opt.since, opt.until, period = did.base.Date.period(arg) else: opt.since = did.base.Date(opt.since or "1993-01-01") opt.until = did.base.Date(opt.until or "today") # Make the 'until' limit inclusive opt.until.date += delta(days=1) period = "given date range" # Validate the date range if not opt.since.date < opt.until.date: raise RuntimeError( "Invalid date range ({0} to {1})".format( opt.since, opt.until.date - delta(days=1))) header = "Status report for {0} ({1} to {2}).".format( period, opt.since, opt.until.date - delta(days=1)) # Finito log.debug("Gathered options:") log.debug('options = {0}'.format(opt)) return opt, header
Parse the options.
def _write_branch_and_tag_to_meta_yaml(self): """ Write branch and tag to meta.yaml by editing in place """ ## set the branch to pull source from with open(self.meta_yaml.replace("meta", "template"), 'r') as infile: dat = infile.read() newdat = dat.format(**{'tag': self.tag, 'branch': self.branch}) with open(self.meta_yaml, 'w') as outfile: outfile.write(newdat)
Write branch and tag to meta.yaml by editing in place
def initialize(self, training_info, model, environment, device):
    """ Initialize the target network and distributional (histogram) value settings from reinforcer settings """
    self.target_model = self.model_factory.instantiate(action_space=environment.action_space).to(device)
    self.target_model.load_state_dict(model.state_dict())
    self.target_model.eval()

    histogram_info = model.histogram_info()

    self.vmin = histogram_info['vmin']
    self.vmax = histogram_info['vmax']
    self.num_atoms = histogram_info['num_atoms']
    self.support_atoms = histogram_info['support_atoms']
    self.atom_delta = histogram_info['atom_delta']
Initialize the target network and distributional (histogram) value settings from reinforcer settings
def to_rest_models(models, includes=None):
    """ Convert the models into a dict for serialization.

    `models` should be an array of single model objects that
    will each be serialized.

    :return: dict
    """
    props = {}
    props['data'] = []

    for model in models:
        props['data'].append(_to_rest(model, includes=includes))

    props['included'] = _to_rest_includes(models, includes=includes)

    return props
Convert the models into a dict for serialization. `models` should be an array of single model objects that will each be serialized. :return: dict
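The shape of the dict produced above, sketched with plain dicts standing in for serialized models (JSON:API-style; the field names are illustrative, not from the library):

props = {
    'data': [
        {'type': 'articles', 'id': '1', 'attributes': {'title': 'First'}},
        {'type': 'articles', 'id': '2', 'attributes': {'title': 'Second'}},
    ],
    'included': [
        {'type': 'people', 'id': '9', 'attributes': {'name': 'Ann'}},
    ],
}
print(len(props['data']), 'models,', len(props['included']), 'included')  # 2 models, 1 included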