positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def packSeptets(octets, padBits=0):
    """ Packs the specified octets into septets

    Typically the output of encodeGsm7 would be used as input to this
    function. The resulting bytearray contains the original GSM-7
    characters packed into septets ready for transmission.

    :param octets: septet values, one per byte; may be a str, bytearray,
        bytes, list of ints or any iterable of ints
    :param padBits: number of leading pad bits before the first septet
    :rtype: bytearray
    """
    result = bytearray()
    # Normalize the input to an iterator of integer byte values.
    # (Generalized from explicit str/bytearray checks: any iterable of
    # ints now works; iter() on an iterator is a no-op.)
    if isinstance(octets, str):
        octets = iter(rawStrToByteArray(octets))
    else:
        octets = iter(octets)
    shift = padBits
    if padBits == 0:
        # Empty input previously raised StopIteration; return an empty
        # result instead.
        prevSeptet = next(octets, None)
        if prevSeptet is None:
            return result
    else:
        prevSeptet = 0x00
    for octet in octets:
        septet = octet & 0x7f
        if shift == 7:
            # prevSeptet has already been fully added to result
            shift = 0
            prevSeptet = septet
            continue
        b = ((septet << (7 - shift)) & 0xFF) | (prevSeptet >> shift)
        prevSeptet = septet
        shift += 1
        result.append(b)
    if shift != 7:
        # There is a bit "left over" from prevSeptet
        result.append(prevSeptet >> shift)
    return result
Packs the specified octets into septets Typically the output of encodeGsm7 would be used as input to this function. The resulting bytearray contains the original GSM-7 characters packed into septets ready for transmission. :rtype: bytearray
def get_smart_contract(self, hex_contract_address: str, is_full: bool = False) -> dict:
    """
    This interface is used to get the information of smart contract based on the
    specified hexadecimal hash value.

    :param hex_contract_address: str, a hexadecimal hash value.
    :param is_full: if True, return the full JSON-RPC response instead of
        only its 'result' entry.
    :return: the information of smart contract in dictionary form.
    :raises SDKException: if the address is not a 40-character hexadecimal string.
    """
    if not isinstance(hex_contract_address, str):
        raise SDKException(ErrorCode.param_err('a hexadecimal contract address is required.'))
    # A contract address is 20 bytes, i.e. 40 hexadecimal characters.
    # (Previous message incorrectly said "40 bytes".)
    if len(hex_contract_address) != 40:
        raise SDKException(ErrorCode.param_err('the length of the contract address should be 40 characters.'))
    payload = self.generate_json_rpc_payload(RpcMethod.GET_SMART_CONTRACT, [hex_contract_address, 1])
    response = self.__post(self.__url, payload)
    if is_full:
        return response
    return response['result']
This interface is used to get the information of smart contract based on the specified hexadecimal hash value. :param hex_contract_address: str, a hexadecimal hash value. :param is_full: :return: the information of smart contract in dictionary form.
def scores2recos(self, scores, candidates, rev=False):
    """Rank the candidate items for a user by their scores.

    Args:
        scores (numpy array; (n_target_items,)): Scores for the target
            items. A smaller score indicates a more promising item.
        candidates (numpy array; (n_target_items,)): Target items'
            indices. Only these items are considered as recommendation
            candidates.
        rev (bool): If True, return items in descending order; the
            default is ascending (smaller scores are more promising).

    Returns:
        (numpy array, numpy array): (sorted items, sorted scores).
    """
    ascending = np.argsort(scores)
    order = ascending[::-1] if rev else ascending
    return candidates[order], scores[order]
Get recommendation list for a user u_index based on scores. Args: scores (numpy array; (n_target_items,)): Scores for the target items. Smaller score indicates a promising item. candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates. rev (bool): If true, return items in a descending order. An ascending order (i.e., smaller scores are more promising) is default. Returns: (numpy array, numpy array) : (Sorted list of items, Sorted scores).
def clitable_to_dict(cli_table):
    """Converts TextFSM cli_table object to list of dictionaries.

    Each row becomes one dict whose keys are the table's header names,
    lower-cased.
    """
    keys = [name.lower() for name in cli_table.header]
    return [dict(zip(keys, row)) for row in cli_table]
Converts TextFSM cli_table object to list of dictionaries.
def _make_node_str_list(l):
    """Take a list of python objects and make a MPV string node array from it.

    As an example, the python list ``l = [ "foo", 23, false ]`` will result in
    the following MPV node object::

        struct mpv_node {
            .format = MPV_NODE_ARRAY,
            .u.list = *(struct mpv_node_array){
                .num = len(l),
                .keys = NULL,
                .values = struct mpv_node[len(l)] {
                    { .format = MPV_NODE_STRING, .u.string = l[0] },
                    { .format = MPV_NODE_STRING, .u.string = l[1] },
                    ...
                }
            }
        }

    Returns ``(char_ps, node_list, node, void_ptr)``; the first three are
    returned so the caller can keep references alive — presumably ctypes
    does not hold them for us (TODO confirm against callers).
    """
    # Coerce each element to a string and wrap it in a C char pointer.
    char_ps = [ c_char_p(_mpv_coax_proptype(e, str)) for e in l ]
    # Build the node array: every value is a STRING node pointing at one
    # of the char pointers above.
    node_list = MpvNodeList(
        num=len(l),
        keys=None,
        values=( MpvNode * len(l))( *[ MpvNode(
            format=MpvFormat.STRING,
            val=MpvNodeUnion(string=p))
            for p in char_ps ]))
    # Wrap the array in a single NODE_ARRAY node.
    node = MpvNode(
        format=MpvFormat.NODE_ARRAY,
        val=MpvNodeUnion(list=pointer(node_list)))
    return char_ps, node_list, node, cast(pointer(node), c_void_p)
Take a list of python objects and make a MPV string node array from it. As an example, the python list ``l = [ "foo", 23, false ]`` will result in the following MPV node object:: struct mpv_node { .format = MPV_NODE_ARRAY, .u.list = *(struct mpv_node_array){ .num = len(l), .keys = NULL, .values = struct mpv_node[len(l)] { { .format = MPV_NODE_STRING, .u.string = l[0] }, { .format = MPV_NODE_STRING, .u.string = l[1] }, ... } } }
def has_no_dangling_branch(neuron):
    '''Check if the neuron has dangling neurites

    :param neuron: neuron to check
    :return: CheckResult whose status is True when no neurite is dangling,
        carrying (root node id, [first point]) pairs of offending neurites
    '''
    # Soma center and the largest distance from it to any soma point
    soma_center = neuron.soma.points[:, COLS.XYZ].mean(axis=0)
    recentered_soma = neuron.soma.points[:, COLS.XYZ] - soma_center
    radius = np.linalg.norm(recentered_soma, axis=1)
    soma_max_radius = radius.max()

    def is_dangling(neurite):
        '''Is the neurite dangling ?'''
        starting_point = neurite.points[1][COLS.XYZ]
        # Starts within 12 units of the soma surface: not dangling
        if np.linalg.norm(starting_point - soma_center) - soma_max_radius <= 12.:
            return False
        # A non-axon neurite starting far from the soma is dangling
        if neurite.type != NeuriteType.axon:
            return True
        # An axon is dangling only if its start is also far from every
        # non-axon point (further than 2 * point radius + 2)
        all_points = list(chain.from_iterable(
            n.points[1:]
            for n in iter_neurites(neurite)
            if n.type != NeuriteType.axon))
        res = [np.linalg.norm(starting_point - p[COLS.XYZ]) >= 2 * p[COLS.R] + 2
               for p in all_points]
        return all(res)

    bad_ids = [(n.root_node.id, [n.root_node.points[1]])
               for n in iter_neurites(neuron) if is_dangling(n)]
    return CheckResult(len(bad_ids) == 0, bad_ids)
Check if the neuron has dangling neurites
def hashVariantAnnotation(cls, gaVariant, gaVariantAnnotation):
    """
    Produces an MD5 hash of the gaVariant and gaVariantAnnotation objects

    :param gaVariant: variant providing ``reference_bases`` and
        ``alternate_bases``
    :param gaVariantAnnotation: annotation whose ``transcript_effects``
        ids are folded into the hash
    :return: hexadecimal MD5 digest string
    """
    treffs = [treff.id for treff in gaVariantAnnotation.transcript_effects]
    # hashlib requires bytes: encode the formatted string (fixes a
    # TypeError under Python 3; digest is unchanged for ASCII data).
    return hashlib.md5(
        "{}\t{}\t{}\t".format(
            gaVariant.reference_bases, tuple(gaVariant.alternate_bases),
            treffs).encode('utf-8')
    ).hexdigest()
Produces an MD5 hash of the gaVariant and gaVariantAnnotation objects
def _get_jid_snapshots(jid, config='root'):
    '''
    Returns pre/post snapshots made by a given Salt jid

    Looks for 'salt_jid' entries into snapshots userdata which are created
    when 'snapper.run' is executed.

    :param jid: Salt job id to look up
    :param config: snapper configuration name to search (default 'root')
    :return: tuple of (pre snapshot id, post snapshot id)
    :raises CommandExecutionError: if either snapshot is missing
    '''
    # All snapshots tagged with this jid in their userdata
    jid_snapshots = [x for x in list_snapshots(config)
                     if x['userdata'].get("salt_jid") == jid]
    pre_snapshot = [x for x in jid_snapshots if x['type'] == "pre"]
    post_snapshot = [x for x in jid_snapshots if x['type'] == "post"]
    if not pre_snapshot or not post_snapshot:
        raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
    # Only the first matching pre/post pair is returned
    return (
        pre_snapshot[0]['id'],
        post_snapshot[0]['id']
    )
Returns pre/post snapshots made by a given Salt jid Looks for 'salt_jid' entries into snapshots userdata which are created when 'snapper.run' is executed.
def dataframe(self):
    """
    Returns a pandas DataFrame containing all other class properties and
    values. The index for the DataFrame is the string URI that is used to
    instantiate the class, such as '2018-01-08-georgia'.

    Returns None when neither team has a points total, which indicates
    that no data was parsed for the game.
    """
    # No score for either side means the boxscore was empty or unparsed
    if self._away_points is None and self._home_points is None:
        return None
    fields_to_include = {
        'away_first_downs': self.away_first_downs,
        'away_fumbles': self.away_fumbles,
        'away_fumbles_lost': self.away_fumbles_lost,
        'away_interceptions': self.away_interceptions,
        'away_pass_attempts': self.away_pass_attempts,
        'away_pass_completions': self.away_pass_completions,
        'away_pass_touchdowns': self.away_pass_touchdowns,
        'away_pass_yards': self.away_pass_yards,
        'away_penalties': self.away_penalties,
        'away_points': self.away_points,
        'away_rush_attempts': self.away_rush_attempts,
        'away_rush_touchdowns': self.away_rush_touchdowns,
        'away_rush_yards': self.away_rush_yards,
        'away_total_yards': self.away_total_yards,
        'away_turnovers': self.away_turnovers,
        'away_yards_from_penalties': self.away_yards_from_penalties,
        'date': self.date,
        'home_first_downs': self.home_first_downs,
        'home_fumbles': self.home_fumbles,
        'home_fumbles_lost': self.home_fumbles_lost,
        'home_interceptions': self.home_interceptions,
        'home_pass_attempts': self.home_pass_attempts,
        'home_pass_completions': self.home_pass_completions,
        'home_pass_touchdowns': self.home_pass_touchdowns,
        'home_pass_yards': self.home_pass_yards,
        'home_penalties': self.home_penalties,
        'home_points': self.home_points,
        'home_rush_attempts': self.home_rush_attempts,
        'home_rush_touchdowns': self.home_rush_touchdowns,
        'home_rush_yards': self.home_rush_yards,
        'home_total_yards': self.home_total_yards,
        'home_turnovers': self.home_turnovers,
        'home_yards_from_penalties': self.home_yards_from_penalties,
        'losing_abbr': self.losing_abbr,
        'losing_name': self.losing_name,
        'stadium': self.stadium,
        'time': self.time,
        'winner': self.winner,
        'winning_abbr': self.winning_abbr,
        'winning_name': self.winning_name
    }
    # Single-row frame indexed by the boxscore URI
    return pd.DataFrame([fields_to_include], index=[self._uri])
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string URI that is used to instantiate the class, such as '2018-01-08-georgia'.
def construct_notebook_index(title, pthlst, pthidx):
    """Build a markdown-format index string for a list of notebook paths.

    `title` is the heading for the index, `pthlst` the list of paths to
    link, and `pthidx` a dict mapping each path to its link label.
    """
    # Title cell, then open the string for the list entries
    pieces = ['"""\n## %s\n"""\n\n"""' % title]
    for pth in pthlst:
        # A .py file links to the converted notebook; anything else is
        # assumed to be a directory holding an index notebook
        if pth.endswith('.py'):
            target = os.path.splitext(pth)[0] + '.ipynb'
        else:
            target = os.path.join(pth, 'index.ipynb')
        pieces.append('- [%s](%s)\n' % (pthidx[pth], target))
    pieces.append('"""')
    return ''.join(pieces)
Construct a string containing a markdown format index for the list of paths in `pthlst`. The title for the index is in `title`, and `pthidx` is a dict giving label text for each path.
def axis_as_object(arr, axis=-1):
    """cast the given axis of an array to a void object

    if the axis to be cast is contiguous, a view is returned, otherwise a
    copy is made. this is useful for efficiently sorting by the content
    of an axis, for instance

    Parameters
    ----------
    arr : ndarray
        array to view as void object type
    axis : int
        axis to view as a void object type

    Returns
    -------
    ndarray
        array with the given axis viewed as a void object
    """
    original_shape = arr.shape
    # move the target axis to the end so its items sit contiguously
    rolled = np.ascontiguousarray(np.rollaxis(arr, axis, arr.ndim))
    # one void element spans every item along the collapsed axis
    item_bytes = rolled.dtype.itemsize * original_shape[axis]
    void_dtype = np.dtype((np.void, item_bytes))
    # reinterpret and drop the collapsed axis from the shape
    remaining_shape = np.delete(original_shape, axis)
    return rolled.view(void_dtype).reshape(remaining_shape)
cast the given axis of an array to a void object if the axis to be cast is contiguous, a view is returned, otherwise a copy is made this is useful for efficiently sorting by the content of an axis, for instance Parameters ---------- arr : ndarray array to view as void object type axis : int axis to view as a void object type Returns ------- ndarray array with the given axis viewed as a void object
def Not(x, simplify=True):
    """Expression negation operator

    If *simplify* is ``True``, return a simplified expression.

    :param x: object coercible to an expression via ``Expression.box``
    :param simplify: simplify the negated node before wrapping
    :return: the negated expression wrapped by ``_expr``
    """
    # Coerce the argument into an expression node
    x = Expression.box(x).node
    y = exprnode.not_(x)
    if simplify:
        y = y.simplify()
    return _expr(y)
Expression negation operator If *simplify* is ``True``, return a simplified expression.
def _destroy(self):
    """Destruction code to decrement counters

    Releases our region and, when no other clients of the regions list
    remain, removes the mapped file's entry from the manager.
    """
    self.unuse_region()
    if self._rlist is not None:
        # Actual client count, which doesn't include the reference kept by the manager, nor ours
        # as we are about to be deleted
        try:
            if len(self._rlist) == 0:
                # Free all resources associated with the mapped file
                self._manager._fdict.pop(self._rlist.path_or_fd())
            # END remove regions list from manager
        except (TypeError, KeyError):
            # sometimes, during shutdown, getrefcount is None. Its possible
            # to re-import it, however, its probably better to just ignore
            # this python problem (for now).
            # The next step is to get rid of the error prone getrefcount alltogether.
            pass
Destruction code to decrement counters
def _isomorphisms(q, g, check_varprops=True): """ Inspired by Turbo_ISO: http://dl.acm.org/citation.cfm?id=2465300 """ # convert MRSs to be more graph-like, and add some indices qig = _IsoGraph(q, varprops=check_varprops) # qig = q isograph gig = _IsoGraph(g, varprops=check_varprops) # gig = q isograph # qsigs, qsigidx = _isomorphism_sigs(q, check_varprops) # gsigs, gsigidx = _isomorphism_sigs(g, check_varprops) # (it would be nice to not have to do this... maybe later) # qadj = _isomorphism_adj(q, qsigidx) # gadj = _isomorphism_adj(g, gsigidx) # the degree of each node is useful (but can it be combined with adj?) # qdeg = _isomorphism_deg(qadj) # gdeg = _isomorphism_deg(gadj) u_s = _isomorphism_choose_start_q_vertex(qig, gig, subgraph=False) q_ = _isomorphism_rewrite_to_NECtree(u_s, qig) for v_s in gsigs.get(qsigidx[u_s], []): cr = _isomorphism_explore_CR(q_, {v_s}, qig, gig) if cr is None: continue order = _isomorphism_determine_matching_order(q_, cr) update_state(M,F,{u_s}, {v_s}) subraph_search(q, q_, g, order, 1) # 1="the first query vertex to match" restore_state(M, F, {u_s}, {v_s})
Inspired by Turbo_ISO: http://dl.acm.org/citation.cfm?id=2465300
def clear_callbacks(obj):
    """Remove all callbacks from an object."""
    existing = obj._callbacks
    if isinstance(existing, dllist):
        # Breaking the internal links of the list helps the garbage
        # collector reclaim the nodes.
        existing.clear()
    obj._callbacks = None
Remove all callbacks from an object.
def keep_sources(self, keep):
    """Keep only the specified sources in the decomposition.

    Every source whose index is not listed in `keep` is removed.
    Returns self to allow chaining.
    """
    if self.unmixing_ is None or self.mixing_ is None:
        raise RuntimeError("No sources available (run do_mvarica first)")
    all_indices = np.arange(self.mixing_.shape[0])
    discard = np.setdiff1d(all_indices, np.array(keep))
    self.remove_sources(discard)
    return self
Keep only the specified sources in the decomposition.
def build_table(self, table, force=False):
    """Build all of the sources for a table

    Resolves the table's sources, builds each one, then unifies the
    resulting partitions.
    """
    for src in self._resolve_sources(None, [table]):
        self.build_source(None, src, force=force)
    self.unify_partitions()
Build all of the sources for a table
def x10_unitcode(self):
    """Emit the X10 unit code, or None for non-X10 devices."""
    if not self.is_x10:
        return None
    # Third address byte holds the raw unit code for X10 devices
    return insteonplm.utils.byte_to_unitcode(self.addr[2])
Emit the X10 unit code.
def start_state_id(self, start_state_id, to_outcome=None):
    """Set the start state of the container state

    See property

    :param start_state_id: The state id of the state which should be executed first in the Container state
    :param to_outcome: outcome to connect the start transition to; only
        meaningful together with a ``None`` start state (see review note
        below)
    :raises exceptions.ValueError: if the start_state_id does not exist in
        :py:attr:`rafcon.core.states.container_state.ContainerState.states`
    """
    if start_state_id is not None and start_state_id not in self.states:
        raise ValueError("start_state_id does not exist")

    if start_state_id is None and to_outcome is not None:
        # this is the case if the start state is the state itself
        if to_outcome not in self.outcomes:
            raise ValueError("to_outcome does not exist")
        # NOTE(review): start_state_id is None inside this branch, so this
        # comparison can seemingly never hold unless state_id is None —
        # confirm the intended contract with callers.
        if start_state_id != self.state_id:
            raise ValueError("to_outcome defined but start_state_id is not state_id")

    # First we remove the transition to the start state
    for transition_id in self.transitions:
        if self.transitions[transition_id].from_state is None:
            # If the current start state is the same as the old one, we don't have to do anything
            if self.transitions[transition_id].to_state == start_state_id:
                return
            self.remove_transition(transition_id)
            break

    if start_state_id is not None:
        self.add_transition(None, None, start_state_id, to_outcome)
Set the start state of the container state See property :param start_state_id: The state id of the state which should be executed first in the Container state :raises exceptions.ValueError: if the start_state_id does not exist in :py:attr:`rafcon.core.states.container_state.ContainerState.states`
def xml_report(self, file_path):
    """Generate and save XML report"""
    self.logger.debug('Generating XML report')
    # Pull the report from ZAP and persist it at the requested path
    self._write_report(self.zap.core.xmlreport(), file_path)
Generate and save XML report
def print_status(total, current, start_time=None):
    """
    Show how much work was done / how much work is remaining.

    Parameters
    ----------
    total : float
        The total amount of work
    current : float
        The work that has been done so far
    start_time : int
        The start time in seconds since 1970 to estimate the remaining
        time.
    """
    percentage_done = float(current) / total
    sys.stdout.write("\r%0.2f%% " % (percentage_done * 100))
    # Only estimate when a start time is given and some progress has been
    # made; otherwise extrapolation would divide by zero.
    if start_time is not None and percentage_done > 0:
        elapsed = time.time() - start_time
        # elapsed / fraction is the projected *total* runtime; subtract
        # what has already elapsed to get the remainder.  (The previous
        # code reported the projected total as "remaining".)
        remaining_seconds = elapsed / percentage_done - elapsed
        tmp = datetime.timedelta(seconds=remaining_seconds)
        sys.stdout.write("(%s remaining) " % str(tmp))
    sys.stdout.flush()
Show how much work was done / how much work is remaining. Parameters ---------- total : float The total amount of work current : float The work that has been done so far start_time : int The start time in seconds since 1970 to estimate the remaining time.
def Rzderiv(self, R, Z, phi=0., t=0.):
    """
    NAME:
       Rzderiv
    PURPOSE:
       evaluate the mixed R,z derivative
    INPUT:
       R - Galactocentric radius (can be Quantity)
       Z - vertical height (can be Quantity)
       phi - Galactocentric azimuth (can be Quantity)
       t - time (can be Quantity)
    OUTPUT:
       d2phi/dz/dR
    HISTORY:
       2013-08-26 - Written - Bovy (IAS)
    """
    try:
        # Scale the subclass-provided raw derivative by the amplitude
        return self._amp * self._Rzderiv(R, Z, phi=phi, t=t)
    except AttributeError:  # pragma: no cover
        # Subclass did not implement _Rzderiv
        raise PotentialError("'_Rzderiv' function not implemented for this potential")
NAME: Rzderiv PURPOSE: evaluate the mixed R,z derivative INPUT: R - Galactocentric radius (can be Quantity) Z - vertical height (can be Quantity) phi - Galactocentric azimuth (can be Quantity) t - time (can be Quantity) OUTPUT: d2phi/dz/dR HISTORY: 2013-08-26 - Written - Bovy (IAS)
def update_mode(arg_namespace):
    """Check command line arguments and run update function."""
    try:
        updater.update(custom_sources=arg_namespace.custom)
    except PermissionError:
        print('No write permission for current working directory.')
    except FileNotFoundError:
        print('Necessary resources for updating not found in current '
              'working directory.')
Check command line arguments and run update function.
def parse_args():
    '''
    Parse the script arguments
    '''
    parser = optparse.OptionParser()
    parser.add_option("-v", "--verbose", action="store_true")

    # Program mode selection; defaults to updating Nagios status
    mode_group = optparse.OptionGroup(parser, "Program Mode")
    mode_group.add_option("-u", "--update-status", action="store_const",
                          dest="mode", const="update_status")
    mode_group.add_option("-g", "--generate-cfg", action="store_const",
                          dest="mode", const="generate_cfg")
    parser.add_option_group(mode_group)
    parser.set_defaults(mode="update_status")

    # Connection settings for the Cloudera Manager API
    general_options = optparse.OptionGroup(parser, "CM API Configuration")
    general_options.add_option("-H", "--host", metavar="HOST",
                               help="CM API hostname")
    general_options.add_option("-p", "--port", help="CM API port", default=None)
    general_options.add_option("-P", "--passfile", metavar="FILE",
                               help="File containing CM API username and password, "
                               "colon-delimited on a single line. E.g. "
                               "\"user:pass\"")
    general_options.add_option("--use-tls", action="store_true",
                               help="Use TLS", default=False)
    parser.add_option_group(general_options)

    # Options used only by the status-update mode
    polling_options = optparse.OptionGroup(parser, "Status Update Options")
    polling_options.add_option("-c", "--cmd-file", metavar="FILE",
                               help="Path to the file that Nagios checks for "
                               "external command requests.")
    polling_options.add_option("-n", "--use-send-nsca", action="store_true",
                               default=False,
                               help="Use send_nsca to report status via a nsca "
                               "daemon. When using this option, the "
                               "send_nsca program must be available and the "
                               "nsca daemon host and port must be provided."
                               "Default is false.")
    polling_options.add_option("--send-nsca-path", metavar="PATH",
                               default="/usr/sbin/send_nsca",
                               help="Path to send_nsca, default is "
                               "/usr/sbin/send_nsca")
    polling_options.add_option("--nsca-host", metavar="HOST",
                               default="localhost",
                               help="When using send_nsca, the hostname of NSCA "
                               "server, default is localhost.")
    polling_options.add_option("--nsca-port", metavar="PORT", default=None,
                               help="When using send_nsca, the port on which the "
                               "server is running, default is 5667.")
    polling_options.add_option("--send-nsca-config", metavar="FILE", default=None,
                               help="Config file passed to send_nsca -c. Default"
                               " is to not specify the config parameter.")
    parser.add_option_group(polling_options)

    # Options used only by the config-generation mode
    generate_options = optparse.OptionGroup(parser, "Generate Config Options")
    generate_options.add_option("--cfg-dir", metavar="DIR", default=getcwd(),
                                help="Directory for generated Nagios cfg files.")
    parser.add_option_group(generate_options)

    (options, args) = parser.parse_args()

    '''
    Parse the 'passfile' - it must contain the username and password,
    colon-delimited on a single line. E.g.:
    $ cat ~/protected/cm_pass
    admin:admin
    '''
    # Validate required options; cmd_file is only needed when reporting
    # through the Nagios command file rather than send_nsca
    required = ["host", "passfile"]
    if options.mode == "update_status":
        if not options.use_send_nsca:
            required.append("cmd_file")
    for required_opt in required:
        if getattr(options, required_opt) is None:
            parser.error("Please specify the required argument: --%s" %
                         (required_opt.replace('_','-'),))
    return (options, args)
Parse the script arguments
def unpack_all(self, all_packed, devices):
    """
    Unpack gradients on their respective devices.

    Args:
        all_packed: K lists of packed gradients.
        devices: devices to run the unpacking on, one per list.

    Returns:
        list: unpacked gradients, one list per GPU (#GPU x #Var).
    """
    unpacked = []
    for device, packed_single_device in zip(devices, all_packed):
        with tf.device(device):
            unpacked.append(self.unpack(packed_single_device))
    return unpacked
Args: all_packed: K lists of packed gradients.
def wait_for_element(self, timeout=None, message='', *args, **kwds):
    """
    Shortcut for waiting for element. If it does not end with an
    exception, it returns that element. Default timeout is
    `~.default_wait_timeout`.

    Same as following:

    .. code-block:: python

        selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: driver.get_elm(...))

    .. versionchanged:: 2.5
        Waits only for visible elements.

    .. versionchanged:: 2.6
        Returned functionality back in favor of new method
        :py:meth:`~._WebdriverBaseWrapper.wait_for_element_show`.
    """
    if not timeout:
        timeout = self.default_wait_timeout
    if not message:
        message = _create_exception_msg(*args, url=self.current_url, **kwds)
    self.wait(timeout).until(lambda driver: driver.get_elm(*args, **kwds), message=message)
    # Also return that element for which is waiting.
    elm = self.get_elm(*args, **kwds)
    return elm
Shortcut for waiting for element. If it does not end with an exception, it returns that element. Default timeout is `~.default_wait_timeout`. Same as following: .. code-block:: python selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: driver.get_elm(...)) .. versionchanged:: 2.5 Waits only for visible elements. .. versionchanged:: 2.6 Returned functionality back in favor of new method :py:meth:`~._WebdriverBaseWrapper.wait_for_element_show`.
def _get_cache_dates(self):
    """
    Get a list of dates (:py:class:`datetime.datetime`) present in cache,
    beginning with the longest contiguous set of dates that isn't missing
    more than one date in series.

    :return: list of datetime objects for contiguous dates in cache
    :rtype: ``list``
    """
    all_dates = self.cache.get_dates_for_project(self.project_name)
    dates = []
    last_date = None
    for val in sorted(all_dates):
        if last_date is None:
            last_date = val
            continue
        # A gap of more than 48 hours (i.e. more than one missing daily
        # entry) restarts the contiguous series at the current date.
        if val - last_date > timedelta(hours=48):
            # reset dates to start from here
            logger.warning("Last cache date was %s, current date is %s; "
                           "delta is too large. Starting cache date series "
                           "at current date.", last_date, val)
            dates = []
        last_date = val
        dates.append(val)
    # find the first download record, and only look at dates after that
    for idx, cache_date in enumerate(dates):
        data = self._cache_get(cache_date)
        if not self._is_empty_cache_record(data):
            logger.debug("First cache date with data: %s", cache_date)
            return dates[idx:]
    return dates
Get a list of dates (:py:class:`datetime.datetime`) present in cache, beginning with the longest contiguous set of dates that isn't missing more than one date in series. :return: list of datetime objects for contiguous dates in cache :rtype: ``list``
def compute_ranges(self, obj, key, ranges):
    """
    Given an object, a specific key, and the normalization options,
    this method will find the specified normalization options on the
    appropriate OptionTree, group the elements according to the selected
    normalization option (i.e. either per frame or over the whole
    animation) and finally compute the dimension ranges in each group.
    The new set of ranges is returned.
    """
    # NOTE(review): obj is traversed before the None check below —
    # passing obj=None would raise here; confirm callers never do.
    all_table = all(isinstance(el, Table)
                    for el in obj.traverse(lambda x: x, [Element]))
    if obj is None or not self.normalize or all_table:
        return OrderedDict()
    # Get inherited ranges
    ranges = self.ranges if ranges is None else dict(ranges)

    # Get element identifiers from current object and resolve
    # with selected normalization options
    norm_opts = self._get_norm_opts(obj)

    # Traverse displayed object if normalization applies
    # at this level, and ranges for the group have not
    # been supplied from a composite plot
    return_fn = lambda x: x if isinstance(x, Element) else None
    for group, (axiswise, framewise) in norm_opts.items():
        elements = []
        # Skip if ranges are cached or already computed by a
        # higher-level container object.
        # NOTE(review): `elements` is always empty at this point, so the
        # `len(elements) == 1` clause can never be True — presumably a
        # leftover from an earlier version; confirm before relying on it.
        framewise = framewise or self.dynamic or len(elements) == 1
        if group in ranges and (not framewise or ranges is not self.ranges):
            continue
        elif not framewise:
            # Traverse to get all elements
            elements = obj.traverse(return_fn, [group])
        elif key is not None:
            # Traverse to get elements for each frame
            frame = self._get_frame(key)
            elements = [] if frame is None else frame.traverse(return_fn, [group])
        # Only compute ranges if not axiswise on a composite plot
        # or not framewise on a Overlay or ElementPlot
        if (not (axiswise and not isinstance(obj, HoloMap)) or
                (not framewise and isinstance(obj, HoloMap))):
            self._compute_group_range(group, elements, ranges)
    self.ranges.update(ranges)
    return ranges
Given an object, a specific key, and the normalization options, this method will find the specified normalization options on the appropriate OptionTree, group the elements according to the selected normalization option (i.e. either per frame or over the whole animation) and finally compute the dimension ranges in each group. The new set of ranges is returned.
def set(self, name, value, **kw):
    """Set the field to the given value.

    The keyword arguments represent the other field values to integrate
    constraints to other values.
    """
    # Look up the field on the context; bail out when it doesn't exist
    field = api.get_field(self.context, name)
    if not field:
        return False
    # Delegate the actual write to the field's manager adapter
    return IFieldManager(field).set(self.context, value, **kw)
Set the field to the given value. The keyword arguments represent the other field values to integrate constraints to other values.
def update_selection_sm_prior(self):
    """State machine prior update of tree selection

    Synchronizes the tree view's selection with the state machine's
    selection, guarded against re-entrant updates.
    """
    # Re-entrancy guard: the select/unselect calls below trigger
    # callbacks that would call back into this method.
    if self._do_selection_update:
        return
    self._do_selection_update = True
    tree_selection, selected_model_list, sm_selection, sm_selected_model_list = self.get_selections()
    if tree_selection is not None:
        for path, row in enumerate(self.list_store):
            model = row[self.MODEL_STORAGE_ID]
            # Deselect rows no longer selected in the state machine
            if model not in sm_selected_model_list and model in selected_model_list:
                tree_selection.unselect_path(Gtk.TreePath.new_from_indices([path]))
            # Select rows newly selected in the state machine
            if model in sm_selected_model_list and model not in selected_model_list:
                tree_selection.select_path(Gtk.TreePath.new_from_indices([path]))
    self._do_selection_update = False
State machine prior update of tree selection
def _encode_codepage(codepage, text):
    """
    Args:
        codepage (int)
        text (text)
    Returns:
        `bytes`

    Encode text using the given code page. Will not fail if a char
    can't be encoded using that codepage.
    """
    assert isinstance(text, text_type)

    if not text:
        return b""

    # Number of UTF-16 code units in `text`, which is the length unit
    # WideCharToMultiByte expects
    size = (len(text.encode("utf-16-le", _surrogatepass)) //
            ctypes.sizeof(winapi.WCHAR))

    # get the required buffer size
    length = winapi.WideCharToMultiByte(
        codepage, 0, text, size, None, 0, None, None)
    if length == 0:
        raise ctypes.WinError()

    # decode to the buffer
    buf = ctypes.create_string_buffer(length)
    length = winapi.WideCharToMultiByte(
        codepage, 0, text, size, buf, length, None, None)
    if length == 0:
        raise ctypes.WinError()
    return buf[:length]
Args: codepage (int) text (text) Returns: `bytes` Encode text using the given code page. Will not fail if a char can't be encoded using that codepage.
async def _replace(self, key: Text, data: Dict[Text, Any]) -> None:
    """
    Replace the register with a new value.
    """
    serialized = ujson.dumps(data)
    register = self.register_key(key)
    with await self.pool as conn:
        await conn.set(register, serialized)
Replace the register with a new value.
def gui():
    """Main function

    Runs the pygame window and event loop until the user quits;
    returns 0 on exit.
    """
    global SCREEN_SIZE
    # #######
    # setup all objects
    # #######
    os.environ['SDL_VIDEO_CENTERED'] = '1'  # centers the windows
    screen = new_screen()
    pygame.display.set_caption('Empty project')
    # Restrict the event queue to the event types we handle
    pygame.event.set_allowed([QUIT, KEYDOWN, MOUSEBUTTONDOWN])
    clock = pygame.time.Clock()
    fps = FPSIndicator(clock)
    while True:
        # #######
        # Input loop
        # #######
        # mouse = pygame.mouse.get_pos()
        for e in pygame.event.get():
            if e.type == QUIT:
                return 0
            elif e.type == KEYDOWN:
                if e.key == K_ESCAPE:
                    return 0
                if e.key == K_F4 and e.mod & KMOD_ALT:  # Alt+F4 --> quits
                    return 0
            # NOTE(review): VIDEORESIZE is not in the set_allowed list
            # above, so this branch may never fire — confirm.
            if e.type == VIDEORESIZE:
                SCREEN_SIZE = e.size
                screen = new_screen()
        # #######
        # Draw all
        # #######
        screen.fill(WHITE)
        fps.render(screen)
        pygame.display.update()
        clock.tick(FPS)
Main function
def file_exists(original_file):
    """ Validate the original file is in the S3 bucket """
    bucket_name, object_key = _parse_s3_file(original_file)
    s3 = boto3.resource('s3')
    # Listing by prefix: exactly one match means the object exists
    matches = list(s3.Bucket(bucket_name).objects.filter(Prefix=object_key))
    logger.debug("Bucket List: {0}".format(", ".join([x.key for x in matches])))
    logger.debug("bucket_list length: {0}".format(len(matches)))
    return len(matches) == 1
Validate the original file is in the S3 bucket
def run_activity(self):
    """
    runs the method that referenced from current task
    """
    activity = self.current.activity
    if not activity:
        return
    # Lazily load the activity on first use
    if activity not in self.wf_activities:
        self._load_activity(activity)
    self.current.log.debug(
        "Calling Activity %s from %s" % (activity, self.wf_activities[activity]))
    self.wf_activities[self.current.activity](self.current)
runs the method that referenced from current task
def explode(self, vector=None, origin=None):
    """
    Explode a scene around a point and vector.

    Parameters
    -----------
    vector : (3,) float or float
        Explode radially around a direction vector or spherically
    origin : (3,) float
        Point to explode around
    """
    if origin is None:
        origin = self.centroid
    if vector is None:
        vector = self.scale / 25.0
    vector = np.asanyarray(vector, dtype=np.float64)
    origin = np.asanyarray(origin, dtype=np.float64)
    for node_name in self.graph.nodes_geometry:
        transform, geometry_name = self.graph[node_name]
        centroid = self.geometry[geometry_name].centroid
        # transform centroid into nodes location
        centroid = np.dot(transform, np.append(centroid, 1))[:3]
        if vector.shape == ():
            # case where our vector is a single number: push the node
            # away from the origin spherically
            offset = (centroid - origin) * vector
        elif np.shape(vector) == (3,):
            # directional explode: offset along the vector by the
            # projection of the centroid offset onto it
            projected = np.dot(vector, (centroid - origin))
            offset = vector * projected
        else:
            raise ValueError('explode vector wrong shape!')
        # translate the node's transform and store it back in the graph
        transform[0:3, 3] += offset
        self.graph[node_name] = transform
Explode a scene around a point and vector. Parameters ----------- vector : (3,) float or float Explode radially around a direction vector or spherically origin : (3,) float Point to explode around
def _create_ec2_instance():
    """
    Creates EC2 Instance
    """
    print(_yellow("Creating instance"))
    conn = boto.ec2.connect_to_region(
        ec2_region,
        aws_access_key_id=fabconf['AWS_ACCESS_KEY'],
        aws_secret_access_key=fabconf['AWS_SECRET_KEY'])

    # launch a single instance from the first matching AMI
    image = conn.get_all_images(ec2_amis)[0]
    instance = image.run(1, 1, ec2_keypair, ec2_secgroups,
                         instance_type=ec2_instancetype).instances[0]
    conn.create_tags([instance.id], {"Name": fabconf['INSTANCE_NAME_TAG']})

    # poll until the instance leaves the 'pending' state
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()

    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name
Creates EC2 Instance
def options(self, context, module_options):
    '''
    PATH     Path to the file containing raw shellcode to inject
    PROCID   Process ID to inject into (default: current powershell process)
    '''
    # PATH is mandatory: bail out early when it is missing or invalid.
    # (idiomatic `not in` membership test instead of `not 'PATH' in ...`)
    if 'PATH' not in module_options:
        context.log.error('PATH option is required!')
        exit(1)

    self.shellcode_path = os.path.expanduser(module_options['PATH'])
    if not os.path.exists(self.shellcode_path):
        context.log.error('Invalid path to shellcode!')
        exit(1)

    # PROCID is optional; None means "inject into the current process".
    # dict.get replaces the `in module_options.keys()` membership dance.
    self.procid = module_options.get('PROCID')

    self.ps_script = obfs_ps_script('powersploit/CodeExecution/Invoke-Shellcode.ps1')
PATH Path to the file containing raw shellcode to inject PROCID Process ID to inject into (default: current powershell process)
def closest_point_naive(mesh, points):
    """
    Given a mesh and a list of points find the closest point
    on any triangle.

    Does this by constructing a very large intermediate array and
    comparing every point to every triangle.

    Parameters
    ----------
    mesh : Trimesh
      Takes mesh to have same interfaces as `closest_point`
    points : (m, 3) float
      Points in space

    Returns
    ----------
    closest : (m, 3) float
      Closest point on triangles for each point
    distance : (m,) float
      Distances between point and triangle
    triangle_id : (m,) int
      Index of triangle containing closest point
    """
    triangles = mesh.triangles.view(np.ndarray)
    points = np.asanyarray(points, dtype=np.float64)

    # sanity-check shapes before doing the expensive broadcast
    if not util.is_shape(triangles, (-1, 3, 3)):
        raise ValueError('triangles shape incorrect')
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)')

    # repeat every query point once per triangle
    tiled = np.tile(points, (1, len(triangles)))

    # candidate closest points: one row of len(triangles) candidates
    # per query point
    candidates = np.array([
        closest_point_corresponding(triangles, row.reshape((-1, 3)))
        for row in tiled])

    # squared distance from each query point to each of its candidates
    dist_2 = [((cand - pt) ** 2).sum(axis=1)
              for cand, pt in zip(candidates, points)]

    triangle_id = np.array([d.argmin() for d in dist_2])
    closest = np.array([c[i] for i, c in zip(triangle_id, candidates)])
    distance = np.array([d[i] for i, d in zip(triangle_id, dist_2)]) ** .5
    return closest, distance, triangle_id
Given a mesh and a list of points find the closest point on any triangle. Does this by constructing a very large intermediate array and comparing every point to every triangle. Parameters ---------- mesh : Trimesh Takes mesh to have same interfaces as `closest_point` points : (m, 3) float Points in space Returns ---------- closest : (m, 3) float Closest point on triangles for each point distance : (m,) float Distances between point and triangle triangle_id : (m,) int Index of triangle containing closest point
def read_data(archive, arc_type, day, stachans, length=86400):
    """
    Function to read the appropriate data from an archive for a day.

    :type archive: str
    :param archive:
        The archive source - if arc_type is seishub, this should be a url,
        if the arc_type is FDSN then this can be either a url or a known
        obspy client. If arc_type is day_vols, then this is the path to
        the top directory.
    :type arc_type: str
    :param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
    :type day: datetime.date
    :param day: Date to retrieve data for
    :type stachans: list
    :param stachans: List of tuples of Stations and channels to try and get,
        will not fail if stations are not available, but will warn.
    :type length: float
    :param length: Data length to extract in seconds, defaults to 1 day.

    :returns: Stream of data
    :rtype: obspy.core.stream.Stream

    .. note:: A note on arc_types, if arc_type is day_vols, then this will \
        look for directories labelled in the IRIS DMC conventions of \
        Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
        Data within these files directories should be stored as day-long, \
        single-channel files. This is not implemented in the fasted way \
        possible to allow for a more general situation. If you require more \
        speed you will need to re-write this.

    .. rubric:: Example

    >>> from obspy import UTCDateTime
    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('JCNB', 'SP1')]
    >>> st = read_data('NCEDC', 'FDSN', t1, stachans)
    >>> print(st)
    1 Trace(s) in Stream:
    BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

    .. rubric:: Example, missing data

    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
    >>> st = read_data('NCEDC', 'FDSN', t1, stachans)
    >>> print(st)
    1 Trace(s) in Stream:
    BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

    .. rubric:: Example, local day-volumes

    >>> # Get the path to the test data
    >>> import eqcorrscan
    >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
    >>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
    ...                t1, stachans)
    >>> print(st)
    2 Trace(s) in Stream:
    AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
    AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
    """
    st = []
    # ask the archive what is actually available so missing stations only
    # produce a warning instead of a failed request
    available_stations = _check_available_data(archive, arc_type, day)
    for station in stachans:
        if len(station[1]) == 2:
            # Cope with two char channel naming in seisan: expand e.g.
            # 'SZ' into the wildcard pattern 'S*Z' on both sides of the
            # comparison below
            station_map = (station[0], station[1][0] + '*' + station[1][1])
            available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
                                      for sta in available_stations]
        else:
            station_map = station
            available_stations_map = available_stations
        if station_map not in available_stations_map:
            # station/channel not in the archive for this day: warn, skip
            msg = ' '.join([station[0], station_map[1], 'is not available for',
                            day.strftime('%Y/%m/%d')])
            warnings.warn(msg)
            continue
        if arc_type.lower() == 'seishub':
            client = SeishubClient(archive)
            st += client.get_waveforms(
                network='*', station=station_map[0], location='*',
                channel=station_map[1], starttime=UTCDateTime(day),
                endtime=UTCDateTime(day) + length)
        elif arc_type.upper() == "FDSN":
            client = FDSNClient(archive)
            try:
                st += client.get_waveforms(
                    network='*', station=station_map[0], location='*',
                    channel=station_map[1], starttime=UTCDateTime(day),
                    endtime=UTCDateTime(day) + length)
            except FDSNException:
                # the availability check can be more optimistic than the
                # waveform service; degrade to a warning
                warnings.warn('No data on server despite station being ' +
                              'available...')
                continue
        elif arc_type.lower() == 'day_vols':
            # IRIS DMC style Yyyyy/Rjjj.01 directory layout (see docstring)
            wavfiles = _get_station_file(os.path.join(
                archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
                station_map[0], station_map[1])
            for wavfile in wavfiles:
                st += read(wavfile, starttime=day, endtime=day + length)
    st = Stream(st)
    return st
Function to read the appropriate data from an archive for a day. :type archive: str :param archive: The archive source - if arc_type is seishub, this should be a url, if the arc_type is FDSN then this can be either a url or a known obspy client. If arc_type is day_vols, then this is the path to the top directory. :type arc_type: str :param arc_type: The type of archive, can be: seishub, FDSN, day_volumes :type day: datetime.date :param day: Date to retrieve data for :type stachans: list :param stachans: List of tuples of Stations and channels to try and get, will not fail if stations are not available, but will warn. :type length: float :param length: Data length to extract in seconds, defaults to 1 day. :returns: Stream of data :rtype: obspy.core.stream.Stream .. note:: A note on arc_types, if arc_type is day_vols, then this will \ look for directories labelled in the IRIS DMC conventions of \ Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \ Data within these files directories should be stored as day-long, \ single-channel files. This is not implemented in the fasted way \ possible to allow for a more general situation. If you require more \ speed you will need to re-write this. .. rubric:: Example >>> from obspy import UTCDateTime >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('JCNB', 'SP1')] >>> st = read_data('NCEDC', 'FDSN', t1, stachans) >>> print(st) 1 Trace(s) in Stream: BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\ 950000Z | 20.0 Hz, 1728000 samples .. rubric:: Example, missing data >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')] >>> st = read_data('NCEDC', 'FDSN', t1, stachans) >>> print(st) 1 Trace(s) in Stream: BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\ 950000Z | 20.0 Hz, 1728000 samples .. 
rubric:: Example, local day-volumes >>> # Get the path to the test data >>> import eqcorrscan >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')] >>> st = read_data(TEST_PATH + '/day_vols', 'day_vols', ... t1, stachans) >>> print(st) 2 Trace(s) in Stream: AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \ | 1.0 Hz, 86400 samples AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \ | 1.0 Hz, 86400 samples
def virtual_memory():
    '''
    .. versionadded:: 2014.7.0

    Return a dict that describes statistics about system memory usage.

    .. note::
        This function is only available in psutil version 0.6.0 and above.

    CLI Example:

    .. code-block:: bash

        salt '*' ps.virtual_memory
    '''
    # the namedtuple-returning virtual_memory() API appeared in psutil 0.6.0
    if psutil.version_info < (0, 6, 0):
        raise CommandExecutionError(
            'virtual_memory is only available in psutil 0.6.0 or greater')
    return dict(psutil.virtual_memory()._asdict())
.. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory
def model_actions(self):
    """Return the model's actions for the stored rollout observations."""
    rollout_observations = self.get('rollout:observations')
    return self.model.action(rollout_observations)
Estimate state-value of the transition next state
def plot_doc_topic_heatmap(fig, ax, doc_topic_distrib, doc_labels, topic_labels=None,
                           which_documents=None, which_document_indices=None,
                           which_topics=None, which_topic_indices=None,
                           xaxislabel=None, yaxislabel=None,
                           **kwargs):
    """
    Plot a heatmap of a document-topic distribution `doc_topic_distrib` on
    matplotlib Figure `fig` / Axes `ax`, with `doc_labels` on the y-axis and
    topics 1..`doc_topic_distrib.shape[1]` on the x-axis (or custom
    `topic_labels`).

    A subset of documents may be selected either by label
    (`which_documents`) or by index (`which_document_indices`), but not
    both.  Likewise a subset of topics may be selected by 1-based topic
    number (`which_topics`) or by 0-based index (`which_topic_indices`).

    Extra keyword arguments are forwarded to `plot_heatmap`.  Selecting a
    subset is almost always necessary: plotting the full distribution is
    usually too dense to be readable.
    """
    if which_documents is not None and which_document_indices is not None:
        raise ValueError('only `which_documents` or `which_document_indices` can be set, not both')

    if which_topics is not None and which_topic_indices is not None:
        raise ValueError('only `which_topics` or `which_topic_indices` can be set, not both')

    # translate label-based selections into index-based selections
    if which_documents is not None:
        which_document_indices = np.where(np.isin(doc_labels, which_documents))[0]

    if which_topics is not None:
        # topic numbers are 1-based, indices are 0-based
        which_topic_indices = np.array(which_topics) - 1

    # default topic labels are the 1-based topic numbers
    if topic_labels is None:
        topic_labels = np.array(range(1, doc_topic_distrib.shape[1] + 1))
    elif not isinstance(topic_labels, np.ndarray):
        topic_labels = np.array(topic_labels)

    take_subset = False
    if which_document_indices is not None:
        take_subset = True
        doc_labels = np.array(doc_labels)[which_document_indices]
    if which_topic_indices is not None:
        take_subset = True
        topic_labels = topic_labels[which_topic_indices]

    if take_subset:
        doc_topic_distrib = mat2d_window_from_indices(doc_topic_distrib,
                                                      which_document_indices,
                                                      which_topic_indices)

    return plot_heatmap(fig, ax, doc_topic_distrib,
                        xaxislabel=xaxislabel or 'topic',
                        yaxislabel=yaxislabel or 'document',
                        xticklabels=topic_labels,
                        yticklabels=doc_labels,
                        **kwargs)
Plot a heatmap for a document-topic distribution `doc_topic_distrib` to a matplotlib Figure `fig` and Axes `ax` using `doc_labels` as document labels on the y-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on the x-axis. Custom topic labels can be passed as `topic_labels`. A subset of documents can be specified either with a sequence `which_documents` containing a subset of document labels from `doc_labels` or `which_document_indices` containing a sequence of document indices. A subset of topics can be specified either with a sequence `which_topics` containing sequence of numbers between [1, n_topics] or `which_topic_indices` which is a number between [0, n_topics-1] Additional arguments can be passed via `kwargs` to `plot_heatmap`. Please note that it is almost always necessary to select a subset of your document-topic distribution with the `which_documents` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high to give a reasonable picture.
def parameter_from_numpy(self, name, array):
    """Create a parameter whose value is initialized from a numpy tensor.

    Parameters
    ----------
    name : str
        parameter name
    array : np.ndarray
        initiation value

    Returns
    -------
    mxnet.gluon.parameter
        a parameter object
    """
    return self.params.get(name, shape=array.shape,
                           init=mx.init.Constant(array))
Create parameter with its value initialized according to a numpy tensor Parameters ---------- name : str parameter name array : np.ndarray initiation value Returns ------- mxnet.gluon.parameter a parameter object
def create_container(self, image, name=None, **kwargs):
    """
    Identical to :meth:`docker.api.container.ContainerApiMixin.create_container` with additional logging.
    """
    # mention the container name in the log line only when one was given
    suffix = " '{0}'".format(name) if name else ""
    self.push_log("Creating container{0} from image '{1}'.".format(suffix, image))
    return super(DockerFabricClient, self).create_container(image, name=name, **kwargs)
Identical to :meth:`docker.api.container.ContainerApiMixin.create_container` with additional logging.
def advance_dialog(self, *args):
    """Try to display the next dialog described in my ``todo``."""
    self.clear_widgets()
    try:
        pending = self.todo[self.idx]
        self._update_dialog(pending)
    except IndexError:
        # nothing (valid) left in ``todo``; leave the widgets cleared
        pass
Try to display the next dialog described in my ``todo``.
def main():
    """Build the package metadata and run setuptools' setup()."""
    setup(
        name="clamavmirror",
        version='0.0.4',
        description="ClamAV Signature Mirroring Tool",
        long_description=get_readme(),
        keywords="clamav mirror mirroring mirror-tool signatures",
        author="Andrew Colin Kissa",
        author_email="andrew@topdog.za.net",
        url="https://github.com/akissa/clamavmirror",
        license="MPL 2.0",
        packages=[],
        entry_points={
            'console_scripts': [
                'clamavmirror=clamavmirror:main'
            ],
        },
        include_package_data=True,
        zip_safe=False,
        install_requires=['urllib3', 'dnspython', 'certifi'],
        classifiers=[
            'Development Status :: 4 - Beta',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Intended Audience :: System Administrators',
            'Environment :: Console',
            'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
            'Natural Language :: English',
            'Operating System :: OS Independent'])
Main
def AExn(mt, x, n):
    """AExn : Returns the EPV of a endowment insurance.

    An endowment insurance provides a combination of a term insurance
    and a pure endowment, so the EPV is the sum of both components.
    """
    term_insurance = (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x]
    pure_endowment = mt.Dx[x + n] / mt.Dx[x]
    return term_insurance + pure_endowment
AExn : Returns the EPV of a endowment insurance. An endowment insurance provides a combination of a term insurance and a pure endowment
def plot_before_after_filter(signal, sr, band_begin, band_end, order=1, x_lim=[], y_lim=[], orientation="hor", show_plot=False, file_name=None): """ ----- Brief ----- The use of the current function is very useful for comparing two power spectrum's (before and after filtering the signal). This function invokes "plot_informational_band" in order to get the power spectrum before applying the signal to the lowpass filter. ----------- Description ----------- The FFT Power Spectrum, of an input signal, can be generated through plotfft function of novainstrumentation package (or periogram function of scipy package). The x axis (freqs) represents the frequency components of the signal, after decomposition was achieved by applying the Fourier Transform. The y axis (power) defines the relative weight of each frequency component (sinusoidal function) in the process of reconstructing the signal by re-summing of decomposition components. It is presented a 1x2 gridplot for compaing the differences in frequency composition of the signal under analysis (before and after filtering). Additionally, it is also graphically presented a rectangular box showing which are the frequency components with relevant information for studying our input physiological signal. Applied in the Notebook "Digital Filtering - A Fundamental Pre-Processing Step". ---------- Parameters ---------- signal : list List containing the acquired signal samples. sr : int Sampling rate. band_begin : float Lower frequency inside the signal informational band. band_end : float Higher frequency inside the signal informational band. order : int Filter order. x_lim : list A list with length equal to 2, defining the first and last x value that should be presented. y_lim : list A list with length equal to 2, defining the first and last y value that should be presented. orientation : str If "hor" then the generated figures will be joined together in an horizontal gridplot. 
When "vert" the gridplot will be a vertical grid and when "same" the plots are generated at the same figure. show_plot : bool If True then the generated figure/plot will be shown to the user. file_name : str Path containing the destination folder where the Bokeh figure will be stored. Returns ------- out : list List of Bokeh figures that compose the generated gridplot. """ # Generation of the HTML file where the plot will be stored. #file_name = _generate_bokeh_file(file_name) # Generation of FFT power spectrum accordingly to the filter order. for i in range(0, order + 1): # Initialisation and appending of data to the figures list. if i == 0: # Power spectrum freqs_after, power_after = plotfft(signal, sr) figure_after = plot_informational_band(freqs_after, power_after, signal, sr, band_begin, band_end, legend="Signal Power Spectrum", x_lim=x_lim, y_lim=y_lim) # List that store the figure handler list_figures = [[figure_after]] else: filter_signal = lowpass(signal, f=band_end, order=i, fs=sr) # Power spectrum freqs_after, power_after = plotfft(filter_signal, sr) if orientation != "same": figure_after = plot_informational_band(freqs_after, power_after, filter_signal, sr, band_begin, band_end, legend="Filtered FFT (Order " + str(i) + ")", x_lim=x_lim, y_lim=y_lim) # Append data accordingly to the desired direction of representation. if orientation == "hor": # Append to the figure list the power spectrum of the signal after filtering. list_figures[-1].append(figure_after) elif orientation == "vert": list_figures.append([figure_after]) else: list_figures[-1][0].line(freqs_after, power_after, legend="Filtered FFT (Order " + str(i) + ")", **opensignals_kwargs("line")) # Show gridplot. grid_plot_1 = gridplot(list_figures, **opensignals_kwargs("gridplot")) if show_plot is True: show(grid_plot_1) return list_figures
----- Brief ----- The use of the current function is very useful for comparing two power spectrum's (before and after filtering the signal). This function invokes "plot_informational_band" in order to get the power spectrum before applying the signal to the lowpass filter. ----------- Description ----------- The FFT Power Spectrum, of an input signal, can be generated through plotfft function of novainstrumentation package (or periogram function of scipy package). The x axis (freqs) represents the frequency components of the signal, after decomposition was achieved by applying the Fourier Transform. The y axis (power) defines the relative weight of each frequency component (sinusoidal function) in the process of reconstructing the signal by re-summing of decomposition components. It is presented a 1x2 gridplot for compaing the differences in frequency composition of the signal under analysis (before and after filtering). Additionally, it is also graphically presented a rectangular box showing which are the frequency components with relevant information for studying our input physiological signal. Applied in the Notebook "Digital Filtering - A Fundamental Pre-Processing Step". ---------- Parameters ---------- signal : list List containing the acquired signal samples. sr : int Sampling rate. band_begin : float Lower frequency inside the signal informational band. band_end : float Higher frequency inside the signal informational band. order : int Filter order. x_lim : list A list with length equal to 2, defining the first and last x value that should be presented. y_lim : list A list with length equal to 2, defining the first and last y value that should be presented. orientation : str If "hor" then the generated figures will be joined together in an horizontal gridplot. When "vert" the gridplot will be a vertical grid and when "same" the plots are generated at the same figure. show_plot : bool If True then the generated figure/plot will be shown to the user. 
file_name : str Path containing the destination folder where the Bokeh figure will be stored. Returns ------- out : list List of Bokeh figures that compose the generated gridplot.
async def _trigger_event(self, event, *args, **kwargs): """Invoke an event handler.""" run_async = kwargs.pop('run_async', False) ret = None if event in self.handlers: if asyncio.iscoroutinefunction(self.handlers[event]) is True: if run_async: return self.start_background_task(self.handlers[event], *args) else: try: ret = await self.handlers[event](*args) except asyncio.CancelledError: # pragma: no cover pass except: self.logger.exception(event + ' async handler error') if event == 'connect': # if connect handler raised error we reject the # connection return False else: if run_async: async def async_handler(): return self.handlers[event](*args) return self.start_background_task(async_handler) else: try: ret = self.handlers[event](*args) except: self.logger.exception(event + ' handler error') if event == 'connect': # if connect handler raised error we reject the # connection return False return ret
Invoke an event handler.
def _onerror(cls, kmsg, result):
    """
    To execute on execution failure

    :param kser.schemas.Message kmsg: Kafka message
    :param kser.result.Result result: Execution result
    :return: Execution result
    :rtype: kser.result.Result
    """
    # structured context for the log record; result may be None
    extra = dict(
        kmsg=kmsg.dump(),
        kresult=ResultSchema().dump(result) if result else dict())
    logger.error(
        "{}.Failed: {}[{}]: {}".format(cls.__name__, kmsg.entrypoint,
                                       kmsg.uuid, result),
        extra=extra)
    # delegate to the public hook for the actual error handling
    return cls.onerror(kmsg, result)
To execute on execution failure :param kser.schemas.Message kmsg: Kafka message :param kser.result.Result result: Execution result :return: Execution result :rtype: kser.result.Result
def set_base_path(self, value):
    """Munge in the base path into the configuration values

    :param str value: The path value
    """
    app_config = self.config.application
    if config.PATHS not in app_config:
        app_config[config.PATHS] = dict()
    # NOTE: an already-configured base path is deliberately left untouched
    if config.BASE not in app_config[config.PATHS]:
        app_config[config.PATHS][config.BASE] = value
Munge in the base path into the configuration values :param str value: The path value
def range(self, start, end=None, step=1, numPartitions=None):
    """
    Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
    ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
    step value ``step``.

    :param start: the start value
    :param end: the end value (exclusive)
    :param step: the incremental step (default: 1)
    :param numPartitions: the number of partitions of the DataFrame
    :return: :class:`DataFrame`

    >>> spark.range(1, 7, 2).collect()
    [Row(id=1), Row(id=3), Row(id=5)]

    If only one argument is specified, it will be used as the end value.

    >>> spark.range(3).collect()
    [Row(id=0), Row(id=1), Row(id=2)]
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism

    # single-argument form: range(end) with an implicit start of 0
    if end is None:
        start, end = 0, start

    jdf = self._jsparkSession.range(int(start), int(end), int(step),
                                    int(numPartitions))
    return DataFrame(jdf, self._wrapped)
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with step value ``step``. :param start: the start value :param end: the end value (exclusive) :param step: the incremental step (default: 1) :param numPartitions: the number of partitions of the DataFrame :return: :class:`DataFrame` >>> spark.range(1, 7, 2).collect() [Row(id=1), Row(id=3), Row(id=5)] If only one argument is specified, it will be used as the end value. >>> spark.range(3).collect() [Row(id=0), Row(id=1), Row(id=2)]
def StdStringVector_2_seq(vec, seq=None):
    """Converts a :class:`tango.StdStringVector` to a python sequence<str>

    :param vec: the :class:`tango.StdStringVector` to read from
    :type vec: :class:`tango.StdStringVector`
    :param seq: (optional, default is None) a python sequence to be filled.
                If None is given, a new list is created
    :return: a python sequence filled with the same contents as vec
    :rtype: sequence<str>
    """
    if seq is None:
        seq = []
    if not isinstance(vec, StdStringVector):
        raise TypeError('vec must be a tango.StdStringVector')
    # copy each element over as a plain str
    seq.extend(str(item) for item in vec)
    return seq
Converts a :class:`tango.StdStringVector` to a python sequence<str> :param seq: the :class:`tango.StdStringVector` :type seq: :class:`tango.StdStringVector` :param vec: (optional, default is None) a python sequence to be filled. If None is given, a new list is created :return: a python sequence filled with the same contents as seq :rtype: sequence<str>
def reverse(self):
    """
    Reverse the order of all items in the dictionary.

    Example:
      omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)])
      omd.reverse()
      omd.allitems() == [(3,3), (2,2), (1,111), (1,11), (1,1)]

    Returns: <self>.
    """
    # reverse each key's value list, then the flat ordered item list
    for value_list in self._map.values():
        value_list.reverse()
    self._items.reverse()
    return self
Reverse the order of all items in the dictionary. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.reverse() omd.allitems() == [(3,3), (2,2), (1,111), (1,11), (1,1)] Returns: <self>.
def sent_tokenize(self, text, **kwargs):
    """Returns a list of sentences.

    Each sentence is a space-separated string of tokens (words).
    Handles common cases of abbreviations (e.g., etc., ...).
    Punctuation marks are split from other words. Periods (or ?!) mark
    the end of a sentence. Headings without an ending period are
    inferred by line breaks.
    """
    return find_sentences(
        text,
        punctuation=kwargs.get("punctuation", PUNCTUATION),
        abbreviations=kwargs.get("abbreviations", ABBREVIATIONS_DE),
        replace=kwargs.get("replace", replacements),
        linebreak=r"\n{2,}")
Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks.
def run_job(self, **kwds):
    """
    Create a DRMAA job template, populate with specified properties,
    run the job, and return the external_job_id.
    """
    template = DrmaaSession.session.createJobTemplate()
    try:
        # copy every supplied property onto the template
        for attr, value in kwds.items():
            setattr(template, attr, value)
        # submission is serialized with the shared session lock
        with DrmaaSession.session_lock:
            return DrmaaSession.session.runJob(template)
    finally:
        # always release the template, even when submission fails
        DrmaaSession.session.deleteJobTemplate(template)
Create a DRMAA job template, populate with specified properties, run the job, and return the external_job_id.
def plotgwsrc(gwb):
    """
    Plot a GWB source population as a mollweide projection.
    """
    theta, phi, omega, polarization = gwb.gw_dist()

    # map (colatitude theta, longitude phi) onto mollweide coordinates
    rho = phi - N.pi
    eta = 0.5 * N.pi - theta

    # I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
    # /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
    # RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
    #old_settings = N.seterr(invalid='ignore')

    P.title("GWB source population")
    P.axes(projection='mollweide')
    scatter_handle = P.scatter(rho, eta, marker='.', s=1)
    #bar = N.seterr(**old_settings)

    return scatter_handle
Plot a GWB source population as a mollweide projection.
def could_be_unfinished_char(seq, encoding):
    """Whether seq bytes might create a char in encoding if more bytes were added"""
    if decodable(seq, encoding):
        # already a complete character; any sensible encoding surely doesn't
        # require lookahead, so more bytes cannot change that
        return False

    decoder = encodings.codecs.getdecoder(encoding)
    if decoder is encodings.codecs.getdecoder('utf8'):
        return could_be_unfinished_utf8(seq)
    if decoder is encodings.codecs.getdecoder('ascii'):
        # ascii is single-byte: an undecodable byte can never become valid
        return False
    # unknown encoding: conservatively assume more bytes could complete it
    return True
Whether seq bytes might create a char in encoding if more bytes were added
async def set_chat_photo(self, chat_id: typing.Union[base.Integer, base.String],
                         photo: base.InputFile) -> base.Boolean:
    """
    Use this method to set a new profile photo for the chat. Photos can't be changed
    for private chats. The bot must be an administrator in the chat for this to work
    and must have the appropriate admin rights.

    Note: In regular groups (non-supergroups), this method will only work if the
    'All Members Are Admins' setting is off in the target group.

    Source: https://core.telegram.org/bots/api#setchatphoto

    :param chat_id: Unique identifier for the target chat or username of the target channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param photo: New chat photo, uploaded using multipart/form-data
    :type photo: :obj:`base.InputFile`
    :return: Returns True on success
    :rtype: :obj:`base.Boolean`
    """
    # generate_payload() inspects locals(), so every parameter above becomes
    # part of the request payload; 'photo' is excluded because it is sent
    # as a multipart file rather than a plain field.
    payload = generate_payload(**locals(), exclude=['photo'])
    files = {}
    prepare_file(payload, files, 'photo', photo)

    result = await self.request(api.Methods.SET_CHAT_PHOTO, payload, files)
    return result
Use this method to set a new profile photo for the chat. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting is off in the target group. Source: https://core.telegram.org/bots/api#setchatphoto :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param photo: New chat photo, uploaded using multipart/form-data :type photo: :obj:`base.InputFile` :return: Returns True on success :rtype: :obj:`base.Boolean`
def sign_message(body: ByteString, secret: Text) -> Text:
    """Return the GitHub-webhook-style ``sha1=<hexdigest>`` signature of *body*.

    The digest is HMAC-SHA1 keyed with the UTF-8 encoding of *secret*.
    """
    digest = hmac.new(secret.encode(), body, sha1).hexdigest()
    return f'sha1={digest}'
Compute a message's signature.
def _notebook(trigger, note_store):
    """Build an Evernote note pre-wired with the trigger's notebook and tag.

    :param trigger: trigger object
    :param note_store: note_store object
    :return: note object
    """
    note = Types.Note()

    if trigger.notebook:
        # Resolve the notebook GUID; set_notebook creates it when missing.
        existing_id = EvernoteMgr.get_notebook(note_store, trigger.notebook)
        note.notebookGuid = EvernoteMgr.set_notebook(note_store, trigger.notebook, existing_id)

        if trigger.tag:
            # Same dance for the tag GUID when a tag was provided.
            tag_id = EvernoteMgr.get_tag(note_store, trigger.tag)
            if tag_id is False:
                tag_id = EvernoteMgr.set_tag(note_store, trigger.tag, tag_id)
            if tag_id:
                note.tagGuids = tag_id

        logger.debug("notebook that will be used %s", trigger.notebook)
    return note
:param trigger: trigger object :param note_store: note_store object :return: note object
def set_right_margin(self, right_margin):
    """
    Set the right margin of the menu. This will determine the number of spaces between the
    right edge of the screen and the right menu border.

    :param right_margin: an integer value
    """
    sections = (
        self.__header,
        self.__prologue,
        self.__items_section,
        self.__epilogue,
        self.__footer,
        self.__prompt,
    )
    # Apply the same margin to every menu section so the border stays aligned.
    for section in sections:
        section.style.margins.right = right_margin
    return self
Set the right margin of the menu. This will determine the number of spaces between the right edge of the screen and the right menu border. :param right_margin: an integer value
def in_array_list(array_list, a, tol=1e-5):
    """
    Extremely efficient nd-array comparison using numpy's broadcasting. This
    function checks if a particular array a, is present in a list of arrays.
    It works for arrays of any size, e.g., even matrix searches.

    Args:
        array_list ([array]): A list of arrays to compare to.
        a (array): The test array for comparison.
        tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is done.

    Returns:
        (bool)
    """
    if len(array_list) == 0:
        return False
    # Compare along every axis of `a`; axis 0 indexes the candidates.
    axes = tuple(range(1, a.ndim + 1))
    candidate = a[None, :]
    if tol:
        return np.any(np.sum(np.abs(array_list - candidate), axes) < tol)
    return np.any(np.all(np.equal(array_list, candidate), axes))
Extremely efficient nd-array comparison using numpy's broadcasting. This function checks if a particular array a, is present in a list of arrays. It works for arrays of any size, e.g., even matrix searches. Args: array_list ([array]): A list of arrays to compare to. a (array): The test array for comparison. tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is done. Returns: (bool)
def run(self, forever=True):
    """Start the bot: connect, install signal handlers, and (by default)
    block on the event loop until interrupted."""
    event_loop = self.create_connection()
    self.add_signal_handlers()
    if forever:
        event_loop.run_forever()
start the bot
def get_genomic_seq_for_transcript(self, transcript_id, expand):
    """Obtain the genomic sequence for a transcript from Ensembl.

    :param transcript_id: Ensembl transcript identifier.
    :param expand: number of bases to expand on both the 3' and 5' ends;
        the reported start/end are shrunk back to the un-expanded region.
    :return: tuple of (chrom, start, end, strand, seq).
    :raises ValueError: if Ensembl returns a different transcript ID.
    """
    headers = {"content-type": "application/json"}
    self.attempt = 0

    ext = "/sequence/id/{0}?type=genomic;expand_3prime={1};expand_5prime={1}".format(transcript_id, expand)
    record = json.loads(self.ensembl_request(ext, headers))

    if record["id"] != transcript_id:
        raise ValueError("ensembl gave the wrong transcript")

    # desc looks like "<source>:<assembly>:<chrom>:<start>:<end>:<strand>"
    fields = record["desc"].split(":")
    chrom = fields[2]
    start = int(fields[3]) + expand
    end = int(fields[4]) - expand
    strand = "-" if int(fields[5]) == -1 else "+"

    return (chrom, start, end, strand, record["seq"])
obtain the sequence for a transcript from ensembl
def do_access_control(self):
    """`before_request` handler to check if user should be redirected to login page.

    Walks the registered access controllers (global, blueprint-level, then
    endpoint-level) in reverse registration order; the first controller that
    returns a non-None verdict wins. A False verdict sends anonymous users
    to the login page and raises :class:`Forbidden` for everyone else.
    """
    from abilian.services import get_service

    if current_app.testing and current_app.config.get("NO_LOGIN"):
        # Special case for tests
        user = User.query.get(0)
        login_user(user, force=True)
        return

    state = self.app_state
    user = unwrap(current_user)

    # Another special case for tests
    if current_app.testing and getattr(user, "is_admin", False):
        return

    security = get_service("security")
    user_roles = frozenset(security.get_roles(user))
    endpoint = request.endpoint
    blueprint = request.blueprint
    access_controllers = []
    # Controllers registered under the None key apply to every blueprint.
    access_controllers.extend(state.bp_access_controllers.get(None, []))

    if blueprint and blueprint in state.bp_access_controllers:
        access_controllers.extend(state.bp_access_controllers[blueprint])

    if endpoint and endpoint in state.endpoint_access_controllers:
        access_controllers.extend(state.endpoint_access_controllers[endpoint])

    # Most specific controllers were appended last, so iterate in reverse:
    # endpoint-level controllers get the first say.
    for access_controller in reversed(access_controllers):
        verdict = access_controller(user=user, roles=user_roles)
        if verdict is None:
            # This controller abstains; ask the next one.
            continue
        elif verdict is True:
            return
        else:
            if user.is_anonymous:
                return self.redirect_to_login()
            raise Forbidden()

    # default policy
    if current_app.config.get("PRIVATE_SITE") and user.is_anonymous:
        return self.redirect_to_login()
`before_request` handler to check if user should be redirected to login page.
def get_queryset(self):
    '''
    This is overwritten in order to not exclude drafts
    and pages submitted for moderation
    '''
    request = self.request

    # A ?type=<model> query parameter narrows results to one page type.
    page_type = request.GET.get('type')
    if page_type is None:
        model = Page
    else:
        try:
            model = resolve_model_string(page_type)
        except LookupError:
            raise BadRequestError("type doesn't exist")
        if not issubclass(model, Page):
            raise BadRequestError("type doesn't exist")

    # This is the overwritten line: .public() without .live(), so drafts
    # and pages awaiting moderation stay in the queryset.
    queryset = model.objects.public()  # exclude .live()

    # Filter by site
    return queryset.descendant_of(request.site.root_page, inclusive=True)
This is overwritten in order to not exclude drafts and pages submitted for moderation
def _perform_validation(self, path, value, results):
    """
    Validates a given value against the schema and configured validation rules.

    :param path: a dot notation path to the value.
    :param value: a value to be validated.
    :param results: a list with validation results to add new results.
    """
    name = path if path is not None else "value"
    value = ObjectReader.get_value(value)

    # Run the base-schema rules first; element checks below only add to them.
    super(ArraySchema, self)._perform_validation(path, value, results)

    if value is None:
        return

    if isinstance(value, (list, set, tuple)):
        # Validate each element against the configured element type,
        # extending the dot-notation path with the element index.
        for index, element in enumerate(value):
            element_path = str(index) if not path else path + "." + str(index)
            self._perform_type_validation(element_path, self.value_type, element, results)
    else:
        results.append(
            ValidationResult(
                path,
                ValidationResultType.Error,
                "VALUE_ISNOT_ARRAY",
                name + " type must be List or Array",
                "List",
                type(value)
            )
        )
Validates a given value against the schema and configured validation rules. :param path: a dot notation path to the value. :param value: a value to be validated. :param results: a list with validation results to add new results.
def text(self, value):
    """Set the text value and stamp the edit time.

    Args:
        value (str): Text value.
    """
    self._text = value
    edited_at = datetime.datetime.utcnow()
    self.timestamps.edited = edited_at
    # Mark the node dirty so the change gets persisted.
    self.touch(True)
Set the text value. Args: value (str): Text value.
def colormapper(value, lower=0, upper=1, cmap=None):
    """
    Maps values to colors by normalizing within [a,b], obtaining rgba from the
    given matplotlib color map for heatmap polygon coloring.

    Parameters
    ----------
    value: float
        The value to be colormapped
    lower: float
        Lower bound of colors
    upper: float
        Upper bound of colors
    cmap: String or matplotlib.colors.Colormap (optional)
        Colormap object to prevent repeated lookup

    Returns
    -------
    hex_, str
        The hex color string for the value's position in the colormap
    """
    cmap = get_cmap(cmap)
    span = upper - lower
    # Degenerate range: every value maps to the colormap's low end.
    if span == 0:
        rgba = cmap(0)
    else:
        rgba = cmap((value - lower) / float(span))
    return rgb2hex(rgba)
Maps values to colors by normalizing within [a,b], obtaining rgba from the given matplotlib color map for heatmap polygon coloring. Parameters ---------- value: float The value to be colormapped lower: float Lower bound of colors upper: float Upper bound of colors cmap: String or matplotlib.colors.Colormap (optional) Colormap object to prevent repeated lookup Returns ------- hex_, float The value mapped to an appropriate RGBA color value
def _construct_as_path_attr(self, as_path_attr, as4_path_attr):
    """Merge AS_PATH and AS4_PATH attribute instances into
    a single AS_PATH instance."""

    def _listify(li):
        """Reconstruct AS_PATH list.

        Example::

            >>> _listify([[1, 2, 3], {4, 5}, [6, 7]])
            [1, 2, 3, {4, 5}, 6, 7]
        """
        lo = []
        for l in li:
            if isinstance(l, list):
                lo.extend(l)
            elif isinstance(l, set):
                lo.append(l)
            else:
                pass
        return lo

    # If AS4_PATH attribute is None, returns the given AS_PATH attribute
    if as4_path_attr is None:
        return as_path_attr

    # If AS_PATH is shorter than AS4_PATH, AS4_PATH should be ignored.
    if as_path_attr.get_as_path_len() < as4_path_attr.get_as_path_len():
        return as_path_attr

    org_as_path_list = _listify(as_path_attr.path_seg_list)
    as4_path_list = _listify(as4_path_attr.path_seg_list)

    # Reverse to compare backward.
    # AS4_PATH aligns with the tail of AS_PATH, so walk both from the end;
    # where AS4_PATH runs out, keep the AS_PATH (2-byte) segment instead.
    org_as_path_list.reverse()
    as4_path_list.reverse()

    new_as_path_list = []
    tmp_list = []  # accumulates ints for the current AS_SEQUENCE segment
    for as_path, as4_path in zip_longest(org_as_path_list, as4_path_list):
        if as4_path is None:
            # AS4_PATH exhausted: fall back to the original AS_PATH element.
            if isinstance(as_path, int):
                tmp_list.insert(0, as_path)
            elif isinstance(as_path, set):
                # A set marks an AS_SET segment; flush the pending sequence.
                if tmp_list:
                    new_as_path_list.insert(0, tmp_list)
                    tmp_list = []
                new_as_path_list.insert(0, as_path)
            else:
                pass
        elif isinstance(as4_path, int):
            tmp_list.insert(0, as4_path)
        elif isinstance(as4_path, set):
            if tmp_list:
                new_as_path_list.insert(0, tmp_list)
                tmp_list = []
            new_as_path_list.insert(0, as4_path)
        else:
            pass
    if tmp_list:
        new_as_path_list.insert(0, tmp_list)

    return bgp.BGPPathAttributeAsPath(new_as_path_list)
Merge AS_PATH and AS4_PATH attribute instances into a single AS_PATH instance.
def determine_extra_packages(self, packages):
    """
    Return all packages that are installed, but missing from "packages".

    Return value is a tuple of the package names
    """
    frozen = subprocess.check_output(["pip", "freeze"], universal_newlines=True)

    installed = {
        pkg for pkg, _version in self._parse_requirements(frozen.strip().split('\n'))
    }
    required = {
        pkg for pkg, _version in self._parse_requirements(packages.readlines())
    }

    return tuple(installed - required)
Return all packages that are installed, but missing from "packages". Return value is a tuple of the package names
def setup_exchange(self):
    """Declare the exchange

    When completed, the on_exchange_declareok method will be invoked
    by pika.
    """
    logger.debug('Declaring exchange %s', self._exchange)
    # pika invokes the first argument as the Exchange.DeclareOk callback;
    # durable=True so the exchange survives a broker restart.
    self._channel.exchange_declare(self.on_exchange_declareok,
                                   self._exchange,
                                   self._exchange_type,
                                   durable=True)
Declare the exchange When completed, the on_exchange_declareok method will be invoked by pika.
def is_visible(self):
    """
    see also :meth:`visible_if`

    :return: whether this parameter is currently visible (and therefore
        shown in ParameterSets and visible to :meth:`ParameterSet.filter`)
    :rtype: bool
    """
    def is_visible_single(visible_if):
        # visible_if syntax: [ignore,these]qualifier:value
        if visible_if.lower() == 'false':
            return False

        # otherwise we need to find the parameter we're referencing and check its value
        if visible_if[0]=='[':
            remove_metawargs, visible_if = visible_if[1:].split(']')
            remove_metawargs = remove_metawargs.split(',')
        else:
            remove_metawargs = []

        qualifier, value = visible_if.split(':')

        if 'hierarchy.' in qualifier:
            # TODO: set specific syntax (hierarchy.get_meshables:2)
            # then this needs to do some logic on the hierarchy
            hier = self._bundle.hierarchy
            if not len(hier.get_value()):
                # then hierarchy hasn't been set yet, so we can't do any
                # of these tests
                return True

            method = qualifier.split('.')[1]

            # coerce string booleans so the comparison below works
            if value in ['true', 'True']:
                value = True
            elif value in ['false', 'False']:
                value = False

            return getattr(hier, method)(self.component) == value

        else:
            # the parameter needs to have all the same meta data except qualifier
            # TODO: switch this to use self.get_parent_ps ?
            metawargs = {k:v for k,v in self.get_meta(ignore=['twig', 'uniquetwig', 'uniqueid']+remove_metawargs).items() if v is not None}
            metawargs['qualifier'] = qualifier
            # metawargs['twig'] = None
            # metawargs['uniquetwig'] = None
            # metawargs['uniqueid'] = None
            # if metawargs.get('component', None) == '_default':
            #     metawargs['component'] = None

            try:
                # this call is quite expensive and bloats every get_parameter(check_visible=True)
                param = self._bundle.get_parameter(check_visible=False, check_default=False, **metawargs)
            except ValueError:
                # let's not let this hold us up - sometimes this can happen when copying
                # parameters (from copy_for) in order that the visible_if parameter
                # happens later
                logger.debug("parameter not found when trying to determine if visible, {}".format(metawargs))
                return True

            #~ print "***", qualifier, param.qualifier, param.get_value(), value

            if isinstance(param, BoolParameter):
                if value in ['true', 'True']:
                    value = True
                elif value in ['false', 'False']:
                    value = False

            # leading '!' or '~' negates the comparison; '<notempty>' tests length
            if isinstance(value, str) and value[0] in ['!', '~']:
                return param.get_value() != value[1:]
            elif value=='<notempty>':
                return len(param.get_value()) > 0
            else:
                return param.get_value() == value

    if self.visible_if is None:
        return True

    if not self._bundle:
        # then we may not be able to do the check, for now let's just return True
        return True

    # comma-separated conditions are ANDed together
    return np.all([is_visible_single(visible_if_i) for visible_if_i in self.visible_if.split(',')])
see also :meth:`visible_if` :return: whether this parameter is currently visible (and therefore shown in ParameterSets and visible to :meth:`ParameterSet.filter`) :rtype: bool
def execute(self):
    """
    Execute the actions necessary to perform a `molecule init role` and
    returns None.

    :return: None
    """
    role_name = self._command_args['role_name']
    role_directory = os.getcwd()
    LOG.info('Initializing new role {}...'.format(role_name))

    if os.path.isdir(role_name):
        util.sysexit_with_message(
            'The directory {} exists. Cannot create new role.'.format(role_name))

    # Fall back to the built-in 'role' template when none was given.
    template_directory = self._command_args.get('template', 'role')
    self._process_templates(template_directory, self._command_args, role_directory)

    scenario_base_directory = os.path.join(role_directory, role_name)
    scenario_templates = (
        'scenario/driver/{driver_name}'.format(**self._command_args),
        'scenario/verifier/{verifier_name}'.format(**self._command_args),
    )
    for template in scenario_templates:
        self._process_templates(template, self._command_args, scenario_base_directory)

    self._process_templates('molecule', self._command_args, role_directory)
    role_directory = os.path.join(role_directory, role_name)
    LOG.success('Initialized role in {} successfully.'.format(role_directory))
Execute the actions necessary to perform a `molecule init role` and returns None. :return: None
def add_edge_by_index(self, source_index: int, target_index: int, weight: float,
                      save_to_cache: bool = True) -> None:
    """
    Adds an edge between the nodes with the specified indices to the graph.

    Arguments:
        source_index (int): The index of the source node of the edge to add.
        target_index (int): The index of the target node of the edge to add.
        weight (float): The weight of the edge.
        save_to_cache (bool): Whether the edge should be saved to the local
                              database. This argument is necessary (and `False`)
                              when we load edges from the local cache.
    """
    source = self._nodes.get_node(source_index)
    target = self._nodes.get_node(target_index)
    # Silently ignore edges whose endpoints are unknown.
    if source is not None and target is not None:
        self.add_edge(source=source, target=target,
                      weight=weight, save_to_cache=save_to_cache)
Adds an edge between the nodes with the specified indices to the graph. Arguments: source_index (int): The index of the source node of the edge to add. target_index (int): The index of the target node of the edge to add. weight (float): The weight of the edge. save_to_cache (bool): Whether the edge should be saved to the local database. This argument is necessary (and `False`) when we load edges from the local cache.
def after(self, callback: Union[Callable, str]) -> "Control":
    """Register a control method that reacts after the trigger method is called.

    Parameters:
        callback:
            The control method. If given as a callable, then that function will
            be used as the callback. If given as a string, then the control will
            look up a method with that name when reacting (useful when
            subclassing).
    """
    # Passing another Control chains to its own after-callback.
    self._after = callback._after if isinstance(callback, Control) else callback
    return self
Register a control method that reacts after the trigger method is called. Parameters: callback: The control method. If given as a callable, then that function will be used as the callback. If given as a string, then the control will look up a method with that name when reacting (useful when subclassing).
def readWarp(self):
    """Parse the warp element into ``self.warpDict``.

    ::

        <warp>
            <axis name="weight">
                <map input="0" output="0" />
                <map input="500" output="200" />
                <map input="1000" output="1000" />
            </axis>
        </warp>

    The result maps each axis name to a list of (input, output) float pairs.
    """
    warps = {}
    for axisElement in self.root.findall(".warp/axis"):
        axisName = axisElement.attrib.get("name")
        warps[axisName] = [
            (float(point.attrib.get("input")), float(point.attrib.get("output")))
            for point in axisElement.findall(".map")
        ]
    self.warpDict = warps
Read the warp element :: <warp> <axis name="weight"> <map input="0" output="0" /> <map input="500" output="200" /> <map input="1000" output="1000" /> </axis> </warp>
def get_instance(self, payload):
    """
    Build an instance of TaskQueueRealTimeStatisticsInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
    """
    solution = self._solution
    return TaskQueueRealTimeStatisticsInstance(
        self._version,
        payload,
        workspace_sid=solution['workspace_sid'],
        task_queue_sid=solution['task_queue_sid'],
    )
Build an instance of TaskQueueRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
def parity(num: int) -> int:
    """Return the parity of a non-negative integer.

    For example, here are the parities of the first ten integers:

    >>> [parity(n) for n in range(10)]
    [0, 1, 1, 0, 1, 0, 0, 1, 1, 0]

    This function is undefined for negative integers:

    >>> parity(-1)
    Traceback (most recent call last):
        ...
    ValueError: expected num >= 0
    """
    if num < 0:
        raise ValueError("expected num >= 0")
    # Parity is the population count modulo 2.
    return bin(num).count("1") & 1
Return the parity of a non-negative integer. For example, here are the parities of the first ten integers: >>> [parity(n) for n in range(10)] [0, 1, 1, 0, 1, 0, 0, 1, 1, 0] This function is undefined for negative integers: >>> parity(-1) Traceback (most recent call last): ... ValueError: expected num >= 0
def reset(cls, *args, **kwargs):
    """Undo call to prepare, useful for testing.

    Extra positional/keyword arguments are accepted and ignored.
    """
    cls.local.tchannel = None
    cls.args = cls.kwargs = None
    cls.prepared = False
Undo call to prepare, useful for testing.
def get_events(self):
    """
    :calls: `GET /repos/:owner/:repo/issues/:issue_number/events <http://developer.github.com/v3/issues/events>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent`
    """
    events_url = self.url + "/events"
    # Preview media type is required for the lock-reason field.
    preview_headers = {'Accept': Consts.mediaTypeLockReasonPreview}
    return github.PaginatedList.PaginatedList(
        github.IssueEvent.IssueEvent,
        self._requester,
        events_url,
        None,
        headers=preview_headers,
    )
:calls: `GET /repos/:owner/:repo/issues/:issue_number/events <http://developer.github.com/v3/issues/events>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent`
def rename_file(self, old_save_name, new_save_name, new_path):
    """This only updates the name and path we use to track the file's size
    and upload progress. Doesn't rename it on the back end or make us upload
    from anywhere else.
    """
    # Drop the old tracking entry if it exists, then track under the new name.
    self._files.pop(old_save_name, None)
    self.update_file(new_save_name, new_path)
This only updates the name and path we use to track the file's size and upload progress. Doesn't rename it on the back end or make us upload from anywhere else.
def cmd_iter_no_block(
        self,
        tgt,
        fun,
        arg=(),
        timeout=None,
        tgt_type='glob',
        ret='',
        kwarg=None,
        show_jid=False,
        verbose=False,
        **kwargs):
    '''
    Yields the individual minion returns as they come in, or None
        when no returns are available.

    The function signature is the same as :py:meth:`cmd` with the
    following exceptions.

    :returns: A generator yielding the individual minion returns, or None
        when no returns are available. This allows for actions to be
        injected in between minion returns.

    .. code-block:: python

        >>> ret = local.cmd_iter_no_block('*', 'test.ping')
        >>> for i in ret:
        ...     print(i)
        None
        {'jerry': {'ret': True}}
        {'dave': {'ret': True}}
        None
        {'stewart': {'ret': True}}
    '''
    # Remember whether the event bus was already publishing so we only
    # close what this call opened.
    was_listening = self.event.cpub

    try:
        pub_data = self.run_job(
            tgt,
            fun,
            arg,
            tgt_type,
            ret,
            timeout,
            kwarg=kwarg,
            listen=True,
            **kwargs)

        if not pub_data:
            # Publish failed; surface the (empty) result once.
            yield pub_data
        else:
            # block=False makes get_iter_returns yield None whenever no
            # return is currently available.
            for fn_ret in self.get_iter_returns(pub_data['jid'],
                                                pub_data['minions'],
                                                timeout=timeout,
                                                tgt=tgt,
                                                tgt_type=tgt_type,
                                                block=False,
                                                **kwargs):
                if fn_ret and any([show_jid, verbose]):
                    for minion in fn_ret:
                        fn_ret[minion]['jid'] = pub_data['jid']
                yield fn_ret

            self._clean_up_subscriptions(pub_data['jid'])
    finally:
        if not was_listening:
            self.event.close_pub()
Yields the individual minion returns as they come in, or None when no returns are available. The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}}
def set_preferences(self, user=None, **kwargs):
    """Set preferences from keyword arguments."""
    if user is None:
        user = current_user

    # Index the user's existing preferences by key for O(1) lookup.
    existing = {pref.key: pref for pref in user.preferences}

    for key, value in kwargs.items():
        pref = existing.get(key)
        if pref is not None:
            pref.value = value
        else:
            pref = UserPreference(user=user, key=key, value=value)
            existing[key] = pref
        db.session.add(pref)
Set preferences from keyword arguments.
def spy(iterable, n=1):
    """Return a 2-tuple with a list containing the first *n* elements of
    *iterable*, and an iterator with the same items as *iterable*.
    This allows you to "look ahead" at the items in the iterable without
    advancing it.

    There is one item in the list by default:

        >>> iterable = 'abcdefg'
        >>> head, iterable = spy(iterable)
        >>> head
        ['a']
        >>> list(iterable)
        ['a', 'b', 'c', 'd', 'e', 'f', 'g']

    You may use unpacking to retrieve items instead of lists:

        >>> (head,), iterable = spy('abcdefg')
        >>> head
        'a'
        >>> (first, second), iterable = spy('abcdefg', 2)
        >>> first
        'a'
        >>> second
        'b'

    The number of items requested can be larger than the number of items in
    the iterable:

        >>> iterable = [1, 2, 3, 4, 5]
        >>> head, iterable = spy(iterable, 10)
        >>> head
        [1, 2, 3, 4, 5]
        >>> list(iterable)
        [1, 2, 3, 4, 5]

    """
    iterator = iter(iterable)
    # Consume up to n items, then stitch them back in front of the iterator.
    head = take(n, iterator)
    return head, chain(head, iterator)
Return a 2-tuple with a list containing the first *n* elements of *iterable*, and an iterator with the same items as *iterable*. This allows you to "look ahead" at the items in the iterable without advancing it. There is one item in the list by default: >>> iterable = 'abcdefg' >>> head, iterable = spy(iterable) >>> head ['a'] >>> list(iterable) ['a', 'b', 'c', 'd', 'e', 'f', 'g'] You may use unpacking to retrieve items instead of lists: >>> (head,), iterable = spy('abcdefg') >>> head 'a' >>> (first, second), iterable = spy('abcdefg', 2) >>> first 'a' >>> second 'b' The number of items requested can be larger than the number of items in the iterable: >>> iterable = [1, 2, 3, 4, 5] >>> head, iterable = spy(iterable, 10) >>> head [1, 2, 3, 4, 5] >>> list(iterable) [1, 2, 3, 4, 5]
def getattrd(obj, name, default=sentinel):
    """
    Same as getattr(), but allows dot notation lookup

    Source: http://stackoverflow.com/a/14324459
    """
    target = obj
    try:
        for part in name.split("."):
            target = getattr(target, part)
        return target
    except AttributeError:
        # Only swallow the error when the caller supplied a default.
        if default is not sentinel:
            return default
        raise
Same as getattr(), but allows dot notation lookup Source: http://stackoverflow.com/a/14324459
def submit(self, pixels, queue=None, debug=False, configfile=None):
    """
    Submit the likelihood job for the given pixel(s).

    :param pixels: scalar pixel index or array of pixel indices.
    :param queue: batch cluster name; defaults to the config's batch cluster.
    :param debug: unused here; kept for interface compatibility.
    :param configfile: pre-written config file path; when None the current
        config is copied into the output directory.
    """
    # For backwards compatibility
    batch = self.config['scan'].get('batch',self.config['batch'])
    queue = batch['cluster'] if queue is None else queue

    # Need to develop some way to take command line arguments...
    self.batch = ugali.utils.batch.batchFactory(queue,**batch['opts'])
    self.batch.max_jobs = batch.get('max_jobs',200)

    if np.isscalar(pixels): pixels = np.array([pixels])

    outdir = mkdir(self.config['output']['likedir'])
    logdir = mkdir(join(outdir,'log'))
    subdir = mkdir(join(outdir,'sub'))

    # Save the current configuation settings; avoid writing
    # file multiple times if configfile passed as argument.
    if configfile is None:
        shutil.copy(self.config.filename,outdir)
        configfile = join(outdir,os.path.basename(self.config.filename))

    lon,lat = pix2ang(self.nside_likelihood,pixels)
    commands = []
    chunk = batch['chunk']
    istart = 0
    logger.info('=== Submit Likelihood ===')
    for ii,pix in enumerate(pixels):
        msg = ' (%i/%i) pixel=%i nside=%i; (lon, lat) = (%.2f, %.2f)'
        msg = msg%(ii+1,len(pixels),pix, self.nside_likelihood,lon[ii],lat[ii])
        logger.info(msg)

        # Create outfile name
        outfile = self.config.likefile%(pix,self.config['coords']['coordsys'].lower())
        outbase = os.path.basename(outfile)
        jobname = batch['jobname']

        # Submission command
        # Only (re)submit when the output file does not already exist.
        sub = not os.path.exists(outfile)
        cmd = self.command(outfile,configfile,pix)
        commands.append([ii,cmd,lon[ii],lat[ii],sub])

        if chunk == 0:
            # No chunking
            command = cmd
            submit = sub
            logfile = join(logdir,os.path.splitext(outbase)[0]+'.log')
        elif (len(commands)%chunk==0) or (ii+1 == len(pixels)):
            # End of chunk, create submission script
            commands = np.array(commands,dtype=object)
            istart, iend = commands[0][0], commands[-1][0]
            subfile = join(subdir,'submit_%08i_%08i.sh'%(istart,iend))
            logfile = join(logdir,'submit_%08i_%08i.log'%(istart,iend))
            command = "sh %s"%subfile

            # Submit the chunk if any member still needs to run.
            submit = np.any(commands[:,-1])
            if submit:
                self.write_script(subfile,commands)
        else:
            # Not end of chunk
            continue
        commands=[]

        # Actual job submission
        if not submit:
            logger.info(self.skip)
            continue
        else:
            job = self.batch.submit(command,jobname,logfile)
            logger.info(" "+job)
            # Throttle submissions so the scheduler isn't flooded.
            time.sleep(0.5)
Submit the likelihood job for the given pixel(s).
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
    """Returns random samples generated according to the distribution

    Mirrors basic functionality of `rvs` method for `scipy.stats`
    random variates.

    Implemented by mapping uniform numbers onto the inverse CDF
    using a closest-matching grid approach.

    Parameters
    ----------
    N : int
        Number of samples to return

    minval,maxval : float, optional
        Minimum/maximum values to resample.  Should both usually just be
        `None`, which will default to `self.minval`/`self.maxval`.

    log : bool, optional
        Whether grid should be log- or linear-spaced.

    res : int, optional
        Resolution of CDF grid used.

    Returns
    -------
    values : ndarray
        N samples.

    Raises
    ------
    ValueError
        If maxval/minval are +/- infinity, this doesn't work because
        of the grid-based approach.

    """
    N = int(N)

    # Prefer explicit CDF bounds when the instance defines them.
    if minval is None:
        if hasattr(self,'minval_cdf'):
            minval = self.minval_cdf
        else:
            minval = self.minval
    if maxval is None:
        if hasattr(self,'maxval_cdf'):
            maxval = self.maxval_cdf
        else:
            maxval = self.maxval

    if maxval==np.inf or minval==-np.inf:
        raise ValueError('must have finite upper and lower bounds to resample.  (set minval, maxval kws)')

    u = rand.random(size=N)

    if log:
        vals = np.logspace(log10(minval),log10(maxval),res)
    else:
        vals = np.linspace(minval,maxval,res)

    #sometimes cdf is flat.  so ys will need to be uniqued
    ys,yinds = np.unique(self.cdf(vals), return_index=True)
    vals = vals[yinds]

    # Inverse-CDF sampling: bin each uniform draw into the CDF grid.
    inds = np.digitize(u,ys)

    return vals[inds]
Returns random samples generated according to the distribution Mirrors basic functionality of `rvs` method for `scipy.stats` random variates. Implemented by mapping uniform numbers onto the inverse CDF using a closest-matching grid approach. Parameters ---------- N : int Number of samples to return minval,maxval : float, optional Minimum/maximum values to resample. Should both usually just be `None`, which will default to `self.minval`/`self.maxval`. log : bool, optional Whether grid should be log- or linear-spaced. res : int, optional Resolution of CDF grid used. Returns ------- values : ndarray N samples. Raises ------ ValueError If maxval/minval are +/- infinity, this doesn't work because of the grid-based approach.
def skip(self, content):
    """
    Get whether to skip this I{content}.
    Should be skipped when the content is optional
    and value is either None or an empty list.

    @param content: Content to skip.
    @type content: L{Object}
    @return: True if content is to be skipped.
    @rtype: bool
    """
    if not self.optional(content):
        return False
    value = content.value
    return value is None or (isinstance(value, (list, tuple)) and not value)
Get whether to skip this I{content}. Should be skipped when the content is optional and value is either None or an empty list. @param content: Content to skip. @type content: L{Object} @return: True if content is to be skipped. @rtype: bool
def results(self, Pc):
    r"""
    Return pore and throat occupancy at capillary pressure ``Pc``,
    optionally scaled by partial-filling models configured in settings.

    :param Pc: capillary pressure at which occupancy is evaluated.
    :return: dict with 'pore.occupancy' and 'throat.occupancy' arrays.
    """
    p_inv, t_inv = super().results(Pc).values()
    phase = self.project.find_phase(self)
    quantity = self.settings['quantity'].split('.')[-1]
    lpf = np.array([1])
    if self.settings['pore_partial_filling']:
        # Set pressure on phase to current capillary pressure
        phase['pore.'+quantity] = Pc
        # Regenerate corresponding physics model
        for phys in self.project.find_physics(phase=phase):
            phys.regenerate_models(self.settings['pore_partial_filling'])
        # Fetch partial filling fraction from phase object (0->1)
        lpf = phase[self.settings['pore_partial_filling']]
    # Calculate filled throat volumes
    ltf = np.array([1])
    if self.settings['throat_partial_filling']:
        # Set pressure on phase to current capillary pressure
        phase['throat.'+quantity] = Pc
        # Regenerate corresponding physics model
        for phys in self.project.find_physics(phase=phase):
            phys.regenerate_models(self.settings['throat_partial_filling'])
        # Fetch partial filling fraction from phase object (0->1)
        ltf = phase[self.settings['throat_partial_filling']]
    # Scale the base occupancy by the partial-filling fractions.
    p_inv = p_inv*lpf
    t_inv = t_inv*ltf
    return {'pore.occupancy': p_inv, 'throat.occupancy': t_inv}
def incidence(boundary):
    """
    Given an Nxm matrix containing boundary info between simplices,
    compute incidence info matrix.

    not very reusable; should probably not be in this lib
    """
    # Row index of each entry, repeated m times (one per boundary column).
    simplex_index = np.arange(boundary.size) // boundary.shape[1]
    return GroupBy(boundary).split(simplex_index)
Given an Nxm matrix containing boundary info between simplices, compute incidence info matrix. Not very reusable; should probably not be in this lib.
def _get_no_rowscols(self, bbox): """Returns tuple of number of rows and cols from bbox""" if bbox is None: return 1, 1 else: (bb_top, bb_left), (bb_bottom, bb_right) = bbox if bb_top is None: bb_top = 0 if bb_left is None: bb_left = 0 if bb_bottom is None: bb_bottom = self.grid.code_array.shape[0] - 1 if bb_right is None: bb_right = self.grid.code_array.shape[1] - 1 return bb_bottom - bb_top + 1, bb_right - bb_left + 1
Returns tuple of number of rows and cols from bbox
def up(self, migration_id=None, fake=False):
    """Execute pending migrations, optionally only up to *migration_id*.

    Each migration module's ``up(db)`` hook is invoked unless *fake* is
    True; a record of the migration is stored in the collection either
    way.
    """
    if not self.check_directory():
        return
    for migration in self.get_migrations_to_up(migration_id):
        logger.info('Executing migration: %s' % migration.filename)
        module = self.load_migration_file(migration.filename)
        if not fake:
            if hasattr(module, 'up'):
                module.up(self.db)
            else:
                logger.error('No up method on migration %s' % migration.filename)
        # The migration is recorded as applied even when faked.
        entry = migration.as_dict()
        entry['date'] = datetime.utcnow()
        self.collection.insert(entry)
Executes migrations.
def get(self, path, data=None):
    """Execute a GET request.

    'path' may not be None and should include the full path to the
    resource.  'data' may be None or a dictionary; these values are
    appended to the path as key/value pairs.

    Returns a named tuple that includes:
        status: the HTTP status code
        json: the returned JSON-HAL

    If the key was not set, throws an APIConfigurationException.
    """
    # Argument error checking.
    assert path is not None
    response = self.conn.request('GET', path, data, self._get_headers())
    # Remember the most recent status for later inspection.
    status = response.status
    self._last_status = status
    body = response.data.decode()
    return Result(status=status, json=body)
Executes a GET. 'path' may not be None. Should include the full path to the resource. 'data' may be None or a dictionary. These values will be appended to the path as key/value pairs. Returns a named tuple that includes: status: the HTTP status code json: the returned JSON-HAL If the key was not set, throws an APIConfigurationException.
def validate_overlap(comp1, comp2, force):
    """Validate the overlap between the wavelength sets of two components.

    Parameters
    ----------
    comp1, comp2 : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
        Source spectrum and bandpass of an observation.

    force : {'extrap', 'taper', `None`}
        If not `None`, the components may be adjusted by extrapolation
        or tapering.

    Returns
    -------
    comp1, comp2
        Same as inputs, except ``comp1`` might be tapered if that option
        is selected.

    warnings : dict
        Maps warning keyword to its description.

    Raises
    ------
    KeyError
        Invalid ``force``.

    pysynphot.exceptions.DisjointError
        No overlap detected when ``force`` is `None`.

    pysynphot.exceptions.PartialOverlap
        Partial overlap detected when ``force`` is `None`.
    """
    warnings = dict()
    if force is None:
        # No adjustment allowed: anything but full overlap is an error.
        status = comp2.check_overlap(comp1)
        if status == 'partial':
            raise(exceptions.PartialOverlap('Spectrum and bandpass do not fully overlap. You may use force=[extrap|taper] to force this Observation anyway.'))
        if status == 'none':
            raise(exceptions.DisjointError('Spectrum and bandpass are disjoint'))
    elif force.lower() == 'taper':
        try:
            comp1 = comp1.taper()
        except AttributeError:
            # Analytic components must be tabulated before tapering.
            comp1 = comp1.tabulate().taper()
        warnings['PartialOverlap'] = force
    elif force.lower().startswith('extrap'):
        # Default behavior works, but check the overlap so we can set
        # the warning.
        if comp2.check_overlap(comp1) == 'partial':
            warnings['PartialOverlap'] = force
    else:
        raise(KeyError("Illegal value force=%s; legal values=('taper','extrap')"%force))
    return comp1, comp2, warnings
Validate the overlap between the wavelength sets of the two given components. Parameters ---------- comp1, comp2 : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement` Source spectrum and bandpass of an observation. force : {'extrap', 'taper', `None`} If not `None`, the components may be adjusted by extrapolation or tapering. Returns ------- comp1, comp2 Same as inputs. However, ``comp1`` might be tapered if that option is selected. warnings : dict Maps warning keyword to its description. Raises ------ KeyError Invalid ``force``. pysynphot.exceptions.DisjointError No overlap detected when ``force`` is `None`. pysynphot.exceptions.PartialOverlap Partial overlap detected when ``force`` is `None`.
def get_collections(self, pattern="*", libtype="*"):
    """Return a list of dicts summarising the matching collections.

    Each dict carries the collection id, name, first line of the
    documentation (synopsis), type and path.  ``pattern`` and
    ``libtype`` are glob patterns matched against name and type.
    """
    sql = """SELECT collection.collection_id, collection.name, collection.doc,
                collection.type, collection.path
             FROM collection_table as collection
             WHERE name like ?
             AND type like ?
             ORDER BY collection.name
          """
    cursor = self._execute(sql, (self._glob_to_sql(pattern),
                                 self._glob_to_sql(libtype)))
    summaries = []
    for (collection_id, name, doc, coll_type, path) in cursor.fetchall():
        summaries.append({
            "collection_id": collection_id,
            "name": name,
            # Synopsis is just the first line of the doc string.
            "synopsis": doc.split("\n")[0],
            "type": coll_type,
            "path": path,
        })
    return summaries
Returns a list of collection name/summary tuples
def _make_timestamps(start_time, minimum, maximum, steps): """Create timestamps on x-axis, every so often. Parameters ---------- start_time : instance of datetime actual start time of the dataset minimum : int start time of the recording from start_time, in s maximum : int end time of the recording from start_time, in s steps : int how often you want a label, in s Returns ------- dict where the key is the label and the value is the time point where the label should be placed. Notes ----- This function takes care that labels are placed at the meaningful time, not at random values. """ t0 = start_time + timedelta(seconds=minimum) t1 = start_time + timedelta(seconds=maximum) t0_midnight = t0.replace(hour=0, minute=0, second=0, microsecond=0) d0 = t0 - t0_midnight d1 = t1 - t0_midnight first_stamp = ceil(d0.total_seconds() / steps) * steps last_stamp = ceil(d1.total_seconds() / steps) * steps stamp_label = [] stamp_time = [] for stamp in range(first_stamp, last_stamp, steps): stamp_as_datetime = t0_midnight + timedelta(seconds=stamp) stamp_label.append(stamp_as_datetime.strftime('%H:%M')) stamp_time.append(stamp - d0.total_seconds()) return stamp_label, stamp_time
Create timestamps on x-axis, every so often. Parameters ---------- start_time : instance of datetime actual start time of the dataset minimum : int start time of the recording from start_time, in s maximum : int end time of the recording from start_time, in s steps : int how often you want a label, in s Returns ------- dict where the key is the label and the value is the time point where the label should be placed. Notes ----- This function takes care that labels are placed at the meaningful time, not at random values.
def validate_uuid_representation(dummy, value):
    """Validate the uuid representation option selected in the URI.

    Returns the internal representation constant for *value*, raising
    ValueError for anything not in the known table.
    """
    try:
        return _UUID_REPRESENTATIONS[value]
    except KeyError:
        # List the legal choices so the URI error is actionable.
        legal = tuple(_UUID_REPRESENTATIONS)
        raise ValueError("%s is an invalid UUID representation. "
                         "Must be one of "
                         "%s" % (value, legal))
Validate the uuid representation option selected in the URI.