code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def list_ape (archive, compression, cmd, verbosity, interactive):
    """List the contents of an APE (Monkey's Audio) archive.

    Delegates to the generic ``stripext`` handler, which derives the
    listed output name by swapping the archive extension for ``.wav``.
    """
    return stripext(cmd, archive, verbosity, extension=".wav")
List an APE archive.
Below is the the instruction that describes the task: ### Input: List an APE archive. ### Response: def list_ape (archive, compression, cmd, verbosity, interactive): """List an APE archive.""" return stripext(cmd, archive, verbosity, extension=".wav")
def _average_precision(self, rec, prec):
    """Compute average precision from cumulated recall/precision curves.

    Params:
    ----------
    rec : numpy.array
        cumulated recall
    prec : numpy.array
        cumulated precision

    Returns:
    ----------
    ap as float (area under the interpolated precision-recall curve)
    """
    # Pad both curves with sentinels so integration spans recall [0, 1].
    recall = np.concatenate(([0.], rec, [1.]))
    precision = np.concatenate(([0.], prec, [0.]))

    # Make precision monotonically non-increasing (right-to-left max),
    # i.e. the standard interpolated precision envelope.
    for k in range(precision.size - 1, 0, -1):
        precision[k - 1] = np.maximum(precision[k - 1], precision[k])

    # Indices where recall steps up; sum (delta recall) * precision.
    steps = np.where(recall[1:] != recall[:-1])[0]
    return np.sum((recall[steps + 1] - recall[steps]) * precision[steps + 1])
calculate average precision Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float
Below is the the instruction that describes the task: ### Input: calculate average precision Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float ### Response: def _average_precision(self, rec, prec): """ calculate average precision Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float """ # append sentinel values at both ends mrec = np.concatenate(([0.], rec, [1.])) mpre = np.concatenate(([0.], prec, [0.])) # compute precision integration ladder for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # look for recall value changes i = np.where(mrec[1:] != mrec[:-1])[0] # sum (\delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap
def augment_initial_layout(self, base_response, initial_arguments=None):
    'Add application state to initial values'
    # NOTE(review): the trailing ``and False`` makes this fast-path
    # unreachable -- it looks like a deliberately disabled shortcut
    # (return the untouched Dash layout when there is no state to
    # inject). Confirm whether it should be re-enabled or deleted;
    # note that removing the whole line would also drop the
    # use_dash_layout() call, whose side effects are not visible here.
    if self.use_dash_layout() and not initial_arguments and False:
        return base_response.data, base_response.mimetype
    # Adjust the base layout response: decode the raw bytes into the
    # layout's JSON tree so it can be rewritten in place.
    baseDataInBytes = base_response.data
    baseData = json.loads(baseDataInBytes.decode('utf-8'))
    # Also add in any initial arguments; they may arrive either as a
    # dict or as a JSON-encoded string.
    if initial_arguments:
        if isinstance(initial_arguments, str):
            initial_arguments = json.loads(initial_arguments)
    # Walk tree. If at any point we have an element whose id
    # matches, then replace any named values at this level
    reworked_data = self.walk_tree_and_replace(baseData, initial_arguments)
    # Re-serialize with the Plotly-aware encoder (figures etc.).
    response_data = json.dumps(reworked_data, cls=PlotlyJSONEncoder)
    return response_data, base_response.mimetype
Add application state to initial values
Below is the the instruction that describes the task: ### Input: Add application state to initial values ### Response: def augment_initial_layout(self, base_response, initial_arguments=None): 'Add application state to initial values' if self.use_dash_layout() and not initial_arguments and False: return base_response.data, base_response.mimetype # Adjust the base layout response baseDataInBytes = base_response.data baseData = json.loads(baseDataInBytes.decode('utf-8')) # Also add in any initial arguments if initial_arguments: if isinstance(initial_arguments, str): initial_arguments = json.loads(initial_arguments) # Walk tree. If at any point we have an element whose id # matches, then replace any named values at this level reworked_data = self.walk_tree_and_replace(baseData, initial_arguments) response_data = json.dumps(reworked_data, cls=PlotlyJSONEncoder) return response_data, base_response.mimetype
def verify(self):
    """
    Verifies that the request timestamp is not beyond our allowable
    timestamp mismatch and that the request signature matches our
    expectations.

    :returns: True when both checks pass.
    :raises InvalidSignatureError: on a timestamp outside the allowed
        window, a signature mismatch, or a malformed/missing field
        (AttributeError/KeyError/ValueError are converted below).
    """
    try:
        # A mismatch window of None disables the timestamp check;
        # only the signature comparison then applies.
        if self.timestamp_mismatch is not None:
            # If the timestamp does not match the module-level ISO-8601
            # pattern, ``m`` is None and the .group() calls raise
            # AttributeError, converted by the handler below.
            m = _iso8601_timestamp_regex.match(self.request_timestamp)
            year = int(m.group("year"))
            month = int(m.group("month"))
            day = int(m.group("day"))
            hour = int(m.group("hour"))
            minute = int(m.group("minute"))
            second = int(m.group("second"))
            req_ts = datetime(year, month, day, hour, minute, second)
            # NOTE(review): compared against utcnow(), so request
            # timestamps are presumably expected in UTC -- confirm.
            now = datetime.utcnow()
            # abs() makes the window symmetric: both early and late
            # requests beyond the tolerance are rejected.
            if abs(req_ts - now) > timedelta(0, self.timestamp_mismatch):
                raise InvalidSignatureError("Timestamp mismatch")
        if self.expected_signature != self.request_signature:
            raise InvalidSignatureError(
                "Signature mismatch: expected %r, got %r" % (
                    self.expected_signature, self.request_signature))
    except (AttributeError, KeyError, ValueError) as e:
        # Malformed input is reported uniformly as an invalid signature
        # so callers have a single failure mode to handle.
        raise InvalidSignatureError(str(e))
    return True
Verifies that the request timestamp is not beyond our allowable timestamp mismatch and that the request signature matches our expectations.
Below is the the instruction that describes the task: ### Input: Verifies that the request timestamp is not beyond our allowable timestamp mismatch and that the request signature matches our expectations. ### Response: def verify(self): """ Verifies that the request timestamp is not beyond our allowable timestamp mismatch and that the request signature matches our expectations. """ try: if self.timestamp_mismatch is not None: m = _iso8601_timestamp_regex.match(self.request_timestamp) year = int(m.group("year")) month = int(m.group("month")) day = int(m.group("day")) hour = int(m.group("hour")) minute = int(m.group("minute")) second = int(m.group("second")) req_ts = datetime(year, month, day, hour, minute, second) now = datetime.utcnow() if abs(req_ts - now) > timedelta(0, self.timestamp_mismatch): raise InvalidSignatureError("Timestamp mismatch") if self.expected_signature != self.request_signature: raise InvalidSignatureError( "Signature mismatch: expected %r, got %r" % ( self.expected_signature, self.request_signature)) except (AttributeError, KeyError, ValueError) as e: raise InvalidSignatureError(str(e)) return True
def putcolslice(self, value, blc, trc, inc=None, startrow=0, nrow=-1,
                rowincr=1):
    """Put into a slice in a table column holding arrays.

    (see :func:`table.putcolslice`)

    :param value: the data to write into the slice.
    :param blc: bottom-left corner of the slice.
    :param trc: top-right corner of the slice.
    :param inc: stride per axis; defaults to an empty list, meaning a
        stride of 1 along every axis.
    :param startrow: first row to write.
    :param nrow: number of rows to write (-1 means all remaining).
    :param rowincr: row increment.
    """
    # Avoid the shared-mutable-default pitfall (``inc=[]`` in the
    # signature is evaluated once and shared across calls); build a
    # fresh list per call instead. Behavior for callers is unchanged.
    if inc is None:
        inc = []
    return self._table.putcolslice(self._column, value, blc, trc, inc,
                                   startrow, nrow, rowincr)
Put into a slice in a table column holding arrays. (see :func:`table.putcolslice`)
Below is the the instruction that describes the task: ### Input: Put into a slice in a table column holding arrays. (see :func:`table.putcolslice`) ### Response: def putcolslice(self, value, blc, trc, inc=[], startrow=0, nrow=-1, rowincr=1): """Put into a slice in a table column holding arrays. (see :func:`table.putcolslice`)""" return self._table.putcolslice(self._column, value, blc, trc, inc, startrow, nrow, rowincr)
def end_of_directory(self, succeeded=True, update_listing=False,
                     cache_to_disc=True):
    '''Wrapper for xbmcplugin.endOfDirectory. Records state in
    self._end_of_directory.

    Typically it is not necessary to call this method directly, as
    calling :meth:`~xbmcswift2.Plugin.finish` will call this method.

    :param succeeded: whether the directory listing was built successfully.
    :param update_listing: if True, replace the current listing instead
        of adding a new level to the navigation history.
    :param cache_to_disc: whether the listing may be cached to disc.
    :raises AssertionError: if called more than once for this handle.
    '''
    self._update_listing = update_listing
    # Guard clause: finishing a directory twice is a programming error.
    # Raise explicitly instead of using a bare ``assert False`` so the
    # check still fires under ``python -O`` (asserts are stripped).
    # The exception type is kept as AssertionError for compatibility.
    if self._end_of_directory:
        raise AssertionError('Already called endOfDirectory.')
    self._end_of_directory = True
    # Finalize the directory items
    return xbmcplugin.endOfDirectory(self.handle, succeeded,
                                     update_listing, cache_to_disc)
Wrapper for xbmcplugin.endOfDirectory. Records state in self._end_of_directory. Typically it is not necessary to call this method directly, as calling :meth:`~xbmcswift2.Plugin.finish` will call this method.
Below is the the instruction that describes the task: ### Input: Wrapper for xbmcplugin.endOfDirectory. Records state in self._end_of_directory. Typically it is not necessary to call this method directly, as calling :meth:`~xbmcswift2.Plugin.finish` will call this method. ### Response: def end_of_directory(self, succeeded=True, update_listing=False, cache_to_disc=True): '''Wrapper for xbmcplugin.endOfDirectory. Records state in self._end_of_directory. Typically it is not necessary to call this method directly, as calling :meth:`~xbmcswift2.Plugin.finish` will call this method. ''' self._update_listing = update_listing if not self._end_of_directory: self._end_of_directory = True # Finalize the directory items return xbmcplugin.endOfDirectory(self.handle, succeeded, update_listing, cache_to_disc) assert False, 'Already called endOfDirectory.'
def peek_assoc(store, container, _stack=None):
    """
    Deserialize association lists.

    :param store: storage backend providing ``getRecordAttr``,
        ``strRecord`` and ``peek``.
    :param container: iterable of stored records.
    :param _stack: opaque recursion state forwarded to ``store.peek``.
    :returns: list of deserialized items -- bare values when the
        container's 'key' attribute is 'escaped', else
        ``(key, value)`` pairs.
    :raises TypeError: if iteration/deserialization fails.
    """
    assoc = []
    try:
        # 'escaped' keys mean the records already embed their keys, so
        # each item deserializes to a bare value rather than a pair.
        if store.getRecordAttr('key', container) == 'escaped':
            for i in container:
                assoc.append(store.peek(i, container, _stack=_stack))
        else:
            for i in container:
                assoc.append((store.strRecord(i, container), store.peek(i, container, _stack=_stack)))
        #print(assoc) # debugging
    except TypeError as e:
        # Intent: distinguish "container itself is not iterable" from a
        # TypeError raised deeper in deserialization by retrying a bare
        # iteration.
        # NOTE(review): as written, the ``raise e`` on the success path
        # is itself caught by the inner ``except TypeError`` below, so
        # BOTH paths end up raising the wrapped "container is not
        # iterable" error -- the original exception never propagates
        # unwrapped. Confirm whether that is intended.
        try:
            for i in container:
                pass
            raise e
        except TypeError:
            raise TypeError("container is not iterable; peek is not compatible\n\t{}".format(e.args[0]))
    return assoc
Deserialize association lists.
Below is the the instruction that describes the task: ### Input: Deserialize association lists. ### Response: def peek_assoc(store, container, _stack=None): """ Deserialize association lists. """ assoc = [] try: if store.getRecordAttr('key', container) == 'escaped': for i in container: assoc.append(store.peek(i, container, _stack=_stack)) else: for i in container: assoc.append((store.strRecord(i, container), store.peek(i, container, _stack=_stack))) #print(assoc) # debugging except TypeError as e: try: for i in container: pass raise e except TypeError: raise TypeError("container is not iterable; peek is not compatible\n\t{}".format(e.args[0])) return assoc
def notify_observers(table, kind, primary_key=None):
    """Transmit ORM table change notification.

    :param table: Name of the table that has changed
    :param kind: Change type
    :param primary_key: Primary key of the affected instance
    """
    # Suppress notifications entirely while migrations are running.
    if IN_MIGRATIONS:
        return
    # Don't propagate events when there are no observers to receive them.
    if not Observer.objects.filter(dependencies__table=table).exists():
        return

    def handler():
        """Send a notification to the given channel."""
        try:
            async_to_sync(get_channel_layer().send)(
                CHANNEL_MAIN,
                {
                    'type': TYPE_ORM_NOTIFY,
                    'table': table,
                    'kind': kind,
                    # Stringified so it serializes over the channel layer;
                    # note a None primary_key becomes the string 'None'.
                    'primary_key': str(primary_key),
                },
            )
        except ChannelFull:
            # Best-effort delivery: a full channel is logged, not raised.
            logger.exception("Unable to notify workers.")

    batcher = PrioritizedBatcher.global_instance()
    if batcher.is_started:
        # If a batch is open, queue the send via the batcher. group_by
        # collapses duplicate (table, kind, pk) notifications per batch.
        batcher.add(
            'rest_framework_reactive', handler, group_by=(table, kind, primary_key)
        )
    else:
        # If no batch is open, invoke immediately.
        handler()
Transmit ORM table change notification. :param table: Name of the table that has changed :param kind: Change type :param primary_key: Primary key of the affected instance
Below is the the instruction that describes the task: ### Input: Transmit ORM table change notification. :param table: Name of the table that has changed :param kind: Change type :param primary_key: Primary key of the affected instance ### Response: def notify_observers(table, kind, primary_key=None): """Transmit ORM table change notification. :param table: Name of the table that has changed :param kind: Change type :param primary_key: Primary key of the affected instance """ if IN_MIGRATIONS: return # Don't propagate events when there are no observers to receive them. if not Observer.objects.filter(dependencies__table=table).exists(): return def handler(): """Send a notification to the given channel.""" try: async_to_sync(get_channel_layer().send)( CHANNEL_MAIN, { 'type': TYPE_ORM_NOTIFY, 'table': table, 'kind': kind, 'primary_key': str(primary_key), }, ) except ChannelFull: logger.exception("Unable to notify workers.") batcher = PrioritizedBatcher.global_instance() if batcher.is_started: # If a batch is open, queue the send via the batcher. batcher.add( 'rest_framework_reactive', handler, group_by=(table, kind, primary_key) ) else: # If no batch is open, invoke immediately. handler()
def manage_actor(self, monitor, actor, stop=False):
    '''If an actor failed to notify itself to the arbiter for more than
    the timeout, stop the actor.

    :param monitor: the monitor supervising ``actor``.
    :param actor: the :class:`Actor` to manage.
    :param stop: if ``True``, stop the actor.
    :return: 1 if the actor is alive, 0 if it is not.
    '''
    # A stopped monitor implies all of its actors must stop too.
    if not monitor.is_running():
        stop = True
    if not actor.is_alive():
        # Dead but expected to come back (restart pending) and no stop
        # requested: report it as alive and do nothing.
        if not actor.should_be_alive() and not stop:
            return 1
        # Reap the process and drop it from the monitored set.
        actor.join()
        self._remove_monitored_actor(monitor, actor)
        return 0
    timeout = None
    started_stopping = bool(actor.stopping_start)
    # if started_stopping is True, set stop to True
    stop = stop or started_stopping
    if not stop and actor.notified:
        # No explicit stop request: check the heartbeat gap against the
        # configured timeout.
        # NOTE(review): ``timeout`` ends up a bool here (the comparison
        # result), so the '%.2f' log below prints 1.00 -- presumably
        # ``gap`` was intended. Confirm before changing.
        gap = time() - actor.notified
        stop = timeout = gap > actor.cfg.timeout
    if stop: # we are stopping the actor
        dt = actor.should_terminate()
        if not actor.mailbox or dt:
            # Cannot stop gracefully (no mailbox, or the grace period
            # has elapsed): kill outright and deregister.
            if not actor.mailbox:
                monitor.logger.warning('kill %s - no mailbox.', actor)
            else:
                monitor.logger.warning('kill %s - could not stop '
                                       'after %.2f seconds.', actor, dt)
            actor.kill()
            self._remove_monitored_actor(monitor, actor)
            return 0
        elif not started_stopping:
            # First time we decide to stop: request a graceful shutdown.
            if timeout:
                monitor.logger.warning('Stopping %s. Timeout %.2f',
                                       actor, timeout)
            else:
                monitor.logger.info('Stopping %s.', actor)
            actor.stop()
    return 1
If an actor failed to notify itself to the arbiter for more than the timeout, stop the actor. :param actor: the :class:`Actor` to manage. :param stop: if ``True``, stop the actor. :return: if the actor is alive 0 if it is not.
Below is the the instruction that describes the task: ### Input: If an actor failed to notify itself to the arbiter for more than the timeout, stop the actor. :param actor: the :class:`Actor` to manage. :param stop: if ``True``, stop the actor. :return: if the actor is alive 0 if it is not. ### Response: def manage_actor(self, monitor, actor, stop=False): '''If an actor failed to notify itself to the arbiter for more than the timeout, stop the actor. :param actor: the :class:`Actor` to manage. :param stop: if ``True``, stop the actor. :return: if the actor is alive 0 if it is not. ''' if not monitor.is_running(): stop = True if not actor.is_alive(): if not actor.should_be_alive() and not stop: return 1 actor.join() self._remove_monitored_actor(monitor, actor) return 0 timeout = None started_stopping = bool(actor.stopping_start) # if started_stopping is True, set stop to True stop = stop or started_stopping if not stop and actor.notified: gap = time() - actor.notified stop = timeout = gap > actor.cfg.timeout if stop: # we are stopping the actor dt = actor.should_terminate() if not actor.mailbox or dt: if not actor.mailbox: monitor.logger.warning('kill %s - no mailbox.', actor) else: monitor.logger.warning('kill %s - could not stop ' 'after %.2f seconds.', actor, dt) actor.kill() self._remove_monitored_actor(monitor, actor) return 0 elif not started_stopping: if timeout: monitor.logger.warning('Stopping %s. Timeout %.2f', actor, timeout) else: monitor.logger.info('Stopping %s.', actor) actor.stop() return 1
def summary_df_from_array(results_array, names, axis=0, **kwargs):
    """Make a panda data frame of the mean and std devs of an array of
    results, including the uncertainties on the values.

    This function converts the array to a DataFrame and calls summary_df
    on it.

    Parameters
    ----------
    results_array: 2d numpy array
    names: list of str
        Names for the output df's columns.
    axis: int, optional
        Axis on which to calculate summary statistics.
    **kwargs:
        Forwarded to ``summary_df``.

    Returns
    -------
    df: MultiIndex DataFrame
        See summary_df docstring for more details.

    Raises
    ------
    ValueError
        If ``axis`` is not 0 or 1.
    """
    # Validate explicitly rather than with ``assert`` so the check is
    # not silently stripped when running under ``python -O``.
    if axis not in (0, 1):
        raise ValueError('axis must be 0 or 1, got {!r}'.format(axis))
    df = pd.DataFrame(results_array)
    if axis == 1:
        # Transpose so the summary axis is always along the rows.
        df = df.T
    df.columns = names
    return summary_df(df, **kwargs)
Make a panda data frame of the mean and std devs of an array of results, including the uncertainties on the values. This function converts the array to a DataFrame and calls summary_df on it. Parameters ---------- results_array: 2d numpy array names: list of str Names for the output df's columns. axis: int, optional Axis on which to calculate summary statistics. Returns ------- df: MultiIndex DataFrame See summary_df docstring for more details.
Below is the the instruction that describes the task: ### Input: Make a panda data frame of the mean and std devs of an array of results, including the uncertainties on the values. This function converts the array to a DataFrame and calls summary_df on it. Parameters ---------- results_array: 2d numpy array names: list of str Names for the output df's columns. axis: int, optional Axis on which to calculate summary statistics. Returns ------- df: MultiIndex DataFrame See summary_df docstring for more details. ### Response: def summary_df_from_array(results_array, names, axis=0, **kwargs): """Make a panda data frame of the mean and std devs of an array of results, including the uncertainties on the values. This function converts the array to a DataFrame and calls summary_df on it. Parameters ---------- results_array: 2d numpy array names: list of str Names for the output df's columns. axis: int, optional Axis on which to calculate summary statistics. Returns ------- df: MultiIndex DataFrame See summary_df docstring for more details. """ assert axis == 0 or axis == 1 df = pd.DataFrame(results_array) if axis == 1: df = df.T df.columns = names return summary_df(df, **kwargs)
def checkInstalled(versions, optionsRequired=False):
    """
    Check if there is a version of wxPython installed that matches one
    of the versions given.  Returns True if so, False if not.  This
    can be used to determine if calling `select` will succeed or not.

    :param versions: Same as in `select`, either a string or a list of
        strings specifying the version(s) to check for.
    :param optionsRequired: Same as in `select`.
    """
    # isinstance is the idiomatic type test and, unlike
    # ``type(x) == str``, also accepts str subclasses.
    if isinstance(versions, str):
        versions = [versions]
    installed = _find_installed()
    bestMatch = _get_best_match(installed, versions, optionsRequired)
    return bestMatch is not None
Check if there is a version of wxPython installed that matches one of the versions given. Returns True if so, False if not. This can be used to determine if calling `select` will succeed or not. :param versions: Same as in `select`, either a string or a list of strings specifying the version(s) to check for. :param optionsRequired: Same as in `select`.
Below is the the instruction that describes the task: ### Input: Check if there is a version of wxPython installed that matches one of the versions given. Returns True if so, False if not. This can be used to determine if calling `select` will succeed or not. :param versions: Same as in `select`, either a string or a list of strings specifying the version(s) to check for. :param optionsRequired: Same as in `select`. ### Response: def checkInstalled(versions, optionsRequired=False): """ Check if there is a version of wxPython installed that matches one of the versions given. Returns True if so, False if not. This can be used to determine if calling `select` will succeed or not. :param versions: Same as in `select`, either a string or a list of strings specifying the version(s) to check for. :param optionsRequired: Same as in `select`. """ if type(versions) == str: versions = [versions] installed = _find_installed() bestMatch = _get_best_match(installed, versions, optionsRequired) return bestMatch is not None
def reset_dirty_flags(self):
    """Clear the ``marked_dirty`` flag on every registered state machine."""
    # The keys are not needed, only the state machine objects.
    for state_machine in self.state_machines.values():
        state_machine.marked_dirty = False
Set all marked_dirty flags of the state machine to false.
Below is the the instruction that describes the task: ### Input: Set all marked_dirty flags of the state machine to false. ### Response: def reset_dirty_flags(self): """Set all marked_dirty flags of the state machine to false.""" for sm_id, sm in self.state_machines.items(): sm.marked_dirty = False
def callback_oauth1(self, request, **kwargs):
    """
    Process for oAuth 1

    :param request: contains the current session
    :param kwargs: keyword args
    :type request: dict
    :type kwargs: dict
    :rtype: string
    """
    access_token = kwargs.get('access_token')
    # Keep the explicit empty-string/None test (rather than a bare
    # truthiness check) so that e.g. an empty dict is still treated as
    # a supplied token, matching the original behavior.
    if access_token == '' or access_token is None:
        # No token supplied: exchange the request token stored in the
        # session (plus the verifier from the callback query string)
        # for an access token.
        access_token = self.get_access_token(
            request.session['oauth_token'],
            request.session.get('oauth_token_secret', ''),
            request.GET.get('oauth_verifier', ''))
    # isinstance handles str subclasses; ``type(x) == str`` does not.
    if isinstance(access_token, str):
        token = access_token
    else:
        # Pack token and secret into a single string for storage.
        token = '#TH#'.join((access_token.get('oauth_token'),
                             access_token.get('oauth_token_secret')))
    return token
Process for oAuth 1 :param request: contains the current session :param kwargs: keyword args :type request: dict :type kwargs: dict :rtype: string
Below is the the instruction that describes the task: ### Input: Process for oAuth 1 :param request: contains the current session :param kwargs: keyword args :type request: dict :type kwargs: dict :rtype: string ### Response: def callback_oauth1(self, request, **kwargs): """ Process for oAuth 1 :param request: contains the current session :param kwargs: keyword args :type request: dict :type kwargs: dict :rtype: string """ if kwargs.get('access_token') == '' or kwargs.get('access_token') is None: access_token = self.get_access_token(request.session['oauth_token'], request.session.get('oauth_token_secret', ''), request.GET.get('oauth_verifier', '')) else: access_token = kwargs.get('access_token') if type(access_token) == str: token = access_token else: token = '#TH#'.join((access_token.get('oauth_token'), access_token.get('oauth_token_secret'))) return token
def get_islamic_holidays(self):
    """Return a tuple of Islamic (month, day, label) for islamic holidays.

    Please take note that these dates must be expressed using the
    Islamic Calendar. Which holidays are included is driven by the
    calendar's ``include_*`` feature flags.
    """
    # Start from whatever the superclass contributes, then append the
    # holidays enabled on this calendar.
    days = list(super(IslamicMixin, self).get_islamic_holidays())
    if self.include_islamic_new_year:
        days.append((1, 1, "Islamic New Year"))
    if self.include_prophet_birthday:
        days.append((3, 12, "Prophet's Birthday"))
    if self.include_day_after_prophet_birthday:
        days.append((3, 13, "Day after Prophet's Birthday"))
    if self.include_start_ramadan:
        days.append((9, 1, "Start of ramadan"))
    if self.include_nuzul_al_quran:
        days.append((9, 17, "Nuzul Al-Qur'an"))
    if self.include_eid_al_fitr:
        # Eid al-Fitr may span several days starting Shawwal 1.
        for x in range(self.length_eid_al_fitr):
            days.append((10, x + 1, self.eid_al_fitr_label))
    if self.include_eid_al_adha:
        # Eid al-Adha may span several days starting Dhu al-Hijjah 10.
        for x in range(self.length_eid_al_adha):
            days.append((12, x + 10, "Eid al-Adha"))
    if self.include_day_of_sacrifice:
        days.append((12, 10, self.day_of_sacrifice_label))
    if self.include_laylat_al_qadr:
        # This date depends on religious authority rulings and cannot
        # be computed here; warn instead of guessing.
        warnings.warn("The Islamic holiday named Laylat al-Qadr is decided"
                      " by the religious authorities. It is not possible"
                      " to compute it. You'll have to add it manually.")
    return tuple(days)
Return a list of Islamic (month, day, label) for islamic holidays. Please take note that these dates must be expressed using the Islamic Calendar
Below is the the instruction that describes the task: ### Input: Return a list of Islamic (month, day, label) for islamic holidays. Please take note that these dates must be expressed using the Islamic Calendar ### Response: def get_islamic_holidays(self): """Return a list of Islamic (month, day, label) for islamic holidays. Please take note that these dates must be expressed using the Islamic Calendar""" days = list(super(IslamicMixin, self).get_islamic_holidays()) if self.include_islamic_new_year: days.append((1, 1, "Islamic New Year")) if self.include_prophet_birthday: days.append((3, 12, "Prophet's Birthday")) if self.include_day_after_prophet_birthday: days.append((3, 13, "Day after Prophet's Birthday")) if self.include_start_ramadan: days.append((9, 1, "Start of ramadan")) if self.include_nuzul_al_quran: days.append((9, 17, "Nuzul Al-Qur'an")) if self.include_eid_al_fitr: for x in range(self.length_eid_al_fitr): days.append((10, x + 1, self.eid_al_fitr_label)) if self.include_eid_al_adha: for x in range(self.length_eid_al_adha): days.append((12, x + 10, "Eid al-Adha")) if self.include_day_of_sacrifice: days.append((12, 10, self.day_of_sacrifice_label)) if self.include_laylat_al_qadr: warnings.warn("The Islamic holiday named Laylat al-Qadr is decided" " by the religious authorities. It is not possible" " to compute it. You'll have to add it manually.") return tuple(days)
def add_link(self, rel, target, wrap=False, **kwargs):
    """Adds a link to the document.

    Calling code should use this method to add links instead of
    modifying ``links`` directly. This method adds a link to the given
    ``target`` to the document with the given ``rel``. If one or more
    links are already present for that link relationship type, the new
    link will be added to the existing links for that link relationship
    type.

    If ``target`` is a string, a link is added with ``target`` as its
    ``href`` property and other properties from the keyword arguments.
    If ``target`` is a ``Link`` object, it is added to the document and
    the keyword arguments are ignored. If ``target`` is a ``Document``
    or ``Builder`` object, ``target``'s ``self`` link is added to this
    document and the keyword arguments are ignored.

    Arguments:

    - ``rel``: a string specifying the link relationship type of the
      link. It should be a well-known link relation name from the IANA
      registry
      (http://www.iana.org/assignments/link-relations/link-relations.xml),
      a full URI, or a CURIE.
    - ``target``: the destination of the link.
    - ``wrap``: Defaults to False, but if True, specifies that the link
      object should be initally wrapped in a JSON array even if it is
      the first link for the given ``rel``.
    """
    # Link-like objects (Link, Document, Builder) expose as_link();
    # anything else (e.g. a string href) goes through the link factory
    # with the keyword arguments.
    if hasattr(target, 'as_link'):
        link = target.as_link()
    else:
        link = self.link(target, **kwargs)
    links = self.o.setdefault(LINKS_KEY, {})
    new_link = link.as_object()
    # CanonicalRels compares rels with curie/URI canonicalization, so
    # 'ns:rel' and its expanded URI count as the same relation.
    collected_links = CanonicalRels(links, self.curies, self.base_uri)
    if rel not in collected_links:
        # First link for this rel: store bare, or pre-wrapped in a
        # list when the caller asked for ``wrap``.
        if wrap:
            links[rel] = [new_link]
        else:
            links[rel] = new_link
        return
    # The rel already exists (possibly under a different but equivalent
    # spelling): append under the spelling already in the document,
    # promoting a single link object to a list if needed.
    original_rel = collected_links.original_key(rel)
    current_links = links[original_rel]
    if isinstance(current_links, list):
        current_links.append(new_link)
    else:
        links[original_rel] = [current_links, new_link]
Adds a link to the document. Calling code should use this method to add links instead of modifying ``links`` directly. This method adds a link to the given ``target`` to the document with the given ``rel``. If one or more links are already present for that link relationship type, the new link will be added to the existing links for that link relationship type. If ``target`` is a string, a link is added with ``target`` as its ``href`` property and other properties from the keyword arguments. If ``target`` is a ``Link`` object, it is added to the document and the keyword arguments are ignored. If ``target`` is a ``Document`` object, ``target``'s ``self`` link is added to this document and the keyword arguments are ignored. If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is added to this document and the keyword arguments are ignored. Arguments: - ``rel``: a string specifying the link relationship type of the link. It should be a well-known link relation name from the IANA registry (http://www.iana.org/assignments/link-relations/link-relations.xml), a full URI, or a CURIE. - ``target``: the destination of the link. - ``wrap``: Defaults to False, but if True, specifies that the link object should be initally wrapped in a JSON array even if it is the first link for the given ``rel``.
Below is the the instruction that describes the task: ### Input: Adds a link to the document. Calling code should use this method to add links instead of modifying ``links`` directly. This method adds a link to the given ``target`` to the document with the given ``rel``. If one or more links are already present for that link relationship type, the new link will be added to the existing links for that link relationship type. If ``target`` is a string, a link is added with ``target`` as its ``href`` property and other properties from the keyword arguments. If ``target`` is a ``Link`` object, it is added to the document and the keyword arguments are ignored. If ``target`` is a ``Document`` object, ``target``'s ``self`` link is added to this document and the keyword arguments are ignored. If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is added to this document and the keyword arguments are ignored. Arguments: - ``rel``: a string specifying the link relationship type of the link. It should be a well-known link relation name from the IANA registry (http://www.iana.org/assignments/link-relations/link-relations.xml), a full URI, or a CURIE. - ``target``: the destination of the link. - ``wrap``: Defaults to False, but if True, specifies that the link object should be initally wrapped in a JSON array even if it is the first link for the given ``rel``. ### Response: def add_link(self, rel, target, wrap=False, **kwargs): """Adds a link to the document. Calling code should use this method to add links instead of modifying ``links`` directly. This method adds a link to the given ``target`` to the document with the given ``rel``. If one or more links are already present for that link relationship type, the new link will be added to the existing links for that link relationship type. If ``target`` is a string, a link is added with ``target`` as its ``href`` property and other properties from the keyword arguments. 
If ``target`` is a ``Link`` object, it is added to the document and the keyword arguments are ignored. If ``target`` is a ``Document`` object, ``target``'s ``self`` link is added to this document and the keyword arguments are ignored. If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is added to this document and the keyword arguments are ignored. Arguments: - ``rel``: a string specifying the link relationship type of the link. It should be a well-known link relation name from the IANA registry (http://www.iana.org/assignments/link-relations/link-relations.xml), a full URI, or a CURIE. - ``target``: the destination of the link. - ``wrap``: Defaults to False, but if True, specifies that the link object should be initally wrapped in a JSON array even if it is the first link for the given ``rel``. """ if hasattr(target, 'as_link'): link = target.as_link() else: link = self.link(target, **kwargs) links = self.o.setdefault(LINKS_KEY, {}) new_link = link.as_object() collected_links = CanonicalRels(links, self.curies, self.base_uri) if rel not in collected_links: if wrap: links[rel] = [new_link] else: links[rel] = new_link return original_rel = collected_links.original_key(rel) current_links = links[original_rel] if isinstance(current_links, list): current_links.append(new_link) else: links[original_rel] = [current_links, new_link]
def get_child_objective_banks(self, objective_bank_id):
    """Gets the children of the given objective bank.

    arg:    objective_bank_id (osid.id.Id): the ``Id`` to query
    return: (osid.learning.ObjectiveBankList) - the children of the
            objective bank
    raise:  NotFound - ``objective_bank_id`` is not found
    raise:  NullArgument - ``objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_child_bins
    if self._catalog_session is not None:
        # Delegate directly when a generic catalog session is available.
        return self._catalog_session.get_child_catalogs(catalog_id=objective_bank_id)
    # Otherwise resolve the child ids and look the banks up explicitly.
    lookup_session = ObjectiveBankLookupSession(self._proxy, self._runtime)
    child_ids = list(self.get_child_objective_bank_ids(objective_bank_id))
    return lookup_session.get_objective_banks_by_ids(child_ids)
Gets the children of the given objective bank. arg: objective_bank_id (osid.id.Id): the ``Id`` to query return: (osid.learning.ObjectiveBankList) - the children of the objective bank raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets the children of the given objective bank. arg: objective_bank_id (osid.id.Id): the ``Id`` to query return: (osid.learning.ObjectiveBankList) - the children of the objective bank raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ### Response: def get_child_objective_banks(self, objective_bank_id): """Gets the children of the given objective bank. arg: objective_bank_id (osid.id.Id): the ``Id`` to query return: (osid.learning.ObjectiveBankList) - the children of the objective bank raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_child_bins if self._catalog_session is not None: return self._catalog_session.get_child_catalogs(catalog_id=objective_bank_id) return ObjectiveBankLookupSession( self._proxy, self._runtime).get_objective_banks_by_ids( list(self.get_child_objective_bank_ids(objective_bank_id)))
def _forward_iterator(self):
    """Yield ``(key, value)`` pairs of the trie in forward (pre-order) order.

    Implemented as an explicit-stack depth-first traversal.  Each stack
    entry is a ``(node, idx, prefix)`` triple: the node being visited, the
    index of the next child link to follow, and the bit-prefix accumulated
    on the path from the root to that node.
    """
    # Start at this node (the root of the traversal) with an empty prefix.
    path = [(self, 0, Bits())]
    while path:
        node, idx, prefix = path.pop()
        # Emit the node's value only on its first visit (idx == 0), so each
        # key is produced exactly once; pruned values are skipped entirely.
        if idx==0 and node.value is not None and not node.prune_value:
            yield (self._unpickle_key(prefix), self._unpickle_value(node.value))
        if idx<len(node.children):
            # Re-push this node with the child cursor advanced, so we resume
            # with the next sibling after the subtree below is exhausted.
            path.append((node, idx+1, prefix))
            link = node.children[idx]
            # Descend into unpruned children; the link's bit-prefix extends
            # the key path.  Pushing the child last makes it pop first,
            # which gives depth-first (forward) order.
            if not link.pruned:
                path.append((link.node, 0, prefix + link.prefix))
Returns a forward iterator over the trie
Below is the the instruction that describes the task: ### Input: Returns a forward iterator over the trie ### Response: def _forward_iterator(self): "Returns a forward iterator over the trie" path = [(self, 0, Bits())] while path: node, idx, prefix = path.pop() if idx==0 and node.value is not None and not node.prune_value: yield (self._unpickle_key(prefix), self._unpickle_value(node.value)) if idx<len(node.children): path.append((node, idx+1, prefix)) link = node.children[idx] if not link.pruned: path.append((link.node, 0, prefix + link.prefix))
def CMOVAE(cpu, dest, src):
    """
    Conditional move - Above or equal/not below.

    Tests the status flags in the EFLAGS register and moves the source
    operand (second operand) to the destination operand (first operand)
    if the given test condition is true.  For CMOVAE the condition is
    CF == 0 (unsigned "above or equal").  The write is unconditional at
    the Python level: ITEBV builds an if-then-else bitvector selecting
    between the new and the old destination value.

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    # NOTE(review): `cpu.CF == False` is deliberately not rewritten as
    # `not cpu.CF` -- CF may be a symbolic expression here, in which case
    # `==` keeps the comparison symbolic while `not` would force it to a
    # concrete bool.  Confirm before "cleaning this up".
    dest.write(Operators.ITEBV(dest.size, cpu.CF == False, src.read(), dest.read()))
Conditional move - Above or equal/not below. Tests the status flags in the EFLAGS register and moves the source operand (second operand) to the destination operand (first operand) if the given test condition is true. :param cpu: current CPU. :param dest: destination operand. :param src: source operand.
Below is the the instruction that describes the task: ### Input: Conditional move - Above or equal/not below. Tests the status flags in the EFLAGS register and moves the source operand (second operand) to the destination operand (first operand) if the given test condition is true. :param cpu: current CPU. :param dest: destination operand. :param src: source operand. ### Response: def CMOVAE(cpu, dest, src): """ Conditional move - Above or equal/not below. Tests the status flags in the EFLAGS register and moves the source operand (second operand) to the destination operand (first operand) if the given test condition is true. :param cpu: current CPU. :param dest: destination operand. :param src: source operand. """ dest.write(Operators.ITEBV(dest.size, cpu.CF == False, src.read(), dest.read()))
def get_parent(self, path):
    '''Get the parent entity of the entity pointed by the given path.

    Args:
        path (str): The path of the entity whose parent is needed

    Returns:
        A JSON object of the parent entity if found.

    Raises:
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    '''
    self.__validate_storage_path(path, projects_allowed=False)
    # Split into non-empty segments, then drop the last one (the entity
    # itself) so that only the ancestor segments remain.
    segments = [segment for segment in path.split('/') if segment]
    del segments[-1]
    parent_path = '/{0}'.format('/'.join(segments))
    return self.api_client.get_entity_by_query(path=parent_path)
Get the parent entity of the entity pointed by the given path. Args: path (str): The path of the entity whose parent is needed Returns: A JSON object of the parent entity if found. Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
Below is the the instruction that describes the task: ### Input: Get the parent entity of the entity pointed by the given path. Args: path (str): The path of the entity whose parent is needed Returns: A JSON object of the parent entity if found. Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes ### Response: def get_parent(self, path): '''Get the parent entity of the entity pointed by the given path. Args: path (str): The path of the entity whose parent is needed Returns: A JSON object of the parent entity if found. Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes ''' self.__validate_storage_path(path, projects_allowed=False) path_steps = [step for step in path.split('/') if step] del path_steps[-1] parent_path = '/{0}'.format('/'.join(path_steps)) return self.api_client.get_entity_by_query(path=parent_path)
def get_client(self, service, region, public=True, cached=True, client_class=None):
    """
    Return the client for the given service in the given region.

    The service's public endpoint is used unless ``public=False`` is
    passed, in which case the internal endpoint is used instead.  A
    previously created client for the same (service, region, public)
    combination is reused by default; pass ``cached=False`` to force a
    brand-new client to be created.
    """
    if not self.authenticated:
        raise exc.NotAuthenticated("You must authenticate before trying "
                "to create clients.")
    client = None
    # The requested service name may be an alias; resolve it first.
    mapped_service = self.service_mapping.get(service) or service
    svc = self.services.get(mapped_service)
    endpoint = svc.endpoints.get(region) if svc else None
    if endpoint:
        client = endpoint._get_client(public=public, cached=cached,
                client_class=client_class)
    if not client:
        raise exc.NoSuchClient("There is no client available for the "
                "service '%s' in the region '%s'." % (service, region))
    return client
Returns the client object for the specified service and region. By default the public endpoint is used. If you wish to work with a services internal endpoints, specify `public=False`. By default, if a client has already been created for the given service, region, and public values, that will be returned. To force a new client to be created, pass 'cached=False'.
Below is the the instruction that describes the task: ### Input: Returns the client object for the specified service and region. By default the public endpoint is used. If you wish to work with a services internal endpoints, specify `public=False`. By default, if a client has already been created for the given service, region, and public values, that will be returned. To force a new client to be created, pass 'cached=False'. ### Response: def get_client(self, service, region, public=True, cached=True, client_class=None): """ Returns the client object for the specified service and region. By default the public endpoint is used. If you wish to work with a services internal endpoints, specify `public=False`. By default, if a client has already been created for the given service, region, and public values, that will be returned. To force a new client to be created, pass 'cached=False'. """ if not self.authenticated: raise exc.NotAuthenticated("You must authenticate before trying " "to create clients.") clt = ep = None mapped_service = self.service_mapping.get(service) or service svc = self.services.get(mapped_service) if svc: ep = svc.endpoints.get(region) if ep: clt = ep._get_client(public=public, cached=cached, client_class=client_class) if not clt: raise exc.NoSuchClient("There is no client available for the " "service '%s' in the region '%s'." % (service, region)) return clt
def grab(self, monitor):
    # type: (Monitor) -> ScreenShot
    """
    Retrieve all pixels from a monitor. Pixels have to be RGB.

    In the code, there are few interesting things:

    [1] bmi.bmiHeader.biHeight = -height
    A bottom-up DIB is specified by setting the height to a
    positive number, while a top-down DIB is specified by
    setting the height to a negative number.
    https://msdn.microsoft.com/en-us/library/ms787796.aspx
    https://msdn.microsoft.com/en-us/library/dd144879%28v=vs.85%29.aspx

    [2] bmi.bmiHeader.biBitCount = 32
    image_data = create_string_buffer(height * width * 4)
    We grab the image in RGBX mode, so that each word is 32bit
    and we have no striding, then we transform to RGB.
    Inspired by https://github.com/zoofIO/flexx

    [3] bmi.bmiHeader.biClrUsed = 0
    bmi.bmiHeader.biClrImportant = 0
    When biClrUsed and biClrImportant are set to zero, there is
    "no" color table, so we can read the pixels of the bitmap
    retrieved by gdi32.GetDIBits() as a sequence of RGB values.
    Thanks to http://stackoverflow.com/a/3688682
    """
    # Convert PIL bbox style (left, top, right, bottom) into our dict form.
    if isinstance(monitor, tuple):
        monitor = {
            "left": monitor[0],
            "top": monitor[1],
            "width": monitor[2] - monitor[0],
            "height": monitor[3] - monitor[1],
        }

    srcdc, memdc = MSS.srcdc, MSS.memdc
    width, height = monitor["width"], monitor["height"]

    # The bitmap, pixel buffer and BITMAPINFO header are cached between
    # calls; they are only (re)allocated when the capture size changes.
    if (self._bbox["height"], self._bbox["width"]) != (height, width):
        self._bbox = monitor
        self._bmi.bmiHeader.biWidth = width
        self._bmi.bmiHeader.biHeight = -height  # Why minus? [1]
        self._data = ctypes.create_string_buffer(width * height * 4)  # [2]
        if MSS.bmp:
            self.gdi32.DeleteObject(MSS.bmp)
        MSS.bmp = self.gdi32.CreateCompatibleBitmap(srcdc, width, height)
        self.gdi32.SelectObject(memdc, MSS.bmp)

    # Copy the requested screen region from the screen DC into the memory
    # DC.  CAPTUREBLT presumably includes layered windows in the capture --
    # confirm against the BitBlt documentation if this matters.
    self.gdi32.BitBlt(
        memdc,
        0,
        0,
        width,
        height,
        srcdc,
        monitor["left"],
        monitor["top"],
        SRCCOPY | CAPTUREBLT,
    )
    # GetDIBits returns the number of scan lines copied; anything other
    # than the full height means the transfer failed.
    bits = self.gdi32.GetDIBits(
        memdc, MSS.bmp, 0, height, self._data, self._bmi, DIB_RGB_COLORS
    )
    if bits != height:
        raise ScreenShotError("gdi32.GetDIBits() failed.")

    return self.cls_image(bytearray(self._data), monitor)
Retrieve all pixels from a monitor. Pixels have to be RGB. In the code, there are few interesting things: [1] bmi.bmiHeader.biHeight = -height A bottom-up DIB is specified by setting the height to a positive number, while a top-down DIB is specified by setting the height to a negative number. https://msdn.microsoft.com/en-us/library/ms787796.aspx https://msdn.microsoft.com/en-us/library/dd144879%28v=vs.85%29.aspx [2] bmi.bmiHeader.biBitCount = 32 image_data = create_string_buffer(height * width * 4) We grab the image in RGBX mode, so that each word is 32bit and we have no striding, then we transform to RGB. Inspired by https://github.com/zoofIO/flexx [3] bmi.bmiHeader.biClrUsed = 0 bmi.bmiHeader.biClrImportant = 0 When biClrUsed and biClrImportant are set to zero, there is "no" color table, so we can read the pixels of the bitmap retrieved by gdi32.GetDIBits() as a sequence of RGB values. Thanks to http://stackoverflow.com/a/3688682
Below is the the instruction that describes the task: ### Input: Retrieve all pixels from a monitor. Pixels have to be RGB. In the code, there are few interesting things: [1] bmi.bmiHeader.biHeight = -height A bottom-up DIB is specified by setting the height to a positive number, while a top-down DIB is specified by setting the height to a negative number. https://msdn.microsoft.com/en-us/library/ms787796.aspx https://msdn.microsoft.com/en-us/library/dd144879%28v=vs.85%29.aspx [2] bmi.bmiHeader.biBitCount = 32 image_data = create_string_buffer(height * width * 4) We grab the image in RGBX mode, so that each word is 32bit and we have no striding, then we transform to RGB. Inspired by https://github.com/zoofIO/flexx [3] bmi.bmiHeader.biClrUsed = 0 bmi.bmiHeader.biClrImportant = 0 When biClrUsed and biClrImportant are set to zero, there is "no" color table, so we can read the pixels of the bitmap retrieved by gdi32.GetDIBits() as a sequence of RGB values. Thanks to http://stackoverflow.com/a/3688682 ### Response: def grab(self, monitor): # type: (Monitor) -> ScreenShot """ Retrieve all pixels from a monitor. Pixels have to be RGB. In the code, there are few interesting things: [1] bmi.bmiHeader.biHeight = -height A bottom-up DIB is specified by setting the height to a positive number, while a top-down DIB is specified by setting the height to a negative number. https://msdn.microsoft.com/en-us/library/ms787796.aspx https://msdn.microsoft.com/en-us/library/dd144879%28v=vs.85%29.aspx [2] bmi.bmiHeader.biBitCount = 32 image_data = create_string_buffer(height * width * 4) We grab the image in RGBX mode, so that each word is 32bit and we have no striding, then we transform to RGB. Inspired by https://github.com/zoofIO/flexx [3] bmi.bmiHeader.biClrUsed = 0 bmi.bmiHeader.biClrImportant = 0 When biClrUsed and biClrImportant are set to zero, there is "no" color table, so we can read the pixels of the bitmap retrieved by gdi32.GetDIBits() as a sequence of RGB values. 
Thanks to http://stackoverflow.com/a/3688682 """ # Convert PIL bbox style if isinstance(monitor, tuple): monitor = { "left": monitor[0], "top": monitor[1], "width": monitor[2] - monitor[0], "height": monitor[3] - monitor[1], } srcdc, memdc = MSS.srcdc, MSS.memdc width, height = monitor["width"], monitor["height"] if (self._bbox["height"], self._bbox["width"]) != (height, width): self._bbox = monitor self._bmi.bmiHeader.biWidth = width self._bmi.bmiHeader.biHeight = -height # Why minus? [1] self._data = ctypes.create_string_buffer(width * height * 4) # [2] if MSS.bmp: self.gdi32.DeleteObject(MSS.bmp) MSS.bmp = self.gdi32.CreateCompatibleBitmap(srcdc, width, height) self.gdi32.SelectObject(memdc, MSS.bmp) self.gdi32.BitBlt( memdc, 0, 0, width, height, srcdc, monitor["left"], monitor["top"], SRCCOPY | CAPTUREBLT, ) bits = self.gdi32.GetDIBits( memdc, MSS.bmp, 0, height, self._data, self._bmi, DIB_RGB_COLORS ) if bits != height: raise ScreenShotError("gdi32.GetDIBits() failed.") return self.cls_image(bytearray(self._data), monitor)
def state(self, *args, **kwargs):
    """
    Get AWS State for a worker type

    Return the state of a given workertype as stored by the provisioner.
    This state is stored as three lists: 1 for running instances, 1 for
    pending requests.  The `summary` property contains an updated summary
    similar to that returned from `listWorkerTypeSummaries`.

    This method is ``stable``
    """
    # Thin wrapper: look up the endpoint description and dispatch.
    func_info = self.funcinfo["state"]
    return self._makeApiCall(func_info, *args, **kwargs)
Get AWS State for a worker type Return the state of a given workertype as stored by the provisioner. This state is stored as three lists: 1 for running instances, 1 for pending requests. The `summary` property contains an updated summary similar to that returned from `listWorkerTypeSummaries`. This method is ``stable``
Below is the the instruction that describes the task: ### Input: Get AWS State for a worker type Return the state of a given workertype as stored by the provisioner. This state is stored as three lists: 1 for running instances, 1 for pending requests. The `summary` property contains an updated summary similar to that returned from `listWorkerTypeSummaries`. This method is ``stable`` ### Response: def state(self, *args, **kwargs): """ Get AWS State for a worker type Return the state of a given workertype as stored by the provisioner. This state is stored as three lists: 1 for running instances, 1 for pending requests. The `summary` property contains an updated summary similar to that returned from `listWorkerTypeSummaries`. This method is ``stable`` """ return self._makeApiCall(self.funcinfo["state"], *args, **kwargs)
def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True):
    """
    Derive time-varying connectivity on the selected files.

    Parameters
    ----------
    params : dict.
        See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary.
        Assumes dimord is time,node (output of other TenetoBIDS funcitons)

    update_pipeline : bool
        If true, the object updates the selected files with those derived here.

    njobs : int
        How many parallel jobs to run

    confound_corr_report : bool
        If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.

    tag : str
        any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'

    Returns
    -------
    dfc : files
        saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
    """
    # njobs of 0/None falls back to the object-level default.
    if not njobs:
        njobs = self.njobs
    # Record this call (function name + arguments) in the object's history.
    self.add_history(inspect.stack()[0][3], locals(), 1)

    files = self.get_selected_files(quiet=1)
    confound_files = self.get_selected_files(quiet=1, pipeline='confound')
    # Confound correlation reports are only produced when confound files
    # exist AND the caller asked for the report.
    if confound_files:
        confounds_exist = True
    else:
        confounds_exist = False
    if not confound_corr_report:
        confounds_exist = False

    # BIDS-style tag: empty string when absent, 'desc-<tag>' otherwise.
    if not tag:
        tag = ''
    else:
        tag = 'desc-' + tag

    # Fan the per-file derivation out over worker processes; falsy entries
    # in `files` are skipped.  j.result() re-raises any worker exception.
    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params, confounds_exist, confound_files)
               for i, f in enumerate(files) if f}
        for j in as_completed(job):
            j.result()

    if update_pipeline == True:
        if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
            # NOTE(review): this *assigns* an attribute named
            # `set_confound_pipeline` (shadowing what looks like a setter
            # method) instead of calling it.  Presumably
            # `self.set_confound_pipeline(self.pipeline)` was intended --
            # confirm against the TenetoBIDS class before changing.
            self.set_confound_pipeline = self.pipeline
        # Point subsequent steps at the newly derived TVC outputs.
        self.set_pipeline('teneto_' + teneto.__version__)
        self.set_pipeline_subdir('tvc')
        self.set_bids_suffix('tvcconn')
Derive time-varying connectivity on the selected files. Parameters ---------- params : dict. See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS funcitons) update_pipeline : bool If true, the object updates the selected files with those derived here. njobs : int How many parallel jobs to run confound_corr_report : bool If true, histograms and summary statistics of TVC and confounds are plotted in a report directory. tag : str any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]' Returns ------- dfc : files saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
Below is the the instruction that describes the task: ### Input: Derive time-varying connectivity on the selected files. Parameters ---------- params : dict. See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS funcitons) update_pipeline : bool If true, the object updates the selected files with those derived here. njobs : int How many parallel jobs to run confound_corr_report : bool If true, histograms and summary statistics of TVC and confounds are plotted in a report directory. tag : str any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]' Returns ------- dfc : files saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy ### Response: def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True): """ Derive time-varying connectivity on the selected files. Parameters ---------- params : dict. See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS funcitons) update_pipeline : bool If true, the object updates the selected files with those derived here. njobs : int How many parallel jobs to run confound_corr_report : bool If true, histograms and summary statistics of TVC and confounds are plotted in a report directory. tag : str any additional tag that will be placed in the saved file name. 
Will be placed as 'desc-[tag]' Returns ------- dfc : files saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) files = self.get_selected_files(quiet=1) confound_files = self.get_selected_files(quiet=1, pipeline='confound') if confound_files: confounds_exist = True else: confounds_exist = False if not confound_corr_report: confounds_exist = False if not tag: tag = '' else: tag = 'desc-' + tag with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params, confounds_exist, confound_files) for i, f in enumerate(files) if f} for j in as_completed(job): j.result() if update_pipeline == True: if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0: self.set_confound_pipeline = self.pipeline self.set_pipeline('teneto_' + teneto.__version__) self.set_pipeline_subdir('tvc') self.set_bids_suffix('tvcconn')
def index(self, key):
    """Get the position of an entry or raise :exc:`ValueError`.

    :param key: The key to be looked up.

    .. versionchanged:: 0.5
       This used to raise :exc:`IndexError`, which was inconsistent
       with the list API.
    """
    # Non-string keys are whole (value, quality) entries; defer to the
    # plain list lookup for those.
    if not isinstance(key, str):
        return list.index(self, key)
    # String keys are matched against the entry values only, using the
    # header-specific matching rules.
    for position, (item, _quality) in enumerate(self):
        if self._value_matches(key, item):
            return position
    raise ValueError(key)
Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API.
Below is the the instruction that describes the task: ### Input: Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API. ### Response: def index(self, key): """Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API. """ if isinstance(key, str): for idx, (item, quality) in enumerate(self): if self._value_matches(key, item): return idx raise ValueError(key) return list.index(self, key)
def child_isinstance(block, child_id, block_class_or_mixin):
    """
    Efficiently check if a child of an XBlock is an instance of the given class.

    Arguments:
    block -- the parent (or ancestor) of the child block in question
    child_id -- the usage key of the child block we are wondering about
    block_class_or_mixin -- We return true if block's child indentified by
    child_id is an instance of this.

    This method is equivalent to

        isinstance(block.runtime.get_block(child_id), block_class_or_mixin)

    but is far more efficient, as it avoids the need to instantiate the child:
    only the child's *class* is resolved, via the runtime's id reader.
    """
    id_reader = block.runtime.id_reader
    definition_id = id_reader.get_definition_id(child_id)
    block_type_name = id_reader.get_block_type(definition_id)
    child_class = block.runtime.load_block_type(block_type_name)
    return issubclass(child_class, block_class_or_mixin)
Efficiently check if a child of an XBlock is an instance of the given class. Arguments: block -- the parent (or ancestor) of the child block in question child_id -- the usage key of the child block we are wondering about block_class_or_mixin -- We return true if block's child indentified by child_id is an instance of this. This method is equivalent to isinstance(block.runtime.get_block(child_id), block_class_or_mixin) but is far more efficient, as it avoids the need to instantiate the child.
Below is the the instruction that describes the task: ### Input: Efficiently check if a child of an XBlock is an instance of the given class. Arguments: block -- the parent (or ancestor) of the child block in question child_id -- the usage key of the child block we are wondering about block_class_or_mixin -- We return true if block's child indentified by child_id is an instance of this. This method is equivalent to isinstance(block.runtime.get_block(child_id), block_class_or_mixin) but is far more efficient, as it avoids the need to instantiate the child. ### Response: def child_isinstance(block, child_id, block_class_or_mixin): """ Efficiently check if a child of an XBlock is an instance of the given class. Arguments: block -- the parent (or ancestor) of the child block in question child_id -- the usage key of the child block we are wondering about block_class_or_mixin -- We return true if block's child indentified by child_id is an instance of this. This method is equivalent to isinstance(block.runtime.get_block(child_id), block_class_or_mixin) but is far more efficient, as it avoids the need to instantiate the child. """ def_id = block.runtime.id_reader.get_definition_id(child_id) type_name = block.runtime.id_reader.get_block_type(def_id) child_class = block.runtime.load_block_type(type_name) return issubclass(child_class, block_class_or_mixin)
def status(self, value):
    """
    Property for getting or setting the bug status

    >>> bug.status = "REOPENED"

    """
    # Guard clauses: a bug id must exist before any status can be set,
    # and only recognised status values are accepted.
    if not self._bug.get('id', None):
        raise BugException("Can not set status unless there is a bug id."
                           " Please call Update() before setting")
    if value not in VALID_STATUS:
        raise BugException("Invalid status type was used")
    self._bug['status'] = value
Property for getting or setting the bug status >>> bug.status = "REOPENED"
Below is the the instruction that describes the task: ### Input: Property for getting or setting the bug status >>> bug.status = "REOPENED" ### Response: def status(self, value): """ Property for getting or setting the bug status >>> bug.status = "REOPENED" """ if self._bug.get('id', None): if value in VALID_STATUS: self._bug['status'] = value else: raise BugException("Invalid status type was used") else: raise BugException("Can not set status unless there is a bug id." " Please call Update() before setting")
def vswitch_set_vlan_id_for_user(self, vswitch_name, userid, vlan_id):
    """Set vlan id for user when connecting to the vswitch

    :param str vswitch_name: the name of the vswitch
    :param str userid: the user id of the vm
    :param int vlan_id: the VLAN id
    """
    # Pure delegation to the network-operations backend.
    network_ops = self._networkops
    network_ops.set_vswitch_port_vlan_id(vswitch_name, userid, vlan_id)
Set vlan id for user when connecting to the vswitch :param str vswitch_name: the name of the vswitch :param str userid: the user id of the vm :param int vlan_id: the VLAN id
Below is the the instruction that describes the task: ### Input: Set vlan id for user when connecting to the vswitch :param str vswitch_name: the name of the vswitch :param str userid: the user id of the vm :param int vlan_id: the VLAN id ### Response: def vswitch_set_vlan_id_for_user(self, vswitch_name, userid, vlan_id): """Set vlan id for user when connecting to the vswitch :param str vswitch_name: the name of the vswitch :param str userid: the user id of the vm :param int vlan_id: the VLAN id """ self._networkops.set_vswitch_port_vlan_id(vswitch_name, userid, vlan_id)
def _serialize(element, compress, relicSizeBinFunc, relicWriteBinFunc): """ Serializes an @element using the proper function @relicWriteBinFunc into a bytearray. @compress specifies whether the element should be compressed. @relicSizeBinFunc is used to determine the size of the serialized output. This is underlying implementation for serialize G1, G2, and Gt. """ cFlag = c_int(compress) size = relicSizeBinFunc(byref(element), cFlag) # Make an array of the correct size. binArray = (c_ubyte*size)() # Serialize relicWriteBinFunc(byref(binArray), size, byref(element), cFlag) return bytearray(binArray)
Serializes an @element using the proper function @relicWriteBinFunc into a bytearray. @compress specifies whether the element should be compressed. @relicSizeBinFunc is used to determine the size of the serialized output. This is underlying implementation for serialize G1, G2, and Gt.
Below is the the instruction that describes the task: ### Input: Serializes an @element using the proper function @relicWriteBinFunc into a bytearray. @compress specifies whether the element should be compressed. @relicSizeBinFunc is used to determine the size of the serialized output. This is underlying implementation for serialize G1, G2, and Gt. ### Response: def _serialize(element, compress, relicSizeBinFunc, relicWriteBinFunc): """ Serializes an @element using the proper function @relicWriteBinFunc into a bytearray. @compress specifies whether the element should be compressed. @relicSizeBinFunc is used to determine the size of the serialized output. This is underlying implementation for serialize G1, G2, and Gt. """ cFlag = c_int(compress) size = relicSizeBinFunc(byref(element), cFlag) # Make an array of the correct size. binArray = (c_ubyte*size)() # Serialize relicWriteBinFunc(byref(binArray), size, byref(element), cFlag) return bytearray(binArray)
def display_line(self):
    """Return a single line describing this image."""
    description = "Image {0}".format(self.name)
    if self.image_index:
        # An image index is configured, so mention the push target too.
        description = ' : '.join([description, "Pushes to {0}".format(self.image_name)])
    return description
A single line describing this image
Below is the instruction that describes the task:
### Input:
A single line describing this image
### Response:
def display_line(self):
    """A single line describing this image"""
    msg = ["Image {0}".format(self.name)]
    if self.image_index:
        msg.append("Pushes to {0}".format(self.image_name))
    return ' : '.join(msg)
def cluster_list(verbose=False):
    '''
    Return a list of cluster of Postgres server (tuples of version and name).

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.cluster_list

        salt '*' postgres.cluster_list verbose=True
    '''
    cmd_parts = [salt.utils.path.which('pg_lsclusters'), '--no-header']
    cmdline = ' '.join(pipes.quote(part) for part in cmd_parts)
    result = __salt__['cmd.run_all'](cmdline)
    if result.get('retcode', 0) != 0:
        # A failure is logged but not fatal; parsing continues on whatever
        # stdout was produced.
        log.error('Error listing clusters')
    clusters = _parse_pg_lscluster(result['stdout'])
    return clusters if verbose else clusters.keys()
Return a list of cluster of Postgres server (tuples of version and name). CLI Example: .. code-block:: bash salt '*' postgres.cluster_list salt '*' postgres.cluster_list verbose=True
Below is the the instruction that describes the task: ### Input: Return a list of cluster of Postgres server (tuples of version and name). CLI Example: .. code-block:: bash salt '*' postgres.cluster_list salt '*' postgres.cluster_list verbose=True ### Response: def cluster_list(verbose=False): ''' Return a list of cluster of Postgres server (tuples of version and name). CLI Example: .. code-block:: bash salt '*' postgres.cluster_list salt '*' postgres.cluster_list verbose=True ''' cmd = [salt.utils.path.which('pg_lsclusters'), '--no-header'] ret = __salt__['cmd.run_all'](' '.join([pipes.quote(c) for c in cmd])) if ret.get('retcode', 0) != 0: log.error('Error listing clusters') cluster_dict = _parse_pg_lscluster(ret['stdout']) if verbose: return cluster_dict return cluster_dict.keys()
def _parse_args(argv):
    """
    Show supported config format types or usage.

    :param argv: Argument list to parse or None (sys.argv will be set).
    :return: argparse.Namespace object or None (exit before return)
    """
    parser = make_parser()
    args = parser.parse_args(argv)
    LOGGER.setLevel(to_log_level(args.loglevel))

    if not args.inputs:
        # Without inputs only the informational modes make sense; anything
        # else is a usage error.
        if args.list:
            _show_psrs()
        elif args.env:
            cnf = os.environ.copy()
            _output_result(cnf, args.output, args.otype or "json", None, None)
            sys.exit(0)
        else:
            parser.print_usage()
            sys.exit(1)
    elif '-' in args.inputs:
        # A bare '-' among the inputs means: read configuration from stdin.
        args.inputs = sys.stdin

    if args.validate and args.schema is None:
        _exit_with_output("--validate option requires --scheme option", 1)

    return args
Show supported config format types or usage. :param argv: Argument list to parse or None (sys.argv will be set). :return: argparse.Namespace object or None (exit before return)
Below is the the instruction that describes the task: ### Input: Show supported config format types or usage. :param argv: Argument list to parse or None (sys.argv will be set). :return: argparse.Namespace object or None (exit before return) ### Response: def _parse_args(argv): """ Show supported config format types or usage. :param argv: Argument list to parse or None (sys.argv will be set). :return: argparse.Namespace object or None (exit before return) """ parser = make_parser() args = parser.parse_args(argv) LOGGER.setLevel(to_log_level(args.loglevel)) if args.inputs: if '-' in args.inputs: args.inputs = sys.stdin else: if args.list: _show_psrs() elif args.env: cnf = os.environ.copy() _output_result(cnf, args.output, args.otype or "json", None, None) sys.exit(0) else: parser.print_usage() sys.exit(1) if args.validate and args.schema is None: _exit_with_output("--validate option requires --scheme option", 1) return args
async def _query(server, method, parameters, timeout=DEFAULT_TIMEOUT, verify_ssl=True, loop: asyncio.AbstractEventLoop=None): """Formats and performs the asynchronous query against the API :param server: The server to query. :param method: The method name. :param parameters: A dict of parameters to send :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :param verify_ssl: Whether or not to verify SSL connections :return: The JSON-decoded result from the server :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server :raise TimeoutException: Raises when the request does not respond after some time. :raise aiohttp.ClientResponseError: Raises when there is an HTTP status code that indicates failure. """ api_endpoint = api.get_api_url(server) params = dict(id=-1, method=method, params=parameters) headers = get_headers() ssl_context = None verify = verify_ssl if verify_ssl: ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) conn = aiohttp.TCPConnector(verify_ssl=verify, ssl_context=ssl_context, loop=loop) try: async with aiohttp.ClientSession(connector=conn, loop=loop) as session: response = await session.post(api_endpoint, data=json.dumps(params, default=object_serializer), headers=headers, timeout=timeout, allow_redirects=True) response.raise_for_status() content_type = response.headers.get('Content-Type') body = await response.text() except TimeoutError: raise TimeoutException(server) if content_type and 'application/json' not in content_type.lower(): return body return api._process(json.loads(body, object_hook=object_deserializer))
Formats and performs the asynchronous query against the API :param server: The server to query. :param method: The method name. :param parameters: A dict of parameters to send :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :param verify_ssl: Whether or not to verify SSL connections :return: The JSON-decoded result from the server :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server :raise TimeoutException: Raises when the request does not respond after some time. :raise aiohttp.ClientResponseError: Raises when there is an HTTP status code that indicates failure.
Below is the the instruction that describes the task: ### Input: Formats and performs the asynchronous query against the API :param server: The server to query. :param method: The method name. :param parameters: A dict of parameters to send :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :param verify_ssl: Whether or not to verify SSL connections :return: The JSON-decoded result from the server :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server :raise TimeoutException: Raises when the request does not respond after some time. :raise aiohttp.ClientResponseError: Raises when there is an HTTP status code that indicates failure. ### Response: async def _query(server, method, parameters, timeout=DEFAULT_TIMEOUT, verify_ssl=True, loop: asyncio.AbstractEventLoop=None): """Formats and performs the asynchronous query against the API :param server: The server to query. :param method: The method name. :param parameters: A dict of parameters to send :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :param verify_ssl: Whether or not to verify SSL connections :return: The JSON-decoded result from the server :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server :raise TimeoutException: Raises when the request does not respond after some time. :raise aiohttp.ClientResponseError: Raises when there is an HTTP status code that indicates failure. 
""" api_endpoint = api.get_api_url(server) params = dict(id=-1, method=method, params=parameters) headers = get_headers() ssl_context = None verify = verify_ssl if verify_ssl: ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) conn = aiohttp.TCPConnector(verify_ssl=verify, ssl_context=ssl_context, loop=loop) try: async with aiohttp.ClientSession(connector=conn, loop=loop) as session: response = await session.post(api_endpoint, data=json.dumps(params, default=object_serializer), headers=headers, timeout=timeout, allow_redirects=True) response.raise_for_status() content_type = response.headers.get('Content-Type') body = await response.text() except TimeoutError: raise TimeoutException(server) if content_type and 'application/json' not in content_type.lower(): return body return api._process(json.loads(body, object_hook=object_deserializer))
def get_block_hash(self, block_index, **kwargs): """ Returns the hash value associated with a specific block index. :param block_index: a block index (block height) :type block_index: int :return: hash of the block associated with the considered index :rtype: str """ return self._call(JSONRPCMethods.GET_BLOCK_HASH.value, [block_index, ], **kwargs)
Returns the hash value associated with a specific block index. :param block_index: a block index (block height) :type block_index: int :return: hash of the block associated with the considered index :rtype: str
Below is the instruction that describes the task: ### Input: Returns the hash value associated with a specific block index. :param block_index: a block index (block height) :type block_index: int :return: hash of the block associated with the considered index :rtype: str ### Response: def get_block_hash(self, block_index, **kwargs): """ Returns the hash value associated with a specific block index. :param block_index: a block index (block height) :type block_index: int :return: hash of the block associated with the considered index :rtype: str """ return self._call(JSONRPCMethods.GET_BLOCK_HASH.value, [block_index, ], **kwargs)
def fix_whitespace(tokens, start, result): """Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts.""" for e in result: for child in e.iter(): child.text = child.text.replace(' , ', ', ') for hyphen in HYPHENS: child.text = child.text.replace(' %s ' % hyphen, '%s' % hyphen) child.text = re.sub(r'- (.) -', r'-\1-', child.text) return result
Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts.
Below is the instruction that describes the task: ### Input: Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts. ### Response: def fix_whitespace(tokens, start, result): """Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts.""" for e in result: for child in e.iter(): child.text = child.text.replace(' , ', ', ') for hyphen in HYPHENS: child.text = child.text.replace(' %s ' % hyphen, '%s' % hyphen) child.text = re.sub(r'- (.) -', r'-\1-', child.text) return result
def auth_edit(name, **kwargs): """ Interactively edits an authorization group. """ ctx = Context(**kwargs) ctx.timeout = None ctx.execute_action('auth:group:edit', **{ 'storage': ctx.repo.create_secure_service('storage'), 'name': name, })
Interactively edits an authorization group.
Below is the instruction that describes the task: ### Input: Interactively edits an authorization group. ### Response: def auth_edit(name, **kwargs): """ Interactively edits an authorization group. """ ctx = Context(**kwargs) ctx.timeout = None ctx.execute_action('auth:group:edit', **{ 'storage': ctx.repo.create_secure_service('storage'), 'name': name, })
def files_delete(self, *, id: str, **kwargs) -> SlackResponse: """Deletes a file. Args: id (str): The file id. e.g. 'F1234467890' """ kwargs.update({"id": id}) return self.api_call("files.delete", json=kwargs)
Deletes a file. Args: id (str): The file id. e.g. 'F1234467890'
Below is the instruction that describes the task: ### Input: Deletes a file. Args: id (str): The file id. e.g. 'F1234467890' ### Response: def files_delete(self, *, id: str, **kwargs) -> SlackResponse: """Deletes a file. Args: id (str): The file id. e.g. 'F1234467890' """ kwargs.update({"id": id}) return self.api_call("files.delete", json=kwargs)
def get_named_unicode(self, i): """Get named Unicode.""" index = i.index value = [] try: if next(i) != '{': raise SyntaxError("Named Unicode missing '{'' at %d!" % (i.index - 1)) c = next(i) while c != '}': value.append(c) c = next(i) except StopIteration: raise SyntaxError("Unmatched '}' at %d!" % index) return ''.join(value)
Get named Unicode.
Below is the instruction that describes the task: ### Input: Get named Unicode. ### Response: def get_named_unicode(self, i): """Get named Unicode.""" index = i.index value = [] try: if next(i) != '{': raise SyntaxError("Named Unicode missing '{'' at %d!" % (i.index - 1)) c = next(i) while c != '}': value.append(c) c = next(i) except StopIteration: raise SyntaxError("Unmatched '}' at %d!" % index) return ''.join(value)
def compute_boundaries(self, synt_anchors): """ Compute the min cost path between the two waves, and return a list of boundary points, representing the argmin values with respect to the provided ``synt_anchors`` timings. If ``synt_anchors`` has ``k`` elements, the returned array will have ``k+1`` elements, accounting for the tail fragment. :param synt_anchors: the anchor time values (in seconds) of the synthesized fragments, each representing the begin time in the synthesized wave of the corresponding fragment :type synt_anchors: list of :class:`~aeneas.exacttiming.TimeValue` Return the list of boundary indices. :rtype: :class:`numpy.ndarray` (1D) """ self._setup_dtw() if self.dtw is None: self.log(u"Inner self.dtw is None => returning artificial boundary indices") begin = self.real_wave_mfcc.middle_begin end = self.real_wave_mfcc.tail_begin n = len(synt_anchors) step = float(end - begin) / n boundary_indices = [begin + int(i * step) for i in range(n)] + [end] return numpy.array(boundary_indices) self.log(u"Computing path...") real_indices, synt_indices = self.compute_path() self.log(u"Computing path... done") self.log(u"Computing boundary indices...") # both real_indices and synt_indices are w.r.t. 
the full wave self.log([u"Fragments: %d", len(synt_anchors)]) self.log([u"Path length: %d", len(real_indices)]) # synt_anchors as in seconds, convert them in MFCC indices # see also issue #102 mws = self.rconf.mws sample_rate = self.rconf.sample_rate samples_per_mws = mws * sample_rate if samples_per_mws.is_integer: anchor_indices = numpy.array([int(a[0] / mws) for a in synt_anchors]) else: # # NOTE this is not elegant, but it saves the day for the user # self.log_warn(u"The number of samples in each window shift is not an integer, time drift might occur.") anchor_indices = numpy.array([(int(a[0] * sample_rate / mws) / sample_rate) for a in synt_anchors]) # # right side sets the split point at the very beginning of "next" fragment # # NOTE clip() is needed since searchsorted() with side="right" might return # an index == len(synt_indices) == len(real_indices) # when the insertion point is past the last element of synt_indices # causing the fancy indexing real_indices[...] below might fail begin_indices = numpy.clip(numpy.searchsorted(synt_indices, anchor_indices, side="right"), 0, len(synt_indices) - 1) # first split must occur at zero begin_indices[0] = 0 # # map onto real indices, obtaining "default" boundary indices # # NOTE since len(synt_indices) == len(real_indices) # and because the numpy.clip() above, the fancy indexing is always valid # boundary_indices = numpy.append(real_indices[begin_indices], self.real_wave_mfcc.tail_begin) self.log([u"Boundary indices: %d", len(boundary_indices)]) self.log(u"Computing boundary indices... done") return boundary_indices
Compute the min cost path between the two waves, and return a list of boundary points, representing the argmin values with respect to the provided ``synt_anchors`` timings. If ``synt_anchors`` has ``k`` elements, the returned array will have ``k+1`` elements, accounting for the tail fragment. :param synt_anchors: the anchor time values (in seconds) of the synthesized fragments, each representing the begin time in the synthesized wave of the corresponding fragment :type synt_anchors: list of :class:`~aeneas.exacttiming.TimeValue` Return the list of boundary indices. :rtype: :class:`numpy.ndarray` (1D)
Below is the the instruction that describes the task: ### Input: Compute the min cost path between the two waves, and return a list of boundary points, representing the argmin values with respect to the provided ``synt_anchors`` timings. If ``synt_anchors`` has ``k`` elements, the returned array will have ``k+1`` elements, accounting for the tail fragment. :param synt_anchors: the anchor time values (in seconds) of the synthesized fragments, each representing the begin time in the synthesized wave of the corresponding fragment :type synt_anchors: list of :class:`~aeneas.exacttiming.TimeValue` Return the list of boundary indices. :rtype: :class:`numpy.ndarray` (1D) ### Response: def compute_boundaries(self, synt_anchors): """ Compute the min cost path between the two waves, and return a list of boundary points, representing the argmin values with respect to the provided ``synt_anchors`` timings. If ``synt_anchors`` has ``k`` elements, the returned array will have ``k+1`` elements, accounting for the tail fragment. :param synt_anchors: the anchor time values (in seconds) of the synthesized fragments, each representing the begin time in the synthesized wave of the corresponding fragment :type synt_anchors: list of :class:`~aeneas.exacttiming.TimeValue` Return the list of boundary indices. :rtype: :class:`numpy.ndarray` (1D) """ self._setup_dtw() if self.dtw is None: self.log(u"Inner self.dtw is None => returning artificial boundary indices") begin = self.real_wave_mfcc.middle_begin end = self.real_wave_mfcc.tail_begin n = len(synt_anchors) step = float(end - begin) / n boundary_indices = [begin + int(i * step) for i in range(n)] + [end] return numpy.array(boundary_indices) self.log(u"Computing path...") real_indices, synt_indices = self.compute_path() self.log(u"Computing path... done") self.log(u"Computing boundary indices...") # both real_indices and synt_indices are w.r.t. 
the full wave self.log([u"Fragments: %d", len(synt_anchors)]) self.log([u"Path length: %d", len(real_indices)]) # synt_anchors as in seconds, convert them in MFCC indices # see also issue #102 mws = self.rconf.mws sample_rate = self.rconf.sample_rate samples_per_mws = mws * sample_rate if samples_per_mws.is_integer: anchor_indices = numpy.array([int(a[0] / mws) for a in synt_anchors]) else: # # NOTE this is not elegant, but it saves the day for the user # self.log_warn(u"The number of samples in each window shift is not an integer, time drift might occur.") anchor_indices = numpy.array([(int(a[0] * sample_rate / mws) / sample_rate) for a in synt_anchors]) # # right side sets the split point at the very beginning of "next" fragment # # NOTE clip() is needed since searchsorted() with side="right" might return # an index == len(synt_indices) == len(real_indices) # when the insertion point is past the last element of synt_indices # causing the fancy indexing real_indices[...] below might fail begin_indices = numpy.clip(numpy.searchsorted(synt_indices, anchor_indices, side="right"), 0, len(synt_indices) - 1) # first split must occur at zero begin_indices[0] = 0 # # map onto real indices, obtaining "default" boundary indices # # NOTE since len(synt_indices) == len(real_indices) # and because the numpy.clip() above, the fancy indexing is always valid # boundary_indices = numpy.append(real_indices[begin_indices], self.real_wave_mfcc.tail_begin) self.log([u"Boundary indices: %d", len(boundary_indices)]) self.log(u"Computing boundary indices... done") return boundary_indices
def add_member_email(self, email, permissions=None): """ Add a member to the project using member email. :param email: Member email. :param permissions: Permissions dictionary. :return: Member object. """ data = {'email': email} if isinstance(permissions, dict): data.update({ 'permissions': permissions }) extra = { 'resource': self.__class__.__name__, 'query': { 'id': self.id, 'data': data, } } logger.info('Adding member using email', extra=extra) response = self._api.post( url=self._URL['members_query'].format(id=self.id), data=data) member_data = response.json() return Member(api=self._api, **member_data)
Add a member to the project using member email. :param email: Member email. :param permissions: Permissions dictionary. :return: Member object.
Below is the the instruction that describes the task: ### Input: Add a member to the project using member email. :param email: Member email. :param permissions: Permissions dictionary. :return: Member object. ### Response: def add_member_email(self, email, permissions=None): """ Add a member to the project using member email. :param email: Member email. :param permissions: Permissions dictionary. :return: Member object. """ data = {'email': email} if isinstance(permissions, dict): data.update({ 'permissions': permissions }) extra = { 'resource': self.__class__.__name__, 'query': { 'id': self.id, 'data': data, } } logger.info('Adding member using email', extra=extra) response = self._api.post( url=self._URL['members_query'].format(id=self.id), data=data) member_data = response.json() return Member(api=self._api, **member_data)
def _set_duty(self, motor_duty_file, duty, friction_offset, voltage_comp): """Function to set the duty cycle of the motors.""" # Compensate for nominal voltage and round the input duty_int = int(round(duty*voltage_comp)) # Add or subtract offset and clamp the value between -100 and 100 if duty_int > 0: duty_int = min(100, duty_int + friction_offset) elif duty_int < 0: duty_int = max(-100, duty_int - friction_offset) # Apply the signal to the motor self._fast_write(motor_duty_file, duty_int)
Function to set the duty cycle of the motors.
Below is the instruction that describes the task: ### Input: Function to set the duty cycle of the motors. ### Response: def _set_duty(self, motor_duty_file, duty, friction_offset, voltage_comp): """Function to set the duty cycle of the motors.""" # Compensate for nominal voltage and round the input duty_int = int(round(duty*voltage_comp)) # Add or subtract offset and clamp the value between -100 and 100 if duty_int > 0: duty_int = min(100, duty_int + friction_offset) elif duty_int < 0: duty_int = max(-100, duty_int - friction_offset) # Apply the signal to the motor self._fast_write(motor_duty_file, duty_int)
def start(cls): """Start background thread if not already started""" if cls._thread is None: cls._thread = threading.Thread(target=cls._run, name="Heartbeat") cls._thread.daemon = True cls._thread.start()
Start background thread if not already started
Below is the instruction that describes the task: ### Input: Start background thread if not already started ### Response: def start(cls): """Start background thread if not already started""" if cls._thread is None: cls._thread = threading.Thread(target=cls._run, name="Heartbeat") cls._thread.daemon = True cls._thread.start()
def by_login(cls, session, login, local=True): """ Get a user from a given login. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param login: the user login :type login: unicode :return: the associated user :rtype: :class:`pyshop.models.User` """ user = cls.first(session, where=((cls.login == login), (cls.local == local),) ) # XXX it's appear that this is not case sensitive ! return user if user and user.login == login else None
Get a user from a given login. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param login: the user login :type login: unicode :return: the associated user :rtype: :class:`pyshop.models.User`
Below is the instruction that describes the task: ### Input: Get a user from a given login. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param login: the user login :type login: unicode :return: the associated user :rtype: :class:`pyshop.models.User` ### Response: def by_login(cls, session, login, local=True): """ Get a user from a given login. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param login: the user login :type login: unicode :return: the associated user :rtype: :class:`pyshop.models.User` """ user = cls.first(session, where=((cls.login == login), (cls.local == local),) ) # XXX it's appear that this is not case sensitive ! return user if user and user.login == login else None
def delete_property(self, content_id, property_key, callback=None): """ Deletes a content property. :param content_id (string): The ID for the content that owns the property to be deleted. :param property_key (string): The name of the property to be deleted. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: Empty if successful, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """ return self._service_delete_request("rest/api/content/{id}/property/{key}" "".format(id=content_id, key=property_key), callback=callback)
Deletes a content property. :param content_id (string): The ID for the content that owns the property to be deleted. :param property_key (string): The name of the property to be deleted. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: Empty if successful, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
Below is the the instruction that describes the task: ### Input: Deletes a content property. :param content_id (string): The ID for the content that owns the property to be deleted. :param property_key (string): The name of the property to be deleted. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: Empty if successful, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. ### Response: def delete_property(self, content_id, property_key, callback=None): """ Deletes a content property. :param content_id (string): The ID for the content that owns the property to be deleted. :param property_key (string): The name of the property to be deleted. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: Empty if successful, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """ return self._service_delete_request("rest/api/content/{id}/property/{key}" "".format(id=content_id, key=property_key), callback=callback)
def get_gpg_home( appname, config_dir=None ): """ Get the GPG keyring directory for a particular application. Return the path. """ assert is_valid_appname(appname) config_dir = get_config_dir( config_dir ) path = os.path.join( config_dir, "gpgkeys", appname ) return path
Get the GPG keyring directory for a particular application. Return the path.
Below is the instruction that describes the task: ### Input: Get the GPG keyring directory for a particular application. Return the path. ### Response: def get_gpg_home( appname, config_dir=None ): """ Get the GPG keyring directory for a particular application. Return the path. """ assert is_valid_appname(appname) config_dir = get_config_dir( config_dir ) path = os.path.join( config_dir, "gpgkeys", appname ) return path
def imread(filename, format=None): """Read image data from disk Requires imageio or PIL. Parameters ---------- filename : str Filename to read. format : str | None Format of the file. If None, it will be inferred from the filename. Returns ------- data : array Image data. See also -------- imsave, read_png, write_png """ imageio, PIL = _check_img_lib() if imageio is not None: return imageio.imread(filename, format) elif PIL is not None: im = PIL.Image.open(filename) if im.mode == 'P': im = im.convert() # Make numpy array a = np.asarray(im) if len(a.shape) == 0: raise MemoryError("Too little memory to convert PIL image to " "array") return a else: raise RuntimeError("imread requires the imageio or PIL package.")
Read image data from disk Requires imageio or PIL. Parameters ---------- filename : str Filename to read. format : str | None Format of the file. If None, it will be inferred from the filename. Returns ------- data : array Image data. See also -------- imsave, read_png, write_png
Below is the the instruction that describes the task: ### Input: Read image data from disk Requires imageio or PIL. Parameters ---------- filename : str Filename to read. format : str | None Format of the file. If None, it will be inferred from the filename. Returns ------- data : array Image data. See also -------- imsave, read_png, write_png ### Response: def imread(filename, format=None): """Read image data from disk Requires imageio or PIL. Parameters ---------- filename : str Filename to read. format : str | None Format of the file. If None, it will be inferred from the filename. Returns ------- data : array Image data. See also -------- imsave, read_png, write_png """ imageio, PIL = _check_img_lib() if imageio is not None: return imageio.imread(filename, format) elif PIL is not None: im = PIL.Image.open(filename) if im.mode == 'P': im = im.convert() # Make numpy array a = np.asarray(im) if len(a.shape) == 0: raise MemoryError("Too little memory to convert PIL image to " "array") return a else: raise RuntimeError("imread requires the imageio or PIL package.")
def round(col, scale=0): """ Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0 or at integral part when `scale` < 0. >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect() [Row(r=3.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.round(_to_java_column(col), scale))
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0 or at integral part when `scale` < 0. >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect() [Row(r=3.0)]
Below is the instruction that describes the task: ### Input: Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0 or at integral part when `scale` < 0. >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect() [Row(r=3.0)] ### Response: def round(col, scale=0): """ Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0 or at integral part when `scale` < 0. >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect() [Row(r=3.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.round(_to_java_column(col), scale))
def _replace_type_to_regex(cls, match): """ /<int:id> -> r'(?P<id>\d+)' """ groupdict = match.groupdict() _type = groupdict.get('type') type_regex = cls.TYPE_REGEX_MAP.get(_type, '[^/]+') name = groupdict.get('name') return r'(?P<{name}>{type_regex})'.format( name=name, type_regex=type_regex )
/<int:id> -> r'(?P<id>\d+)'
Below is the instruction that describes the task: ### Input: /<int:id> -> r'(?P<id>\d+)' ### Response: def _replace_type_to_regex(cls, match): """ /<int:id> -> r'(?P<id>\d+)' """ groupdict = match.groupdict() _type = groupdict.get('type') type_regex = cls.TYPE_REGEX_MAP.get(_type, '[^/]+') name = groupdict.get('name') return r'(?P<{name}>{type_regex})'.format( name=name, type_regex=type_regex )
def map_fit(interface, state, label, inp): """ Function calculates sigmoid function (g) for every sample. With g it calculates part of Hessian matrix and gradient, aggregates and output them. It also calculates J function which is needed for checking the convergence of parameters theta. """ import numpy as np out = interface.output(0) H, J, grad = 0, 0, 0 for row in inp: row = row.strip().split(state["delimiter"]) # split row if len(row) > 1: # check if row is empty # add intercept term to every sample x = np.array([1] + [(0 if v in state["missing_vals"] else float(v)) for i, v in enumerate(row) if i in state["X_indices"]]) # map label value to 0 or 1. If label does not match set error y = 0 if state["y_map"][0] == row[state["y_index"]] else 1 if state["y_map"][1] == row[ state["y_index"]] else "Error" g = 1. / (1 + np.exp(-np.dot(x, state["thetas"]))) # sigmoid function grad += x * (g - y) # gradient H += np.multiply(np.outer(x, x), g * (1 - g)) # Hessian matrix J -= np.log(g) if y == 1 else np.log(1 - g) # J cost function out.add("grad", grad) out.add("J", J) for i, row in enumerate(H): out.add(i, row)
Function calculates sigmoid function (g) for every sample. With g it calculates part of Hessian matrix and gradient, aggregates and output them. It also calculates J function which is needed for checking the convergence of parameters theta.
Below is the the instruction that describes the task: ### Input: Function calculates sigmoid function (g) for every sample. With g it calculates part of Hessian matrix and gradient, aggregates and output them. It also calculates J function which is needed for checking the convergence of parameters theta. ### Response: def map_fit(interface, state, label, inp): """ Function calculates sigmoid function (g) for every sample. With g it calculates part of Hessian matrix and gradient, aggregates and output them. It also calculates J function which is needed for checking the convergence of parameters theta. """ import numpy as np out = interface.output(0) H, J, grad = 0, 0, 0 for row in inp: row = row.strip().split(state["delimiter"]) # split row if len(row) > 1: # check if row is empty # add intercept term to every sample x = np.array([1] + [(0 if v in state["missing_vals"] else float(v)) for i, v in enumerate(row) if i in state["X_indices"]]) # map label value to 0 or 1. If label does not match set error y = 0 if state["y_map"][0] == row[state["y_index"]] else 1 if state["y_map"][1] == row[ state["y_index"]] else "Error" g = 1. / (1 + np.exp(-np.dot(x, state["thetas"]))) # sigmoid function grad += x * (g - y) # gradient H += np.multiply(np.outer(x, x), g * (1 - g)) # Hessian matrix J -= np.log(g) if y == 1 else np.log(1 - g) # J cost function out.add("grad", grad) out.add("J", J) for i, row in enumerate(H): out.add(i, row)
def send_to_azure(instance, data, replace=True, types=None, primary_key=(), sub_commit=True): """ data = { "table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist, "columns_name" : [first_column_name,second_column_name,...,last_column_name], "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...] } """ # Time initialization start = datetime.datetime.now() # Extract info rows = data["rows"] if not rows: return 0 table_name = data["table_name"] columns_name = data["columns_name"] total_len_data = len(rows) # Create table if needed if not existing_test(instance, table_name) or (types is not None) or (primary_key != ()): create.create_table(instance, data, primary_key, types) # Clean table if needed if replace: cleaning_function(instance, table_name) connection_kwargs = credential(instance) # Create an SSH tunnel ssh_host = os.environ.get("SSH_%s_HOST" % instance) ssh_user = os.environ.get("SSH_%s_USER" % instance) ssh_path_private_key = os.environ.get("SSH_%s_PATH_PRIVATE_KEY" % instance) if ssh_host: tunnel = SSHTunnelForwarder( (ssh_host, 22), ssh_username=ssh_user, ssh_private_key=ssh_path_private_key, remote_bind_address=( os.environ.get("AZURE_%s_HOST" % instance), int(os.environ.get("AZURE_%s_PORT" % instance))), local_bind_address=('localhost', 1433), # could be any available port ) # Start the tunnel try: tunnel.start() print("Tunnel opened!") except sshtunnel.HandlerSSHTunnelForwarderError: pass connection_kwargs["host"] = "localhost,1433" connection_kwargs["port"] = 1433 cnxn = pyodbc.connect(**connection_kwargs) cursor = cnxn.cursor() small_batch_size = int(2099 / len(columns_name)) print("Initiate send_to_azure...") # Initialize counters boolean = True question_mark_pattern = "(%s)" % ",".join(["?" 
for i in range(len(rows[0]))]) counter = 0 while boolean: temp_row = [] question_mark_list = [] for i in range(small_batch_size): if rows: temp_row.append(rows.pop()) question_mark_list.append(question_mark_pattern) else: boolean = False continue counter = counter + len(temp_row) # percent = round(float(counter * 100) / total_len_data) if sub_commit: suffix = "%% rows sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows sent" % str(percent)) else: suffix = "% rows prepared to be sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows prepared to be sent" % str(percent)) data_values_str = ','.join(question_mark_list) columns_name_str = ", ".join(columns_name) inserting_request = '''INSERT INTO %s (%s) VALUES %s ;''' % (table_name, columns_name_str, data_values_str) final_data = [y for x in temp_row for y in x] if final_data: cursor.execute(inserting_request, final_data) if sub_commit: commit_function(cnxn) if not sub_commit: commit_function(cnxn) cursor.close() cnxn.close() if ssh_host: tunnel.close() print("Tunnel closed!") print("data sent to azure") print("Total rows: %s" % str(total_len_data)) print(C.BOLD + "Total time in seconds : %s" % str((datetime.datetime.now() - start).seconds) + C.ENDC) return 0
data = { "table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist, "columns_name" : [first_column_name,second_column_name,...,last_column_name], "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...] }
Below is the the instruction that describes the task: ### Input: data = { "table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist, "columns_name" : [first_column_name,second_column_name,...,last_column_name], "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...] } ### Response: def send_to_azure(instance, data, replace=True, types=None, primary_key=(), sub_commit=True): """ data = { "table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist, "columns_name" : [first_column_name,second_column_name,...,last_column_name], "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...] } """ # Time initialization start = datetime.datetime.now() # Extract info rows = data["rows"] if not rows: return 0 table_name = data["table_name"] columns_name = data["columns_name"] total_len_data = len(rows) # Create table if needed if not existing_test(instance, table_name) or (types is not None) or (primary_key != ()): create.create_table(instance, data, primary_key, types) # Clean table if needed if replace: cleaning_function(instance, table_name) connection_kwargs = credential(instance) # Create an SSH tunnel ssh_host = os.environ.get("SSH_%s_HOST" % instance) ssh_user = os.environ.get("SSH_%s_USER" % instance) ssh_path_private_key = os.environ.get("SSH_%s_PATH_PRIVATE_KEY" % instance) if ssh_host: tunnel = SSHTunnelForwarder( (ssh_host, 22), ssh_username=ssh_user, ssh_private_key=ssh_path_private_key, remote_bind_address=( os.environ.get("AZURE_%s_HOST" % instance), int(os.environ.get("AZURE_%s_PORT" % instance))), local_bind_address=('localhost', 1433), # could be any available port ) # Start the tunnel try: tunnel.start() print("Tunnel opened!") except sshtunnel.HandlerSSHTunnelForwarderError: pass connection_kwargs["host"] = "localhost,1433" connection_kwargs["port"] = 1433 cnxn = pyodbc.connect(**connection_kwargs) cursor = cnxn.cursor() small_batch_size = int(2099 / 
len(columns_name)) print("Initiate send_to_azure...") # Initialize counters boolean = True question_mark_pattern = "(%s)" % ",".join(["?" for i in range(len(rows[0]))]) counter = 0 while boolean: temp_row = [] question_mark_list = [] for i in range(small_batch_size): if rows: temp_row.append(rows.pop()) question_mark_list.append(question_mark_pattern) else: boolean = False continue counter = counter + len(temp_row) # percent = round(float(counter * 100) / total_len_data) if sub_commit: suffix = "%% rows sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows sent" % str(percent)) else: suffix = "% rows prepared to be sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows prepared to be sent" % str(percent)) data_values_str = ','.join(question_mark_list) columns_name_str = ", ".join(columns_name) inserting_request = '''INSERT INTO %s (%s) VALUES %s ;''' % (table_name, columns_name_str, data_values_str) final_data = [y for x in temp_row for y in x] if final_data: cursor.execute(inserting_request, final_data) if sub_commit: commit_function(cnxn) if not sub_commit: commit_function(cnxn) cursor.close() cnxn.close() if ssh_host: tunnel.close() print("Tunnel closed!") print("data sent to azure") print("Total rows: %s" % str(total_len_data)) print(C.BOLD + "Total time in seconds : %s" % str((datetime.datetime.now() - start).seconds) + C.ENDC) return 0
def from_yamlf(cls, fpath: str, encoding: str='utf8', force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> T: """From yaml file path to instance :param fpath: Yaml file path :param encoding: Yaml file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: Instance """ return cls.from_dict(util.load_yamlf(fpath, encoding), force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict)
From yaml file path to instance :param fpath: Yaml file path :param encoding: Yaml file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: Instance
Below is the the instruction that describes the task: ### Input: From yaml file path to instance :param fpath: Yaml file path :param encoding: Yaml file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: Instance ### Response: def from_yamlf(cls, fpath: str, encoding: str='utf8', force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> T: """From yaml file path to instance :param fpath: Yaml file path :param encoding: Yaml file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: Instance """ return cls.from_dict(util.load_yamlf(fpath, encoding), force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict)
def value_to_python_log_level(config_val, evar): """ Convert an evar value into a Python logging level constant. :param str config_val: The env var value. :param EnvironmentVariable evar: The EVar object we are validating a value for. :return: A validated string. :raises: ValueError if the log level is invalid. """ if not config_val: config_val = evar.default_val config_val = config_val.upper() # noinspection PyProtectedMember return logging._checkLevel(config_val)
Convert an evar value into a Python logging level constant. :param str config_val: The env var value. :param EnvironmentVariable evar: The EVar object we are validating a value for. :return: A validated string. :raises: ValueError if the log level is invalid.
Below is the the instruction that describes the task: ### Input: Convert an evar value into a Python logging level constant. :param str config_val: The env var value. :param EnvironmentVariable evar: The EVar object we are validating a value for. :return: A validated string. :raises: ValueError if the log level is invalid. ### Response: def value_to_python_log_level(config_val, evar): """ Convert an evar value into a Python logging level constant. :param str config_val: The env var value. :param EnvironmentVariable evar: The EVar object we are validating a value for. :return: A validated string. :raises: ValueError if the log level is invalid. """ if not config_val: config_val = evar.default_val config_val = config_val.upper() # noinspection PyProtectedMember return logging._checkLevel(config_val)
def paginate(self, q): """ Filters the query so that a given page is returned. The record count is computed automatically from query. :param q: Query to be paged. :return: Paged query. """ self.record_count = q.count() return self.apply_pagination(q).all()
Filters the query so that a given page is returned. The record count is computed automatically from query. :param q: Query to be paged. :return: Paged query.
Below is the the instruction that describes the task: ### Input: Filters the query so that a given page is returned. The record count is computed automatically from query. :param q: Query to be paged. :return: Paged query. ### Response: def paginate(self, q): """ Filters the query so that a given page is returned. The record count is computed automatically from query. :param q: Query to be paged. :return: Paged query. """ self.record_count = q.count() return self.apply_pagination(q).all()
def t_asmcomment_NEWLINE(self, t): r'\r?\n' # New line => remove whatever state in top of the stack and replace it with INITIAL t.lexer.lineno += 1 t.lexer.pop_state() return t
r'\r?\n
Below is the the instruction that describes the task: ### Input: r'\r?\n ### Response: def t_asmcomment_NEWLINE(self, t): r'\r?\n' # New line => remove whatever state in top of the stack and replace it with INITIAL t.lexer.lineno += 1 t.lexer.pop_state() return t
def pistacking(rings_bs, rings_lig): """Return all pi-stackings between the given aromatic ring systems in receptor and ligand.""" data = namedtuple( 'pistack', 'proteinring ligandring distance angle offset type restype resnr reschain restype_l resnr_l reschain_l') pairings = [] for r, l in itertools.product(rings_bs, rings_lig): # DISTANCE AND RING ANGLE CALCULATION d = euclidean3d(r.center, l.center) b = vecangle(r.normal, l.normal) a = min(b, 180 - b if not 180 - b < 0 else b) # Smallest of two angles, depending on direction of normal # RING CENTER OFFSET CALCULATION (project each ring center into the other ring) proj1 = projection(l.normal, l.center, r.center) proj2 = projection(r.normal, r.center, l.center) offset = min(euclidean3d(proj1, l.center), euclidean3d(proj2, r.center)) # RECEPTOR DATA resnr, restype, reschain = whichresnumber(r.atoms[0]), whichrestype(r.atoms[0]), whichchain(r.atoms[0]) resnr_l, restype_l, reschain_l = whichresnumber(l.orig_atoms[0]), whichrestype( l.orig_atoms[0]), whichchain(l.orig_atoms[0]) # SELECTION BY DISTANCE, ANGLE AND OFFSET passed = False if not config.MIN_DIST < d < config.PISTACK_DIST_MAX: continue if 0 < a < config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'P' passed = True if 90 - config.PISTACK_ANG_DEV < a < 90 + config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'T' passed = True if passed: contact = data(proteinring=r, ligandring=l, distance=d, angle=a, offset=offset, type=ptype, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
Return all pi-stackings between the given aromatic ring systems in receptor and ligand.
Below is the the instruction that describes the task: ### Input: Return all pi-stackings between the given aromatic ring systems in receptor and ligand. ### Response: def pistacking(rings_bs, rings_lig): """Return all pi-stackings between the given aromatic ring systems in receptor and ligand.""" data = namedtuple( 'pistack', 'proteinring ligandring distance angle offset type restype resnr reschain restype_l resnr_l reschain_l') pairings = [] for r, l in itertools.product(rings_bs, rings_lig): # DISTANCE AND RING ANGLE CALCULATION d = euclidean3d(r.center, l.center) b = vecangle(r.normal, l.normal) a = min(b, 180 - b if not 180 - b < 0 else b) # Smallest of two angles, depending on direction of normal # RING CENTER OFFSET CALCULATION (project each ring center into the other ring) proj1 = projection(l.normal, l.center, r.center) proj2 = projection(r.normal, r.center, l.center) offset = min(euclidean3d(proj1, l.center), euclidean3d(proj2, r.center)) # RECEPTOR DATA resnr, restype, reschain = whichresnumber(r.atoms[0]), whichrestype(r.atoms[0]), whichchain(r.atoms[0]) resnr_l, restype_l, reschain_l = whichresnumber(l.orig_atoms[0]), whichrestype( l.orig_atoms[0]), whichchain(l.orig_atoms[0]) # SELECTION BY DISTANCE, ANGLE AND OFFSET passed = False if not config.MIN_DIST < d < config.PISTACK_DIST_MAX: continue if 0 < a < config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'P' passed = True if 90 - config.PISTACK_ANG_DEV < a < 90 + config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'T' passed = True if passed: contact = data(proteinring=r, ligandring=l, distance=d, angle=a, offset=offset, type=ptype, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
def on_draw(): global yrot win.clear() glLoadIdentity() glTranslatef(0, 0, -100) glRotatef(yrot, 0.0, 1.0, 0.0) default_system.draw() ''' glBindTexture(GL_TEXTURE_2D, 1) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(0,0) glEnd() glBindTexture(GL_TEXTURE_2D, 2) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(50,0) glEnd() glBindTexture(GL_TEXTURE_2D, 0) '''
glBindTexture(GL_TEXTURE_2D, 1) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(0,0) glEnd() glBindTexture(GL_TEXTURE_2D, 2) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(50,0) glEnd() glBindTexture(GL_TEXTURE_2D, 0)
Below is the the instruction that describes the task: ### Input: glBindTexture(GL_TEXTURE_2D, 1) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(0,0) glEnd() glBindTexture(GL_TEXTURE_2D, 2) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(50,0) glEnd() glBindTexture(GL_TEXTURE_2D, 0) ### Response: def on_draw(): global yrot win.clear() glLoadIdentity() glTranslatef(0, 0, -100) glRotatef(yrot, 0.0, 1.0, 0.0) default_system.draw() ''' glBindTexture(GL_TEXTURE_2D, 1) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(0,0) glEnd() glBindTexture(GL_TEXTURE_2D, 2) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(50,0) glEnd() glBindTexture(GL_TEXTURE_2D, 0) '''
def create_action_token(self, action, expires_in): """ Create a url safe action token attached to the user :param action: :param expires_in: :return: """ return utils.sign_url_safe(self.user.id, secret_key=get_jwt_secret(), salt=action, expires_in=expires_in)
Create a url safe action token attached to the user :param action: :param expires_in: :return:
Below is the the instruction that describes the task: ### Input: Create a url safe action token attached to the user :param action: :param expires_in: :return: ### Response: def create_action_token(self, action, expires_in): """ Create a url safe action token attached to the user :param action: :param expires_in: :return: """ return utils.sign_url_safe(self.user.id, secret_key=get_jwt_secret(), salt=action, expires_in=expires_in)
def update_variant(self, variant_obj): """Update one variant document in the database. This means that the variant in the database will be replaced by variant_obj. Args: variant_obj(dict) Returns: new_variant(dict) """ LOG.debug('Updating variant %s', variant_obj.get('simple_id')) new_variant = self.variant_collection.find_one_and_replace( {'_id': variant_obj['_id']}, variant_obj, return_document=pymongo.ReturnDocument.AFTER ) return new_variant
Update one variant document in the database. This means that the variant in the database will be replaced by variant_obj. Args: variant_obj(dict) Returns: new_variant(dict)
Below is the the instruction that describes the task: ### Input: Update one variant document in the database. This means that the variant in the database will be replaced by variant_obj. Args: variant_obj(dict) Returns: new_variant(dict) ### Response: def update_variant(self, variant_obj): """Update one variant document in the database. This means that the variant in the database will be replaced by variant_obj. Args: variant_obj(dict) Returns: new_variant(dict) """ LOG.debug('Updating variant %s', variant_obj.get('simple_id')) new_variant = self.variant_collection.find_one_and_replace( {'_id': variant_obj['_id']}, variant_obj, return_document=pymongo.ReturnDocument.AFTER ) return new_variant
def is_base_datatype(datatype, version=None): """ Check if the given datatype is a base datatype of the specified version :type datatype: ``str`` :param datatype: the datatype (e.g. ST) :type version: ``str`` :param version: the HL7 version (e.g. 2.5) :return: ``True`` if it is a base datatype, ``False`` otherwise >>> is_base_datatype('ST') True >>> is_base_datatype('CE') False """ if version is None: version = get_default_version() lib = load_library(version) return lib.is_base_datatype(datatype)
Check if the given datatype is a base datatype of the specified version :type datatype: ``str`` :param datatype: the datatype (e.g. ST) :type version: ``str`` :param version: the HL7 version (e.g. 2.5) :return: ``True`` if it is a base datatype, ``False`` otherwise >>> is_base_datatype('ST') True >>> is_base_datatype('CE') False
Below is the the instruction that describes the task: ### Input: Check if the given datatype is a base datatype of the specified version :type datatype: ``str`` :param datatype: the datatype (e.g. ST) :type version: ``str`` :param version: the HL7 version (e.g. 2.5) :return: ``True`` if it is a base datatype, ``False`` otherwise >>> is_base_datatype('ST') True >>> is_base_datatype('CE') False ### Response: def is_base_datatype(datatype, version=None): """ Check if the given datatype is a base datatype of the specified version :type datatype: ``str`` :param datatype: the datatype (e.g. ST) :type version: ``str`` :param version: the HL7 version (e.g. 2.5) :return: ``True`` if it is a base datatype, ``False`` otherwise >>> is_base_datatype('ST') True >>> is_base_datatype('CE') False """ if version is None: version = get_default_version() lib = load_library(version) return lib.is_base_datatype(datatype)
def less(x, y): """ Return True if x < y and False otherwise. This function returns False whenever x and/or y is a NaN. """ x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return mpfr.mpfr_less_p(x, y)
Return True if x < y and False otherwise. This function returns False whenever x and/or y is a NaN.
Below is the the instruction that describes the task: ### Input: Return True if x < y and False otherwise. This function returns False whenever x and/or y is a NaN. ### Response: def less(x, y): """ Return True if x < y and False otherwise. This function returns False whenever x and/or y is a NaN. """ x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return mpfr.mpfr_less_p(x, y)
def format_units(self, value, unit="B", optimal=5, auto=True, si=False): """ Takes a value and formats it for user output, we can choose the unit to use eg B, MiB, kbits/second. This is mainly for use with bytes/bits it converts the value into a human readable form. It has various additional options but they are really only for special cases. The function returns a tuple containing the new value (this is a number so that the user can still format it if required) and a unit that is the units that we have been converted to. By supplying unit to the function we can force those units to be used eg ``unit=KiB`` would force the output to be in Kibibytes. By default we use non-si units but if the unit is si eg kB then we will switch to si units. Units can also be things like ``Mbit/sec``. If the auto parameter is False then we use the unit provided. This only makes sense when the unit is singular eg 'Bytes' and we want the result in bytes and not say converted to MBytes. optimal is used to control the size of the output value. We try to provide an output value of that number of characters (including decimal point), it may also be less due to rounding. If a fixed unit is used the output may be more than this number of characters. """ UNITS = "KMGTPEZY" DECIMAL_SIZE = 1000 BINARY_SIZE = 1024 CUTOFF = 1000 can_round = False if unit: # try to guess the unit. Do we have a known prefix too it? 
if unit[0].upper() in UNITS: index = UNITS.index(unit[0].upper()) + 1 post = unit[1:] si = len(unit) > 1 and unit[1] != "i" if si: post = post[1:] if unit[1] == "b": value *= 8 auto = False else: index = 0 post = unit if si: size = DECIMAL_SIZE else: size = BINARY_SIZE if auto: # we will try to use an appropriate prefix if value < CUTOFF: unit_out = post else: value /= size for prefix in UNITS: if abs(value) < CUTOFF: break value /= size if si: # si kilo is lowercase if prefix == "K": prefix = "k" else: post = "i" + post unit_out = prefix + post can_round = True else: # we are using a fixed unit unit_out = unit size = pow(size, index) if size: value /= size can_round = True if can_round and optimal and value: # we will try to make the output value the desired size # we need to keep out value as a numeric type places = int(log10(abs(value))) if places >= optimal - 2: value = int(value) else: value = round(value, max(optimal - places - 2, 0)) return value, unit_out
Takes a value and formats it for user output, we can choose the unit to use eg B, MiB, kbits/second. This is mainly for use with bytes/bits it converts the value into a human readable form. It has various additional options but they are really only for special cases. The function returns a tuple containing the new value (this is a number so that the user can still format it if required) and a unit that is the units that we have been converted to. By supplying unit to the function we can force those units to be used eg ``unit=KiB`` would force the output to be in Kibibytes. By default we use non-si units but if the unit is si eg kB then we will switch to si units. Units can also be things like ``Mbit/sec``. If the auto parameter is False then we use the unit provided. This only makes sense when the unit is singular eg 'Bytes' and we want the result in bytes and not say converted to MBytes. optimal is used to control the size of the output value. We try to provide an output value of that number of characters (including decimal point), it may also be less due to rounding. If a fixed unit is used the output may be more than this number of characters.
Below is the the instruction that describes the task: ### Input: Takes a value and formats it for user output, we can choose the unit to use eg B, MiB, kbits/second. This is mainly for use with bytes/bits it converts the value into a human readable form. It has various additional options but they are really only for special cases. The function returns a tuple containing the new value (this is a number so that the user can still format it if required) and a unit that is the units that we have been converted to. By supplying unit to the function we can force those units to be used eg ``unit=KiB`` would force the output to be in Kibibytes. By default we use non-si units but if the unit is si eg kB then we will switch to si units. Units can also be things like ``Mbit/sec``. If the auto parameter is False then we use the unit provided. This only makes sense when the unit is singular eg 'Bytes' and we want the result in bytes and not say converted to MBytes. optimal is used to control the size of the output value. We try to provide an output value of that number of characters (including decimal point), it may also be less due to rounding. If a fixed unit is used the output may be more than this number of characters. ### Response: def format_units(self, value, unit="B", optimal=5, auto=True, si=False): """ Takes a value and formats it for user output, we can choose the unit to use eg B, MiB, kbits/second. This is mainly for use with bytes/bits it converts the value into a human readable form. It has various additional options but they are really only for special cases. The function returns a tuple containing the new value (this is a number so that the user can still format it if required) and a unit that is the units that we have been converted to. By supplying unit to the function we can force those units to be used eg ``unit=KiB`` would force the output to be in Kibibytes. By default we use non-si units but if the unit is si eg kB then we will switch to si units. 
Units can also be things like ``Mbit/sec``. If the auto parameter is False then we use the unit provided. This only makes sense when the unit is singular eg 'Bytes' and we want the result in bytes and not say converted to MBytes. optimal is used to control the size of the output value. We try to provide an output value of that number of characters (including decimal point), it may also be less due to rounding. If a fixed unit is used the output may be more than this number of characters. """ UNITS = "KMGTPEZY" DECIMAL_SIZE = 1000 BINARY_SIZE = 1024 CUTOFF = 1000 can_round = False if unit: # try to guess the unit. Do we have a known prefix too it? if unit[0].upper() in UNITS: index = UNITS.index(unit[0].upper()) + 1 post = unit[1:] si = len(unit) > 1 and unit[1] != "i" if si: post = post[1:] if unit[1] == "b": value *= 8 auto = False else: index = 0 post = unit if si: size = DECIMAL_SIZE else: size = BINARY_SIZE if auto: # we will try to use an appropriate prefix if value < CUTOFF: unit_out = post else: value /= size for prefix in UNITS: if abs(value) < CUTOFF: break value /= size if si: # si kilo is lowercase if prefix == "K": prefix = "k" else: post = "i" + post unit_out = prefix + post can_round = True else: # we are using a fixed unit unit_out = unit size = pow(size, index) if size: value /= size can_round = True if can_round and optimal and value: # we will try to make the output value the desired size # we need to keep out value as a numeric type places = int(log10(abs(value))) if places >= optimal - 2: value = int(value) else: value = round(value, max(optimal - places - 2, 0)) return value, unit_out
def _sm_cleanup(self, *args, **kwargs): """ Delete all state associated with the chaos session """ if self._done_notification_func is not None: self._done_notification_func() self._timer.cancel()
Delete all state associated with the chaos session
Below is the the instruction that describes the task: ### Input: Delete all state associated with the chaos session ### Response: def _sm_cleanup(self, *args, **kwargs): """ Delete all state associated with the chaos session """ if self._done_notification_func is not None: self._done_notification_func() self._timer.cancel()
def to_pdb(structure, filename, contigs=None, annotations=None, indices=None, special_bins=None): """From a structure (or matrix) generate the corresponding pdb file representing each chain as a contig/chromosome and filling the occupancy field with a custom annotation. If the matrix has been trimmed somewhat, remaining indices may be specified. """ n = len(structure) letters = (string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation) * int(n / 94 + 1) if contigs is None: contigs = np.ones(n + 1) if annotations is None: annotations = np.zeros(n + 1) if indices is None: indices = range(n + 1) if special_bins is None: special_bins = np.zeros(n + 1) structure_shapes_match = structure.shape[0] == structure.shape[1] if isinstance(structure, np.ndarray) and structure_shapes_match: structure = (to_structure(structure)) X, Y, Z = (structure[:, i] for i in range(3)) Xmax, Ymax, Zmax = (np.max(np.abs(Xi)) for Xi in (X, Y, Z)) X *= 100.0 / Xmax Y *= 100.0 / Ymax Z *= 100.0 / Zmax X = np.around(X, 3) Y = np.around(Y, 3) Z = np.around(Z, 3) reference = ["OW", "OW", "CE", "TE", "tR"] with open(filename, 'w') as f: for i in range(1, n): line = "ATOM" # 1-4 "ATOM" line += " " # 5-6 unused line += str(i).rjust(5) # 7-11 atom serial number line += " " # 12 unused line += reference[special_bins[i]].rjust(4) # 13-16 atom name line += " " # 17 alternate location indicator line += "SOL" # 18-20 residue name line += " " # 21 unused line += letters[int(contigs[indices[i]] - 1) ] # 22 chain identifier line += str(i).rjust(4) # 23-26 residue sequence number line += " " # 27 code for insertion of residues line += " " # 28-30 unused line += str(X[i]).rjust(8) # 31-38 X orthogonal Å coordinate line += str(Y[i]).rjust(8) # 39-46 Y orthogonal Å coordinate line += str(Z[i]).rjust(8) # 47-54 Z orthogonal Å coordinate line += "1.00".rjust(6) # 55-60 Occupancy # 61-66 Temperature factor line += str(annotations[i - 1]).rjust(6) line += " " # 67-72 unused line += " " # 
73-76 segment identifier line += "O".rjust(2) # 77-78 element symbol line += "\n" f.write(line)
From a structure (or matrix) generate the corresponding pdb file representing each chain as a contig/chromosome and filling the occupancy field with a custom annotation. If the matrix has been trimmed somewhat, remaining indices may be specified.
Below is the the instruction that describes the task: ### Input: From a structure (or matrix) generate the corresponding pdb file representing each chain as a contig/chromosome and filling the occupancy field with a custom annotation. If the matrix has been trimmed somewhat, remaining indices may be specified. ### Response: def to_pdb(structure, filename, contigs=None, annotations=None, indices=None, special_bins=None): """From a structure (or matrix) generate the corresponding pdb file representing each chain as a contig/chromosome and filling the occupancy field with a custom annotation. If the matrix has been trimmed somewhat, remaining indices may be specified. """ n = len(structure) letters = (string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation) * int(n / 94 + 1) if contigs is None: contigs = np.ones(n + 1) if annotations is None: annotations = np.zeros(n + 1) if indices is None: indices = range(n + 1) if special_bins is None: special_bins = np.zeros(n + 1) structure_shapes_match = structure.shape[0] == structure.shape[1] if isinstance(structure, np.ndarray) and structure_shapes_match: structure = (to_structure(structure)) X, Y, Z = (structure[:, i] for i in range(3)) Xmax, Ymax, Zmax = (np.max(np.abs(Xi)) for Xi in (X, Y, Z)) X *= 100.0 / Xmax Y *= 100.0 / Ymax Z *= 100.0 / Zmax X = np.around(X, 3) Y = np.around(Y, 3) Z = np.around(Z, 3) reference = ["OW", "OW", "CE", "TE", "tR"] with open(filename, 'w') as f: for i in range(1, n): line = "ATOM" # 1-4 "ATOM" line += " " # 5-6 unused line += str(i).rjust(5) # 7-11 atom serial number line += " " # 12 unused line += reference[special_bins[i]].rjust(4) # 13-16 atom name line += " " # 17 alternate location indicator line += "SOL" # 18-20 residue name line += " " # 21 unused line += letters[int(contigs[indices[i]] - 1) ] # 22 chain identifier line += str(i).rjust(4) # 23-26 residue sequence number line += " " # 27 code for insertion of residues line += " " # 28-30 unused line += 
str(X[i]).rjust(8) # 31-38 X orthogonal Å coordinate line += str(Y[i]).rjust(8) # 39-46 Y orthogonal Å coordinate line += str(Z[i]).rjust(8) # 47-54 Z orthogonal Å coordinate line += "1.00".rjust(6) # 55-60 Occupancy # 61-66 Temperature factor line += str(annotations[i - 1]).rjust(6) line += " " # 67-72 unused line += " " # 73-76 segment identifier line += "O".rjust(2) # 77-78 element symbol line += "\n" f.write(line)
def _make_conn(shape): """ Connectivity builder using Numba for speed boost. """ shape = np.array(shape) Ne = shape.prod() if len(shape) == 2: nx, ny = np.array(shape) +1 conn = np.zeros((Ne, 4), dtype = np.int32) counter = 0 pattern = np.array([0,1,1+nx,nx]) for j in range(shape[1]): for i in range(shape[0]): conn[counter] = pattern + 1 + i + j*nx counter += 1 if len(shape) == 3: nx, ny, nz = np.array(shape) +1 conn = np.zeros((Ne, 8), dtype = np.int32) counter = 0 pattern = np.array([0,1,1+nx,nx,nx*ny,1+nx*ny,1+(nx+1)*ny,(nx+1)*ny]) for k in range(shape[2]): for j in range(shape[1]): for i in range(shape[0]): conn[counter] = pattern + 1 + i + j*nx+ k*nx*ny counter += 1 return conn
Connectivity builder using Numba for speed boost.
Below is the the instruction that describes the task: ### Input: Connectivity builder using Numba for speed boost. ### Response: def _make_conn(shape): """ Connectivity builder using Numba for speed boost. """ shape = np.array(shape) Ne = shape.prod() if len(shape) == 2: nx, ny = np.array(shape) +1 conn = np.zeros((Ne, 4), dtype = np.int32) counter = 0 pattern = np.array([0,1,1+nx,nx]) for j in range(shape[1]): for i in range(shape[0]): conn[counter] = pattern + 1 + i + j*nx counter += 1 if len(shape) == 3: nx, ny, nz = np.array(shape) +1 conn = np.zeros((Ne, 8), dtype = np.int32) counter = 0 pattern = np.array([0,1,1+nx,nx,nx*ny,1+nx*ny,1+(nx+1)*ny,(nx+1)*ny]) for k in range(shape[2]): for j in range(shape[1]): for i in range(shape[0]): conn[counter] = pattern + 1 + i + j*nx+ k*nx*ny counter += 1 return conn
def getAllReadGroups(self): """ Get all read groups in a read group set """ for dataset in self.getAllDatasets(): iterator = self._client.search_read_group_sets( dataset_id=dataset.id) for readGroupSet in iterator: readGroupSet = self._client.get_read_group_set( readGroupSet.id) for readGroup in readGroupSet.read_groups: yield readGroup.id
Get all read groups in a read group set
Below is the the instruction that describes the task: ### Input: Get all read groups in a read group set ### Response: def getAllReadGroups(self): """ Get all read groups in a read group set """ for dataset in self.getAllDatasets(): iterator = self._client.search_read_group_sets( dataset_id=dataset.id) for readGroupSet in iterator: readGroupSet = self._client.get_read_group_set( readGroupSet.id) for readGroup in readGroupSet.read_groups: yield readGroup.id
def visual_callback_3d(fig=None, plot_each=1): """ Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 3D images. Parameters ---------- fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. plot_each : positive integer The plot will be updated once every `plot_each` calls to the callback function. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`. """ from mpl_toolkits.mplot3d import Axes3D # PyMCubes package is required for `visual_callback_3d` try: import mcubes except ImportError: raise ImportError("PyMCubes is required for 3D `visual_callback_3d`") # Prepare the visual environment. if fig is None: fig = plt.figure() fig.clf() ax = fig.add_subplot(111, projection='3d') plt.pause(0.001) counter = [-1] def callback(levelset): counter[0] += 1 if (counter[0] % plot_each) != 0: return if ax.collections: del ax.collections[0] coords, triangles = mcubes.marching_cubes(levelset, 0.5) ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=triangles) plt.pause(0.1) return callback
Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 3D images. Parameters ---------- fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. plot_each : positive integer The plot will be updated once every `plot_each` calls to the callback function. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`.
Below is the the instruction that describes the task: ### Input: Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 3D images. Parameters ---------- fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. plot_each : positive integer The plot will be updated once every `plot_each` calls to the callback function. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`. ### Response: def visual_callback_3d(fig=None, plot_each=1): """ Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 3D images. Parameters ---------- fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. plot_each : positive integer The plot will be updated once every `plot_each` calls to the callback function. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`. """ from mpl_toolkits.mplot3d import Axes3D # PyMCubes package is required for `visual_callback_3d` try: import mcubes except ImportError: raise ImportError("PyMCubes is required for 3D `visual_callback_3d`") # Prepare the visual environment. 
if fig is None: fig = plt.figure() fig.clf() ax = fig.add_subplot(111, projection='3d') plt.pause(0.001) counter = [-1] def callback(levelset): counter[0] += 1 if (counter[0] % plot_each) != 0: return if ax.collections: del ax.collections[0] coords, triangles = mcubes.marching_cubes(levelset, 0.5) ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=triangles) plt.pause(0.1) return callback
def read(self, source = None, **options): ''' Reads and optionally parses a single message. :Parameters: - `source` - optional data buffer to be read, if not specified data is read from the wrapped stream :Options: - `raw` (`boolean`) - indicates whether read data should parsed or returned in raw byte form - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: :class:`.QMessage` - read data (parsed or raw byte form) along with meta information ''' message = self.read_header(source) message.data = self.read_data(message.size, message.is_compressed, **options) return message
Reads and optionally parses a single message. :Parameters: - `source` - optional data buffer to be read, if not specified data is read from the wrapped stream :Options: - `raw` (`boolean`) - indicates whether read data should parsed or returned in raw byte form - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: :class:`.QMessage` - read data (parsed or raw byte form) along with meta information
Below is the the instruction that describes the task: ### Input: Reads and optionally parses a single message. :Parameters: - `source` - optional data buffer to be read, if not specified data is read from the wrapped stream :Options: - `raw` (`boolean`) - indicates whether read data should parsed or returned in raw byte form - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: :class:`.QMessage` - read data (parsed or raw byte form) along with meta information ### Response: def read(self, source = None, **options): ''' Reads and optionally parses a single message. :Parameters: - `source` - optional data buffer to be read, if not specified data is read from the wrapped stream :Options: - `raw` (`boolean`) - indicates whether read data should parsed or returned in raw byte form - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: :class:`.QMessage` - read data (parsed or raw byte form) along with meta information ''' message = self.read_header(source) message.data = self.read_data(message.size, message.is_compressed, **options) return message
def lyap_r_len(**kwargs): """ Helper function that calculates the minimum number of data points required to use lyap_r. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_r (required: emb_dim, lag, trajectory_len and min_tsep) Returns: minimum number of data points required to call lyap_r with the given parameters """ # minimum length required to find single orbit vector min_len = (kwargs['emb_dim'] - 1) * kwargs['lag'] + 1 # we need trajectory_len orbit vectors to follow a complete trajectory min_len += kwargs['trajectory_len'] - 1 # we need min_tsep * 2 + 1 orbit vectors to find neighbors for each min_len += kwargs['min_tsep'] * 2 + 1 return min_len
Helper function that calculates the minimum number of data points required to use lyap_r. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_r (required: emb_dim, lag, trajectory_len and min_tsep) Returns: minimum number of data points required to call lyap_r with the given parameters
Below is the the instruction that describes the task: ### Input: Helper function that calculates the minimum number of data points required to use lyap_r. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_r (required: emb_dim, lag, trajectory_len and min_tsep) Returns: minimum number of data points required to call lyap_r with the given parameters ### Response: def lyap_r_len(**kwargs): """ Helper function that calculates the minimum number of data points required to use lyap_r. Note that none of the required parameters may be set to None. Kwargs: kwargs(dict): arguments used for lyap_r (required: emb_dim, lag, trajectory_len and min_tsep) Returns: minimum number of data points required to call lyap_r with the given parameters """ # minimum length required to find single orbit vector min_len = (kwargs['emb_dim'] - 1) * kwargs['lag'] + 1 # we need trajectory_len orbit vectors to follow a complete trajectory min_len += kwargs['trajectory_len'] - 1 # we need min_tsep * 2 + 1 orbit vectors to find neighbors for each min_len += kwargs['min_tsep'] * 2 + 1 return min_len
def _create_record(name, field_defs, step_name, inputs, unlist, file_vs, std_vs, parallel): """Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization. """ if field_defs: fields = [] inherit = [] inherit_all = False inherit_exclude = [] for fdef in field_defs: if not fdef.get("type"): if fdef["id"] == "inherit": inherit_all = True inherit_exclude = fdef.get("exclude", []) else: inherit.append(fdef["id"]) else: cur = {"name": _get_string_vid(fdef["id"]), "type": fdef["type"]} fields.append(_add_secondary_to_rec_field(fdef, cur)) if inherit_all: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, exclude=inherit_exclude)) elif inherit: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, inherit)) else: fields = _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel) out = {"id": "%s/%s" % (step_name, name), "type": {"name": name, "type": "record", "fields": fields}} if parallel in ["batch-single", "multi-batch"]: out = _nest_variable(out) return out
Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization.
Below is the the instruction that describes the task: ### Input: Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization. ### Response: def _create_record(name, field_defs, step_name, inputs, unlist, file_vs, std_vs, parallel): """Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization. """ if field_defs: fields = [] inherit = [] inherit_all = False inherit_exclude = [] for fdef in field_defs: if not fdef.get("type"): if fdef["id"] == "inherit": inherit_all = True inherit_exclude = fdef.get("exclude", []) else: inherit.append(fdef["id"]) else: cur = {"name": _get_string_vid(fdef["id"]), "type": fdef["type"]} fields.append(_add_secondary_to_rec_field(fdef, cur)) if inherit_all: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, exclude=inherit_exclude)) elif inherit: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, inherit)) else: fields = _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel) out = {"id": "%s/%s" % (step_name, name), "type": {"name": name, "type": "record", "fields": fields}} if parallel in ["batch-single", "multi-batch"]: out = _nest_variable(out) return out
def calculate_positions(positions): """Calculates position information""" current_position = {'data-x': 0, 'data-y': 0, 'data-z': 0, 'data-rotate-x': 0, 'data-rotate-y': 0, 'data-rotate-z': 0, 'data-scale': 1, } positer = iter(positions) position = next(positer) _update_position(current_position, position) while True: if 'path' in position: # Start of a new path! path = position['path'] # Follow the path specification first_point = _pos_to_cord(current_position) # Paths that end in Z or z are closed. closed_path = path.strip()[-1].upper() == 'Z' path = parse_path(path) # Find out how many positions should be calculated: count = 1 last = False deferred_positions = [] while True: try: position = next(positer) deferred_positions.append(position) except StopIteration: last = True # This path goes to the end break if not position.get('is_path') or 'path' in position: # The end of the path, or the start of a new one break count += 1 if count < 2: raise AssertionError("The path specification is only used for " "one slide, which makes it pointless.") if closed_path: # This path closes in on itself. Skip the last part, so that # the first and last step doesn't overlap. endcount = count + 1 else: endcount = count multiplier = (endcount * DEFAULT_MOVEMENT) / path.length() offset = path.point(0) path_iter = iter(deferred_positions) for x in range(count): point = path.point(x / (endcount - 1)) point = ((point - offset) * multiplier) + first_point current_position.update(_coord_to_pos(point)) rotation = _path_angle(path, x / (endcount - 1)) current_position['data-rotate-z'] = rotation yield current_position.copy() try: position = next(path_iter) except StopIteration: last = True break _update_position(current_position, position) if last: break continue yield current_position.copy() try: position = next(positer) except StopIteration: break _update_position(current_position, position)
Calculates position information
Below is the the instruction that describes the task: ### Input: Calculates position information ### Response: def calculate_positions(positions): """Calculates position information""" current_position = {'data-x': 0, 'data-y': 0, 'data-z': 0, 'data-rotate-x': 0, 'data-rotate-y': 0, 'data-rotate-z': 0, 'data-scale': 1, } positer = iter(positions) position = next(positer) _update_position(current_position, position) while True: if 'path' in position: # Start of a new path! path = position['path'] # Follow the path specification first_point = _pos_to_cord(current_position) # Paths that end in Z or z are closed. closed_path = path.strip()[-1].upper() == 'Z' path = parse_path(path) # Find out how many positions should be calculated: count = 1 last = False deferred_positions = [] while True: try: position = next(positer) deferred_positions.append(position) except StopIteration: last = True # This path goes to the end break if not position.get('is_path') or 'path' in position: # The end of the path, or the start of a new one break count += 1 if count < 2: raise AssertionError("The path specification is only used for " "one slide, which makes it pointless.") if closed_path: # This path closes in on itself. Skip the last part, so that # the first and last step doesn't overlap. 
endcount = count + 1 else: endcount = count multiplier = (endcount * DEFAULT_MOVEMENT) / path.length() offset = path.point(0) path_iter = iter(deferred_positions) for x in range(count): point = path.point(x / (endcount - 1)) point = ((point - offset) * multiplier) + first_point current_position.update(_coord_to_pos(point)) rotation = _path_angle(path, x / (endcount - 1)) current_position['data-rotate-z'] = rotation yield current_position.copy() try: position = next(path_iter) except StopIteration: last = True break _update_position(current_position, position) if last: break continue yield current_position.copy() try: position = next(positer) except StopIteration: break _update_position(current_position, position)
def in_channels(m:nn.Module) -> List[int]: "Return the shape of the first weight layer in `m`." for l in flatten_model(m): if hasattr(l, 'weight'): return l.weight.shape[1] raise Exception('No weight layer')
Return the shape of the first weight layer in `m`.
Below is the the instruction that describes the task: ### Input: Return the shape of the first weight layer in `m`. ### Response: def in_channels(m:nn.Module) -> List[int]: "Return the shape of the first weight layer in `m`." for l in flatten_model(m): if hasattr(l, 'weight'): return l.weight.shape[1] raise Exception('No weight layer')
def parse_graph_section(config_obj, section, outdir_default, indir_default): """ Parse the GRAPH section of the config to extract useful values :param config_obj: ConfigParser object :param section: Section name :param outdir_default: Default output directory passed in args :param indir_default: Default input directory passed in args :return: List of options extracted from the GRAPH section """ graph_timezone = None graphing_library = CONSTANTS.DEFAULT_GRAPHING_LIBRARY crossplots = [] if config_obj.has_option(section, 'graphing_library'): graphing_library = config_obj.get(section, 'graphing_library') if config_obj.has_option(section, 'graphs'): graphs_string = config_obj.get(section, 'graphs') crossplots = graphs_string.split() # Supporting both outdir and output_dir if config_obj.has_option(section, 'outdir'): outdir_default = config_obj.get(section, 'outdir') if config_obj.has_option(section, 'output_dir'): outdir_default = config_obj.get(section, 'output_dir') if config_obj.has_option(section, 'input_dir'): indir_default = config_obj.get(section, 'input_dir') if config_obj.has_option(section, 'graph_timezone'): graph_timezone = config_obj.get(section, 'graph_timezone') if graph_timezone not in ("UTC", "PST", "PDT"): logger.warn('Unsupported timezone ' + graph_timezone + ' specified in option graph_timezone. Will use UTC instead') graph_timezone = "UTC" return graphing_library, crossplots, outdir_default, indir_default, graph_timezone
Parse the GRAPH section of the config to extract useful values :param config_obj: ConfigParser object :param section: Section name :param outdir_default: Default output directory passed in args :param indir_default: Default input directory passed in args :return: List of options extracted from the GRAPH section
Below is the the instruction that describes the task: ### Input: Parse the GRAPH section of the config to extract useful values :param config_obj: ConfigParser object :param section: Section name :param outdir_default: Default output directory passed in args :param indir_default: Default input directory passed in args :return: List of options extracted from the GRAPH section ### Response: def parse_graph_section(config_obj, section, outdir_default, indir_default): """ Parse the GRAPH section of the config to extract useful values :param config_obj: ConfigParser object :param section: Section name :param outdir_default: Default output directory passed in args :param indir_default: Default input directory passed in args :return: List of options extracted from the GRAPH section """ graph_timezone = None graphing_library = CONSTANTS.DEFAULT_GRAPHING_LIBRARY crossplots = [] if config_obj.has_option(section, 'graphing_library'): graphing_library = config_obj.get(section, 'graphing_library') if config_obj.has_option(section, 'graphs'): graphs_string = config_obj.get(section, 'graphs') crossplots = graphs_string.split() # Supporting both outdir and output_dir if config_obj.has_option(section, 'outdir'): outdir_default = config_obj.get(section, 'outdir') if config_obj.has_option(section, 'output_dir'): outdir_default = config_obj.get(section, 'output_dir') if config_obj.has_option(section, 'input_dir'): indir_default = config_obj.get(section, 'input_dir') if config_obj.has_option(section, 'graph_timezone'): graph_timezone = config_obj.get(section, 'graph_timezone') if graph_timezone not in ("UTC", "PST", "PDT"): logger.warn('Unsupported timezone ' + graph_timezone + ' specified in option graph_timezone. Will use UTC instead') graph_timezone = "UTC" return graphing_library, crossplots, outdir_default, indir_default, graph_timezone
def rtCalibration(fiContainer, allowedRtDev=60, allowedMzDev=2.5, reference=None, specfiles=None, showPlots=False, plotDir=None, minIntensity=1e5): """Performs a retention time calibration between :class:`FeatureItem` of multiple specfiles. :ivar fiContainer: Perform alignment on :class:`FeatureItem` in :attr:`FeatureContainer.specfiles` :ivar allowedRtDev: maxium retention time difference of two features in two runs to be matched :ivar allowedMzDev: maxium relative m/z difference (in ppm) of two features in two runs to be matched :ivar showPlots: boolean, True if a plot should be generated which shows to results of the calibration :ivar plotDir: if not None and showPlots is True, the plots are saved to this location. :ivar reference: Can be used to specifically specify a reference specfile :ivar specfiles: Limit alignment to those specfiles in the fiContainer :ivar minIntensity: consider only features with an intensity above this value """ #TODO: long function, maybe split into subfunctions specfiles = [_ for _ in viewkeys(fiContainer.info)] if specfiles is None else specfiles matchCharge = True refMzKey = 'mz' mzKey = 'mz' if reference is not None: if reference in specfiles: specfiles = [reference] + list(set(specfiles).difference(set([reference]))) else: print('Specified reference specfile not present, using reference: ', specfiles[0]) for featureItem in fiContainer.getItems(specfiles=specfiles): if not hasattr(featureItem, 'obsRt'): setattr(featureItem, 'obsRt', featureItem.rt) referenceArrays = None for specfile in specfiles: featureArrays = fiContainer.getArrays(['rt', 'charge', 'mz', 'intensity'], specfiles=specfile, sort='rt' ) if minIntensity is not None: intensityMask = (featureArrays['intensity'] > minIntensity) for key in list(viewkeys(featureArrays)): featureArrays[key] = featureArrays[key][intensityMask] if referenceArrays is None: referenceArrays = featureArrays if showPlots: print('Reference: '+specfile) continue rtPosList = list() rtDevList = 
list() mzDevRelList = list() mzDevAbsList = list() for featurePos in range(len(featureArrays[mzKey])): currRt = featureArrays['rt'][featurePos] currMz = featureArrays[mzKey][featurePos] currZ = featureArrays['charge'][featurePos] mzLimitUp = currMz*(1+allowedMzDev*1E-6) mzLimitLow = currMz*(1-allowedMzDev*1E-6) rtLimitUp = currRt+allowedRtDev rtLimitLow = currRt-allowedRtDev posL = bisect.bisect_left(referenceArrays['rt'], rtLimitLow) posU = bisect.bisect_right(referenceArrays['rt'], rtLimitUp) refMask = (referenceArrays[refMzKey][posL:posU] <= mzLimitUp) & (referenceArrays[refMzKey][posL:posU] >= mzLimitLow) if matchCharge: refMask = refMask & (referenceArrays['charge'][posL:posU] == currZ) currMzDev = abs(referenceArrays[refMzKey][posL:posU][refMask] - currMz) bestHitMask = currMzDev.argsort() for refRt, refMz in zip(referenceArrays['rt'][posL:posU][refMask][bestHitMask], referenceArrays[refMzKey][posL:posU][refMask][bestHitMask]): rtPosList.append(currRt) rtDevList.append(currRt - refRt) mzDevRelList.append((1 - currMz / refMz)*1E6) mzDevAbsList.append(currMz - refMz) break rtPosList = numpy.array(rtPosList) rtDevList = numpy.array(rtDevList) splineInitialKnots = int(max(rtPosList) - min(rtPosList)) dataFit = aux.DataFit(rtDevList, rtPosList) dataFit.splineInitialKnots = splineInitialKnots dataFit.splineTerminalExpansion = 0.2 dataFit.processInput(dataAveraging='median', windowSize=10) dataFit.generateSplines() if showPlots: corrDevArr = rtDevList - dataFit.corrArray(rtPosList) timePoints = [min(rtPosList) + x for x in range(int(max(rtPosList)-min(rtPosList)))] corrValues = dataFit.corrArray(timePoints) fig, ax = plt.subplots(3, 2, sharex=False, sharey=False, figsize=(20, 18)) fig.suptitle(specfile) ax[0][0].hist(rtDevList, bins=100, color='grey', alpha=0.5, label='observed') ax[0][0].hist(corrDevArr, bins=100, color='red', alpha=0.5, label='corrected') ax[0][0].set_title('Retention time deviation') ax[0][0].legend() ax[0][0].set_xlim(allowedRtDev*-1, 
allowedRtDev) ax[0][1].hist(mzDevRelList, bins=100, color='grey') ax[0][1].set_title('Mz deviation [ppm]') ax[1][0].scatter(rtPosList, rtDevList, color='grey', alpha=0.1, label='observed') ax[1][0].plot(timePoints,corrValues, color='red', alpha=0.5, label='correction function') ax[1][0].set_title('Retention time deviation over time') ax[1][0].legend() ax[1][0].set_ylim(allowedRtDev*-1, allowedRtDev) ax[1][1].scatter(rtPosList, mzDevRelList, color='grey', alpha=0.1) ax[1][1].set_title('Mz deviation over time') ax[1][1].set_ylim(allowedMzDev*-1, allowedMzDev) ax[2][0].scatter(rtPosList, corrDevArr, color='grey', alpha=0.1) ax[2][0].set_title('Aligned retention time deviation over time') ax[2][0].set_ylim(allowedRtDev*-1, allowedRtDev) if plotDir is not None: plotloc = aux.joinpath(plotDir, specfile+'.rtAlign.png') fig.savefig(plotloc) else: fig.show() featureArrays = fiContainer.getArrays(['rt'], specfiles=specfile, sort='rt') featureArrays['corrRt'] = featureArrays['rt'] - dataFit.corrArray(featureArrays['rt']) for featureId, corrRt, rt in zip(featureArrays['id'], featureArrays['corrRt'], featureArrays['rt']): fiContainer.container[specfile][featureId].rt = corrRt
Performs a retention time calibration between :class:`FeatureItem` of multiple specfiles. :ivar fiContainer: Perform alignment on :class:`FeatureItem` in :attr:`FeatureContainer.specfiles` :ivar allowedRtDev: maxium retention time difference of two features in two runs to be matched :ivar allowedMzDev: maxium relative m/z difference (in ppm) of two features in two runs to be matched :ivar showPlots: boolean, True if a plot should be generated which shows to results of the calibration :ivar plotDir: if not None and showPlots is True, the plots are saved to this location. :ivar reference: Can be used to specifically specify a reference specfile :ivar specfiles: Limit alignment to those specfiles in the fiContainer :ivar minIntensity: consider only features with an intensity above this value
Below is the the instruction that describes the task: ### Input: Performs a retention time calibration between :class:`FeatureItem` of multiple specfiles. :ivar fiContainer: Perform alignment on :class:`FeatureItem` in :attr:`FeatureContainer.specfiles` :ivar allowedRtDev: maxium retention time difference of two features in two runs to be matched :ivar allowedMzDev: maxium relative m/z difference (in ppm) of two features in two runs to be matched :ivar showPlots: boolean, True if a plot should be generated which shows to results of the calibration :ivar plotDir: if not None and showPlots is True, the plots are saved to this location. :ivar reference: Can be used to specifically specify a reference specfile :ivar specfiles: Limit alignment to those specfiles in the fiContainer :ivar minIntensity: consider only features with an intensity above this value ### Response: def rtCalibration(fiContainer, allowedRtDev=60, allowedMzDev=2.5, reference=None, specfiles=None, showPlots=False, plotDir=None, minIntensity=1e5): """Performs a retention time calibration between :class:`FeatureItem` of multiple specfiles. :ivar fiContainer: Perform alignment on :class:`FeatureItem` in :attr:`FeatureContainer.specfiles` :ivar allowedRtDev: maxium retention time difference of two features in two runs to be matched :ivar allowedMzDev: maxium relative m/z difference (in ppm) of two features in two runs to be matched :ivar showPlots: boolean, True if a plot should be generated which shows to results of the calibration :ivar plotDir: if not None and showPlots is True, the plots are saved to this location. 
:ivar reference: Can be used to specifically specify a reference specfile :ivar specfiles: Limit alignment to those specfiles in the fiContainer :ivar minIntensity: consider only features with an intensity above this value """ #TODO: long function, maybe split into subfunctions specfiles = [_ for _ in viewkeys(fiContainer.info)] if specfiles is None else specfiles matchCharge = True refMzKey = 'mz' mzKey = 'mz' if reference is not None: if reference in specfiles: specfiles = [reference] + list(set(specfiles).difference(set([reference]))) else: print('Specified reference specfile not present, using reference: ', specfiles[0]) for featureItem in fiContainer.getItems(specfiles=specfiles): if not hasattr(featureItem, 'obsRt'): setattr(featureItem, 'obsRt', featureItem.rt) referenceArrays = None for specfile in specfiles: featureArrays = fiContainer.getArrays(['rt', 'charge', 'mz', 'intensity'], specfiles=specfile, sort='rt' ) if minIntensity is not None: intensityMask = (featureArrays['intensity'] > minIntensity) for key in list(viewkeys(featureArrays)): featureArrays[key] = featureArrays[key][intensityMask] if referenceArrays is None: referenceArrays = featureArrays if showPlots: print('Reference: '+specfile) continue rtPosList = list() rtDevList = list() mzDevRelList = list() mzDevAbsList = list() for featurePos in range(len(featureArrays[mzKey])): currRt = featureArrays['rt'][featurePos] currMz = featureArrays[mzKey][featurePos] currZ = featureArrays['charge'][featurePos] mzLimitUp = currMz*(1+allowedMzDev*1E-6) mzLimitLow = currMz*(1-allowedMzDev*1E-6) rtLimitUp = currRt+allowedRtDev rtLimitLow = currRt-allowedRtDev posL = bisect.bisect_left(referenceArrays['rt'], rtLimitLow) posU = bisect.bisect_right(referenceArrays['rt'], rtLimitUp) refMask = (referenceArrays[refMzKey][posL:posU] <= mzLimitUp) & (referenceArrays[refMzKey][posL:posU] >= mzLimitLow) if matchCharge: refMask = refMask & (referenceArrays['charge'][posL:posU] == currZ) currMzDev = 
abs(referenceArrays[refMzKey][posL:posU][refMask] - currMz) bestHitMask = currMzDev.argsort() for refRt, refMz in zip(referenceArrays['rt'][posL:posU][refMask][bestHitMask], referenceArrays[refMzKey][posL:posU][refMask][bestHitMask]): rtPosList.append(currRt) rtDevList.append(currRt - refRt) mzDevRelList.append((1 - currMz / refMz)*1E6) mzDevAbsList.append(currMz - refMz) break rtPosList = numpy.array(rtPosList) rtDevList = numpy.array(rtDevList) splineInitialKnots = int(max(rtPosList) - min(rtPosList)) dataFit = aux.DataFit(rtDevList, rtPosList) dataFit.splineInitialKnots = splineInitialKnots dataFit.splineTerminalExpansion = 0.2 dataFit.processInput(dataAveraging='median', windowSize=10) dataFit.generateSplines() if showPlots: corrDevArr = rtDevList - dataFit.corrArray(rtPosList) timePoints = [min(rtPosList) + x for x in range(int(max(rtPosList)-min(rtPosList)))] corrValues = dataFit.corrArray(timePoints) fig, ax = plt.subplots(3, 2, sharex=False, sharey=False, figsize=(20, 18)) fig.suptitle(specfile) ax[0][0].hist(rtDevList, bins=100, color='grey', alpha=0.5, label='observed') ax[0][0].hist(corrDevArr, bins=100, color='red', alpha=0.5, label='corrected') ax[0][0].set_title('Retention time deviation') ax[0][0].legend() ax[0][0].set_xlim(allowedRtDev*-1, allowedRtDev) ax[0][1].hist(mzDevRelList, bins=100, color='grey') ax[0][1].set_title('Mz deviation [ppm]') ax[1][0].scatter(rtPosList, rtDevList, color='grey', alpha=0.1, label='observed') ax[1][0].plot(timePoints,corrValues, color='red', alpha=0.5, label='correction function') ax[1][0].set_title('Retention time deviation over time') ax[1][0].legend() ax[1][0].set_ylim(allowedRtDev*-1, allowedRtDev) ax[1][1].scatter(rtPosList, mzDevRelList, color='grey', alpha=0.1) ax[1][1].set_title('Mz deviation over time') ax[1][1].set_ylim(allowedMzDev*-1, allowedMzDev) ax[2][0].scatter(rtPosList, corrDevArr, color='grey', alpha=0.1) ax[2][0].set_title('Aligned retention time deviation over time') 
ax[2][0].set_ylim(allowedRtDev*-1, allowedRtDev) if plotDir is not None: plotloc = aux.joinpath(plotDir, specfile+'.rtAlign.png') fig.savefig(plotloc) else: fig.show() featureArrays = fiContainer.getArrays(['rt'], specfiles=specfile, sort='rt') featureArrays['corrRt'] = featureArrays['rt'] - dataFit.corrArray(featureArrays['rt']) for featureId, corrRt, rt in zip(featureArrays['id'], featureArrays['corrRt'], featureArrays['rt']): fiContainer.container[specfile][featureId].rt = corrRt
def prior_predictive_to_xarray(self): """Convert prior_predictive samples to xarray.""" data = self.prior_predictive if not isinstance(data, dict): raise TypeError("DictConverter.prior_predictive is not a dictionary") return dict_to_dataset(data, library=None, coords=self.coords, dims=self.dims)
Convert prior_predictive samples to xarray.
Below is the the instruction that describes the task: ### Input: Convert prior_predictive samples to xarray. ### Response: def prior_predictive_to_xarray(self): """Convert prior_predictive samples to xarray.""" data = self.prior_predictive if not isinstance(data, dict): raise TypeError("DictConverter.prior_predictive is not a dictionary") return dict_to_dataset(data, library=None, coords=self.coords, dims=self.dims)
def run(self): """Load all paintings into the database """ df = PaintingsInputData().load() # rename columns df.rename(columns={'paintingLabel': 'name'}, inplace=True) # get artist IDs, map via artist wiki ID artists = models.Entity.query_with_attributes('artist', self.client) df['artist_id'] = df['creator_wiki_id'].map(artists.set_index('wiki_id')['id']) # define attributes to create attribute_columns = ['name', 'wiki_id', 'area', 'decade', 'artist_id'] # store entities and attributes self.store(df, attribute_columns) self.done()
Load all paintings into the database
Below is the the instruction that describes the task: ### Input: Load all paintings into the database ### Response: def run(self): """Load all paintings into the database """ df = PaintingsInputData().load() # rename columns df.rename(columns={'paintingLabel': 'name'}, inplace=True) # get artist IDs, map via artist wiki ID artists = models.Entity.query_with_attributes('artist', self.client) df['artist_id'] = df['creator_wiki_id'].map(artists.set_index('wiki_id')['id']) # define attributes to create attribute_columns = ['name', 'wiki_id', 'area', 'decade', 'artist_id'] # store entities and attributes self.store(df, attribute_columns) self.done()
def GetNumberOfRows(self, table_name): """Retrieves the number of rows in the table. Args: table_name (str): name of the table. Returns: int: number of rows. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened. """ if not self._connection: raise IOError('Not opened.') self._cursor.execute(self._NUMBER_OF_ROWS_QUERY.format(table_name)) row = self._cursor.fetchone() if not row: raise IOError( 'Unable to retrieve number of rows of table: {0:s}'.format( table_name)) number_of_rows = row[0] if isinstance(number_of_rows, py2to3.STRING_TYPES): try: number_of_rows = int(number_of_rows, 10) except ValueError as exception: raise IOError(( 'Unable to determine number of rows of table: {0:s} ' 'with error: {1!s}').format(table_name, exception)) return number_of_rows
Retrieves the number of rows in the table. Args: table_name (str): name of the table. Returns: int: number of rows. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened.
Below is the the instruction that describes the task: ### Input: Retrieves the number of rows in the table. Args: table_name (str): name of the table. Returns: int: number of rows. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened. ### Response: def GetNumberOfRows(self, table_name): """Retrieves the number of rows in the table. Args: table_name (str): name of the table. Returns: int: number of rows. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened. """ if not self._connection: raise IOError('Not opened.') self._cursor.execute(self._NUMBER_OF_ROWS_QUERY.format(table_name)) row = self._cursor.fetchone() if not row: raise IOError( 'Unable to retrieve number of rows of table: {0:s}'.format( table_name)) number_of_rows = row[0] if isinstance(number_of_rows, py2to3.STRING_TYPES): try: number_of_rows = int(number_of_rows, 10) except ValueError as exception: raise IOError(( 'Unable to determine number of rows of table: {0:s} ' 'with error: {1!s}').format(table_name, exception)) return number_of_rows
def fetchThreadMessages(self, thread_id=None, limit=20, before=None): """ Get the last messages in a thread :param thread_id: User/Group ID to get messages from. See :ref:`intro_threads` :param limit: Max. number of messages to retrieve :param before: A timestamp, indicating from which point to retrieve messages :type limit: int :type before: int :return: :class:`models.Message` objects :rtype: list :raises: FBchatException if request failed """ thread_id, thread_type = self._getThread(thread_id, None) params = { "id": thread_id, "message_limit": limit, "load_messages": True, "load_read_receipts": True, "before": before, } j = self.graphql_request(GraphQL(doc_id="1860982147341344", params=params)) if j.get("message_thread") is None: raise FBchatException("Could not fetch thread {}: {}".format(thread_id, j)) messages = [ Message._from_graphql(message) for message in j["message_thread"]["messages"]["nodes"] ] messages.reverse() read_receipts = j["message_thread"]["read_receipts"]["nodes"] for message in messages: for receipt in read_receipts: if int(receipt["watermark"]) >= int(message.timestamp): message.read_by.append(receipt["actor"]["id"]) return messages
Get the last messages in a thread :param thread_id: User/Group ID to get messages from. See :ref:`intro_threads` :param limit: Max. number of messages to retrieve :param before: A timestamp, indicating from which point to retrieve messages :type limit: int :type before: int :return: :class:`models.Message` objects :rtype: list :raises: FBchatException if request failed
Below is the the instruction that describes the task: ### Input: Get the last messages in a thread :param thread_id: User/Group ID to get messages from. See :ref:`intro_threads` :param limit: Max. number of messages to retrieve :param before: A timestamp, indicating from which point to retrieve messages :type limit: int :type before: int :return: :class:`models.Message` objects :rtype: list :raises: FBchatException if request failed ### Response: def fetchThreadMessages(self, thread_id=None, limit=20, before=None): """ Get the last messages in a thread :param thread_id: User/Group ID to get messages from. See :ref:`intro_threads` :param limit: Max. number of messages to retrieve :param before: A timestamp, indicating from which point to retrieve messages :type limit: int :type before: int :return: :class:`models.Message` objects :rtype: list :raises: FBchatException if request failed """ thread_id, thread_type = self._getThread(thread_id, None) params = { "id": thread_id, "message_limit": limit, "load_messages": True, "load_read_receipts": True, "before": before, } j = self.graphql_request(GraphQL(doc_id="1860982147341344", params=params)) if j.get("message_thread") is None: raise FBchatException("Could not fetch thread {}: {}".format(thread_id, j)) messages = [ Message._from_graphql(message) for message in j["message_thread"]["messages"]["nodes"] ] messages.reverse() read_receipts = j["message_thread"]["read_receipts"]["nodes"] for message in messages: for receipt in read_receipts: if int(receipt["watermark"]) >= int(message.timestamp): message.read_by.append(receipt["actor"]["id"]) return messages
def _setup_metric_group_definitions(self): """ Return the dict of MetricGroupDefinition objects for this metrics context, by processing its 'metric-group-infos' property. """ # Dictionary of MetricGroupDefinition objects, by metric group name metric_group_definitions = dict() for mg_info in self.properties['metric-group-infos']: mg_name = mg_info['group-name'] mg_def = MetricGroupDefinition( name=mg_name, resource_class=_resource_class_from_group(mg_name), metric_definitions=dict()) for i, m_info in enumerate(mg_info['metric-infos']): m_name = m_info['metric-name'] m_def = MetricDefinition( index=i, name=m_name, type=_metric_type(m_info['metric-type']), unit=_metric_unit_from_name(m_name)) mg_def.metric_definitions[m_name] = m_def metric_group_definitions[mg_name] = mg_def return metric_group_definitions
Return the dict of MetricGroupDefinition objects for this metrics context, by processing its 'metric-group-infos' property.
Below is the the instruction that describes the task: ### Input: Return the dict of MetricGroupDefinition objects for this metrics context, by processing its 'metric-group-infos' property. ### Response: def _setup_metric_group_definitions(self): """ Return the dict of MetricGroupDefinition objects for this metrics context, by processing its 'metric-group-infos' property. """ # Dictionary of MetricGroupDefinition objects, by metric group name metric_group_definitions = dict() for mg_info in self.properties['metric-group-infos']: mg_name = mg_info['group-name'] mg_def = MetricGroupDefinition( name=mg_name, resource_class=_resource_class_from_group(mg_name), metric_definitions=dict()) for i, m_info in enumerate(mg_info['metric-infos']): m_name = m_info['metric-name'] m_def = MetricDefinition( index=i, name=m_name, type=_metric_type(m_info['metric-type']), unit=_metric_unit_from_name(m_name)) mg_def.metric_definitions[m_name] = m_def metric_group_definitions[mg_name] = mg_def return metric_group_definitions
def run(self, value, errors, request): """Return the object if valid and available, otherwise None.""" value = self.id_validator(value, errors, request) if errors: return None if self.fetch_by: thing = self.cls.fetch_by(**{self.fetch_by: value}) else: thing = self.cls.fetch_by_id(value) if not thing and self.source == SOURCE_MATCHDICT: # If part of the URL we should have a not-found error raise HTTPNotFound() elif not thing: self.add_error(errors, 'Invalid {0}' .format(self.cls.__name__)) return thing
Return the object if valid and available, otherwise None.
Below is the the instruction that describes the task: ### Input: Return the object if valid and available, otherwise None. ### Response: def run(self, value, errors, request): """Return the object if valid and available, otherwise None.""" value = self.id_validator(value, errors, request) if errors: return None if self.fetch_by: thing = self.cls.fetch_by(**{self.fetch_by: value}) else: thing = self.cls.fetch_by_id(value) if not thing and self.source == SOURCE_MATCHDICT: # If part of the URL we should have a not-found error raise HTTPNotFound() elif not thing: self.add_error(errors, 'Invalid {0}' .format(self.cls.__name__)) return thing
def content_type(self, content_type): """ Set the Content-Type option of a response. :type content_type: int :param content_type: the Content-Type """ option = Option() option.number = defines.OptionRegistry.CONTENT_TYPE.number option.value = int(content_type) self.add_option(option)
Set the Content-Type option of a response. :type content_type: int :param content_type: the Content-Type
Below is the the instruction that describes the task: ### Input: Set the Content-Type option of a response. :type content_type: int :param content_type: the Content-Type ### Response: def content_type(self, content_type): """ Set the Content-Type option of a response. :type content_type: int :param content_type: the Content-Type """ option = Option() option.number = defines.OptionRegistry.CONTENT_TYPE.number option.value = int(content_type) self.add_option(option)
def parse(self, input): """Passes input to each QueryLineHandler in use""" query = None for handler in self._line_handlers: try: query = handler.handle(input) except Exception as e: query = None finally: if query is not None: return query return None
Passes input to each QueryLineHandler in use
Below is the the instruction that describes the task: ### Input: Passes input to each QueryLineHandler in use ### Response: def parse(self, input): """Passes input to each QueryLineHandler in use""" query = None for handler in self._line_handlers: try: query = handler.handle(input) except Exception as e: query = None finally: if query is not None: return query return None
def unregister_file(self, file_node, raise_exception=False): """ Unregisters given :class:`umbra.components.factory.script_editor.nodes.FileNode` class Node from the Model. :param file_node: FileNode to unregister. :type file_node: FileNode :param raise_exception: Raise the exception. :type raise_exception: bool :return: FileNode. :rtype: FileNode """ if raise_exception: if not file_node in self.list_file_nodes(): raise foundations.exceptions.ProgrammingError("{0} | '{1}' file 'FileNode' isn't registered!".format( self.__class__.__name__, file_node)) LOGGER.debug("> Unregistering '{0}' file 'FileNode'.".format(file_node)) parent = file_node.parent row = file_node.row() self.beginRemoveRows(self.get_node_index(parent), row, row) parent.remove_child(row) self.endRemoveRows() self.file_unregistered.emit(file_node) return file_node
Unregisters given :class:`umbra.components.factory.script_editor.nodes.FileNode` class Node from the Model. :param file_node: FileNode to unregister. :type file_node: FileNode :param raise_exception: Raise the exception. :type raise_exception: bool :return: FileNode. :rtype: FileNode
Below is the the instruction that describes the task: ### Input: Unregisters given :class:`umbra.components.factory.script_editor.nodes.FileNode` class Node from the Model. :param file_node: FileNode to unregister. :type file_node: FileNode :param raise_exception: Raise the exception. :type raise_exception: bool :return: FileNode. :rtype: FileNode ### Response: def unregister_file(self, file_node, raise_exception=False): """ Unregisters given :class:`umbra.components.factory.script_editor.nodes.FileNode` class Node from the Model. :param file_node: FileNode to unregister. :type file_node: FileNode :param raise_exception: Raise the exception. :type raise_exception: bool :return: FileNode. :rtype: FileNode """ if raise_exception: if not file_node in self.list_file_nodes(): raise foundations.exceptions.ProgrammingError("{0} | '{1}' file 'FileNode' isn't registered!".format( self.__class__.__name__, file_node)) LOGGER.debug("> Unregistering '{0}' file 'FileNode'.".format(file_node)) parent = file_node.parent row = file_node.row() self.beginRemoveRows(self.get_node_index(parent), row, row) parent.remove_child(row) self.endRemoveRows() self.file_unregistered.emit(file_node) return file_node
def get_config_dict(config): ''' 获取配置数据字典 对传入的配置包进行格式化处理,生成一个字典对象 :param object config: 配置模块 :return: 配置数据字典 :rtype: dict ''' dst = {} tmp = config.__dict__ key_list = dir(config) key_list.remove('os') for k, v in tmp.items(): if k in key_list and not k.startswith('_'): dst[k] = v return dst
获取配置数据字典 对传入的配置包进行格式化处理,生成一个字典对象 :param object config: 配置模块 :return: 配置数据字典 :rtype: dict
Below is the the instruction that describes the task: ### Input: 获取配置数据字典 对传入的配置包进行格式化处理,生成一个字典对象 :param object config: 配置模块 :return: 配置数据字典 :rtype: dict ### Response: def get_config_dict(config): ''' 获取配置数据字典 对传入的配置包进行格式化处理,生成一个字典对象 :param object config: 配置模块 :return: 配置数据字典 :rtype: dict ''' dst = {} tmp = config.__dict__ key_list = dir(config) key_list.remove('os') for k, v in tmp.items(): if k in key_list and not k.startswith('_'): dst[k] = v return dst
def cleanup(self, sched, coro): """Remove this coro from the waiting for signal queue.""" try: sched.sigwait[self.name].remove((self, coro)) except ValueError: pass return True
Remove this coro from the waiting for signal queue.
Below is the the instruction that describes the task: ### Input: Remove this coro from the waiting for signal queue. ### Response: def cleanup(self, sched, coro): """Remove this coro from the waiting for signal queue.""" try: sched.sigwait[self.name].remove((self, coro)) except ValueError: pass return True
def _add_monomer(self, monomer, mon_vector, move_direction): """ extend the polymer molecule by adding a monomer along mon_vector direction Args: monomer (Molecule): monomer molecule mon_vector (numpy.array): monomer vector that points from head to tail. move_direction (numpy.array): direction along which the monomer will be positioned """ translate_by = self.molecule.cart_coords[self.end] + \ self.link_distance * move_direction monomer.translate_sites(range(len(monomer)), translate_by) if not self.linear_chain: self._align_monomer(monomer, mon_vector, move_direction) # add monomer if there are no crossings does_cross = False for i, site in enumerate(monomer): try: self.molecule.append(site.specie, site.coords, properties=site.properties) except: does_cross = True polymer_length = len(self.molecule) self.molecule.remove_sites( range(polymer_length - i, polymer_length)) break if not does_cross: self.length += 1 self.end += len(self.monomer)
extend the polymer molecule by adding a monomer along mon_vector direction Args: monomer (Molecule): monomer molecule mon_vector (numpy.array): monomer vector that points from head to tail. move_direction (numpy.array): direction along which the monomer will be positioned
Below is the the instruction that describes the task: ### Input: extend the polymer molecule by adding a monomer along mon_vector direction Args: monomer (Molecule): monomer molecule mon_vector (numpy.array): monomer vector that points from head to tail. move_direction (numpy.array): direction along which the monomer will be positioned ### Response: def _add_monomer(self, monomer, mon_vector, move_direction): """ extend the polymer molecule by adding a monomer along mon_vector direction Args: monomer (Molecule): monomer molecule mon_vector (numpy.array): monomer vector that points from head to tail. move_direction (numpy.array): direction along which the monomer will be positioned """ translate_by = self.molecule.cart_coords[self.end] + \ self.link_distance * move_direction monomer.translate_sites(range(len(monomer)), translate_by) if not self.linear_chain: self._align_monomer(monomer, mon_vector, move_direction) # add monomer if there are no crossings does_cross = False for i, site in enumerate(monomer): try: self.molecule.append(site.specie, site.coords, properties=site.properties) except: does_cross = True polymer_length = len(self.molecule) self.molecule.remove_sites( range(polymer_length - i, polymer_length)) break if not does_cross: self.length += 1 self.end += len(self.monomer)
def _run_main(main, argv): """Calls main, optionally with pdb or profiler.""" if FLAGS.run_with_pdb: sys.exit(pdb.runcall(main, argv)) elif FLAGS.run_with_profiling or FLAGS.profile_file: # Avoid import overhead since most apps (including performance-sensitive # ones) won't be run with profiling. import atexit if FLAGS.use_cprofile_for_profiling: import cProfile as profile else: import profile profiler = profile.Profile() if FLAGS.profile_file: atexit.register(profiler.dump_stats, FLAGS.profile_file) else: atexit.register(profiler.print_stats) retval = profiler.runcall(main, argv) sys.exit(retval) else: sys.exit(main(argv))
Calls main, optionally with pdb or profiler.
Below is the the instruction that describes the task: ### Input: Calls main, optionally with pdb or profiler. ### Response: def _run_main(main, argv): """Calls main, optionally with pdb or profiler.""" if FLAGS.run_with_pdb: sys.exit(pdb.runcall(main, argv)) elif FLAGS.run_with_profiling or FLAGS.profile_file: # Avoid import overhead since most apps (including performance-sensitive # ones) won't be run with profiling. import atexit if FLAGS.use_cprofile_for_profiling: import cProfile as profile else: import profile profiler = profile.Profile() if FLAGS.profile_file: atexit.register(profiler.dump_stats, FLAGS.profile_file) else: atexit.register(profiler.print_stats) retval = profiler.runcall(main, argv) sys.exit(retval) else: sys.exit(main(argv))
def unique_otuids(groups): """ Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values. """ uniques = {key: set() for key in groups} for i, group in enumerate(groups): to_combine = groups.values()[:i]+groups.values()[i+1:] combined = combine_sets(*to_combine) uniques[group] = groups[group].difference(combined) return uniques
Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values.
Below is the the instruction that describes the task: ### Input: Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values. ### Response: def unique_otuids(groups): """ Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values. """ uniques = {key: set() for key in groups} for i, group in enumerate(groups): to_combine = groups.values()[:i]+groups.values()[i+1:] combined = combine_sets(*to_combine) uniques[group] = groups[group].difference(combined) return uniques
def get_plat_specifier(): """ Standard platform specifier used by distutils """ import setuptools # NOQA import distutils plat_name = distutils.util.get_platform() plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3]) if hasattr(sys, 'gettotalrefcount'): plat_specifier += '-pydebug' return plat_specifier
Standard platform specifier used by distutils
Below is the the instruction that describes the task: ### Input: Standard platform specifier used by distutils ### Response: def get_plat_specifier(): """ Standard platform specifier used by distutils """ import setuptools # NOQA import distutils plat_name = distutils.util.get_platform() plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3]) if hasattr(sys, 'gettotalrefcount'): plat_specifier += '-pydebug' return plat_specifier
def ack(self, *id_list): """ Acknowledge that the message(s) were been processed by the consumer associated with the parent :py:class:`ConsumerGroup`. :param id_list: one or more message ids to acknowledge :returns: number of messages marked acknowledged """ return self.database.xack(self.key, self.group, *id_list)
Acknowledge that the message(s) were been processed by the consumer associated with the parent :py:class:`ConsumerGroup`. :param id_list: one or more message ids to acknowledge :returns: number of messages marked acknowledged
Below is the the instruction that describes the task: ### Input: Acknowledge that the message(s) were been processed by the consumer associated with the parent :py:class:`ConsumerGroup`. :param id_list: one or more message ids to acknowledge :returns: number of messages marked acknowledged ### Response: def ack(self, *id_list): """ Acknowledge that the message(s) were been processed by the consumer associated with the parent :py:class:`ConsumerGroup`. :param id_list: one or more message ids to acknowledge :returns: number of messages marked acknowledged """ return self.database.xack(self.key, self.group, *id_list)
def astype(self, dtype, casting='unsafe'): """ Cast the array to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to cast the array to (see numpy) """ rdd = self._rdd.mapValues(lambda v: v.astype(dtype, 'K', casting)) return self._constructor(rdd, dtype=dtype).__finalize__(self)
Cast the array to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to cast the array to (see numpy)
Below is the the instruction that describes the task: ### Input: Cast the array to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to cast the array to (see numpy) ### Response: def astype(self, dtype, casting='unsafe'): """ Cast the array to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to cast the array to (see numpy) """ rdd = self._rdd.mapValues(lambda v: v.astype(dtype, 'K', casting)) return self._constructor(rdd, dtype=dtype).__finalize__(self)
def GET_savedmodifiedconditionitemvalues(self) -> None: """ToDo: extend functionality and add tests""" dict_ = state.modifiedconditionitemvalues.get(self._id) if dict_ is None: self.GET_conditionitemvalues() else: for name, value in dict_.items(): self._outputs[name] = value
ToDo: extend functionality and add tests
Below is the the instruction that describes the task: ### Input: ToDo: extend functionality and add tests ### Response: def GET_savedmodifiedconditionitemvalues(self) -> None: """ToDo: extend functionality and add tests""" dict_ = state.modifiedconditionitemvalues.get(self._id) if dict_ is None: self.GET_conditionitemvalues() else: for name, value in dict_.items(): self._outputs[name] = value
def _to_webdav(self, docs_base, release): """Upload to WebDAV store.""" try: git_path = subprocess.check_output('git remote get-url origin 2>/dev/null', shell=True) except subprocess.CalledProcessError: git_path = '' else: git_path = git_path.decode('ascii').strip() git_path = git_path.replace('http://', '').replace('https://', '').replace('ssh://', '') git_path = re.search(r'[^:/]+?[:/](.+)', git_path) git_path = git_path.group(1).replace('.git', '') if git_path else '' url = None with self._zipped(docs_base) as handle: url_ns = dict(name=self.cfg.project.name, version=release, git_path=git_path) reply = requests.put(self.params['url'].format(**url_ns), data=handle.read(), headers={'Accept': 'application/json'}) if reply.status_code in range(200, 300): notify.info("{status_code} {reason}".format(**vars(reply))) try: data = reply.json() except ValueError as exc: notify.warning("Didn't get a JSON response! ({})".format(exc)) else: if 'downloadUri' in data: # Artifactory url = data['downloadUri'] + '!/index.html' elif reply.status_code == 301: url = reply.headers['location'] else: data = self.cfg.copy() data.update(self.params) data.update(vars(reply)) notify.error("{status_code} {reason} for PUT to {url}".format(**data)) if not url: notify.warning("Couldn't get URL from upload response!") return url
Upload to WebDAV store.
Below is the the instruction that describes the task: ### Input: Upload to WebDAV store. ### Response: def _to_webdav(self, docs_base, release): """Upload to WebDAV store.""" try: git_path = subprocess.check_output('git remote get-url origin 2>/dev/null', shell=True) except subprocess.CalledProcessError: git_path = '' else: git_path = git_path.decode('ascii').strip() git_path = git_path.replace('http://', '').replace('https://', '').replace('ssh://', '') git_path = re.search(r'[^:/]+?[:/](.+)', git_path) git_path = git_path.group(1).replace('.git', '') if git_path else '' url = None with self._zipped(docs_base) as handle: url_ns = dict(name=self.cfg.project.name, version=release, git_path=git_path) reply = requests.put(self.params['url'].format(**url_ns), data=handle.read(), headers={'Accept': 'application/json'}) if reply.status_code in range(200, 300): notify.info("{status_code} {reason}".format(**vars(reply))) try: data = reply.json() except ValueError as exc: notify.warning("Didn't get a JSON response! ({})".format(exc)) else: if 'downloadUri' in data: # Artifactory url = data['downloadUri'] + '!/index.html' elif reply.status_code == 301: url = reply.headers['location'] else: data = self.cfg.copy() data.update(self.params) data.update(vars(reply)) notify.error("{status_code} {reason} for PUT to {url}".format(**data)) if not url: notify.warning("Couldn't get URL from upload response!") return url
def process_exception(self, request, exception): """ Add user details. """ if request.user and hasattr(request.user, 'email'): request.META['USER'] = request.user.email
Add user details.
Below is the the instruction that describes the task: ### Input: Add user details. ### Response: def process_exception(self, request, exception): """ Add user details. """ if request.user and hasattr(request.user, 'email'): request.META['USER'] = request.user.email
def DbGetHostServersInfo(self, argin): """ Get info about all servers running on specified host, name, mode and level :param argin: Host name :type: tango.DevString :return: Server info for all servers running on specified host :rtype: tango.DevVarStringArray """ self._log.debug("In DbGetHostServersInfo()") argin = replace_wildcard(argin) return self.db.get_host_servers_info(argin)
Get info about all servers running on specified host, name, mode and level :param argin: Host name :type: tango.DevString :return: Server info for all servers running on specified host :rtype: tango.DevVarStringArray
Below is the the instruction that describes the task: ### Input: Get info about all servers running on specified host, name, mode and level :param argin: Host name :type: tango.DevString :return: Server info for all servers running on specified host :rtype: tango.DevVarStringArray ### Response: def DbGetHostServersInfo(self, argin): """ Get info about all servers running on specified host, name, mode and level :param argin: Host name :type: tango.DevString :return: Server info for all servers running on specified host :rtype: tango.DevVarStringArray """ self._log.debug("In DbGetHostServersInfo()") argin = replace_wildcard(argin) return self.db.get_host_servers_info(argin)
def view_contents(token, dstore): """ Returns the size of the contents of the datastore and its total size """ try: desc = dstore['oqparam'].description except KeyError: desc = '' data = sorted((dstore.getsize(key), key) for key in dstore) rows = [(key, humansize(nbytes)) for nbytes, key in data] total = '\n%s : %s' % ( dstore.filename, humansize(os.path.getsize(dstore.filename))) return rst_table(rows, header=(desc, '')) + total
Returns the size of the contents of the datastore and its total size
Below is the the instruction that describes the task: ### Input: Returns the size of the contents of the datastore and its total size ### Response: def view_contents(token, dstore): """ Returns the size of the contents of the datastore and its total size """ try: desc = dstore['oqparam'].description except KeyError: desc = '' data = sorted((dstore.getsize(key), key) for key in dstore) rows = [(key, humansize(nbytes)) for nbytes, key in data] total = '\n%s : %s' % ( dstore.filename, humansize(os.path.getsize(dstore.filename))) return rst_table(rows, header=(desc, '')) + total
def add_unit_to_channel(current): """ Subscribe users of a given unit to given channel JSON API: .. code-block:: python # request: { 'view':'_zops_add_unit_to_channel', 'unit_key': key, 'channel_key': key, 'read_only': boolean, # true if this is a Broadcast channel, # false if it's a normal chat room } # response: { 'existing': [key,], # existing members 'newly_added': [key,], # newly added members 'status': 'Created', 'code': 201 } """ read_only = current.input['read_only'] newly_added, existing = [], [] for member_key in UnitModel.get_user_keys(current, current.input['unit_key']): sb, new = Subscriber(current).objects.get_or_create(user_id=member_key, read_only=read_only, channel_id=current.input['channel_key']) if new: newly_added.append(member_key) else: existing.append(member_key) current.output = { 'existing': existing, 'newly_added': newly_added, 'status': 'OK', 'code': 201 }
Subscribe users of a given unit to given channel JSON API: .. code-block:: python # request: { 'view':'_zops_add_unit_to_channel', 'unit_key': key, 'channel_key': key, 'read_only': boolean, # true if this is a Broadcast channel, # false if it's a normal chat room } # response: { 'existing': [key,], # existing members 'newly_added': [key,], # newly added members 'status': 'Created', 'code': 201 }
Below is the the instruction that describes the task: ### Input: Subscribe users of a given unit to given channel JSON API: .. code-block:: python # request: { 'view':'_zops_add_unit_to_channel', 'unit_key': key, 'channel_key': key, 'read_only': boolean, # true if this is a Broadcast channel, # false if it's a normal chat room } # response: { 'existing': [key,], # existing members 'newly_added': [key,], # newly added members 'status': 'Created', 'code': 201 } ### Response: def add_unit_to_channel(current): """ Subscribe users of a given unit to given channel JSON API: .. code-block:: python # request: { 'view':'_zops_add_unit_to_channel', 'unit_key': key, 'channel_key': key, 'read_only': boolean, # true if this is a Broadcast channel, # false if it's a normal chat room } # response: { 'existing': [key,], # existing members 'newly_added': [key,], # newly added members 'status': 'Created', 'code': 201 } """ read_only = current.input['read_only'] newly_added, existing = [], [] for member_key in UnitModel.get_user_keys(current, current.input['unit_key']): sb, new = Subscriber(current).objects.get_or_create(user_id=member_key, read_only=read_only, channel_id=current.input['channel_key']) if new: newly_added.append(member_key) else: existing.append(member_key) current.output = { 'existing': existing, 'newly_added': newly_added, 'status': 'OK', 'code': 201 }
def _open_file_obj(f, mode="r"):
    """
    A context manager that provides access to a file.

    :param f: the file to be opened; may be a path, an http(s) URL, or an
        already-open file-like object
    :type f: a file-like object or path to file
    :param mode: how to open the file (used only for local paths)
    :type mode: string
    """
    if isinstance(f, six.string_types):
        if f.startswith(("http://", "https://")):
            file_obj = _urlopen(f)
            # Close the URL handle even if the consumer of the context
            # raises; previously a raised exception leaked the connection.
            try:
                yield file_obj
            finally:
                file_obj.close()
        else:
            with open(f, mode) as file_obj:
                yield file_obj
    else:
        # Already a file-like object; the caller retains ownership and is
        # responsible for closing it.
        yield f
A context manager that provides access to a file. :param f: the file to be opened :type f: a file-like object or path to file :param mode: how to open the file :type mode: string
Below is the the instruction that describes the task: ### Input: A context manager that provides access to a file. :param f: the file to be opened :type f: a file-like object or path to file :param mode: how to open the file :type mode: string ### Response: def _open_file_obj(f, mode="r"): """ A context manager that provides access to a file. :param f: the file to be opened :type f: a file-like object or path to file :param mode: how to open the file :type mode: string """ if isinstance(f, six.string_types): if f.startswith(("http://", "https://")): file_obj = _urlopen(f) yield file_obj file_obj.close() else: with open(f, mode) as file_obj: yield file_obj else: yield f
def get_locales(self, languages=None, locales=None, region=None,
                use_given_order=False, allow_conflicting_locales=False):
    """
    Yield locale instances.

    :param languages: A list of language codes, e.g. ['en', 'es', 'zh-Hant'].
        If locales are not given, languages and region are used to construct
        locales to load.
    :type languages: list
    :param locales: A list of codes of locales which are to be loaded,
        e.g. ['fr-PF', 'qu-EC', 'af-NA']
    :type locales: list
    :param region: A region code, e.g. 'IN', '001', 'NE'. If locales are not
        given, languages and region are used to construct locales to load.
    :type region: str|unicode
    :param use_given_order: If True, the returned mapping is ordered in the
        order locales are given.
    :type use_given_order: bool
    :param allow_conflicting_locales: if True, locales with same language and
        different region can be loaded.
    :type allow_conflicting_locales: bool
    :yield: locale instances
    """
    loaded_pairs = self._load_data(
        languages=languages,
        locales=locales,
        region=region,
        use_given_order=use_given_order,
        allow_conflicting_locales=allow_conflicting_locales,
    )
    # _load_data yields (shortname, locale) pairs; callers only want the
    # locale instances.
    for _shortname, locale_instance in loaded_pairs:
        yield locale_instance
Yield locale instances. :param languages: A list of language codes, e.g. ['en', 'es', 'zh-Hant']. If locales are not given, languages and region are used to construct locales to load. :type languages: list :param locales: A list of codes of locales which are to be loaded, e.g. ['fr-PF', 'qu-EC', 'af-NA'] :type locales: list :param region: A region code, e.g. 'IN', '001', 'NE'. If locales are not given, languages and region are used to construct locales to load. :type region: str|unicode :param use_given_order: If True, the returned mapping is ordered in the order locales are given. :type use_given_order: bool :param allow_conflicting_locales: if True, locales with same language and different region can be loaded. :type allow_conflicting_locales: bool :yield: locale instances
Below is the the instruction that describes the task: ### Input: Yield locale instances. :param languages: A list of language codes, e.g. ['en', 'es', 'zh-Hant']. If locales are not given, languages and region are used to construct locales to load. :type languages: list :param locales: A list of codes of locales which are to be loaded, e.g. ['fr-PF', 'qu-EC', 'af-NA'] :type locales: list :param region: A region code, e.g. 'IN', '001', 'NE'. If locales are not given, languages and region are used to construct locales to load. :type region: str|unicode :param use_given_order: If True, the returned mapping is ordered in the order locales are given. :type allow_redetect_language: bool :param allow_conflicting_locales: if True, locales with same language and different region can be loaded. :type allow_conflicting_locales: bool :yield: locale instances ### Response: def get_locales(self, languages=None, locales=None, region=None, use_given_order=False, allow_conflicting_locales=False): """ Yield locale instances. :param languages: A list of language codes, e.g. ['en', 'es', 'zh-Hant']. If locales are not given, languages and region are used to construct locales to load. :type languages: list :param locales: A list of codes of locales which are to be loaded, e.g. ['fr-PF', 'qu-EC', 'af-NA'] :type locales: list :param region: A region code, e.g. 'IN', '001', 'NE'. If locales are not given, languages and region are used to construct locales to load. :type region: str|unicode :param use_given_order: If True, the returned mapping is ordered in the order locales are given. :type allow_redetect_language: bool :param allow_conflicting_locales: if True, locales with same language and different region can be loaded. :type allow_conflicting_locales: bool :yield: locale instances """ for _, locale in self._load_data( languages=languages, locales=locales, region=region, use_given_order=use_given_order, allow_conflicting_locales=allow_conflicting_locales): yield locale
def get_node_msindex(msinds, node_seed):
    """
    Convert a seeds-like selection of voxels to a multiscale index.

    :param msinds: ndarray with multiscale indexes
    :param node_seed: ndarray with nonzero entries where selected voxels are,
        a list of indexes into ``msinds``, or a single index
    :return: multiscale index of the first found seed
    """
    # isinstance() instead of ``type(x) == ...`` so ndarray/list subclasses
    # are handled the same way as their base types.
    if isinstance(node_seed, np.ndarray):
        seed_indexes = np.nonzero(node_seed)
    elif isinstance(node_seed, list):
        seed_indexes = node_seed
    else:
        # A single scalar index; wrap it so fancy indexing yields an array.
        seed_indexes = [node_seed]

    selected_nodes_msinds = msinds[seed_indexes]
    # Only the first selected seed's multiscale index is returned.
    node_msindex = selected_nodes_msinds[0]
    return node_msindex
Convert seeds-like selection of voxel to multiscale index. :param msinds: ndarray with indexes :param node_seed: ndarray with 1 where selected pixel is, or list of indexes in this array :return: multiscale index of first found seed
Below is the the instruction that describes the task: ### Input: Convert seeds-like selection of voxel to multiscale index. :param msinds: ndarray with indexes :param node_seed: ndarray with 1 where selected pixel is, or list of indexes in this array :return: multiscale index of first found seed ### Response: def get_node_msindex(msinds, node_seed): """ Convert seeds-like selection of voxel to multiscale index. :param msinds: ndarray with indexes :param node_seed: ndarray with 1 where selected pixel is, or list of indexes in this array :return: multiscale index of first found seed """ if type(node_seed) == np.ndarray: seed_indexes = np.nonzero(node_seed) elif type(node_seed) == list: seed_indexes = node_seed else: seed_indexes = [node_seed] selected_nodes_msinds = msinds[seed_indexes] node_msindex = selected_nodes_msinds[0] return node_msindex
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
    """
    Scan the directory for all descendant files.

    *root_full* (:class:`str`) the absolute path to the root directory.

    *dir_rel* (:class:`str`) the path to the directory to scan relative
    to *root_full*.

    *memo* (:class:`dict`) keeps track of ancestor directories
    encountered. Maps each ancestor real path (:class:`str`) to relative
    path (:class:`str`).

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions.

    *follow_links* (:class:`bool`) is whether to walk symbolic links that
    resolve to directories.
    """
    dir_full = os.path.join(root_full, dir_rel)
    dir_real = os.path.realpath(dir_full)

    # Remember each encountered ancestor directory and its canonical
    # (real) path. If a canonical path is encountered more than once,
    # recursion has occurred.
    if dir_real not in memo:
        memo[dir_real] = dir_rel
    else:
        # NOTE(review): this RecursionError appears to be a project-defined
        # exception class (the builtin accepts no keyword arguments) -- it
        # shadows the builtin name; confirm against the module's imports.
        raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)

    for node in os.listdir(dir_full):
        node_rel = os.path.join(dir_rel, node)
        node_full = os.path.join(root_full, node_rel)

        # Inspect child node. lstat() so symlinks themselves are seen
        # before deciding whether to follow them.
        try:
            node_stat = os.lstat(node_full)
        except OSError as e:
            if on_error is not None:
                on_error(e)
            continue

        if stat.S_ISLNK(node_stat.st_mode):
            # Child node is a link, inspect the target node.
            is_link = True
            try:
                node_stat = os.stat(node_full)
            except OSError as e:
                # Broken link target; report (if requested) and skip.
                if on_error is not None:
                    on_error(e)
                continue
        else:
            is_link = False

        if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
            # Child node is a directory, recurse into it and yield its
            # descendant files.
            for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
                yield file_rel

        elif stat.S_ISREG(node_stat.st_mode):
            # Child node is a file, yield it.
            yield node_rel

    # NOTE: Make sure to remove the canonical (real) path of the directory
    # from the ancestors memo once we are done with it. This allows the
    # same directory to appear multiple times. If this is not done, the
    # second occurrence of the directory will be incorrectly interpreted as
    # a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
    del memo[dir_real]
Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str`) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolic links that resolve to directories.
Below is the the instruction that describes the task: ### Input: Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str``) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolik links that resolve to directories. ### Response: def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links): """ Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str``) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolik links that resolve to directories. """ dir_full = os.path.join(root_full, dir_rel) dir_real = os.path.realpath(dir_full) # Remember each encountered ancestor directory and its canonical # (real) path. If a canonical path is encountered more than once, # recursion has occurred. if dir_real not in memo: memo[dir_real] = dir_rel else: raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel) for node in os.listdir(dir_full): node_rel = os.path.join(dir_rel, node) node_full = os.path.join(root_full, node_rel) # Inspect child node. 
try: node_stat = os.lstat(node_full) except OSError as e: if on_error is not None: on_error(e) continue if stat.S_ISLNK(node_stat.st_mode): # Child node is a link, inspect the target node. is_link = True try: node_stat = os.stat(node_full) except OSError as e: if on_error is not None: on_error(e) continue else: is_link = False if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link): # Child node is a directory, recurse into it and yield its # decendant files. for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links): yield file_rel elif stat.S_ISREG(node_stat.st_mode): # Child node is a file, yield it. yield node_rel # NOTE: Make sure to remove the canonical (real) path of the directory # from the ancestors memo once we are done with it. This allows the # same directory to appear multiple times. If this is not done, the # second occurance of the directory will be incorrectly interpreted as # a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>. del memo[dir_real]
async def fetch_page(session, host):
    """
    Perform the page fetch from an individual host.

    `session` - An aiohttp
    [client session](http://aiohttp.readthedocs.io/en/stable/client_reference.html#client-session)

    `host` - URL to fetch

    `return` tuple with the following:

    * The host parameter
    * A vague status string
    * Text response or an exception depending on status above
    """
    # Spread concurrent requests out with a small random delay (0 - 2.5 s).
    await asyncio.sleep(random.randint(0, 25) * 0.1)
    started = time.time()
    logger.info('Fetch from {}'.format(host))
    try:
        response = await session.get(host, allow_redirects=False)
    except aiohttp.ClientResponseError as err:
        # likely a 404 implying HTTP but no page
        # likely a 401 implying HTTP but no access
        # FIXME: for instance, a gateway
        # headers are available via err.headers()
        # https://multidict.readthedocs.io/en/stable/multidict.html#multidict.CIMultiDict
        outcome = (host, 'no page', err)
    except aiohttp.ClientConnectorError as err:
        # likely device at IP but no HTTP server
        outcome = (host, 'no http', err)
    except aiohttp.ServerConnectionError as err:
        # likely ServerTimeoutError implying no device at IP
        outcome = (host, 'no dev', err)
    except aiohttp.InvalidURL as err:
        # likely a malformed URL
        outcome = (host, 'no URL', err)
    else:
        try:
            body = await response.text()
        except aiohttp.ClientPayloadError as err:
            # trouble reading page TODO: anyway to recover?
            outcome = (host, 'no read', err)
        else:
            outcome = (host, 'found', body)
        response.close()
    elapsed = time.time() - started
    logger.info('Recvd from {} after {:.2f}s'.format(host, elapsed))
    return outcome
Perform the page fetch from an individual host. `session` - An aiohttp [client session](http://aiohttp.readthedocs.io/en/stable/client_reference.html#client-session) `host` - URL to fetch `return` tuple with the following: * The host parameter * A vague status string * Text response or an exception depending on status above
Below is the the instruction that describes the task: ### Input: Perform the page fetch from an individual host. `session` - An aiohttp [client session](http://aiohttp.readthedocs.io/en/stable/client_reference.html#client-session) `host` - URL to fetch `return` tuple with the following: * The host parameter * A vague status string * Text response or an exception depending on status above ### Response: async def fetch_page(session, host): """ Perform the page fetch from an individual host. `session` - An aiohttp [client session](http://aiohttp.readthedocs.io/en/stable/client_reference.html#client-session) `host` - URL to fetch `return` tuple with the following: * The host parameter * A vague status string * Text response or an exception depending on status above """ await asyncio.sleep(random.randint(0, 25) * 0.1) start = time.time() logger.info('Fetch from {}'.format(host)) try: response = await session.get(host, allow_redirects=False) except aiohttp.ClientResponseError as err: # likely a 404 implying HTTP but no page # likely a 401 implying HTTP but no access # FIXME: for instance, a gateway # headers are available via err.headers() # https://multidict.readthedocs.io/en/stable/multidict.html#multidict.CIMultiDict results_tuple = (host, 'no page', err) except aiohttp.ClientConnectorError as err: # likely device at IP but no HTTP server results_tuple = (host, 'no http', err) except aiohttp.ServerConnectionError as err: # likely ServerTimeoutError implying no device at IP results_tuple = (host, 'no dev', err) except aiohttp.InvalidURL as err: # likely a malformed URL results_tuple = (host, 'no URL', err) # except Exception as err: # # Generic trap for debug # results_tuple = (host, 'unknown', err) else: try: text_response = await response.text() except aiohttp.ClientPayloadError as err: # trouble reading page TODO: anyway to recover? 
results_tuple = (host, 'no read', err) else: results_tuple = (host, 'found', text_response) response.close() logger.info('Recvd from {} after {:.2f}s'.format(host, time.time() - start)) return results_tuple