code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def cancel(self):
    """Detach strategy from its sensor and cancel ioloop callbacks.

    If this strategy observes sensor updates, it is detached first; the
    actual timeout cancellation is deferred to the ioloop as a callback.
    """
    observes_updates = self.OBSERVE_UPDATES
    if observes_updates:
        self.detach()
    # Cancellation must happen on the ioloop thread, hence the callback.
    self.ioloop.add_callback(self.cancel_timeouts)
Detach strategy from its sensor and cancel ioloop callbacks.
def pdna_network_from_bbox(
        lat_min=None, lng_min=None, lat_max=None, lng_max=None,
        bbox=None, network_type='walk', two_way=True,
        timeout=180, memory=None,
        max_query_area_size=50 * 1000 * 50 * 1000):
    """
    Make a Pandana network from a bounding lat/lon box request to the
    Overpass API.  Distance will be in the default units meters.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
    network_type : {'walk', 'drive'}, optional
        Specify whether the network will be used for walking or driving.
        'walk' attempts to exclude things like freeways, while 'drive'
        attempts to exclude things like bike and walking paths.
    two_way : bool, optional
        Whether the routes are two-way.  If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes.  If None,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry
        is in

    Returns
    -------
    network : pandana.Network
    """
    # Forward every bounding-box / query option unchanged to osmnet.
    query_kwargs = dict(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        bbox=bbox, network_type=network_type, two_way=two_way,
        timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size)
    nodes, edges = network_from_bbox(**query_kwargs)
    return Network(nodes['x'], nodes['y'],
                   edges['from'], edges['to'], edges[['distance']])
Make a Pandana network from a bounding lat/lon box request to the Overpass API. Distance will be in the default units meters. Parameters ---------- lat_min, lng_min, lat_max, lng_max : float bbox : tuple Bounding box formatted as a 4 element tuple: (lng_max, lat_min, lng_min, lat_max) network_type : {'walk', 'drive'}, optional Specify whether the network will be used for walking or driving. A value of 'walk' attempts to exclude things like freeways, while a value of 'drive' attempts to exclude things like bike and walking paths. two_way : bool, optional Whether the routes are two-way. If True, node pairs will only occur once. timeout : int, optional the timeout interval for requests and to pass to Overpass API memory : int, optional server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float, optional max area for any part of the geometry, in the units the geometry is in Returns ------- network : pandana.Network
def validate_format(self, obj, pointer=None):
    """Validate *obj* against this schema's ``format`` attribute, if any.

    ================= ============
    Expected draft04   Alias of
    ----------------- ------------
    date-time          rfc3339.datetime
    email              email
    hostname           hostname
    ipv4               ipv4
    ipv6               ipv6
    uri                uri
    ================= ============
    """
    if 'format' not in self.attrs:
        return obj
    fmt = self.attrs['format']
    # Only 'date-time' is aliased; every other draft04 format keeps its
    # own name (unknown formats pass through unchanged too).
    substituted = {'date-time': 'rfc3339.datetime'}.get(fmt, fmt)
    logger.debug('use %s', substituted)
    try:
        return self.formats[substituted](obj)
    except ValidationError as error:
        logger.error(error)
        self.fail('Forbidden value', obj, pointer)
    return obj
================= ============ Expected draft04 Alias of ----------------- ------------ date-time rfc3339.datetime email email hostname hostname ipv4 ipv4 ipv6 ipv6 uri uri ================= ============
def set_chat_description(
    self,
    chat_id: Union[int, str],
    description: str
) -> bool:
    """Use this method to change the description of a supergroup or a channel.
    You must be an administrator in the chat for this to work and must have
    the appropriate admin rights.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.

        description (``str``):
            New chat description, 0-255 characters.

    Returns:
        True on success.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``ValueError`` if a chat_id doesn't belong to a supergroup or a channel.
    """
    peer = self.resolve_peer(chat_id)
    # Only channels and legacy chats may have their description edited.
    if not isinstance(peer, (types.InputPeerChannel, types.InputPeerChat)):
        raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id))
    self.send(
        functions.messages.EditChatAbout(
            peer=peer,
            about=description
        )
    )
    return True
Use this method to change the description of a supergroup or a channel. You must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. description (``str``): New chat description, 0-255 characters. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` if a chat_id doesn't belong to a supergroup or a channel.
def from_str(text):
    """Construct :class:`ChatMessageSegment` list parsed from a string.

    Args:
        text (str): Text to parse. May contain line breaks, URLs and
            formatting markup (simplified Markdown and HTML) to be
            converted into equivalent segments.

    Returns:
        List of :class:`ChatMessageSegment` objects.
    """
    parsed_segments = chat_message_parser.parse(text)
    result = []
    for seg in parsed_segments:
        result.append(ChatMessageSegment(seg.text, **seg.params))
    return result
Construct :class:`ChatMessageSegment` list parsed from a string. Args: text (str): Text to parse. May contain line breaks, URLs and formatting markup (simplified Markdown and HTML) to be converted into equivalent segments. Returns: List of :class:`ChatMessageSegment` objects.
def readTempC(self):
    """Return the thermocouple temperature value in degrees celsius.

    Returns NaN when any of the sensor's fault bits are set.
    """
    raw = self._read32()
    # The three lowest bits are fault flags; any of them invalidates
    # the reading.
    if raw & 0x7:
        return float('NaN')
    # The temperature occupies the top 14 bits of the 32-bit word.
    value = raw >> 18
    if raw & 0x80000000:
        # Sign bit set: undo the 14-bit two's complement encoding.
        value -= 16384
    # Each LSB is worth 0.25 degrees C.
    return value * 0.25
Return the thermocouple temperature value in degrees celsius.
def get_none_policy_text(none_policy,  # type: int
                         verbose=False  # type: bool
                         ):
    """
    Returns a user-friendly description of a NonePolicy taking into account
    NoneArgPolicy.

    :param none_policy: the policy value to describe
    :param verbose: if True a full sentence is returned, otherwise the
        short policy name
    :return: the description string
    """
    # (policy, verbose text, short name) table; identity comparison is
    # preserved from the original implementation.
    descriptions = (
        (NonePolicy.SKIP,
         "accept None without performing validation", 'SKIP'),
        (NonePolicy.FAIL,
         "fail on None without performing validation", 'FAIL'),
        (NonePolicy.VALIDATE,
         "validate None as any other values", 'VALIDATE'),
        (NoneArgPolicy.SKIP_IF_NONABLE_ELSE_FAIL,
         "accept None without validation if the argument is optional, "
         "otherwise fail on None",
         'SKIP_IF_NONABLE_ELSE_FAIL'),
        (NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE,
         "accept None without validation if the argument is optional, "
         "otherwise validate None as any other values",
         'SKIP_IF_NONABLE_ELSE_VALIDATE'),
    )
    for policy, long_text, short_name in descriptions:
        if none_policy is policy:
            return long_text if verbose else short_name
    raise ValueError('Invalid none_policy ' + str(none_policy))
Returns a user-friendly description of a NonePolicy taking into account NoneArgPolicy :param none_policy: :param verbose: :return:
def _make_2d_array(self, data):
    """
    Convert a 1D array of mesh values to a masked 2D mesh array
    given the 1D mesh indices ``mesh_idx``.

    Parameters
    ----------
    data : 1D `~numpy.ndarray`
        A 1D array of mesh values.

    Returns
    -------
    result : 2D `~numpy.ma.MaskedArray`
        A 2D masked array.  Pixels not defined in ``mesh_idx`` are
        masked.
    """
    if data.shape != self.mesh_idx.shape:
        raise ValueError('data and mesh_idx must have the same shape')
    if np.ma.is_masked(data):
        raise ValueError('data must not be a masked array')

    data2d = np.zeros(self._mesh_shape).astype(data.dtype)
    data2d[self.mesh_yidx, self.mesh_xidx] = data

    if len(self.mesh_idx) == self.nboxes:
        # no meshes were masked
        return data2d
    else:
        # some meshes were masked
        # BUG FIX: use the builtin ``bool`` -- the ``np.bool`` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        mask2d = np.ones(data2d.shape).astype(bool)
        mask2d[self.mesh_yidx, self.mesh_xidx] = False
        return np.ma.masked_array(data2d, mask=mask2d)
Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices ``mesh_idx``. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. Returns ------- result : 2D `~numpy.ma.MaskedArray` A 2D masked array. Pixels not defined in ``mesh_idx`` are masked.
def _bitResponseToValue(bytestring):
    """Convert a response string to a numerical value.

    Args:
        bytestring (str): A string of length 1. Can be for example ``\\x01``.

    Returns:
        The converted value (int).

    Raises:
        TypeError, ValueError
    """
    _checkString(bytestring, description='bytestring', minlength=1, maxlength=1)
    # Only the two single-byte wire representations are legal.
    if bytestring == '\x01':
        return 1
    if bytestring == '\x00':
        return 0
    raise ValueError('Could not convert bit response to a value. Input: {0!r}'.format(bytestring))
Convert a response string to a numerical value. Args: bytestring (str): A string of length 1. Can be for example ``\\x01``. Returns: The converted value (int). Raises: TypeError, ValueError
def get_results(self, *, block=False, timeout=None):
    """Get the results of each job in the pipeline.

    Parameters:
      block(bool): Whether or not to block until a result is set.
      timeout(int): The maximum amount of time, in ms, to wait for
        a result when block is True.  Defaults to 10 seconds.

    Raises:
      ResultMissing: When block is False and the result isn't set.
      ResultTimeout: When waiting for a result times out.

    Returns:
      A result generator.
    """
    # One shared deadline bounds the *total* wait across all messages.
    deadline = time.monotonic() + timeout / 1000 if timeout else None
    for message in self.messages:
        remaining = timeout
        if deadline:
            remaining = max(0, int((deadline - time.monotonic()) * 1000))
        yield message.get_result(block=block, timeout=remaining)
Get the results of each job in the pipeline. Parameters: block(bool): Whether or not to block until a result is set. timeout(int): The maximum amount of time, in ms, to wait for a result when block is True. Defaults to 10 seconds. Raises: ResultMissing: When block is False and the result isn't set. ResultTimeout: When waiting for a result times out. Returns: A result generator.
def generate_map_from_dataset(self, l_dataset):
    """
    creates a map file (in the standard CSV format) based on
    columns of a dataset.
    1. read column names, lookup names in list
    2. read column content, get highest match of distinct values from
       ontology lists (eg, Years, countries, cities, ages)
    """
    entries = []
    headers = l_dataset.get_header()
    print(headers)
    # Pass 1: one entry per non-empty column name.
    for col_num, col in enumerate(headers):
        if col == '':
            continue
        entries.append('column:name:' + str(col_num) + '=' + l_dataset.force_to_string(col))
    # Pass 2: number of distinct values per column.
    for col_num, col in enumerate(headers):
        if col == '':
            continue
        vals = l_dataset.get_distinct_values_from_cols([col])
        entries.append('column:count:distinct:' + col + '=' + str(len(vals[0])))
    # Pass 3: the (up to 10) most frequent values of each column.
    for col_num, col in enumerate(headers):
        if col == '':
            continue
        col_vals = l_dataset.count_unique_values(col_num, col, 10)
        for val_num, v in enumerate(col_vals):
            entries.append('column:topvalues:' + col + ':' + str(val_num) + '=' + v)
    return entries
creates a map file (in the standard CSV format) based on columns of a dataset. 1. read column names, lookup names in list 2. read column content, get highest match of distinct values from ontology lists (eg, Years, countries, cities, ages)
def asset_save_callback(self, *args, **kwargs):
    """Callback for the asset save button

    :returns: None
    :rtype: None
    :raises: None
    """
    # A task must be selected (at depth 2) in the asset browser to save.
    tasksel = self.browser.assetbrws.selected_indexes(2)
    if not tasksel or not tasksel[0].isValid():
        self.statusbar.showMessage('No task selected! Cannot save!')
        return
    taskitem = tasksel[0].internalPointer()
    task = taskitem.internal_data()
    # Saves always target the 'work' release type.
    rtype = djadapter.RELEASETYPES['work']
    descriptor = self.asset_descriptor_le.text()
    # Bail out if the task/releasetype/descriptor combination is not saveable.
    if not self.check_selection_for_save(task, rtype, descriptor):
        return
    # Compute the next version's file info and perform the actual save.
    tfi = TaskFileInfo.get_next(task=task, releasetype=rtype,
                                typ=self._filetype, descriptor=descriptor)
    self._save_tfi(tfi, asset=True)
Callback for the shot open button :returns: None :rtype: None :raises: None
def get_auth_settings():
    """
    Returns all the key/secret settings for Twitter access,
    only if they're all defined.
    """
    from yacms.conf import settings
    setting_names = ("TWITTER_CONSUMER_KEY", "TWITTER_CONSUMER_SECRET",
                     "TWITTER_ACCESS_TOKEN_KEY", "TWITTER_ACCESS_TOKEN_SECRET")
    try:
        auth_settings = tuple(getattr(settings, name) for name in setting_names)
    except AttributeError:
        # At least one setting is missing entirely.
        return None
    # All settings must also be non-empty.
    return auth_settings if all(auth_settings) else None
Returns all the key/secret settings for Twitter access, only if they're all defined.
def fetch(self, category=CATEGORY_ENTRY):
    """Fetch the entries from the url.

    The method retrieves all entries from a RSS url

    :param category: the category of items to fetch

    :returns: a generator of entries
    """
    # RSS fetching needs no extra keyword arguments beyond the category.
    extra_kwargs = {}
    entries = super().fetch(category, **extra_kwargs)
    return entries
Fetch the entries from the url. The method retrieves all entries from a RSS url :param category: the category of items to fetch :returns: a generator of entries
def register_views(app_name, view_filename, urlpatterns=None):
    """Auto-register all class-based views whose name ends with ``View``.

    :param app_name: the app (module) name to import
    :param view_filename: name of the views module attribute inside the app
    :param urlpatterns: existing urlpatterns to extend (may be None)
    :return: the updated urlpatterns

    Only attributes ending with ``View`` that are actual classes are
    registered; each gets a case-insensitive URL named after the class.
    """
    app_module = __import__(app_name)
    view_module = getattr(app_module, view_filename)
    for view_name in dir(view_module):
        if not view_name.endswith('View'):
            continue
        view = getattr(view_module, view_name)
        # BUG FIX: the original tested ``isinstance(view, object)``, which
        # is always True and never filtered anything.  Check for an actual
        # class so non-class attributes ending in 'View' are skipped.
        if not isinstance(view, type):
            continue
        entry = patterns('',
                         url(r'^(?i)%s/$' % view_name,
                             view.as_view(),
                             name=view_name),
                         )
        if urlpatterns:
            urlpatterns += entry
        else:
            urlpatterns = entry
    return urlpatterns
app_name APP名 view_filename views 所在的文件 urlpatterns url中已经存在的urlpatterns return urlpatterns 只导入View结尾的,是类的视图
def chunk(sentence, format=None):
    """
    Vietnamese chunking

    Parameters
    ==========

    sentence: {unicode, str}
        raw sentence

    Returns
    =======
    tokens: list of tuple with word, pos tag, chunking tag
        tagged sentence

    Examples
    --------
    >>> # -*- coding: utf-8 -*-
    >>> from underthesea import chunk
    >>> sentence = "Nghi vấn 4 thi thể Triều Tiên trôi dạt bờ biển Nhật Bản"
    >>> chunk(sentence)
    [('Nghi vấn', 'N', 'B-NP'),
    ('4', 'M', 'B-NP'),
    ('thi thể', 'N', 'B-NP'),
    ('Triều Tiên', 'Np', 'B-NP'),
    ('trôi dạt', 'V', 'B-VP'),
    ('bờ biển', 'N', 'B-NP'),
    ('Nhật Bản', 'Np', 'B-NP')]
    """
    # Chunking runs on top of the POS-tagged sentence.
    tagged_sentence = pos_tag(sentence)
    predictor = CRFChunkingPredictor.Instance()
    return predictor.predict(tagged_sentence, format)
Vietnamese chunking Parameters ========== sentence: {unicode, str} raw sentence Returns ======= tokens: list of tuple with word, pos tag, chunking tag tagged sentence Examples -------- >>> # -*- coding: utf-8 -*- >>> from underthesea import chunk >>> sentence = "Nghi vấn 4 thi thể Triều Tiên trôi dạt bờ biển Nhật Bản" >>> chunk(sentence) [('Nghi vấn', 'N', 'B-NP'), ('4', 'M', 'B-NP'), ('thi thể', 'N', 'B-NP'), ('Triều Tiên', 'Np', 'B-NP'), ('trôi dạt', 'V', 'B-VP'), ('bờ biển', 'N', 'B-NP'), ('Nhật Bản', 'Np', 'B-NP')]
def disembowel(rest):
    "Disembowel some(one|thing)!"
    stabee = rest or "someone nearby"
    if rest:
        # Being disemboweled costs the named victim a karma point.
        karma.Karma.store.change(stabee, -1)
    return (
        "/me takes %s, brings them down to the basement, ties them to a "
        "leaky pipe, and once bored of playing with them mercifully "
        "ritually disembowels them..." % stabee)
Disembowel some(one|thing)!
def set_resource(self, service_name, resource_name, to_cache):
    """
    Sets the resource class within the cache.

    :param service_name: The service a given ``Resource`` talks to. Ex.
        ``sqs``, ``sns``, ``dynamodb``, etc.
    :type service_name: string

    :param resource_name: The name of the ``Resource``. Ex.
        ``Queue``, ``Notification``, ``Table``, etc.
    :type resource_name: string

    :param to_cache: The class to be cached for the service.
    :type to_cache: class
    """
    # Build/walk the nested cache dicts in one chained pass.
    resources = self.services.setdefault(service_name, {}).setdefault('resources', {})
    options = resources.setdefault(resource_name, {})
    # Entries are keyed by the parent classpath; the base Resource class
    # is stored under the 'default' key.
    classpath = self.build_classpath(to_cache.__bases__[0])
    if classpath == 'kotocore.resources.Resource':
        classpath = 'default'
    options[classpath] = to_cache
Sets the resource class within the cache. :param service_name: The service a given ``Resource`` talks to. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param resource_name: The name of the ``Resource``. Ex. ``Queue``, ``Notification``, ``Table``, etc. :type resource_name: string :param to_cache: The class to be cached for the service. :type to_cache: class
def plot_kde(data, ax, title=None, color='r', fill_bt=True):
    """
    Plot a smoothed (by kernel density estimate) histogram.
    :type data: numpy array
    :param data: An array containing the data to be plotted

    :type ax: matplotlib.Axes
    :param ax: The Axes object to draw to

    :type title: str
    :param title: The plot title

    :type color: str
    :param color: The color of the histogram line and fill. Note that the fill
                  will be plotted with an alpha of 0.35.

    :type fill_bt: bool
    :param fill_bt: Specify whether to fill the area beneath the histogram line
    """
    if isinstance(data, list):
        data = np.asarray(data)
    # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``float`` is the documented replacement.
    e = kde.KDEUnivariate(data.astype(float))
    e.fit()
    ax.plot(e.support, e.density, color=color, alpha=0.9, linewidth=2.25)
    if fill_bt:
        ax.fill_between(e.support, e.density, alpha=.35, zorder=1,
                        antialiased=True, color=color)
    if title is not None:
        # Lift the title slightly above the axes frame.
        t = ax.set_title(title)
        t.set_y(1.05)
Plot a smoothed (by kernel density estimate) histogram. :type data: numpy array :param data: An array containing the data to be plotted :type ax: matplotlib.Axes :param ax: The Axes object to draw to :type title: str :param title: The plot title :type color: str :param color: The color of the histogram line and fill. Note that the fill will be plotted with an alpha of 0.35. :type fill_bt: bool :param fill_bt: Specify whether to fill the area beneath the histogram line
def global_instance(cls):
    """Return a per-thread global batcher instance."""
    # GLOBAL_BATCHER is a per-thread holder; the instance is created
    # lazily on first access in each thread (EAFP).
    try:
        return GLOBAL_BATCHER.instance
    except AttributeError:
        config = getattr(settings, 'PRIORITIZED_BATCHER', {})
        GLOBAL_BATCHER.instance = PrioritizedBatcher(**config)
        return GLOBAL_BATCHER.instance
Return a per-thread global batcher instance.
def get_modules(self):
    """Get modules by project_abspath and packages_scan.

    Traverse all files under folder packages_scan which set by customer.
    And get all modules name.
    """
    if not self.project_abspath:
        raise TypeError("project_abspath can not be empty.")
    # Resolve every configured package path and collect its module names
    # into self._modules as a side effect of get_module_name().
    for package_abspath in self.get_package_abspath():
        self.get_module_name(package_abspath)
    return self._modules
Get modules by project_abspath and packages_scan. Traverse all files under folder packages_scan which set by customer. And get all modules name.
def config_hook(self, func):
    """
    Decorator to add a config hook to this ingredient.

    Config hooks need to be a function that takes 3 parameters and returns
    a dictionary: (config, command_name, logger) --> dict

    Config hooks are run after the configuration of this Ingredient, but
    before any further ingredient-configurations are run.
    The dictionary returned by a config hook is used to update the
    config updates.
    Note that they are not restricted to the local namespace of the
    ingredient.

    :param func: the hook function; must have exactly the signature
        ``(config, command_name, logger)``
    :return: the registered hook function
    :raises ValueError: if the function's signature does not match
    """
    # BUG FIX: ``inspect.getargspec`` was removed in Python 3.11; use
    # ``getfullargspec`` and check the equivalent fields (varkw replaces
    # the old ``keywords`` attribute).
    argspec = inspect.getfullargspec(func)
    expected_args = ['config', 'command_name', 'logger']
    if not (argspec.args == expected_args and
            argspec.varargs is None and
            argspec.varkw is None and
            argspec.defaults is None):
        raise ValueError('Wrong signature for config_hook. Expected: '
                         '(config, command_name, logger)')
    self.config_hooks.append(func)
    return self.config_hooks[-1]
Decorator to add a config hook to this ingredient. Config hooks need to be a function that takes 3 parameters and returns a dictionary: (config, command_name, logger) --> dict Config hooks are run after the configuration of this Ingredient, but before any further ingredient-configurations are run. The dictionary returned by a config hook is used to update the config updates. Note that they are not restricted to the local namespace of the ingredient.
def _get_band(self, high_res, low_res, color, ratio):
    """Figure out what data should represent this color."""
    # The configured high-resolution band passes through untouched; all
    # other colors are synthesized by scaling the low-resolution data.
    if color == self.high_resolution_band:
        return high_res
    scaled = low_res * ratio
    scaled.attrs = low_res.attrs.copy()
    return scaled
Figure out what data should represent this color.
def bare_except(logical_line, noqa):
    r"""When catching exceptions, mention specific exceptions when possible.

    Okay: except Exception:
    Okay: except BaseException:
    E722: except:
    """
    if noqa:
        return
    # A bare ``except:`` at the start of the logical line triggers E722.
    match = re.match(r"except\s*:", logical_line)
    if match:
        yield match.start(), "E722 do not use bare except'"
r"""When catching exceptions, mention specific exceptions whenever possible. Okay: except Exception: Okay: except BaseException: E722: except:
def hrule(width=None, char=None):
    """Outputs or returns a horizontal line of the given character and
    width.  Returns printed string.
    """
    # Fall back to the module defaults when either argument is omitted.
    return echo(getline(char or HRCHAR, width or HRWIDTH))
Outputs or returns a horizontal line of the given character and width. Returns printed string.
def _color_level(str_, level):
    """
    Return the string wrapped with the appropriate styling for the message
    level.  The styling will be determined based on the rez configuration.

    Args:
        str_ (str): The string to be wrapped.
        level (str): The message level. Should be one of 'critical', 'error',
            'warning', 'info' or 'debug'.

    Returns:
        str: The string styled with the appropriate escape sequences.
    """
    # The config lookup yields (fore_color, back_color, styles).
    return _color(str_, *_get_style_from_config(level))
Return the string wrapped with the appropriate styling for the message level. The styling will be determined based on the rez configuration. Args: str_ (str): The string to be wrapped. level (str): The message level. Should be one of 'critical', 'error', 'warning', 'info' or 'debug'. Returns: str: The string styled with the appropriate escape sequences.
def clear(self):
    """Clears the server list"""
    # Only log when there was actually something to remove; clearing an
    # already-empty list is a silent no-op either way.
    if self.list:
        self._LOG.debug("List cleared.")
    self.list.clear()
Clears the server list
def get_validators_description(view):
    """
    Returns validators description in format:

    ### Validators:
    * validator1 name
        * validator1 docstring
    * validator2 name
        * validator2 docstring
    """
    action = getattr(view, 'action', None)
    if action is None:
        return ''
    # Accumulate one entry per validator, newline-separated, skipping the
    # leading newline for the very first non-empty entry.
    description = ''
    for validator in getattr(view, action + '_validators', []):
        entity_doc = get_entity_description(validator)
        if description:
            description += '\n' + entity_doc
        else:
            description += entity_doc
    if not description:
        return ''
    return '### Validators:\n' + description
Returns validators description in format: ### Validators: * validator1 name * validator1 docstring * validator2 name * validator2 docstring
def run_task(message):
    """Internal ``RUN_TASK`` consumer to run the task's callable"""
    task = Task.objects.get(pk=message['id'])
    if task.allow_overlap:
        task.run(message)
        return
    # Overlap not allowed: skip when already running, otherwise flag the
    # task as running for the duration of the call.
    if task.running:
        return
    task.running = True
    task.save()
    try:
        task.run(message)
    finally:
        task.running = False
        task.save()
Internal ``RUN_TASK`` consumer to run the task's callable
def nacm_rule_list_rule_comment(self, **kwargs):
    """Auto Generated Code

    Build a NETCONF ``<config>`` element for
    nacm/rule-list/rule/comment and pass it to the callback.

    :param name: name used for both the rule-list and the rule entry
    :param comment: text for the rule's comment leaf
    :param callback: optional override for ``self._callback``
    :return: whatever the callback returns for the built config
    """
    config = ET.Element("config")
    nacm = ET.SubElement(config, "nacm",
                         xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
    rule_list = ET.SubElement(nacm, "rule-list")
    # BUG FIX: the generated code called ``kwargs.pop('name')`` twice, so
    # the second pop always raised KeyError.  Pop once and reuse the value
    # for both the rule-list key and the rule key.
    name = kwargs.pop('name')
    name_key = ET.SubElement(rule_list, "name")
    name_key.text = name
    rule = ET.SubElement(rule_list, "rule")
    name_key = ET.SubElement(rule, "name")
    name_key.text = name
    comment = ET.SubElement(rule, "comment")
    comment.text = kwargs.pop('comment')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def expand(doc, doc_url="param://", params=None):
    """
    Expand a JSON document that was already pulled from ``doc_url``.

    Use mo_json_config.expand({}) to assume the current working directory.

    :param doc: the data structure from the JSON source
    :param doc_url: the URL this doc came from (the default uses params as
        a document source)
    :param params: extra parameters not found in the doc_url parameters
        (will supersede parameters from doc_url)
    :return: expanded JSON-serializable structure
    """
    if doc_url.find("://") == -1:
        Log.error("{{url}} must have a prototcol (eg http://) declared", url=doc_url)

    url = URL(doc_url)
    url.query = set_default(url.query, params)
    # Resolve remote "$ref" references first, then local ones.  A blank
    # URL only works when doc_url is absolute.
    phase1 = _replace_ref(doc, url)
    phase2 = _replace_locals(phase1, [phase1])
    return wrap(phase2)
ASSUMING YOU ALREADY PULLED THE doc FROM doc_url, YOU CAN STILL USE THE EXPANDING FEATURE USE mo_json_config.expand({}) TO ASSUME CURRENT WORKING DIRECTORY :param doc: THE DATA STRUCTURE FROM JSON SOURCE :param doc_url: THE URL THIS doc CAME FROM (DEFAULT USES params AS A DOCUMENT SOURCE) :param params: EXTRA PARAMETERS NOT FOUND IN THE doc_url PARAMETERS (WILL SUPERSEDE PARAMETERS FROM doc_url) :return: EXPANDED JSON-SERIALIZABLE STRUCTURE
def run_mash(self):
    """Run MASH to determine the closest refseq genomes."""
    # Flag that the full pipeline is in use before kicking off MASH.
    self.pipeline = True
    mash.Mash(inputobject=self, analysistype='mash')
Run MASH to determine the closest refseq genomes
def has_comic(name):
    """Check if comic name already exists."""
    hosts = ("Creators", "DrunkDuck", "GoComics", "KeenSpot",
             "ComicGenesis", "SmackJeeves")
    # Candidate names are host-qualified; comparison is case-insensitive.
    candidates = {("%s/%s" % (host, name)).lower() for host in hosts}
    bare_name = name.lower()
    for scraperclass in get_scraperclasses():
        lname = scraperclass.getName().lower()
        if lname in candidates or lname == bare_name:
            return True
    return False
Check if comic name already exists.
def _finish(self):
    """Mark transition as finished and execute callback."""
    self.finished = True
    callback = self._callback
    if callback:
        callback(self)
    # Wake up anything waiting for the transition to complete.
    self._finish_event.set()
Mark transition as finished and execute callback.
def __ordinal(self, num):
    """Returns the ordinal number of a given integer, as a string.

    eg. 1 -> 1st, 2 -> 2nd, 3 -> 3rd, etc.
    """
    # 11th-13th are the exceptions to the last-digit rule.
    if 10 <= num % 100 < 20:
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
    return '{}{}'.format(num, suffix)
Returns the ordinal number of a given integer, as a string. eg. 1 -> 1st, 2 -> 2nd, 3 -> 3rd, etc.
def profile_device_delete(name, device_name, remote_addr=None,
                          cert=None, key=None, verify_cert=True):
    '''
    Delete a profile device.

    name :
        The name of the profile to delete the device.

    device_name :
        The name of the device to delete.

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if
        you provide remote_addr and its a TCP Address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True but in the
        most cases you want to set it off as LXD normally uses
        self-signed certificates.

    CLI Example:

    .. code-block:: bash

        $ salt '*' lxd.profile_device_delete autostart eth1
    '''
    # Fetch the raw profile object, then drop the named device from it.
    profile = profile_get(name, remote_addr, cert, key, verify_cert, _raw=True)
    return _delete_property_dict_item(profile, 'devices', device_name)
Delete a profile device. name : The name of the profile to delete the device. device_name : The name of the device to delete. remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Whether to verify the cert, this is by default True but in the most cases you want to set it off as LXD normally uses self-signed certificates. CLI Example: .. code-block:: bash $ salt '*' lxd.profile_device_delete autostart eth1
def set(self, instance, value, **kw):
    """Set the value of the field on *instance*.

    Delegates to the private ``_set`` implementation and returns its
    result.
    """
    result = self._set(instance, value, **kw)
    return result
Set the value of the field
def key_to_dimension(self, fact_key, fact_join_col, dimension_name, dimension_join_col, dimension_key):
    """
    create SQL to join a fact table key based on "join_col" to a dimension
    The fact table is aliased as "op" and the join dimension is aliased
    as "ip" meaning you can pass substrings or SQL to match values.
    e.g. the command:
    aup.key_to_dimension('GENDER_KEY', 'substr(op.GENDER, 1,1)',
                         'tbl_GENDER', 'gender_code', 'GENDER_KEY')
    will generate the code:
    UPDATE table op SET op.gender_key =
       NVL ( (SELECT MAX (ip.gender_key)
                FROM tbl_GENDER ip
               WHERE ip.gender_code = SUBSTR (op.gender, 1, 1)), -1);
    """
    # Assemble the UPDATE statement piecewise, then append it to the
    # accumulated script.  Unmatched keys default to -1 via NVL.
    statement_parts = [
        "UPDATE {0} op SET op.{1} = NVL(\n".format(self.fact_table, fact_key),
        " (SELECT MAX (ip.{0})\n".format(dimension_key),
        " FROM {0} ip WHERE ".format(dimension_name),
        "{0} = \n ip.{1}), -1); \n\n".format(fact_join_col, dimension_join_col),
    ]
    self.sql_text += "".join(statement_parts)
create SQL to join a fact table key based on "join_col" to a dimension The fact table is aliased as "op" and the join dimension is aliased as "ip" meaning you can pass substrings or SQL to match values. e.g. the command: aup.key_to_dimension('GENDER_KEY', 'substr(op.GENDER, 1,1)', 'tbl_GENDER', 'gender_code', 'GENDER_KEY') will generate the code: UPDATE table op SET op.gender_key = NVL ( (SELECT MAX (ip.gender_key) FROM tbl_GENDER ip WHERE ip.gender_code = SUBSTR (op.gender, 1, 1)), -1);
def ReverseComplementMembership(x, y, **kwargs):
    """Change (x doesn't contain y) to not(y in x)."""
    # NOTE: ``ast`` here is the project's expression AST module, not the
    # standard library ``ast``.
    membership = ast.Membership(y, x, **kwargs)
    return ast.Complement(membership, **kwargs)
Change (x doesn't contain y) to not(y in x).
def _options():
    """Collect all command line options"""
    options = []
    for arg in sys.argv[1:]:
        # Anything starting with '-' except the help flag becomes an
        # Option named after the part before any '='.
        if arg[0] == '-' and arg != '--help':
            options.append(click.Option((arg.split('=')[0],)))
    return options
Collect all command line options
def is_valid_pep484_type_hint(typ_hint, allow_forward_refs: bool = False):
    """
    Returns True if the provided type is a valid PEP484 type hint, False
    otherwise.

    Note: string type hints (forward references) are not supported by
    default, since callers of this function in parsyfiles lib actually
    require them to be resolved already.

    :param typ_hint: the object to check
    :param allow_forward_refs: whether string forward references count as
        valid hints
    :return: True when ``typ_hint`` is a valid PEP484 type hint
    """
    # BUG FIX: the original used bare ``except:`` which also swallows
    # KeyboardInterrupt/SystemExit; catch Exception instead (E722).
    # most common case first, to be faster
    try:
        if isinstance(typ_hint, type):
            return True
    except Exception:
        pass

    # optionally, check forward reference
    try:
        if allow_forward_refs and is_forward_ref(typ_hint):
            return True
    except Exception:
        pass

    # finally check unions and typevars
    try:
        return is_union_type(typ_hint) or is_typevar(typ_hint)
    except Exception:
        return False
Returns True if the provided type is a valid PEP484 type hint, False otherwise. Note: string type hints (forward references) are not supported by default, since callers of this function in parsyfiles lib actually require them to be resolved already. :param typ_hint: :param allow_forward_refs: :return:
def _endpoint_from_socksport_line(reactor, socks_config):
    """
    Internal helper. Returns an IStreamClientEndpoint for the given
    config, which is of the same format expected by the SOCKSPort option
    in Tor.
    """
    if socks_config.startswith('unix:'):
        # XXX wait, can SOCKSPort lines with "unix:/path" still
        # include options afterwards? What about if the path has a
        # space in it?
        return UNIXClientEndpoint(reactor, socks_config[len('unix:'):])

    # Options such as KeepAliveIsolateSOCKSAuth may trail the address on a
    # SocksPort line; keep only the address part.
    if ' ' in socks_config:
        socks_config = socks_config.split()[0]

    host, sep, port_text = socks_config.partition(':')
    if not sep:
        # A bare port number implies localhost.
        host, port_text = '127.0.0.1', socks_config
    return TCP4ClientEndpoint(reactor, host, int(port_text))
Internal helper. Returns an IStreamClientEndpoint for the given config, which is of the same format expected by the SOCKSPort option in Tor.
def build_row(self, line):
    """
    Line describes an image or images to show

    Returns a dict whose 'items' key holds a list of dicts of image
    names or text items

    Examples:

    # A single image to display
    >>> x.build_row('foo.png')
    {'items': [{'image': 'foo.png'}]}

    # Two images with text in between:
    >>> x.build_row('foo.png or bar.jpg')
    {'items': [{'image': 'foo.png'}, {'text': 'or'}, {'image': 'bar.jpg'}]}
    """
    items = []
    row = dict(items=items)
    fields = line.split(' ')
    image_exts = ['.png', '.jpg']

    # note: str.split(' ') always yields at least [''] — no empty-guard needed
    for field in fields:
        # Classify by file extension, case-insensitively.
        ext = os.path.splitext(field)[-1]
        if ext.lower() in image_exts:
            items.append(dict(image=field))
        else:
            items.append(dict(text=field))
    return row
Line describes an image or images to show Returns a dict whose 'items' key holds a list of dicts of image names or text items Examples: # A single image to display >>> x.build_row('foo.png') {'items': [{'image': 'foo.png'}]} # Two images with text in between: >>> x.build_row('foo.png or bar.jpg') {'items': [{'image': 'foo.png'}, {'text': 'or'}, {'image': 'bar.jpg'}]}
def filter_queryset(self, attrs, queryset):
    """
    Filter the queryset to all instances matching the given attributes.
    """
    # On update, any field the caller did not provide falls back to the
    # value already stored on the existing instance.
    if self.instance is not None:
        for field_name in self.fields:
            if field_name in attrs:
                continue
            attrs[field_name] = getattr(self.instance, field_name)

    # Build the lookup from the tracked fields and apply it.
    lookup = {field_name: attrs[field_name] for field_name in self.fields}
    return queryset.filter(**lookup)
Filter the queryset to all instances matching the given attributes.
def writeFromDict(dataDict, headers, csvFile):
    """
    Write dictionary to a CSV, where keys are row numbers and values are a list.
    """
    # NOTE(review): "wb" is the Python 2 csv idiom; on Python 3 this would
    # need open(csvFile, "w", newline="") — confirm target interpreter.
    with open(csvFile, "wb") as outFile:
        writer = csv.writer(outFile, delimiter=",")
        writer.writerow(headers)
        # Emit rows in ascending key order so the output is deterministic.
        for rowKey in sorted(dataDict):
            writer.writerow(dataDict[rowKey])
Write dictionary to a CSV, where keys are row numbers and values are a list.
def get_game_for_worker(map_name, directory_id):
    """Get game for the given worker (directory) id.

    Args:
        map_name: which worker-to-game mapping to use ("v100unfriendly"
            or "human_nice").
        directory_id: 1-based worker index; consecutive groups of
            `worker_per_game` workers share one game.

    Returns:
        The game name assigned to this worker.

    Raises:
        ValueError: if `map_name` is not a known mapping.
    """
    if map_name == "v100unfriendly":
        games = ["chopper_command", "boxing", "asterix", "seaquest"]
        worker_per_game = 5
    elif map_name == "human_nice":
        games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
        worker_per_game = 5
    else:
        raise ValueError("Unknown worker to game map name: %s" % map_name)
    # Sort so the directory_id -> game assignment is stable across runs.
    games.sort()
    game_id = (directory_id - 1) // worker_per_game
    tf.logging.info("Getting game %d from %s." % (game_id, games))
    return games[game_id]
Get game for the given worker (directory) id.
def provision_system_user(items, database_name, overwrite=False, clear=False, skip_user_check=False):
    """Provision a system user.

    NOTE(review): the `items` and `database_name` parameters are not used in
    this body — presumably kept for a uniform provisioning signature; confirm
    against other provision_* functions.
    """
    from hfos.provisions.base import provisionList
    from hfos.database import objectmodels

    # TODO: Add a root user and make sure owner can access it later.
    # Setting up details and asking for a password here is not very useful,
    # since this process is usually run automated.

    # The system user must never be clobbered; downgrade the flag and warn.
    if overwrite is True:
        hfoslog('Refusing to overwrite system user!', lvl=warn,
                emitter='PROVISIONS')
        overwrite = False

    system_user_count = objectmodels['user'].count({'name': 'System'})
    # Provision unless a System user already exists AND a clear was requested.
    if system_user_count == 0 or clear is False:
        provisionList(Users, 'user', overwrite, clear, skip_user_check=True)
        hfoslog('Provisioning: Users: Done.', emitter="PROVISIONS")
    else:
        hfoslog('System user already present.', lvl=warn, emitter='PROVISIONS')
Provision a system user
def main_update(self): """ Main function called by the updater thread. Direct call is unnecessary. """ # Renice updater thread to limit overload try: os.nice(1) except AttributeError as er: pass # os.nice is not available on windows time.sleep(self.refresh) try: while True: # We pick a timestamp to take in account the time used by update() timestamp=time.time() # Update data with user's defined function self.update() # We use this trick because we cannot use signals in a backoffice threads # and alarm() mess up with readline() in the main thread. delay=(timestamp+self.refresh)-time.time() if delay > 0: if delay > self.refresh: time.sleep(self.refresh) else: time.sleep(delay) # Commit change exactly every 'refresh' seconds, whatever update() takes long. # Commited values are a bit old, but for RRD, punctuals values # are better than fresh-but-not-time-constants values. self.commit() except Exception as e: self.error=e raise
Main function called by the updater thread. Direct call is unnecessary.
def clean_year_month(year, month, month_orig):
    """
    If 'month_orig', which is the month given in the url BEFORE any next/prev
    query strings have been applied, is out of range, sets month to the
    current month and returns an error message. Also returns an error message
    if the year given is +/- 50 years from now (via _check_year).

    If 'month', which is the month given in the url AFTER any next/prev query
    strings have been applied, is out of range, adjusts it to be in range
    (by also adjusting the year).

    Returns a (year, month, error) tuple where error is False or a message.
    """
    error = False
    error_msg = "The date given was invalid."
    # Chained comparison instead of `not in xrange(1, 13)`: works on both
    # Python 2 and 3 (xrange is Python-2-only) and is O(1).
    if month_orig is not None and not 1 <= month_orig <= 12:
        month = now.month
        error = error_msg
    # This takes care of 'next' query strings making month > 12
    while month > 12:
        month -= 12
        year += 1
    # This takes care of 'prev' query strings making month < 1
    while month < 1:
        month += 12
        year -= 1
    year, month, error = _check_year(year, month, error, error_msg)
    return year, month, error
If 'month_orig', which is the month given in the url BEFORE any next/prev query strings have been applied, is out of range, sets month to the current month and returns an error message. Also Returns an error message if the year given is +/- 50 years from now. If 'month', which is the month given in the url AFTER any next/prev query strings have been applied, is out of range, adjusts it to be in range (by also adjusting the year).
def playlist_create(
    self, name, description='', *, make_public=False, songs=None
):
    """Create a playlist.

    Parameters:
        name (str): Name to give the playlist.
        description (str): Description to give the playlist.
        make_public (bool, Optional): If ``True`` and account has a
            subscription, make playlist public.
            Default: ``False``
        songs (list, Optional): A list of song dicts to add to the playlist.

    Returns:
        dict: Playlist information.
    """
    visibility = 'PUBLIC' if make_public else 'PRIVATE'
    playlist = self._call(
        mc_calls.PlaylistsCreate, name, description, visibility
    ).body
    return self.playlist_songs_add(songs, playlist) if songs else playlist
Create a playlist. Parameters: name (str): Name to give the playlist. description (str): Description to give the playlist. make_public (bool, Optional): If ``True`` and account has a subscription, make playlist public. Default: ``False`` songs (list, Optional): A list of song dicts to add to the playlist. Returns: dict: Playlist information.
def getAssemblies(pth):
    """
    Return the dependent assemblies of a binary.

    Reads the manifest either from a sibling ``<pth>.manifest`` file or from
    the binary's embedded RT_MANIFEST resources, and returns the list of
    dependent assemblies it declares (empty list when none can be read).
    """
    if not os.path.isfile(pth):
        pth = check_extract_from_egg(pth)[0][0]
    if pth.lower().endswith(".manifest"):
        return []
    # check for manifest file
    manifestnm = pth + ".manifest"
    if os.path.isfile(manifestnm):
        # An external .manifest file takes precedence over embedded resources.
        # `with` guarantees the handle is closed even if read() fails.
        with open(manifestnm, "rb") as fd:
            res = {RT_MANIFEST: {1: {0: fd.read()}}}
    elif not winresource:
        # resource access unavailable (needs pywin32)
        return []
    else:
        # check the binary for embedded manifest
        try:
            res = GetManifestResources(pth)
        # `except E as exc` replaces the Python-2-only `except E, exc` syntax.
        except winresource.pywintypes.error as exc:
            if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
                logger.info('Cannot get manifest resource from non-PE '
                            'file %s', pth)
                return []
            raise
    rv = []
    if RT_MANIFEST in res and len(res[RT_MANIFEST]):
        for name in res[RT_MANIFEST]:
            for language in res[RT_MANIFEST][name]:
                # check the manifest for dependent assemblies
                try:
                    manifest = Manifest()
                    manifest.filename = ":".join([pth, str(RT_MANIFEST),
                                                  str(name), str(language)])
                    manifest.parse_string(res[RT_MANIFEST][name][language],
                                          False)
                except Exception as exc:
                    # Fixed missing space between "%s, %s" and "from %s" in
                    # the concatenated format string.
                    logger.error("Can not parse manifest resource %s, %s "
                                 "from %s", name, language, pth)
                    logger.exception(exc)
                else:
                    if manifest.dependentAssemblies:
                        logger.debug("Dependent assemblies of %s:", pth)
                        logger.debug(", ".join(
                            [assembly.getid()
                             for assembly in manifest.dependentAssemblies]))
                        rv.extend(manifest.dependentAssemblies)
    return rv
Return the dependent assemblies of a binary.
def __geometryToGeomTemplate(self, geometry):
    """
    Converts a single geometry object to a geometry service geometry
    template value.

    Input:
       geometry - ArcREST geometry object
    Output:
       python dictionary of geometry template
    """
    # Ordered (class, esri type name) pairs; the first isinstance match wins,
    # preserving the original Polyline -> ... -> Envelope check order.
    type_map = (
        (Polyline, "esriGeometryPolyline"),
        (Polygon, "esriGeometryPolygon"),
        (Point, "esriGeometryPoint"),
        (MultiPoint, "esriGeometryMultipoint"),
        (Envelope, "esriGeometryEnvelope"),
    )
    for geom_cls, esri_name in type_map:
        if isinstance(geometry, geom_cls):
            return {"geometryType": esri_name,
                    "geometry": geometry.asDictionary}
    raise AttributeError("Invalid geometry type")
Converts a single geometry object to a geometry service geometry template value. Input: geometry - ArcREST geometry object Output: python dictionary of geometry template
def bios_settings(self):
    """Property to provide reference to `BIOSSettings` instance

    It is calculated once when the first time it is queried. On
    refresh, this property gets reset.
    """
    bios_path = utils.get_subresource_path_by(self, 'Bios')
    return bios.BIOSSettings(self._conn, bios_path,
                             redfish_version=self.redfish_version)
Property to provide reference to `BIOSSettings` instance It is calculated once when the first time it is queried. On refresh, this property gets reset.
def enforce_periodic_boundary_conditions(self):
    """
    Map every lattice site back into the central periodic image of the
    simulation cell.

    Sites outside the central simulation cell along any of the three axes
    are shifted by one cell length along that axis.

    Args:
        None

    Returns:
        None
    """
    for site in self.sites:
        for axis in range(3):
            length = self.cell_lengths[axis]
            # Wrap by a single cell image on each side, matching the
            # original one-shift-per-axis behaviour.
            if site.r[axis] < 0.0:
                site.r[axis] += length
            if site.r[axis] > length:
                site.r[axis] -= length
Ensure that all lattice sites are within the central periodic image of the simulation cell. Sites that are outside the central simulation cell are mapped back into this cell. Args: None Returns: None
def xception_entry(inputs, hidden_dim):
    """Xception entry flow.

    Args:
        inputs: input image tensor (logged via tf.summary.image, so it is
            expected to be 4-D image-shaped — TODO confirm with callers).
        hidden_dim: channel count of the final block; earlier blocks are
            capped at min(128/256, hidden_dim).

    Returns:
        The activation tensor produced by the last residual block.
    """
    with tf.variable_scope("xception_entry"):

        def xnet_resblock(x, filters, res_relu, name):
            """Resblock: separable-conv branch plus a strided 1x1 residual."""
            with tf.variable_scope(name):
                y = common_layers.separable_conv_block(
                    x,
                    filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
                    first_relu=True,
                    padding="SAME",
                    force2d=True,
                    name="sep_conv_block")
                # Max-pool with stride 2 to match the strided residual below.
                y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
                return y + common_layers.conv_block(
                    x,
                    filters, [((1, 1), (1, 1))],
                    padding="SAME",
                    strides=(2, 2),
                    first_relu=res_relu,
                    force2d=True,
                    name="res_conv0")

        tf.summary.image("inputs", inputs, max_outputs=2)
        # Two-conv stem; the first conv downsamples by 2.
        x = common_layers.conv_block(
            inputs,
            32, [((1, 1), (3, 3))],
            first_relu=False,
            padding="SAME",
            strides=(2, 2),
            force2d=True,
            name="conv0")
        x = common_layers.conv_block(
            x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True,
            name="conv1")
        x = xnet_resblock(x, min(128, hidden_dim), True, "block0")
        x = xnet_resblock(x, min(256, hidden_dim), False, "block1")
        return xnet_resblock(x, hidden_dim, False, "block2")
Xception entry flow.
def get_deposit(self, deposit_id, **params):
    """Fetch a single deposit belonging to this account.

    https://developers.coinbase.com/api/v2#show-a-deposit
    """
    client = self.api_client
    return client.get_deposit(self.id, deposit_id, **params)
https://developers.coinbase.com/api/v2#show-a-deposit
def nintegral(wave, indep_min=None, indep_max=None):
    r"""
    Return the numerical integral of a waveform's dependent variable vector.

    The method used is the
    `trapezoidal <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ method

    :param wave: Waveform
    :type  wave: :py:class:`peng.eng.Waveform`

    :param indep_min: Independent vector start point of computation
    :type  indep_min: integer or float

    :param indep_max: Independent vector stop point of computation
    :type  indep_max: integer or float

    :rtype: float

    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.nintegral

    :raises:
     * RuntimeError (Argument \`indep_max\` is not valid)

     * RuntimeError (Argument \`indep_min\` is not valid)

     * RuntimeError (Argument \`wave\` is not valid)

     * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments)

    .. [[[end]]]
    """
    # Work on a shallow copy so clipping does not mutate the caller's wave.
    ret = copy.copy(wave)
    # _bound_waveform clips (and validates) the independent-variable range.
    _bound_waveform(ret, indep_min, indep_max)
    return np.trapz(ret._dep_vector, ret._indep_vector)
r""" Return the numerical integral of a waveform's dependent variable vector. The method used is the `trapezoidal <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ method :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :rtype: float .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.nintegral :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) .. [[[end]]]
def envCheckFilter(self, name, attr):
    """Check if a specific graph attribute is enabled or disabled through
    the use of a filter based on include_<name> and exclude_<name>
    environment variables.

    @param name: Name of the Filter.
    @param attr: Name of the Attribute.
    @return:     Return True if the attribute is enabled.
    """
    flt = self._filters.get(name)
    if not flt:
        raise AttributeError("Undefined filter: %s" % name)
    return flt.check(attr)
Check if a specific graph attribute is enabled or disabled through the use of a filter based on include_<name> and exclude_<name> environment variables. @param name: Name of the Filter. @param attr: Name of the Attribute. @return: Return True if the attribute is enabled.
def highres_imu_send(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated, force_mavlink1=False):
    '''
    The IMU readings in SI units in NED body frame

    time_usec                 : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    xacc                      : X acceleration (m/s^2) (float)
    yacc                      : Y acceleration (m/s^2) (float)
    zacc                      : Z acceleration (m/s^2) (float)
    xgyro                     : Angular speed around X axis (rad / sec) (float)
    ygyro                     : Angular speed around Y axis (rad / sec) (float)
    zgyro                     : Angular speed around Z axis (rad / sec) (float)
    xmag                      : X Magnetic field (Gauss) (float)
    ymag                      : Y Magnetic field (Gauss) (float)
    zmag                      : Z Magnetic field (Gauss) (float)
    abs_pressure              : Absolute pressure in millibar (float)
    diff_pressure             : Differential pressure in millibar (float)
    pressure_alt              : Altitude calculated from pressure (float)
    temperature               : Temperature in degrees celsius (float)
    fields_updated            : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t)

    '''
    # Encode the message payload, then hand it to the shared send path;
    # force_mavlink1 forces MAVLink protocol v1 framing for the message.
    return self.send(self.highres_imu_encode(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated), force_mavlink1=force_mavlink1)
The IMU readings in SI units in NED body frame time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) xacc : X acceleration (m/s^2) (float) yacc : Y acceleration (m/s^2) (float) zacc : Z acceleration (m/s^2) (float) xgyro : Angular speed around X axis (rad / sec) (float) ygyro : Angular speed around Y axis (rad / sec) (float) zgyro : Angular speed around Z axis (rad / sec) (float) xmag : X Magnetic field (Gauss) (float) ymag : Y Magnetic field (Gauss) (float) zmag : Z Magnetic field (Gauss) (float) abs_pressure : Absolute pressure in millibar (float) diff_pressure : Differential pressure in millibar (float) pressure_alt : Altitude calculated from pressure (float) temperature : Temperature in degrees celsius (float) fields_updated : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t)
def convert_html_to_text(value, preserve_urls=False):
    r"""Strip HTML markup from *value* and return the plain text.

    Entities are decoded, ``<br>``/``<p>`` become newlines, and when
    ``preserve_urls`` is true each link's target is appended in
    parentheses after its text (unless the text already is the URL).

    >>> convert_html_to_text(
    ...     '''
    ...     <html><body>
    ...     Look &amp; click
    ...     <a href="https://example.com">here</a>
    ...     </body></html>''', preserve_urls=True)
    'Look & click here (https://example.com)'

    >>> convert_html_to_text(
    ...     '''
    ...     <html><body>
    ...     I'm here, <br/> click
    ...     <a href="https://example.com">me</a>
    ...     </body></html>''')
    "I'm here,\nclick me"
    """
    # MLStripper accumulates the text; close() flushes any pending data.
    stripper = MLStripper(preserve_urls=preserve_urls)
    stripper.feed(value)
    stripper.close()
    return stripper.get_data()
r""" >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com?timestamp=1234">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com?timestamp=1234)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &#38; click here ... </body></html>''', preserve_urls=True) 'Look & click here' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click on ... <a href="https://example.com">https://example.com</a> ... </body></html>''', preserve_urls=True) 'Look & click on https://example.com' >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''') "I'm here,\nclick me" >>> convert_html_to_text( ... ''' ... <html><body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body></html>''', preserve_urls=True) "I'm here!\nClick me (https://example.com)\n" >>> convert_html_to_text( ... ''' ... <html> ... <head> ... <title>I'm here</title> ... </head> ... <body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body> ... </html>''', preserve_urls=True) "I'm here!\nClick me (https://example.com)\n"
def rename_in_module(occurrences_finder, new_name, resource=None, pymodule=None,
                     replace_primary=False, region=None, reads=True, writes=True):
    """Return the changed source, or `None` if there are no changes."""
    if resource is not None:
        source_code = resource.read()
    else:
        source_code = pymodule.source_code
    collector = codeanalyze.ChangeCollector(source_code)
    for occurrence in occurrences_finder.find_occurrences(resource, pymodule):
        # Fixed primaries are never rewritten when replacing primaries.
        if replace_primary and occurrence.is_a_fixed_primary():
            continue
        # Rewrite either the whole primary or just the word.
        if replace_primary:
            start, end = occurrence.get_primary_range()
        else:
            start, end = occurrence.get_word_range()
        # Honour the read/write filters.
        if (not reads and not occurrence.is_written()) or \
                (not writes and occurrence.is_written()):
            continue
        # Apply only inside the requested region, when one was given.
        if region is None or region[0] <= start < region[1]:
            collector.add_change(start, end, new_name)
    return collector.get_changed()
Returns the changed source, or `None` if there are no changes
def change_parameters(self, params):
    """
    Utility function for changing the approximate distribution parameters.

    Walks the flat `params` vector and hands one entry to each parameter
    of each approximating distribution, in order.
    """
    index = 0
    for dist in self.q:
        for approx_param in range(dist.param_no):
            dist.vi_change_param(approx_param, params[index])
            index += 1
Utility function for changing the approximate distribution parameters
def wrapComponent(comp):
    """Wraps a StimulusComponent with a class containing methods for
    painting and editing.

    The result behaves like the component provided, but is also a
    subclass of QStimulusComponent.

    :param comp: Component to wrap
    :type comp: subclass of AbstractStimulusComponent
    :returns: subclass of AbstractStimulusComponent and QStimulusComponent
    """
    # Already wrapped (has paint capability) -- hand it back untouched.
    if hasattr(comp, 'paint'):
        return comp
    # Avoid a manual mapping: assume every wrapper in this module is the
    # component class name with a leading 'Q'.
    module_classes = {
        name[1:]: obj
        for name, obj in inspect.getmembers(sys.modules[__name__],
                                            inspect.isclass)
        if obj.__module__ == __name__
    }
    qclass = module_classes.get(comp.__class__.__name__, QStimulusComponent)
    return qclass(comp)
Wraps a StimulusComponent with a class containing methods for painting and editing. Class will, in fact, be the same as the component provided, but will also be a subclass of QStimulusComponent :param comp: Component to wrap :type comp: subclass of AbstractStimulusComponent :returns: subclass of AbstractStimulusComponent and QStimulusComponent
def read(self, limit=-1):
    """Read content. See file.read"""
    # Bytes left between the current position and the end of this segment.
    remaining = self.len + self.offset - self.parent_fd.tell()
    if limit == -1 or limit > remaining:
        limit = remaining
    return self.parent_fd.read(limit)
Read content. See file.read
def rows_to_columns(data, schema=None):
    """
    :param data: array of objects
    :param schema: Known schema, will be extended to include all properties found in data
    :return: Table
    """
    if not schema:
        schema = SchemaTree()
    all_schema = schema
    all_leaves = schema.leaves
    # Per-leaf columns: raw values plus repetition/definition level streams
    # (Dremel-style shredding — presumably; the helpers are named get_rep_level
    # and def_level, TODO confirm against the Table consumer).
    values = {full_name: [] for full_name in all_leaves}
    reps = {full_name: [] for full_name in all_leaves}
    defs = {full_name: [] for full_name in all_leaves}

    def _none_to_column(schema, path, rep_level, def_level):
        # A missing value touches every leaf under `path`: record levels only.
        for full_path in all_schema.leaves:
            if startswith_field(full_path, path):
                reps[full_path].append(rep_level)
                defs[full_path].append(def_level)

    def _value_to_column(value, schema, path, counters, def_level):
        # Recursively shred one value into the per-leaf columns.
        ptype = type(value)
        ntype, dtype, ltype, jtype, itype, byte_width = python_type_to_all_types[ptype]
        if jtype is NESTED:
            # Lists: the schema node must already be REPEATED.
            if schema.element.repetition_type != REPEATED:
                Log.error("Expecting {{path|quote}} to be repeated", path=path)
            new_path = path
            if not value:
                _none_to_column(schema, new_path, get_rep_level(counters), def_level)
            else:
                try:
                    new_schema = schema.more.get('.')
                    if not new_schema:
                        if schema.locked:
                            # DEFAULT TO REQUIRED ENTRIES
                            new_schema = schema
                            schema.element.repetition_type = REQUIRED
                        else:
                            # Extend the schema using the first element as a sample.
                            new_path = path
                            new_value = value[0]
                            ptype = type(new_value)
                            new_schema = schema.add(
                                new_path,
                                OPTIONAL,
                                ptype
                            )
                            if new_value is None or python_type_to_json_type[ptype] in PRIMITIVE:
                                # Back-fill levels for rows seen before this leaf existed.
                                values[new_path] = []
                                reps[new_path] = [0] * counters[0]
                                defs[new_path] = [0] * counters[0]
                    for k, new_value in enumerate(value):
                        new_counters = counters + (k,)
                        _value_to_column(new_value, new_schema, new_path, new_counters, def_level + 1)
                finally:
                    # Restore the node's repeated-ness even if recursion fails.
                    schema.element.repetition_type = REPEATED
        elif jtype is OBJECT:
            if value is None:
                if schema.element.repetition_type == REQUIRED:
                    Log.error("{{path|quote}} is required", path=path)
                _none_to_column(schema, path, get_rep_level(counters), def_level)
            else:
                if schema.element.repetition_type == REPEATED:
                    Log.error("Expecting {{path|quote}} to be repeated", path=path)
                if schema.element.repetition_type == REQUIRED:
                    new_def_level = def_level
                else:
                    # Optional object: deepen the counters and definition level.
                    counters = counters + (0,)
                    new_def_level = def_level + 1
                # First, known properties from the schema.
                for name, sub_schema in schema.more.items():
                    new_path = concat_field(path, name)
                    new_value = value.get(name, None)
                    _value_to_column(new_value, sub_schema, new_path, counters, new_def_level)
                # Then, properties not yet in the schema (rejected when locked).
                for name in set(value.keys()) - set(schema.more.keys()):
                    if schema.locked:
                        Log.error("{{path}} is not allowed in the schema", path=path)
                    new_path = concat_field(path, name)
                    new_value = value.get(name, None)
                    ptype = type(new_value)
                    sub_schema = schema.add(
                        new_path,
                        REPEATED if isinstance(new_value, list) else OPTIONAL,
                        ptype
                    )
                    if python_type_to_json_type[ptype] in PRIMITIVE:
                        values[new_path] = []
                        reps[new_path] = [0] * counters[0]
                        defs[new_path] = [0] * counters[0]
                    _value_to_column(new_value, sub_schema, new_path, counters, new_def_level)
        else:
            # Primitive leaf: record the value plus its levels.
            if jtype is STRING:
                value = value.encode('utf8')
            merge_schema(schema, path, value)
            values[path].append(value)
            if schema.element.repetition_type == REQUIRED:
                reps[path].append(get_rep_level(counters))
                defs[path].append(def_level)
            else:
                reps[path].append(get_rep_level(counters))
                defs[path].append(def_level + 1)

    for rownum, new_value in enumerate(data):
        try:
            _value_to_column(new_value, schema, '.', (rownum,), 0)
        except Exception as e:
            Log.error("can not encode {{row|json}}", row=new_value, cause=e)

    return Table(values, reps, defs, len(data), schema)
:param data: array of objects :param schema: Known schema, will be extended to include all properties found in data :return: Table
def run(self, command, application):
    """Get or set the profile.

    If .profile is called with no args, the current profile
    is displayed.  If the .profile command is called with a
    single arg, then the current profile for the application
    will be set to the new value.
    """
    argc = len(command)
    if argc == 1:
        profile = application.profile
        if profile is None:
            self._output.write(
                "Current shell profile: no profile configured\n"
                "You can change profiles using: .profile profile-name\n")
        else:
            self._output.write("Current shell profile: %s\n" % profile)
    elif argc == 2:
        new_profile_name = command[1]
        application.profile = new_profile_name
        self._output.write("Current shell profile changed to: %s\n"
                           % new_profile_name)
    else:
        self._err.write("Usage:\n%s\n" % self.USAGE)
Get or set the profile. If .profile is called with no args, the current profile is displayed. If the .profile command is called with a single arg, then the current profile for the application will be set to the new value.
def _wrap_value_with_context(self, tokens: List[Token], start: int, end: int) -> Extraction:
    """Wrap the token span [start, end) in an Extraction carrying both
    token and character offsets (char offsets are -1 for non-Token items)."""
    pieces = [tok.orth_ if isinstance(tok, Token) else tok
              for tok in tokens[start:end]]
    first, last = tokens[start], tokens[end - 1]
    start_char = first.idx if isinstance(first, Token) else -1
    end_char = (last.idx + len(last.orth_)) if isinstance(last, Token) else -1
    return Extraction(' '.join(pieces),
                      self.name,
                      start_token=start,
                      end_token=end,
                      start_char=start_char,
                      end_char=end_char)
Wraps the final result
def pbis(a):
    """End point of a reflected sun ray, given an angle a."""
    angle = 3 * a - math.pi
    return math.cos(angle), math.sin(angle)
End point of a reflected sun ray, given an angle a.
def main(argv=None):
    """Count the hits from logfile.

    Parses an access log for page URLs on the configured hostname, then
    inserts the per-document hit counts into the `document_hits` table and
    refreshes the hit ranks. Returns 0 on success.
    """
    parser = create_parser('hits_counter', description=__doc__)
    parser.add_argument('--hostname', default='cnx.org',
                        help="hostname of the site (default: cnx.org)")
    parser.add_argument('--log-format',
                        default=LOG_FORMAT_GZ, choices=LOG_FORMATS,
                        help="(default: {})".format(LOG_FORMAT_GZ))
    parser.add_argument('log_file',
                        help="path to the logfile.")
    args = parser.parse_args(argv)
    opener = LOG_FORMAT_OPENERS_MAPPING[args.log_format]

    # Build the URL pattern.
    # NOTE(review): '\.' in a non-raw string relies on Python keeping unknown
    # escapes literal; a raw string r'\.' would be the safe spelling.
    hostname = args.hostname.replace('.', '\.')
    url_pattern = URL_PATTERN_TMPLT.format(hostname)
    url_pattern = re.compile(url_pattern)

    # Parse the log to structured data.
    with opener(args.log_file) as log:
        hits, start_timestamp, end_timestamp = parse_log(log, url_pattern)

    # Parse the configuration file for the postgres connection string.
    settings = get_app_settings_from_arguments(args)

    # Insert the hits into the database.
    connection_string = settings[config.CONNECTION_STRING]
    db_connection = psycopg2.connect(connection_string)
    with db_connection:
        with db_connection.cursor() as cursor:
            for ident_hash, hit_count in hits.items():
                # Parameterized query: timestamps/counts are bound server-side.
                cursor.execute("""\
INSERT INTO document_hits
  (documentid, start_timestamp, end_timestamp, hits)
  SELECT module_ident, %s, %s, %s
  FROM modules
  WHERE ident_hash(uuid, major_version, minor_version) = %s""",
                               (start_timestamp, end_timestamp,
                                hit_count, ident_hash))
            cursor.execute("SELECT update_hit_ranks();")
    db_connection.close()
    return 0
Count the hits from logfile.
def parse_rawprofile_blocks(text):
    """
    Split the file into blocks along delimiters and put delimiters back
    in the list.
    """
    # The total time reported in the raw output is from pystone not kernprof
    # The pystone total time is actually the average time spent in the function
    delim = 'Total time: '
    delim2 = 'Pystone time: '
    #delim = 'File: '
    blocks = ut.regex_split('^' + delim, text)
    # Re-attach a delimiter to every block after the preamble.
    return blocks[:1] + [delim2 + block for block in blocks[1:]]
Split the file into blocks along delimiters and put the delimiters back in the list
def get_lyrics_genius(song_title):
    '''
    Scrapes the lyrics from Genius.com

    Searches the Genius API for `song_title`, takes the first hit, then
    scrapes the lyrics text from that song's public page.
    '''
    base_url = "http://api.genius.com"
    # GENIUS_KEY is a module-level API token.
    headers = {'Authorization': 'Bearer %s' % (GENIUS_KEY)}
    search_url = base_url + "/search"
    data = {'q': song_title}
    response = requests.get(search_url, data=data, headers=headers)
    # NOTE(review): the local name `json` shadows any json module import
    # within this function.
    json = response.json()
    # First search hit is assumed to be the wanted song.
    song_api_path = json["response"]["hits"][0]["result"]["api_path"]
    song_url = base_url + song_api_path
    response = requests.get(song_url, headers=headers)
    json = response.json()
    path = json["response"]["song"]["path"]
    page_url = "http://genius.com" + path
    page = requests.get(page_url)
    soup = BeautifulSoup(page.text, "html.parser")
    # The lyrics live in the first <p> of the song-body container.
    div = soup.find('div', {'class': 'song_body-lyrics'})
    lyrics = div.find('p').getText()
    return lyrics
Scrapes the lyrics from Genius.com
def _viewbox_set(self, viewbox): """ Friend method of viewbox to register itself. """ self._viewbox = viewbox # Connect viewbox.events.mouse_press.connect(self.viewbox_mouse_event) viewbox.events.mouse_release.connect(self.viewbox_mouse_event) viewbox.events.mouse_move.connect(self.viewbox_mouse_event) viewbox.events.mouse_wheel.connect(self.viewbox_mouse_event) viewbox.events.resize.connect(self.viewbox_resize_event)
Friend method of viewbox to register itself.
def partialReleaseComplete():
    """PARTIAL RELEASE COMPLETE Section 9.1.27"""
    # Protocol discriminator 0x6 followed by message type 0b00001111.
    header = TpPd(pd=0x6)
    message_type = MessageType(mesType=0xf)
    return header / message_type
PARTIAL RELEASE COMPLETE Section 9.1.27
def _init_append(self): """ Initializes file on 'a' mode. """ # Require to load the full file content in buffer self._write_buffer[:] = self._readall() # Make initial seek position to current end of file self._seek = self._size
Initializes file on 'a' mode.
def connect(filename: str, mode: str = 'r+', *, validate: bool = True, spec_version: str = "2.0.1") -> LoomConnection:
    """
    Establish a connection to a .loom file.

    Args:
        filename:       Path to the Loom file to open
        mode:           Read/write mode, 'r+' (read/write) or 'r' (read-only), defaults to 'r+'
        validate:       Validate the file structure against the Loom file format specification
        spec_version:   The loom file spec version to validate against (e.g. "2.0.1" or "old")

    Returns:
        A LoomConnection instance.

    Remarks:
        This function should typically be used as a context manager (i.e. inside a ``with``-block):

        .. highlight:: python
        .. code-block:: python

            import loompy
            with loompy.connect("mydata.loom") as ds:
                print(ds.ca.keys())

        This ensures that the file will be closed automatically when the
        context block ends.

        Note: if validation is requested, an exception is raised if validation fails.
    """
    connection = LoomConnection(filename, mode,
                                validate=validate,
                                spec_version=spec_version)
    return connection
Establish a connection to a .loom file. Args: filename: Path to the Loom file to open mode: Read/write mode, 'r+' (read/write) or 'r' (read-only), defaults to 'r+' validate: Validate the file structure against the Loom file format specification spec_version: The loom file spec version to validate against (e.g. "2.0.1" or "old") Returns: A LoomConnection instance. Remarks: This function should typically be used as a context manager (i.e. inside a ``with``-block): .. highlight:: python .. code-block:: python import loompy with loompy.connect("mydata.loom") as ds: print(ds.ca.keys()) This ensures that the file will be closed automatically when the context block ends Note: if validation is requested, an exception is raised if validation fails.
def MobileDeviceConfigurationProfile(self, data=None, subset=None):
    """{dynamic_docstring}"""
    # Delegate object construction/lookup to the shared factory.
    obj_class = jssobjects.MobileDeviceConfigurationProfile
    return self.factory.get_object(obj_class, data, subset)
{dynamic_docstring}
def from_s3_json(cls, bucket_name, key,
                 json_path=None, key_mapping=None,
                 aws_profile=None,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 region_name=None):  # pragma: no cover
    """
    Load database credential from json on s3.

    :param bucket_name: str
    :param key: str
    :param json_path: dotted path to the credential data inside the json
    :param key_mapping: optional mapping used to translate json keys
    :param aws_profile: if None, assume that you are using this from AWS cloud.
        (service on the same cloud doesn't need profile name)
    :param aws_access_key_id: str, not recommended to use
    :param aws_secret_access_key: str, not recommended to use
    :param region_name: str
    """
    import boto3

    ses = boto3.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        region_name=region_name,
        profile_name=aws_profile,
    )
    s3 = ses.resource("s3")
    bucket = s3.Bucket(bucket_name)
    # Renamed from ``object``, which shadowed the builtin of the same name.
    s3_object = bucket.Object(key)
    data = json.loads(s3_object.get()["Body"].read().decode("utf-8"))
    return cls._from_json_data(data, json_path, key_mapping)
Load database credential from json on s3. :param bucket_name: str :param key: str :param aws_profile: if None, assume that you are using this from AWS cloud. (service on the same cloud doesn't need profile name) :param aws_access_key_id: str, not recommended to use :param aws_secret_access_key: str, not recommended to use :param region_name: str
def __modify(self, checkout_id, **kwargs):
    """Call documentation: `/checkout/modify
    <https://www.wepay.com/developer/reference/checkout#modify>`_, plus
    extra keyword parameters:

    :keyword str access_token: will be used instead of instance's
       ``access_token``, with ``batch_mode=True`` will set `authorization`
       param to it's value.

    :keyword bool batch_mode: turn on/off the batch_mode, see
       :class:`wepay.api.WePay`

    :keyword str batch_reference_id: `reference_id` param for batch call,
       see :class:`wepay.api.WePay`

    :keyword str api_version: WePay API version, see
       :class:`wepay.api.WePay`
    """
    # Forward the required param plus any extra keyword options.
    return self.make_call(self.__modify, {'checkout_id': checkout_id}, kwargs)
Call documentation: `/checkout/modify <https://www.wepay.com/developer/reference/checkout#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to it's value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
def _map_from_binaries(self, eopatch, dst_shape, request_data):
    """ Each request represents a binary class which will be mapped to the scalar `raster_value`
    """
    if self.feature_name in eopatch[self.feature_type]:
        # Start from the existing raster feature (squeeze drops singleton dims).
        raster = eopatch[self.feature_type][self.feature_name].squeeze()
    else:
        # No existing feature: start from a raster filled with the no-data value.
        # NOTE(review): ones * no_data_val may upcast the dtype if
        # no_data_val is wider than raster_dtype -- confirm intended.
        raster = np.ones(dst_shape, dtype=self.raster_dtype) * self.no_data_val

    # Reproject this request's binary mask onto the target raster grid.
    new_raster = self._reproject(eopatch, self._to_binary_mask(request_data))

    # update raster: only non-zero (class) pixels overwrite existing values
    raster[new_raster != 0] = new_raster[new_raster != 0]

    return raster
Each request represents a binary class which will be mapped to the scalar `raster_value`
def synchronizeResponse(self, pid, vendorSpecific=None):
    """CNRead.synchronize(session, pid) → boolean POST /synchronize.

    Args:
      pid:
      vendorSpecific:
    """
    fields = {'pid': pid}
    return self.POST(['synchronize'], fields=fields, headers=vendorSpecific)
CNRead.synchronize(session, pid) → boolean POST /synchronize. Args: pid: vendorSpecific:
def userInformation(MoreData_presence=0):
    """USER INFORMATION Section 9.3.31

    :param MoreData_presence: when 1, append the optional More Data IE.
    :return: the assembled packet
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x20)  # 000100000
    c = UserUser()
    packet = a / b / c
    # ``== 1`` replaces ``is 1``: identity comparison with an int literal is
    # implementation-dependent and a SyntaxWarning on modern CPython.
    if MoreData_presence == 1:
        d = MoreDataHdr(ieiMD=0xA0, eightBitMD=0x0)
        packet = packet / d
    return packet
USER INFORMATION Section 9.3.31
def register_dataframe_method(method):
    """Register a function as a method attached to the Pandas DataFrame.

    Example
    -------

    .. code-block:: python

        @register_dataframe_method
        def print_column(df, col):
            '''Print the dataframe column given'''
            print(df[col])
    """
    def inner(*args, **kwargs):
        # Accessor class wrapping the pandas object: calling the accessor
        # forwards to ``method`` with the DataFrame as the first argument.
        class AccessorMethod(object):

            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @wraps(method)
            def __call__(self, *args, **kwargs):
                return method(self._obj, *args, **kwargs)

        # Register the accessor under the decorated function's name.
        register_dataframe_accessor(method.__name__)(AccessorMethod)

        return method

    # NOTE: ``inner()`` is invoked immediately, so registration happens at
    # decoration time and the original function is returned unchanged.
    return inner()
Register a function as a method attached to the Pandas DataFrame. Example ------- .. code-block:: python @register_dataframe_method def print_column(df, col): '''Print the dataframe column given''' print(df[col])
def points_are_in_a_straight_line(points, tolerance=1e-7):
    """
    Check whether a set of points fall on a straight line.
    Calculates the areas of triangles formed by triplets of the points.
    Returns False if any of these areas are larger than the tolerance.

    Args:
        points (list(np.array)): list of Cartesian coordinates for each point.
        tolerance (optional:float): the maximum triangle size for these points to
            be considered colinear. Default is 1e-7.

    Returns:
        (bool): True if all points fall on a straight line (within the
            allowed tolerance).
    """
    # Fewer than three points are trivially colinear. Previously zero or one
    # point raised an IndexError when indexing points[0]/points[1].
    if len(points) < 3:
        return True
    a, b = points[0], points[1]
    return all(
        area_of_a_triangle_in_cartesian_space(a, b, c) <= tolerance
        for c in points[2:]
    )
Check whether a set of points fall on a straight line. Calculates the areas of triangles formed by triplets of the points. Returns False if any of these areas are larger than the tolerance. Args: points (list(np.array)): list of Cartesian coordinates for each point. tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7. Returns: (bool): True if all points fall on a straight line (within the allowed tolerance).
def serviceViewChangerOutBox(self, limit: int = None) -> int:
    """
    Service at most `limit` number of messages from the view_changer's outBox.

    :param limit: maximum number of messages to service; ``None`` (or 0)
        means drain the whole outBox.
    :return: the number of messages successfully serviced.
    """
    msgCount = 0
    while self.view_changer.outBox and (not limit or msgCount < limit):
        # NOTE(review): msgCount counts every popped message, including
        # unhandled ones that only get logged below.
        msgCount += 1
        msg = self.view_changer.outBox.popleft()
        # Only view-change protocol messages are known here; anything else
        # indicates a programming error upstream.
        if isinstance(msg, (InstanceChange, ViewChangeDone)):
            self.send(msg)
        else:
            logger.error("Received msg {} and don't know how to handle it".
                         format(msg))
    return msgCount
Service at most `limit` number of messages from the view_changer's outBox. :return: the number of messages successfully serviced.
def z(self, position=None):
    """Set/Get actor position along z axis."""
    current = self.GetPosition()
    if position is None:
        # Getter: report the current z coordinate.
        return current[2]
    # Setter: keep x/y, replace z, refresh the trail if one exists,
    # and return self so calls can be chained.
    self.SetPosition(current[0], current[1], position)
    if self.trail:
        self.updateTrail()
    return self
Set/Get actor position along z axis.
def pop_parameter(key):
    '''Remove and get parameter by key.

    Args:
        key(str): Key of parameter.
    Returns:
        ~nnabla.Variable
            Parameter if key found, otherwise None.
    '''
    # Hierarchical keys like "scope/sub/name" recurse into nested scopes.
    names = key.split('/')
    if len(names) > 1:
        with parameter_scope(names[0]):
            return pop_parameter('/'.join(names[1:]))
    global current_scope
    # dict.pop with a default removes and returns in a single step,
    # yielding None when the key is absent.
    return current_scope.pop(key, None)
Remove and get parameter by key. Args: key(str): Key of parameter. Returns: ~nnabla.Variable Parameter if key found, otherwise None.
def _coligative(self, rho, A, fav):
    """Miscelaneous properties of humid air

    Parameters
    ----------
    rho : float
        Density, [kg/m³]
    A : float
        Mass fraction of dry air in humid air, [kg/kg]
    fav : dict
        dictionary with helmholtz energy and derivatives

    Returns
    -------
    prop : dict
        Dictionary with calculated properties:

            * mu: Relative chemical potential, [kJ/kg]
            * muw: Chemical potential of water, [kJ/kg]
            * M: Molar mass of humid air, [g/mol]
            * HR: Humidity ratio, [-]
            * xa: Mole fraction of dry air, [-]
            * xw: Mole fraction of water, [-]

    References
    ----------
    IAPWS, Guideline on an Equation of State for Humid Air in Contact with
    Seawater and Ice, Consistent with the IAPWS Formulation 2008 for the
    Thermodynamic Properties of Seawater, Table 12,
    http://www.iapws.org/relguide/SeaAir.html
    """
    molar_mass = 1 / ((1 - A) / Mw + A / Ma)
    xa = A * Mw / Ma / (1 - A * (1 - Mw / Ma))
    return {
        "mu": fav["fira"],
        "muw": fav["fir"] + rho * fav["fird"] - A * fav["fira"],
        "M": molar_mass,
        "HR": 1 / A - 1,
        "xa": xa,
        "xw": 1 - xa,
    }
Miscelaneous properties of humid air Parameters ---------- rho : float Density, [kg/m³] A : float Mass fraction of dry air in humid air, [kg/kg] fav : dict dictionary with helmholtz energy and derivatives Returns ------- prop : dict Dictionary with calculated properties: * mu: Relative chemical potential, [kJ/kg] * muw: Chemical potential of water, [kJ/kg] * M: Molar mass of humid air, [g/mol] * HR: Humidity ratio, [-] * xa: Mole fraction of dry air, [-] * xw: Mole fraction of water, [-] References ---------- IAPWS, Guideline on an Equation of State for Humid Air in Contact with Seawater and Ice, Consistent with the IAPWS Formulation 2008 for the Thermodynamic Properties of Seawater, Table 12, http://www.iapws.org/relguide/SeaAir.html
def to_csv(self, file):
    """
    Write all the trajectories of a collection to a csv file with the
    headers 'description', 'time' and 'value'.

    :param file: a file object to write to
    :type file: :class:`file`
    :return:
    """
    file.write("description,time,value\n")
    for trajectory in self:
        symbol = trajectory.description.symbol
        for timestamp, value in trajectory:
            file.write("%s,%f,%f\n" % (symbol, timestamp, value))
Write all the trajectories of a collection to a csv file with the headers 'description', 'time' and 'value'. :param file: a file object to write to :type file: :class:`file` :return:
def start(self):
    """
    Creates a SSL connection to the iDigi Server and sends a
    ConnectionRequest message.
    """
    self.log.info("Starting SSL Session for Monitor %s." % self.monitor_id)
    if self.socket is not None:
        raise Exception("Socket already established for %s." % self)

    try:
        # Create the socket, wrap it in SSL, then connect.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.ca_certs is None:
            self.socket = ssl.wrap_socket(self.socket)
        else:
            # Validate the server's certificate against our CA bundle.
            self.socket = ssl.wrap_socket(self.socket,
                                          cert_reqs=ssl.CERT_REQUIRED,
                                          ca_certs=self.ca_certs)
        self.socket.connect((self.client.hostname, PUSH_SECURE_PORT))
        self.socket.setblocking(0)
    except Exception:
        # Tear down whatever was established before propagating.
        self.socket.close()
        self.socket = None
        raise

    self.send_connection_request()
Creates a SSL connection to the iDigi Server and sends a ConnectionRequest message.
def center_text_cursor(object):
    """
    Centers the text cursor position.

    :param object: Object to decorate.
    :type object: object
    :return: Object.
    :rtype: object
    """

    @functools.wraps(object)
    def center_text_cursor_wrapper(*args, **kwargs):
        """
        Centers the text cursor position.

        :param \\*args: Arguments.
        :type \\*args: \\*
        :param \\*\\*kwargs: Keywords arguments.
        :type \\*\\*kwargs: \\*\\*
        :return: Object.
        :rtype: object
        """

        # Toggle center-on-scroll around the wrapped call when the first
        # positional argument supports it.
        first_arg = foundations.common.get_first_item(args) if args else None
        supports_scroll = hasattr(first_arg, "setCenterOnScroll")
        if args and supports_scroll:
            first_arg.setCenterOnScroll(True)

        value = object(*args, **kwargs)

        if args and supports_scroll:
            first_arg.setCenterOnScroll(False)

        return value

    return center_text_cursor_wrapper
Centers the text cursor position. :param object: Object to decorate. :type object: object :return: Object. :rtype: object
def copy_vpcs_configs(source, target):
    """
    Copy any VPCS configs to the converted topology

    :param str source: Source topology directory
    :param str target: Target topology files directory
    """
    # Prepare a list of files to copy
    vpcs_files = glob.glob(os.path.join(source, 'configs', '*.vpc'))
    vpcs_hist = os.path.join(source, 'configs', 'vpcs.hist')
    vpcs_config_path = os.path.join(target, 'vpcs', 'multi-host')
    if os.path.isfile(vpcs_hist):
        vpcs_files.append(vpcs_hist)
    # Create the directory tree; exist_ok lets the conversion be re-run
    # without makedirs raising on an already-existing target directory.
    if len(vpcs_files) > 0:
        os.makedirs(vpcs_config_path, exist_ok=True)
    # Copy the files
    for old_file in vpcs_files:
        new_file = os.path.join(vpcs_config_path, os.path.basename(old_file))
        shutil.copy(old_file, new_file)
Copy any VPCS configs to the converted topology :param str source: Source topology directory :param str target: Target topology files directory
def read_string(self, content):
    """
    Reads a Python string that contains C++ code, and return the
    declarations tree.

    :param content: C++ source code to parse
    :return: the declarations tree produced by ``read_file``
    """
    header_file = utils.create_temp_file_name(suffix='.h')
    with open(header_file, "w+") as f:
        f.write(content)

    # try/finally guarantees the temporary header is removed on both the
    # success and the error path (the cleanup call was duplicated before).
    try:
        return self.read_file(header_file)
    finally:
        utils.remove_file_no_raise(header_file, self.__config)
Reads a Python string that contains C++ code, and return the declarations tree.
def log_images(self, name, images, step=None):
    """Log new images for given name on given step.

    Args:
        name (str): name of the variable (it will be converted to a valid
            tensorflow summary name).
        images (list): list of images to visualize
        step (int): non-negative integer used for visualization
    """
    # A bare string is almost certainly a caller mistake; fail loudly.
    if isinstance(images, six.string_types):
        raise TypeError('"images" should be a list of ndarrays, got {}'
                        .format(type(images)))
    self._check_step(step)
    summary_name = self._ensure_tf_name(name)
    image_summary = self._image_summary(summary_name, images, step=step)
    self._log_summary(summary_name, image_summary, images, step=step)
Log new images for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). images (list): list of images to visualize step (int): non-negative integer used for visualization
def remotes_get(self):
    """Return remotes like git remote -v.

    :rtype: dict of tuples
    """
    output = self.run(['remote'])
    # Drop the empty strings produced by trailing newlines before lookup.
    names = (line for line in output.split('\n') if line)
    return {name: self.remote_get(name) for name in names}
Return remotes like git remote -v. :rtype: dict of tuples
def get_uids(self, filename=None):
    """UIDs of all reminders in the file excluding included files

    If a filename is specified, only its UIDs are returned, otherwise all.

    filename -- the remind file
    """
    self._update()
    if filename:
        file_reminders = self._reminders.get(filename)
        # Unknown files yield an empty list, as before.
        return file_reminders.keys() if file_reminders is not None else []
    uids = []
    for file_uids in self._reminders.values():
        uids.extend(file_uids)
    return uids
UIDs of all reminders in the file excluding included files If a filename is specified, only its UIDs are returned, otherwise all. filename -- the remind file
def check_backslashes(self, definition, docstring):
    r'''D301: Use r""" if any backslashes in a docstring.

    Use r"""raw triple double quotes""" if you use any backslashes (\)
    in your docstrings.
    '''
    # Just check that the docstring is raw; check_triple_double_quotes
    # ensures the correct quotes.
    if not docstring or '\\' not in docstring:
        return
    if docstring.startswith(('r', 'ur')):
        return
    return violations.D301()
r'''D301: Use r""" if any backslashes in a docstring. Use r"""raw triple double quotes""" if you use any backslashes (\) in your docstrings.
def all_dims(self):
    """The dimensions for each of the arrays in this list"""
    dims = []
    for arr in self:
        if isinstance(arr, ArrayList):
            # Nested lists report their own (nested) dimensions.
            dims.append(arr.all_dims)
        else:
            dims.append(_get_dims(arr))
    return dims
The dimensions for each of the arrays in this list
def run_with_tornado(self):
    """ runs the tornado/websockets based test server """
    from zengine.tornado_server.server import runserver
    # Serve on the address/port supplied through the manager's CLI args.
    address = self.manager.args.addr
    port = int(self.manager.args.port)
    runserver(address, port)
runs the tornado/websockets based test server
def _collapse_edge_passing_predicates(graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
    """Collapse all edges passing the given edge predicates."""
    matching_edges = filter_edges(graph, edge_predicates=edge_predicates)
    for source, target, _ in matching_edges:
        collapse_pair(graph, survivor=source, victim=target)
Collapse all edges passing the given edge predicates.
def read_csv(csv_name, usecols=None):
    """Returns a DataFrame from a .csv file stored in /data/raw/"""
    full_path = os.path.join(DATA_FOLDER, csv_name)
    frame = pd.read_csv(full_path,
                        low_memory=False,
                        usecols=usecols,
                        encoding="utf-8")
    return frame
Returns a DataFrame from a .csv file stored in /data/raw/