code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def download(url, proxies=None):
    """Download a PDF or DJVU document from a url, optionally using proxies.

    :param url: The URL of the PDF/DJVU document to fetch.
    :param proxies: An optional list of proxy strings, tried sequentially.
        Include ``""`` (empty string) in the list to attempt a direct,
        proxy-less fetch.
    :returns: A tuple ``(content, content_type)`` of the downloaded data,
        or ``(None, None)`` if every attempt failed.
    """
    if proxies is None:
        proxies = [""]
    for proxy in proxies:
        if proxy == "":
            # Direct fetch: restore the unpatched socket implementation.
            socket.socket = DEFAULT_SOCKET
        elif proxy.startswith('socks'):
            if proxy[5] == '4':
                proxy_type = socks.SOCKS4
            else:
                proxy_type = socks.SOCKS5
            proxy = proxy[proxy.find('://') + 3:]
            try:
                proxy, port = proxy.split(':')
                # BUGFIX: PySocks expects an integer port, not a string.
                port = int(port)
            except ValueError:
                port = None
            socks.set_default_proxy(proxy_type, proxy, port)
            socket.socket = socks.socksocket
        else:
            try:
                proxy, port = proxy.split(':')
                # BUGFIX: port must be an int for PySocks.
                port = int(port)
            except ValueError:
                port = None
            socks.set_default_proxy(socks.HTTP, proxy, port)
            socket.socket = socks.socksocket
        downloaded = _download_helper(url)
        if downloaded is not None:
            return downloaded
    return (None, None)
Download a PDF or DJVU document from a url, eventually using proxies. :params url: The URL to the PDF/DJVU document to fetch. :params proxies: An optional list of proxies to use. Proxies will be \ used sequentially. Proxies should be a list of proxy strings. \ Do not forget to include ``""`` (empty string) in the list if \ you want to try direct fetching without any proxy. :returns: A tuple of the raw content of the downloaded data and its \ associated content-type. Returns ``(None, None)`` if it was \ unable to download the document. >>> download("http://arxiv.org/pdf/1312.4006.pdf") # doctest: +SKIP
def selected(self, interrupt=False):
    """Announce this object's title because it has been selected."""
    title = self.get_title()
    self.ao2.output(title, interrupt=interrupt)
This object has been selected.
def vote_poll(
    self, chat_id: Union[int, str], message_id: int, option: int
) -> bool:
    """Vote in a poll.

    Args:
        chat_id: Unique identifier (int) or username (str) of the target
            chat ("me"/"self" for Saved Messages).
        message_id: Unique poll message identifier inside this chat.
            BUGFIX: the annotation previously used the builtin ``id``
            instead of ``int``.
        option: Index of the poll option to vote for (0 to 9).

    Returns:
        True on success.
    """
    poll = self.get_messages(chat_id, message_id).poll
    self.send(
        functions.messages.SendVote(
            peer=self.resolve_peer(chat_id),
            msg_id=message_id,
            options=[poll.options[option].data]
        )
    )
    return True
Use this method to vote a poll. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). message_id (``int``): Unique poll message identifier inside this chat. option (``int``): Index of the poll option you want to vote for (0 to 9). Returns: On success, True is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
def is_after(self, ts):
    """Return True if this event's timestamp is at or after ``ts``.

    ``ts`` is a datetime; it is converted to a UTC epoch second count
    before comparison against ``self.timestamp``.
    """
    return self.timestamp >= int(calendar.timegm(ts.timetuple()))
Compare this event's timestamp to a given timestamp; True if this event is at or after it.
def read_file(filename):
    """Return the contents of ``filename``, or None when it is not a file."""
    if not os.path.isfile(filename):
        return None
    with open(filename, 'r') as handle:
        return handle.read()
return the contents of the file named filename or None if file not found
def frame_info(self):
    """Return 'filename:lineno' identifying the caller's frame.

    Returns '' unless DEBUG logging is enabled, so the (relatively
    expensive) frame inspection is skipped in production.
    """
    if not self._logger.isEnabledFor(logging.DEBUG):
        return ''
    caller = sys._getframe(3)
    filename = os.path.split(caller.f_code.co_filename)[1]
    return '{}:{}'.format(filename, caller.f_lineno)
Return a string identifying the current frame.
def _compare_columns(self, new_columns, old_columns):
    """Compute the differences between new and old column property maps.

    Returns a 5-tuple of dicts:
    (add, remove, rename, retype, resize) keyed by column name.
    """
    add_columns, remove_columns = {}, {}
    rename_columns, retype_columns, resize_columns = {}, {}, {}
    for name, props in new_columns.items():
        if name not in old_columns.keys():
            add_columns[name] = True
            # props[2] names an old column this one was renamed from; a
            # rename supersedes an add.
            if props[2]:
                if props[2] in old_columns.keys():
                    rename_columns[name] = props[2]
                    del add_columns[name]
        else:
            # Existing column: detect type and size changes.
            if props[1] != old_columns[name][1]:
                retype_columns[name] = props[1]
            if props[3] != old_columns[name][3]:
                resize_columns[name] = props[3]
    for name in set(old_columns.keys()) - set(new_columns.keys()):
        remove_columns[name] = True
    return add_columns, remove_columns, rename_columns, retype_columns, resize_columns
a helper method for generating differences between column properties
def __select_builder(lxml_builder, libxml2_builder, cmdline_builder):
    """Select a builder based on which Python XML modules are present."""
    if prefer_xsltproc:
        return cmdline_builder
    if has_libxml2:
        return libxml2_builder
    return lxml_builder if has_lxml else cmdline_builder
Selects a builder, based on which Python modules are present.
def _anchor_path(self, anchor_id):
    """Absolute path to the data file for `anchor_id`."""
    return self._spor_dir / '{}.yml'.format(anchor_id)
Absolute path to the data file for `anchor_id`.
def include_fields(self, *args):
    r"""Add fields to be returned when searching, on top of the defaults.

    :param args: field names to include
    :returns: :class:`Search` (self, for chaining)
    """
    self._includefields.extend(args)
    return self
r""" Include fields is the fields that you want to be returned when searching. These are in addition to the fields that are always included below. :param args: items passed in will be turned into a list :returns: :class:`Search` >>> bugzilla.search_for.include_fields("flags") The following fields are always included in search: 'version', 'id', 'summary', 'status', 'op_sys', 'resolution', 'product', 'component', 'platform'
def reset_config(ip, mac):
    """Reset the current configuration of a myStrom WiFi Button."""
    click.echo("Reset configuration of button %s..." % ip)
    payload = {
        'single': "",
        'double': "",
        'long': "",
        'touch': "",
    }
    response = requests.post(
        'http://{}/{}/{}/'.format(ip, URI, mac),
        data=payload, timeout=TIMEOUT)
    if response.status_code == 200:
        click.echo("Reset configuration of %s" % mac)
Reset the current configuration of a myStrom WiFi Button.
def is_base_datatype(datatype, version=None):
    """Return True when ``datatype`` (e.g. 'ST') is a base datatype of the
    given HL7 ``version`` (defaults to the library default version)."""
    version = get_default_version() if version is None else version
    return load_library(version).is_base_datatype(datatype)
Check if the given datatype is a base datatype of the specified version :type datatype: ``str`` :param datatype: the datatype (e.g. ST) :type version: ``str`` :param version: the HL7 version (e.g. 2.5) :return: ``True`` if it is a base datatype, ``False`` otherwise >>> is_base_datatype('ST') True >>> is_base_datatype('CE') False
def _add_parameters(self, parameter_map, parameter_list):
    """Populate ``parameter_map`` from ``parameter_list``, resolving any
    ``$ref`` reference objects against the specification."""
    for param in parameter_list:
        ref = param.get('$ref')
        if ref:
            param = self.specification['parameters'].get(ref.split('/')[-1])
        parameter_map[param['name']] = param
Populates the given parameter map with the list of parameters provided, resolving any reference objects encountered. Args: parameter_map: mapping from parameter names to parameter objects parameter_list: list of either parameter objects or reference objects
def comments(accountable):
    """List all comments for a given issue key."""
    issue_comments = accountable.issue_comments()
    headers = sorted(['author_name', 'body', 'updated'])
    if not issue_comments:
        click.secho(
            'No comments found for {}'.format(accountable.issue_key),
            fg='red')
        return
    # Sorted keys align column order with the sorted headers row.
    rows = [[value for key, value in sorted(comment.items()) if key in headers]
            for comment in issue_comments]
    rows.insert(0, headers)
    print_table(SingleTable(rows))
Lists all comments for a given issue key.
def _on_split_requested(self):
    """Emit ``split_requested`` with the orientation named by the sender."""
    orientation = self.sender().text()
    widget = self.widget(self.tab_under_menu())
    direction = (QtCore.Qt.Horizontal if 'horizontally' in orientation
                 else QtCore.Qt.Vertical)
    self.split_requested.emit(widget, direction)
Emits the split requested signal with the desired orientation.
def _get_ln_a_n_max(self, C, n_sites, idx, rup):
    """Rock-site amplification defined in equations 10a and 10b."""
    result = C["lnSC1AM"] * np.ones(n_sites)
    # Add the class-specific site term for each populated site class.
    for site_class in (2, 3, 4):
        if np.any(idx[site_class]):
            result[idx[site_class]] += C["S{:g}".format(site_class)]
    return result
Defines the rock site amplification defined in equations 10a and 10b
def code_timer(reset=False):
    """Return the module-global :class:`CodeTimer`, creating it on demand.

    :param reset: when True, replace the global timer with a fresh one.
    :returns: the (possibly new) global CodeTimer instance.
    """
    global CODE_TIMER
    if reset or CODE_TIMER is None:
        # BUGFIX: the previous version created a new CodeTimer without
        # storing it in the global (so the timer was never actually shared
        # across files), and the reset branch returned None.
        CODE_TIMER = CodeTimer()
    return CODE_TIMER
Sets a global variable for tracking the timer across multiple files
def show_disk(name=None, kwargs=None, call=None):
    """Show the details of an existing disk.

    CLI Example::

        salt-cloud -a show_disk myinstance disk_name=mydisk
        salt-cloud -f show_disk gce disk_name=mydisk
    """
    if not kwargs or 'disk_name' not in kwargs:
        log.error(
            'Must specify disk_name.'
        )
        return False
    connection = get_conn()
    return _expand_disk(connection.ex_get_volume(kwargs['disk_name']))
Show the details of an existing disk. CLI Example: .. code-block:: bash salt-cloud -a show_disk myinstance disk_name=mydisk salt-cloud -f show_disk gce disk_name=mydisk
def compose(f: Callable[[Any], Monad], g: Callable[[Any], Monad]) -> Callable[[Any], Monad]:
    r"""Monadic compose function.

    Right-to-left Kleisli composition of two monadic functions.

    (<=<) :: Monad m => (b -> m c) -> (a -> m b) -> a -> m c
    f <=< g = \x -> g x >>= f
    """
    def composed(value):
        return g(value).bind(f)
    return composed
r"""Monadic compose function. Right-to-left Kleisli composition of two monadic functions. (<=<) :: Monad m => (b -> m c) -> (a -> m b) -> a -> m c f <=< g = \x -> g x >>= f
def tone_marks():
    """Regex matching the character that follows a tone-modifying mark.

    Assumes the `tone_marks` pre-processor already ensured spacing.
    """
    return RegexBuilder(
        pattern_args=symbols.TONE_MARKS,
        pattern_func=lambda mark: u"(?<={}).".format(mark)).regex
Keep tone-modifying punctuation by matching following character. Assumes the `tone_marks` pre-processor was run for cases where there might not be any space after a tone-modifying punctuation mark.
def requires_lock(function):
    """Decorator: run ``function`` only if the caller owns the file lock.

    The wrapped callable's first positional argument must be the filename;
    RequiresLockException is raised when the lock is not held.
    """
    def wrapper(self, filename, *args, **kwargs):
        if not self.owns_lock(filename):
            raise RequiresLockException()
        return function(self, filename, *args, **kwargs)
    return wrapper
Decorator to check if the user owns the required lock. The first argument must be the filename.
def close(self):
    """Flush and close the file.

    May be called more than once without error. Once the file is closed,
    any other operation on it will raise a ValueError.
    """
    if self._mode == _MODE_CLOSED:
        return
    try:
        if self._mode in (_MODE_READ, _MODE_READ_EOF):
            self._decompressor = None
            self._buffer = None
        elif self._mode == _MODE_WRITE:
            # Flush any pending compressed data before releasing resources.
            self._fp.write(self._compressor.flush())
            self._compressor = None
    finally:
        try:
            if self._closefp:
                self._fp.close()
        finally:
            # Always mark closed, even if closing the underlying fp failed.
            self._fp = None
            self._closefp = False
            self._mode = _MODE_CLOSED
Flush and close the file. May be called more than once without error. Once the file is closed, any other operation on it will raise a ValueError.
def libvlc_media_list_new(p_instance):
    """Create an empty media list.

    @param p_instance: libvlc instance.
    @return: empty media list, or NULL on error.
    """
    # Reuse the cached binding when available; otherwise build it.
    f = _Cfunctions.get('libvlc_media_list_new', None) or \
        _Cfunction('libvlc_media_list_new', ((1,),), class_result(MediaList),
                   ctypes.c_void_p, Instance)
    return f(p_instance)
Create an empty media list. @param p_instance: libvlc instance. @return: empty media list, or NULL on error.
def r2z(r):
    """Fisher z-transformation of a correlation value.

    Parameters
    ----------
    r : int or ndarray
        Correlation value.

    Returns
    -------
    int or ndarray
        Fisher z-transformed correlation value.
    """
    # Suppress warnings for |r| >= 1 (yields inf/nan as appropriate).
    with np.errstate(invalid='ignore', divide='ignore'):
        return (np.log(1 + r) - np.log(1 - r)) / 2
Function that calculates the Fisher z-transformation Parameters ---------- r : int or ndarray Correlation value Returns ---------- result : int or ndarray Fishers z transformed correlation value
def detect_encoding(fp, default=None):
    """Detect the character encoding of an open file.

    :param fp: Open Python file pointer.
    :param default: Fallback encoding to use.
    :returns: The detected encoding; the file position is restored
        before returning.
    """
    initial_position = fp.tell()
    try:
        sample = fp.read(
            current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024))
        guess = cchardet.detect(sample)
        threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9)
        if guess.get('confidence', 0) > threshold:
            return guess.get('encoding', default)
        return default
    except Exception:
        current_app.logger.warning('Encoding detection failed.', exc_info=True)
        return default
    finally:
        fp.seek(initial_position)
Detect the character encoding of a file. :param fp: Open Python file pointer. :param default: Fallback encoding to use. :returns: The detected encoding. .. note:: The file pointer is returned at its original read position.
def friendships_create(self, user_id=None, screen_name=None, follow=None):
    """Follow the specified user (POST friendships/create).

    One of ``user_id`` or ``screen_name`` is required; ``follow`` enables
    notifications for the target user. Returns a dict describing the
    newly followed user.
    """
    params = {}
    set_str_param(params, 'user_id', user_id)
    set_str_param(params, 'screen_name', screen_name)
    set_bool_param(params, 'follow', follow)
    return self._post_api('friendships/create.json', params)
Allows the authenticating users to follow the specified user. https://dev.twitter.com/docs/api/1.1/post/friendships/create :param str user_id: The ID of the user to befriend. Required if ``screen_name`` isn't given. :param str screen_name: The screen name of the user to befriend. Required if ``user_id`` isn't given. :param bool follow: Enable notifications for the target user. :returns: A dict containing the newly followed user.
def remove_option(self, section, name, value=None):
    """Remove option(s) matching ``section``/``name`` from a unit.

    When ``value`` is given, only options with that exact value are
    removed; otherwise all matches are. Returns True when at least one
    option was removed, else False. Raises RuntimeError for submitted
    (live) units.
    """
    if self._is_live():
        raise RuntimeError('Submitted units cannot update their options')
    removed = 0
    # Iterate over a copy so removal does not disturb iteration.
    for option in list(self._data['options']):
        if option['section'] != section or option['name'] != name:
            continue
        if value is None or option['value'] == value:
            self._data['options'].remove(option)
            removed += 1
    return removed > 0
Remove an option from a unit Args: section (str): The section to remove from. name (str): The item to remove. value (str, optional): If specified, only the option matching this value will be removed If not specified, all options with ``name`` in ``section`` will be removed Returns: True: At least one item was removed False: The item requested to remove was not found
def getAtomLinesForResidueInRosettaStructure(self, resid):
    """Return the ATOM lines whose residue number equals ``resid``.

    Assumes a Rosetta-generated structure where residues are uniquely
    identified by number (columns 23-27 of the PDB line).
    """
    matching = [line for line in self.lines
                if line[0:4] == "ATOM" and resid == int(line[22:27])]
    if not matching:
        raise Exception("Could not find the ATOM/HETATM line corresponding to residue '%(resid)s'." % vars())
    return matching
We assume a Rosetta-generated structure where residues are uniquely identified by number.
def kabsch_rmsd(P, Q, translate=False):
    """RMSD of P rotated onto Q with the Kabsch algorithm.

    Parameters
    ----------
    P, Q : array
        (N, D) point matrices.
    translate : bool
        When True, center both point sets on their centroids first.

    Returns
    -------
    float
        Root-mean-square deviation after optimal rotation.
    """
    if translate:
        Q = Q - centroid(Q)
        P = P - centroid(P)
    return rmsd(kabsch_rotate(P, Q), Q)
Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD. Parameters ---------- P : array (N,D) matrix, where N is points and D is dimension. Q : array (N,D) matrix, where N is points and D is dimension. translate : bool Use centroids to translate vector P and Q unto each other. Returns ------- rmsd : float root-mean squared deviation
def hidden_cursor(self):
    """Context manager: hide the terminal cursor inside the block and make
    it visible again on leaving."""
    self.stream.write(self.hide_cursor)
    try:
        yield
    finally:
        # Always restore the cursor, even if the body raised.
        self.stream.write(self.normal_cursor)
Return a context manager that hides the cursor while inside it and makes it visible on leaving.
def is_analyst_assignment_allowed(self):
    """Return True when an analyst may be assigned in this view."""
    return bool(self.allow_edit and self.can_manage
                and not self.filter_by_user)
Check if the analyst can be assigned
def inserted_hs_indices(self):
    """List of interleaved-sequence positions of inserted subtotals.

    Array-type dimensions cannot carry subtotals, so they yield [].
    """
    if self.dimension_type in DT.ARRAY_TYPES:
        return []
    items = self._iter_interleaved_items(self.valid_elements)
    return [i for i, item in enumerate(items) if item.is_insertion]
list of int index of each inserted subtotal for the dimension. Each value represents the position of a subtotal in the interleaved sequence of elements and subtotals items.
def get_branching_nodes(self):
    """Return the set of graph nodes with out-degree >= 2."""
    return {n for n in self.graph.nodes()
            if self.graph.out_degree(n) >= 2}
Returns all nodes that has an out degree >= 2
def nodes_with_role(rolename):
    """Configure every node that has ``rolename`` in its run list.

    Exits cleanly when no matching node exists.
    """
    matching = [n['name'] for n in
                lib.get_nodes_with_role(rolename, env.chef_environment)]
    if not matching:
        print("No nodes found with role '{0}'".format(rolename))
        sys.exit(0)
    return node(*matching)
Configures a list of nodes that have the given role in their run list
def get_all_parents(self):
    """Return all parents of this company, direct and transitive."""
    ownership = Ownership.objects.filter(child=self)
    parents = Company.objects.filter(parent__in=ownership)
    # Recursively union each direct parent's own parents into the queryset.
    for parent in parents:
        parents = parents | parent.get_all_parents()
    return parents
Return all parents of this company.
def _get_all_run_infos(self):
    """Return RunInfo dicts (those carrying a timestamp) for all runs
    recorded since the last clean-all."""
    info_dir = self._settings.info_dir
    if not os.path.isdir(info_dir):
        return []
    run_dirs = (os.path.join(info_dir, entry)
                for entry in os.listdir(info_dir))
    # Skip symlinks (e.g. the 'latest' pointer) and non-directories.
    infos = [RunInfo(os.path.join(d, 'info')).get_as_dict()
             for d in run_dirs
             if os.path.isdir(d) and not os.path.islink(d)]
    return [info for info in infos if 'timestamp' in info]
Find the RunInfos for all runs since the last clean-all.
def register_service(service):
    """Record ``service`` as the event provider for the calling module.

    Sets ``_SERVICE_NAME`` on the module that called this function so the
    framework can auto-load the provider application when another
    application consumes its events.
    """
    caller = inspect.currentframe().f_back
    module = sys.modules[caller.f_globals['__name__']]
    module._SERVICE_NAME = service
Register the ryu application specified by 'service' as a provider of events defined in the calling module. If an application being loaded consumes events (in the sense of set_ev_cls) provided by the 'service' application, the latter application will be automatically loaded. This mechanism is used to e.g. automatically start ofp_handler if there are applications consuming OFP events.
def _looks_like_numpy_function(func_name, numpy_module_name, node):
    """Return True if ``node`` is the FunctionDef named ``func_name`` whose
    parent module is ``numpy_module_name``.

    :param func_name: name of the function looked for
    :param numpy_module_name: name of the numpy module
    :param node: the current FunctionDef node
    """
    return node.name == func_name and node.parent.name == numpy_module_name
Return True if the current node correspond to the function inside the numpy module in parameters :param node: the current node :type node: FunctionDef :param func_name: name of the function :type func_name: str :param numpy_module_name: name of the numpy module :type numpy_module_name: str :return: True if the current node correspond to the function looked for :rtype: bool
def size_in_bytes(self, offset, timestamp, key, value, headers=None):
    """Actual size of a v0/v1 message once appended to the log.

    ``offset`` and ``timestamp`` are accepted for interface parity but do
    not affect the size in these magic versions.
    """
    assert not headers, "Headers not supported in v0/v1"
    return self.LOG_OVERHEAD + self.record_size(self._magic, key, value)
Actual size of message to add
def validate_callback(callback):
    """Validate a callback's on_loop_start and on_loop_end methods.

    Wraps whichever of the two hooks exist with validate_callback_data,
    and marks the callback so it is only processed once.

    Parameters
    ----------
    callback : Callback object

    Returns
    -------
    The validated callback.
    """
    if not(hasattr(callback, '_validated')) or callback._validated == False:
        assert hasattr(callback, 'on_loop_start') \
            or hasattr(callback, 'on_loop_end'), \
            'callback must have `on_loop_start` or `on_loop_end` method'
        if hasattr(callback, 'on_loop_start'):
            setattr(callback, 'on_loop_start',
                    validate_callback_data(callback.on_loop_start))
        if hasattr(callback, 'on_loop_end'):
            setattr(callback, 'on_loop_end',
                    validate_callback_data(callback.on_loop_end))
        # Remember that validation already ran for this callback.
        setattr(callback, '_validated', True)
    return callback
validates a callback's on_loop_start and on_loop_end methods Parameters ---------- callback : Callback object Returns ------- validated callback
def _looks_like_resource_file(self, name):
    """Return True if the file has a keyword table but no test case table."""
    # __init__ files are never resource files.
    if (re.search(r'__init__.(txt|robot|html|tsv)$', name)):
        return False
    found_keyword_table = False
    if (name.lower().endswith(".robot") or
            name.lower().endswith(".txt") or
            name.lower().endswith(".tsv")):
        with open(name, "r") as f:
            data = f.read()
            # Scan table headers (lines starting with one or more '*').
            for match in re.finditer(r'^\*+\s*(Test Cases?|(?:User )?Keywords?)',
                                     data, re.MULTILINE | re.IGNORECASE):
                if (re.match(r'Test Cases?', match.group(1), re.IGNORECASE)):
                    # Any test case table disqualifies the file immediately.
                    return False
                if (not found_keyword_table and
                        re.match(r'(User )?Keywords?', match.group(1),
                                 re.IGNORECASE)):
                    found_keyword_table = True
    return found_keyword_table
Return true if the file has a keyword table but not a testcase table
def lstltc(string, n, lenvals, array):
    """Index of the largest array element lexically less than ``string``.

    Wraps CSPICE ``lstltc_c``.

    :param string: Upper bound value to search against.
    :param n: Number of elements in array.
    :param lenvals: String length.
    :param array: Array of possible lower bounds.
    :return: index of the last element of array lexically less than string.
    """
    # Marshal Python values into the ctypes forms CSPICE expects.
    string = stypes.stringToCharP(string)
    array = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=n)
    n = ctypes.c_int(n)
    lenvals = ctypes.c_int(lenvals)
    return libspice.lstltc_c(string, n, lenvals, array)
Given a character string and an ordered array of character strings, find the index of the largest array element less than the given string. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstltc_c.html :param string: Upper bound value to search against. :type string: int :param n: Number elements in array. :type n: int :param lenvals: String length. :type lenvals: int :param array: Array of possible lower bounds :type array: list :return: index of the last element of array that is lexically less than string. :rtype: int
def _q_iteration(self, Q, Bpp_solver, Vm, Va, pq):
    """Perform one Q iteration: update Vm at PQ buses and rebuild V."""
    dVm = -Bpp_solver.solve(Q)
    # Only PQ-bus voltage magnitudes are updated in the Q half-iteration.
    Vm[pq] = Vm[pq] + dVm
    V = Vm * exp(1j * Va)
    return V, Vm, Va
Performs a Q iteration, updates Vm.
def elapsed(self, label=None, total=True):
    """Get elapsed time since timer start.

    :param label: timer label; ``None`` selects the default timer.
    :param total: when True, include accumulated time from earlier
        start/stop cycles; otherwise only time since the latest start.
    :returns: elapsed time in seconds (float).
    :raises KeyError: if ``label`` names an unknown timer.
    """
    t = timer()
    if label is None:
        label = self.dfltlbl
        # BUGFIX: this "never started" fallback applies only to the default
        # timer. Previously the check ran unconditionally, returning 0.0
        # for any unknown label and making the KeyError below unreachable.
        if label not in self.t0:
            return 0.0
    if label not in self.t0:
        raise KeyError('Unrecognized timer key %s' % label)
    te = 0.0
    if self.t0[label] is not None:
        te = t - self.t0[label]
    if total:
        te += self.td[label]
    return te
Get elapsed time since timer start. Parameters ---------- label : string, optional (default None) Specify the label of the timer for which the elapsed time is required. If it is ``None``, the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__` is selected. total : bool, optional (default True) If ``True`` return the total elapsed time since the first call of :meth:`start` for the selected timer, otherwise return the elapsed time since the most recent call of :meth:`start` for which there has not been a corresponding call to :meth:`stop`. Returns ------- dlt : float Elapsed time
def setup(self):
    """Create the practice and experiment networks if none exist yet."""
    if self.networks():
        return
    for role, repeats in (("practice", self.practice_repeats),
                          ("experiment", self.experiment_repeats)):
        for _ in range(repeats):
            network = self.create_network()
            network.role = role
            self.session.add(network)
    self.session.commit()
Create the networks if they don't already exist.
def select_and_start_cluster(self, platform):
    """Choose a cluster for ``platform`` and start a worker build on it.

    Retries across candidate clusters; when every cluster has failed, a
    build-info entry carrying the exception is recorded instead of raising.
    """
    clusters = self.reactor_config.get_enabled_clusters_for_platform(platform)
    if not clusters:
        raise UnknownPlatformException('No clusters found for platform {}!'
                                       .format(platform))
    # One retry context per cluster tracks how often it may still fail.
    retry_contexts = {
        cluster.name: ClusterRetryContext(self.max_cluster_fails)
        for cluster in clusters
    }
    while True:
        try:
            possible_cluster_info = self.get_clusters(platform,
                                                      retry_contexts,
                                                      clusters)
        except AllClustersFailedException as ex:
            # Record the total failure as a build-info entry and stop.
            cluster = ClusterInfo(None, platform, None, None)
            build_info = WorkerBuildInfo(build=None,
                                         cluster_info=cluster,
                                         logger=self.log)
            build_info.monitor_exception = repr(ex)
            self.worker_builds.append(build_info)
            return
        for cluster_info in possible_cluster_info:
            ctx = retry_contexts[cluster_info.cluster.name]
            try:
                self.log.info('Attempting to start build for platform %s on cluster %s',
                              platform, cluster_info.cluster.name)
                self.do_worker_build(cluster_info)
                return
            except OsbsException:
                # Defer this cluster and try the next candidate.
                ctx.try_again_later(self.failure_retry_delay)
Choose a cluster and start a build on it
def get(self):
    """Return a JSON-ready dict representation of this HtmlContent.

    :returns: dict with optional "type" and "value" keys, ready for use
        in a request body.
    """
    content = {}
    if self.mime_type is not None:
        content["type"] = self.mime_type
    if self.content is not None:
        content["value"] = self.content
    return content
Get a JSON-ready representation of this HtmlContent. :returns: This HtmlContent, ready for use in a request body. :rtype: dict
def _index_entities(self):
    """Set ``self.entities`` to the key/value pairs shared by all Variables.

    Only entities constant across all rows of all contained Variables are
    kept — e.g. for runs 1-3 of subject '01', only {'subject': '01'}.
    """
    all_ents = pd.DataFrame.from_records(
        [v.entities for v in self.variables.values()])
    # A column is "constant" when it holds exactly one unique value.
    constant = all_ents.apply(lambda x: x.nunique() == 1)
    if constant.empty:
        self.entities = {}
    else:
        keep = all_ents.columns[constant]
        ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
        # Drop entities whose shared value is missing/NaN.
        self.entities = {k: v for k, v in ents.items() if pd.notnull(v)}
Sets current instance's entities based on the existing index. Note: Only entity key/value pairs common to all rows in all contained Variables are returned. E.g., if a Collection contains Variables extracted from runs 1, 2 and 3 from subject '01', the returned dict will be {'subject': '01'}; the runs will be excluded as they vary across the Collection contents.
def load_glove(file):
    """Load GloVe vectors from ``file`` into a dict of numpy arrays.

    Args:
        file (str): path to a GloVe text file.

    Returns:
        dict: word -> numpy vector.
    """
    vectors = {}
    with open(file, encoding="utf8", errors='ignore') as handle:
        for raw in handle:
            parts = raw.split(' ')
            vectors[parts[0]] = np.array([float(v) for v in parts[1:]])
    return vectors
Loads GloVe vectors in numpy array. Args: file (str): a path to a glove file. Return: dict: a dict of numpy arrays.
def clear(self, contours=True, components=True, anchors=True,
          guidelines=True, image=True):
    """Clear the glyph.

    Clears contours, components, anchors, guidelines and the image by
    default; pass False for any part that should be preserved, e.g.
    ``glyph.clear(guidelines=False)``.
    """
    self._clear(contours=contours, components=components,
                anchors=anchors, guidelines=guidelines, image=image)
Clear the glyph. >>> glyph.clear() This clears: - contours - components - anchors - guidelines - image It's possible to turn off the clearing of portions of the glyph with the listed arguments. >>> glyph.clear(guidelines=False)
def _del_module(self, lpBaseOfDll):
    """Remove the module at ``lpBaseOfDll`` from the snapshot.

    @type lpBaseOfDll: int
    @param lpBaseOfDll: Module base address.
    """
    try:
        aModule = self.__moduleDict[lpBaseOfDll]
        del self.__moduleDict[lpBaseOfDll]
    except KeyError:
        # Warn instead of raising: an unknown base address is unexpected
        # but should not abort snapshot maintenance.
        aModule = None
        msg = "Unknown base address %d" % HexDump.address(lpBaseOfDll)
        warnings.warn(msg, RuntimeWarning)
    if aModule:
        aModule.clear()
Private method to remove a module object from the snapshot. @type lpBaseOfDll: int @param lpBaseOfDll: Module base address.
def asDictionary(self):
    """Return the envelope as a dictionary.

    z and m bounds are included only when both ends are present.
    """
    envelope = {
        "xmin": self._xmin,
        "ymin": self._ymin,
        "xmax": self._xmax,
        "ymax": self._ymax,
        "spatialReference": self.spatialReference,
    }
    if self._zmax is not None and self._zmin is not None:
        envelope['zmin'] = self._zmin
        envelope['zmax'] = self._zmax
    if self._mmin is not None and self._mmax is not None:
        envelope['mmax'] = self._mmax
        envelope['mmin'] = self._mmin
    return envelope
returns the envelope as a dictionary
def start_recording(self, file='mingus_dump.wav'):
    """Open ``file`` as a stereo, 16-bit, 44.1 kHz wave file for recording."""
    recorder = wave.open(file, 'wb')
    recorder.setnchannels(2)
    recorder.setsampwidth(2)  # 2 bytes per sample -> 16-bit audio
    recorder.setframerate(44100)
    self.wav = recorder
Initialize a new wave file for recording.
def paginator(context, adjacent_pages=2):
    """Template-tag helper adding pagination context variables.

    To be used in conjunction with the object_list generic view; builds
    first/adjacent/last page links in addition to the variables the
    generic view already provides.
    """
    current_page = context.get('page')
    paginator = context.get('paginator')
    if not paginator:
        return
    pages = paginator.num_pages
    # Window of page numbers centred on the current page, clipped to range.
    current_range = range(current_page - adjacent_pages,
                          current_page + adjacent_pages + 1)
    page_numbers = [n for n in current_range if n > 0 and n <= pages]
    slugtype = ''
    if 'topic_slug' in context:
        page_url = context["topic"].get_short_url()
        slugtype = 'topic'
    elif 'forum_slug' in context:
        page_url = '/forum/%s/' % context["forum_slug"]
        slugtype = 'forum'
    else:
        page_url = context['request'].get_full_path()
    return {
        "is_paginated": context["is_paginated"],
        "page": current_page,
        "pages": pages,
        "page_obj": context['page_obj'],
        "page_numbers": page_numbers,
        "has_next": context["page_obj"].has_next(),
        "has_previous": context["page_obj"].has_previous(),
        "page_url": page_url,
        'slugtype': slugtype,
    }
To be used in conjunction with the object_list generic view. Adds pagination context variables for use in displaying first, adjacent and last page links in addition to those created by the object_list generic view.
def compute_We(self, Eemin=None, Eemax=None):
    """Total energy in electrons between energies Eemin and Eemax.

    Parameters
    ----------
    Eemin : :class:`~astropy.units.Quantity` float, optional
        Minimum electron energy; defaults to ``self.Eemin``.
    Eemax : :class:`~astropy.units.Quantity` float, optional
        Maximum electron energy; defaults to ``self.Eemax``.
    """
    if Eemin is None and Eemax is None:
        # No custom bounds: reuse the precomputed total.
        We = self.We
    else:
        if Eemax is None:
            Eemax = self.Eemax
        if Eemin is None:
            Eemin = self.Eemin
        log10gmin = np.log10(Eemin / mec2).value
        log10gmax = np.log10(Eemax / mec2).value
        # Log-spaced Lorentz-factor grid; density scales with decade span.
        gam = np.logspace(
            log10gmin, log10gmax,
            int(self.nEed * (log10gmax - log10gmin))
        )
        nelec = (
            self.particle_distribution(gam * mec2).to(1 / mec2_unit).value
        )
        We = trapz_loglog(gam * nelec, gam * mec2)
    return We
Total energy in electrons between energies Eemin and Eemax Parameters ---------- Eemin : :class:`~astropy.units.Quantity` float, optional Minimum electron energy for energy content calculation. Eemax : :class:`~astropy.units.Quantity` float, optional Maximum electron energy for energy content calculation.
def getChemicalPotential(self, solution):
    """Call the solver to calculate (and cache) the chemical potential.

    ``solution`` may be a Solution wrapper or a raw solver solution.
    """
    if isinstance(solution, Solution):
        solution = solution.getSolution()
    self.mu = self.solver.chemicalPotential(solution)
    return self.mu
Call solver in order to calculate chemical potential.
def get_bug_report():
    """Collect platform and PyHAL version information for a bug report.

    :return: dict with 'platform' and 'pyhal' sections.
    """
    return {
        'platform': BugReporter.get_platform_info(),
        'pyhal': {
            'version': hal_version.__version__,
            'build': hal_version.__build__,
        },
    }
Generate information for a bug report :return: information for bug report
def create(self, name, redirect_uri=None):
    """Create a new Device tying a User to this Application.

    Args:
        name (str): Human-readable device name (e.g. "Suzanne's iPhone").
        redirect_uri (str, optional): URI to redirect the User to after
            they approve the new Device.

    Returns:
        A tuple ``(device_token, mfa_uri)``: store the token for later
        use in ``client.authenticate_device``; send the User to the
        mfa_uri to approve the Device.
    """
    data = dict(name=name)
    if redirect_uri:
        data['redirect_uri'] = redirect_uri
    auth_request_resource = self.resource.create(data)
    return (auth_request_resource.attributes['metadata']['device_token'],
            auth_request_resource.attributes['mfa_uri'])
Create a new Device object. Devices tie Users and Applications together. For your Application to access and act on behalf of a User, the User must authorize a Device created by your Application. This function will return a `device_token` which you must store and use after the Device is approved in `client.authenticate_device(api_token, device_token)` The second value returned is an `mfa_uri` which is the location the User must visit to approve the new device. After this function completes, you should launch a new browser tab or webview with this value as the location. After the User approves the Device, they will be redirected to the redirect_uri you specify in this call. Args: name (str): Human-readable name for the device (e.g. "Suzanne's iPhone") redirect_uri (str, optional): A URI to which to redirect the User after they approve the new Device. Returns: A tuple of (device_token, mfa_uri)
def add_arrow(self, tipLoc, tail=None, arrow=arrow.default):
    """Queue a straight arrow pointing at ``tipLoc`` (a tuple of ints).

    ``tail`` optionally names the text-box point the arrow starts from,
    as a string of the letters 'l'/'c'/'r' (left/center/right) and
    't'/'m'/'b' (top/middle/bottom), e.g. 'tc' for top-center.
    ``arrow`` selects the arrow style.
    """
    self._arrows.append((tipLoc, tail, arrow))
This method adds a straight arrow that points to @var{TIPLOC}, which is a tuple of integers. @var{TAIL} specifies the starting point of the arrow. It is either None or a string consisting of the following letters: 'l', 'c', 'r', 't', 'm,', and 'b'. Letters 'l', 'c', or 'r' means to start the arrow from the left, center, or right of the text box, respectively. Letters 't', 'm', or 'b' means to start the arrow from the top, middle or bottom of the text box. For example, when @samp{tail = 'tc'} then arrow is drawn from top-center point of the text box. ARROW specifies the style of the arrow. <<arrow>>.
def contains(self, key):
    """Check if the item named ``key`` exists in the bucket.

    Returns True if it exists, False on a 404; re-raises any other
    request error.
    """
    try:
        self._api.objects_get(self._bucket, key)
    except datalab.utils.RequestException as e:
        if e.status == 404:
            return False
        raise e
    except Exception as e:
        raise e
    return True
Checks if the specified item exists. Args: key: the key of the item to lookup. Returns: True if the item exists; False otherwise. Raises: Exception if there was an error requesting information about the item.
def set_spacing(self, new_spacing):
    """Set image spacing.

    Arguments
    ---------
    new_spacing : tuple or list
        Updated spacing for the image; one value per dimension.
    """
    if not isinstance(new_spacing, (tuple, list)):
        raise ValueError('arg must be tuple or list')
    if len(new_spacing) != self.dimension:
        raise ValueError('must give a spacing value for each dimension (%i)' % self.dimension)
    # Dispatch to the pixel-type/dimension-specific native function.
    libfn = utils.get_lib_fn('setSpacing%s'%self._libsuffix)
    libfn(self.pointer, new_spacing)
Set image spacing Arguments --------- new_spacing : tuple or list updated spacing for the image. should have one value for each dimension Returns ------- None
def check_cmake_exists(cmake_command):
    """Exit with an informative message unless CMake is installed."""
    from subprocess import Popen, PIPE
    proc = Popen(
        '{0} --version'.format(cmake_command),
        shell=True,
        stdin=PIPE,
        stdout=PIPE)
    if 'cmake version' not in proc.communicate()[0].decode('UTF-8'):
        sys.stderr.write(' This code is built using CMake\n\n')
        sys.stderr.write(' CMake is not found\n')
        sys.stderr.write(' get CMake at http://www.cmake.org/\n')
        sys.stderr.write(' on many clusters CMake is installed\n')
        sys.stderr.write(' but you have to load it first:\n')
        sys.stderr.write(' $ module load cmake\n')
        sys.exit(1)
Check whether CMake is installed. If not, print informative error message and quits.
async def logs(
    self,
    service_id: str,
    *,
    details: bool = False,
    follow: bool = False,
    stdout: bool = False,
    stderr: bool = False,
    since: int = 0,
    timestamps: bool = False,
    is_tty: bool = False,
    tail: str = "all"
) -> Union[str, AsyncIterator[str]]:
    """Retrieve logs of the given service.

    At least one of ``stdout``/``stderr`` must be True. When ``follow``
    is set the logs are returned as an async stream; ``tail`` limits the
    number of trailing lines ('all' or an integer as string).
    """
    if stdout is False and stderr is False:
        raise TypeError("Need one of stdout or stderr")
    params = {
        "details": details,
        "follow": follow,
        "stdout": stdout,
        "stderr": stderr,
        "since": since,
        "timestamps": timestamps,
        "tail": tail,
    }
    response = await self.docker._query(
        "services/{service_id}/logs".format(service_id=service_id),
        method="GET",
        params=params,
    )
    # Docker multiplexes stdout/stderr unless the service has a TTY.
    return await multiplexed_result(response, follow, is_tty=is_tty)
Retrieve logs of the given service Args: details: show service context and extra details provided to logs follow: return the logs as a stream. stdout: return logs from stdout stderr: return logs from stderr since: return logs since this time, as a UNIX timestamp timestamps: add timestamps to every log line is_tty: the service has a pseudo-TTY allocated tail: only return this number of log lines from the end of the logs, specify as an integer or `all` to output all log lines.
def pad_sentences(sentences, padding_word="</s>"):
    """Pad every sentence to the length of the longest one.

    Returns a new list of padded sentences.
    """
    target = max(len(sentence) for sentence in sentences)
    return [sentence + [padding_word] * (target - len(sentence))
            for sentence in sentences]
Pads all sentences to the same length. The length is defined by the longest sentence. Returns padded sentences.
def exists(self, digest):
    """Return whether a blob with hex ``digest`` exists in the container."""
    client = self.conn.client
    return client.blob_exists(self.container_name, digest)
Check if a blob exists :param digest: Hex digest of the blob :return: Boolean indicating existence of the blob
def _update(self, **kwargs):
    """PUT the given fields to the remote Transifex resource.

    No-op when no fields are given.
    """
    path = self._construct_path_to_item()
    if not kwargs:
        return
    return self._http.put(path, json.dumps(kwargs))
Update a resource in a remote Transifex server.
def get_record_value(request, uid, keyword, default=None):
    """Return the value stored under ``uid`` in the request's ``keyword``
    record list, or ``default`` when absent or falsy."""
    records = request.get(keyword)
    if not records or not isinstance(records, list):
        return default
    return records[0].get(uid, default) or default
Returns the value for the keyword and uid from the request
def abort(status_code, message=None):
    """Raise the SanicException subclass mapped to ``status_code``.

    :param status_code: The HTTP status code to return.
    :param message: The HTTP response body; defaults to the standard
        message for the given status code.
    """
    if message is None:
        message = STATUS_CODES.get(status_code)
        # BUGFIX: unknown status codes used to crash with AttributeError
        # on ``None.decode``; fall back to an empty body instead.
        message = message.decode("utf8") if message is not None else ''
    sanic_exception = _sanic_exceptions.get(status_code, SanicException)
    raise sanic_exception(message=message, status_code=status_code)
Raise an exception based on SanicException. Returns the HTTP response message appropriate for the given status code, unless provided. :param status_code: The HTTP status code to return. :param message: The HTTP response body. Defaults to the messages in response.py for the given status code.
def _prepare_resource_chunks(self, resources, resource_delim=','):
    """Concatenate *resources* into request-sized chunks.

    Some VirusTotal API calls accept several resources at once, so the
    list is sliced every ``self._resources_per_req`` entries and each
    slice is joined with *resource_delim* by ``_prepare_resource_chunk``.

    Args:
        resources: a list of the resources.
        resource_delim: separator string, a comma by default.

    Returns:
        A list of concatenated resource strings.
    """
    chunk_starts = range(0, len(resources), self._resources_per_req)
    chunks = []
    for start in chunk_starts:
        chunks.append(
            self._prepare_resource_chunk(resources, resource_delim, start))
    return chunks
As in some VirusTotal API methods the call can be made for multiple resources at once this method prepares a list of concatenated resources according to the maximum number of resources per requests. Args: resources: a list of the resources. resource_delim: a string used to separate the resources. Default value is a comma. Returns: A list of the concatenated resources.
def row(self, data):
    """Yield one formatted cell for *data* per configured column.

    Callable column specs are invoked with the data; anything else is
    treated as a lookup key path resolved via ``utils.lookup``.
    """
    for spec in self.column_funcs:
        cell = spec(data) if callable(spec) else utils.lookup(data, *spec)
        yield cell
Return a formatted row for the given data.
def add(self, uuid):
    """Fold a key into the HyperLogLog registers.

    Falsy keys are ignored.  Keys that cannot be hashed directly are
    ASCII-encoded (dropping unencodable characters) and retried.
    """
    if not uuid:
        return
    try:
        hashed = hash64(uuid)
    except UnicodeEncodeError:
        hashed = hash64(uuid.encode('ascii', 'ignore'))
    # Low b bits choose the register; the rest feed the rho estimator.
    bucket = hashed & ((1 << self.b) - 1)
    remainder = hashed >> self.b
    self.M[bucket] = max(self.M[bucket],
                         self._get_rho(remainder, self.bitcount_arr))
Adds a key to the HyperLogLog
def update_buttons(self):
    """Update the enable status of the delete and reset buttons.

    Built-in schemes (everything in the "names" option except the
    u'Custom' placeholder) cannot be deleted, only reset.
    """
    current_scheme = self.current_scheme
    scheme_names = self.get_option("names")
    if u'Custom' in scheme_names:
        scheme_names.remove(u'Custom')
    is_custom_scheme = current_scheme not in scheme_names
    self.delete_button.setEnabled(is_custom_scheme)
    self.reset_button.setEnabled(not is_custom_scheme)
Updates the enable status of delete and reset buttons.
def split_arguments(args):
    """Split command-line *args* into (program options, execution command).

    Every ``-``-prefixed token is an option, and the token following an
    option is treated as that option's value; the first token that is
    neither starts the execution command.

    Parameters
    ----------
    args : list
        Command line arguments

    Returns
    -------
    tuple of two lists: (options, command arguments)
    """
    expecting_value = False
    for index, token in enumerate(args[1:], start=1):
        if token.startswith('-'):
            expecting_value = True
        elif expecting_value:
            expecting_value = False
        else:
            return args[:index], args[index:]
    return args, []
Split the specified arguments into two lists. This is used to distinguish the options of the program from the execution command/arguments. Parameters ---------- args : list Command line arguments Returns ------- list : options, arguments options are the optional arguments for the program and arguments are the execution command/arguments
def right(ctx, text, num_chars):
    """Return the last *num_chars* characters of *text*.

    Raises ValueError for a negative count; returns '' for zero.
    """
    char_count = conversions.to_integer(num_chars, ctx)
    if char_count < 0:
        raise ValueError("Number of chars can't be negative")
    if char_count == 0:
        return ''
    return conversions.to_string(text, ctx)[-char_count:]
Returns the last characters in a text string
def get_route(self, route_id):
    """Fetch the route with *route_id* from the API.

    Detail-level when owned by the authenticated athlete, otherwise
    summary-level.

    :param route_id: The ID of the route to fetch.
    :type route_id: int
    :rtype: :class:`stravalib.model.Route`
    """
    raw_route = self.protocol.get('/routes/{id}', id=route_id)
    return model.Route.deserialize(raw_route, bind_client=self)
Gets specified route. Will be detail-level if owned by authenticated user; otherwise summary-level. https://strava.github.io/api/v3/routes/#retreive :param route_id: The ID of route to fetch. :type route_id: int :rtype: :class:`stravalib.model.Route`
def merge_dicts(dict1, dict2, deep_merge=True):
    """Merge *dict2* into *dict1*.

    A deep merge recurses into nested dicts (mutating *dict1* in place)
    and concatenates lists; on a type conflict *dict2*'s value wins.
    A shallow merge returns a new dict with *dict2* overriding *dict1*.
    """
    if not deep_merge:
        merged = dict1.copy()
        merged.update(dict2)
        return merged
    if isinstance(dict1, list) and isinstance(dict2, list):
        return dict1 + dict2
    if not (isinstance(dict1, dict) and isinstance(dict2, dict)):
        return dict2
    for key, value in dict2.items():
        dict1[key] = merge_dicts(dict1[key], value) if key in dict1 else value
    return dict1
Merge dict2 into dict1.
def delete(self):
    """Close the handle and remove the file from disk if it exists."""
    self.close()
    if not self.does_file_exist():
        return
    os.remove(self.path)
Delete the file.
def _overwrite(self, n):
    """Replace the target file with *n* (its name minus a 4-char suffix).

    The previous target, when present, is first backed up with a ``.old``
    suffix.  Progress is reported on stdout.
    """
    target = n[:-4]
    target_name = target.split("/")[-1]
    if os.path.isfile(target):
        shutil.copy2(target, target + ".old")
        print("Old file {0} saved as {1}.old".format(target_name, target_name))
    if os.path.isfile(n):
        shutil.move(n, target)
        print("New file {0} overwrite as {1}".format(
            n.split("/")[-1], target_name))
Overwrite the old file with the new one, keeping a backup of the old file with the suffix .old
def create_d1_dn_subject(common_name_str):
    """Create the DN Subject for a certificate used in a DataONE environment.

    Args:
        common_name_str: str
            The CommonName for the subject, e.g. a DataONE ``node_id`` such
            as ``urn:node:ABCD`` / ``urn:node:mnTestABCD``, or ``localCA``
            for a local CA.

    Fix: the original ignored ``common_name_str`` and hard-coded the CN as
    "ca.ca.com", so every generated subject was identical regardless of the
    argument.  The CN now uses the caller's value.  The remaining location
    attributes are kept for backward compatibility.
    NOTE(review): the docstring says DataONE DNs carry only a CN — confirm
    whether the country/state/locality/organization attributes should be
    dropped entirely.
    """
    return cryptography.x509.Name(
        [
            cryptography.x509.NameAttribute(
                cryptography.x509.oid.NameOID.COUNTRY_NAME, "US"
            ),
            cryptography.x509.NameAttribute(
                cryptography.x509.oid.NameOID.STATE_OR_PROVINCE_NAME, "California"
            ),
            cryptography.x509.NameAttribute(
                cryptography.x509.oid.NameOID.LOCALITY_NAME, "San Francisco"
            ),
            cryptography.x509.NameAttribute(
                cryptography.x509.oid.NameOID.ORGANIZATION_NAME, "Root CA"
            ),
            cryptography.x509.NameAttribute(
                cryptography.x509.oid.NameOID.COMMON_NAME, common_name_str
            ),
        ]
    )
Create the DN Subject for certificate that will be used in a DataONE environment. The DN is formatted into a DataONE subject, which is used in authentication, authorization and event tracking. Args: common_name_str: str DataONE uses simple DNs without physical location information, so only the ``common_name_str`` (``CommonName``) needs to be specified. For Member Node Client Side certificates or CSRs, ``common_name_str`` is the ``node_id``, e.g., ``urn:node:ABCD`` for production, or ``urn:node:mnTestABCD`` for the test environments. For a local CA, something like ``localCA`` may be used. For a locally trusted client side certificate, something like ``localClient`` may be used.
def events(self, *events):
    """Send multiple events to Riemann in a single message.

    :param \\*events: event dictionaries passed to ``create_event``
    :returns: The response message from Riemann
    """
    messages = (self.create_event(event) for event in events)
    return self.send_events(messages)
Sends multiple events in a single message >>> client.events({'service': 'riemann-client', 'state': 'awesome'}) :param \*events: event dictionaries for :py:func:`create_event` :returns: The response message from Riemann
def has_path(self, path, method=None):
    """Report whether the Swagger document defines *path*.

    :param string path: Path name
    :param string method: optional HTTP method that must also be present
    :return: True when the path (and method, if given) exists
    """
    method = self._normalize_method_name(method)
    path_dict = self.get_path(path)
    if path_dict is None:
        return False
    if not method:
        return True
    return method in path_dict
Returns True if this Swagger has the given path and optional method :param string path: Path name :param string method: HTTP method :return: True, if this path/method is present in the document
def set_category(self, category):
    """Set the policy's category element.

    Args:
        category: A Category object (name + id are written) or a plain
            category name string (only the name is written).
    """
    category_element = self.find("general/category")
    category_element.clear()
    name_element = ElementTree.SubElement(category_element, "name")
    if isinstance(category, Category):
        id_element = ElementTree.SubElement(category_element, "id")
        id_element.text = category.id
        name_element.text = category.name
    elif isinstance(category, basestring):
        name_element.text = category
Set the policy's category. Args: category: A category object.
def append_tag(self, field_number, wire_type):
    """Append an encoded tag (field number + wire type) to the stream."""
    packed_tag = wire_format.pack_tag(field_number, wire_type)
    self._stream.append_var_uint32(packed_tag)
Appends a tag containing field number and wire type information.
def delete_operation(
    self, name, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT
):
    """Delete a long-running operation.

    Signals that the client no longer cares about the result; it does
    NOT cancel the operation.

    Args:
        name (str): Resource name of the operation to delete.
        retry: Retry strategy for the RPC; client default if unspecified,
            ``None`` disables retries.
        timeout (float): Per-attempt timeout in seconds; client default
            if unspecified, ``None`` disables the timeout.

    Raises:
        google.api_core.exceptions.MethodNotImplemented: Servers are not
            required to support this method.
        google.api_core.exceptions.GoogleAPICallError: If the RPC fails.
    """
    delete_request = operations_pb2.DeleteOperationRequest(name=name)
    self._delete_operation(delete_request, retry=retry, timeout=timeout)
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. Example: >>> from google.api_core import operations_v1 >>> api = operations_v1.OperationsClient() >>> name = '' >>> api.delete_operation(name) Args: name (str): The name of the operation resource to be deleted. retry (google.api_core.retry.Retry): The retry strategy to use when invoking the RPC. If unspecified, the default retry from the client configuration will be used. If ``None``, then this method will not retry the RPC at all. timeout (float): The amount of time in seconds to wait for the RPC to complete. Note that if ``retry`` is used, this timeout applies to each individual attempt and the overall time it takes for this method to complete may be longer. If unspecified, the default timeout in the client configuration is used. If ``None``, then the RPC method will not time out. Raises: google.api_core.exceptions.MethodNotImplemented: If the server does not support this method. Services are not required to implement this method. google.api_core.exceptions.GoogleAPICallError: If an error occurred while invoking the RPC, the appropriate ``GoogleAPICallError`` subclass will be raised.
async def enable_digital_reporting(self, command):
    """Enable Firmata reporting for one digital pin.

    :param command: {"method": "enable_digital_reporting", "params": [PIN]}
    """
    pin_number = int(command[0])
    await self.core.enable_digital_reporting(pin_number)
Enable Firmata reporting for a digital pin. :param command: {"method": "enable_digital_reporting", "params": [PIN]} :returns: {"method": "digital_message_reply", "params": [PIN, DIGITAL_DATA_VALUE]}
def rename(name):
    """Give the currently developed hotfix a new name.

    Prompts interactively when *name* was not supplied.
    """
    from peltak.extra.gitflow import logic

    new_name = click.prompt('Hotfix name') if name is None else name
    logic.hotfix.rename(new_name)
Give the currently developed hotfix a new name.
def _validate_sub(claims, subject=None):
    """Validate the optional 'sub' (subject) claim.

    The "sub" value is a case-sensitive StringOrURI identifying the
    principal the JWT is about; its use is OPTIONAL.

    Args:
        claims (dict): The claims dictionary to validate.
        subject (str): Expected subject, when the caller requires one.

    Raises:
        JWTClaimsError: If 'sub' is not a string, or does not match the
            expected *subject*.
    """
    if 'sub' not in claims:
        return
    sub_value = claims['sub']
    if not isinstance(sub_value, string_types):
        raise JWTClaimsError('Subject must be a string.')
    if subject is not None and sub_value != subject:
        raise JWTClaimsError('Invalid subject')
Validates that the 'sub' claim is valid. The "sub" (subject) claim identifies the principal that is the subject of the JWT. The claims in a JWT are normally statements about the subject. The subject value MUST either be scoped to be locally unique in the context of the issuer or be globally unique. The processing of this claim is generally application specific. The "sub" value is a case-sensitive string containing a StringOrURI value. Use of this claim is OPTIONAL. Args: claims (dict): The claims dictionary to validate. subject (str): The subject of the token.
def get_days_off(transactions):
    """Return the dates of every 'take day off' transaction."""
    parsed_entries = (_parse_transaction_entry(entry) for entry in transactions)
    return [date for date, action, _ in parsed_entries if action == 'off']
Return the dates for any 'take day off' transactions.
def rebin(self, factor):
    """Rebin the image in place by an integer *factor*.

    The numpy array is first cropped down to the nearest integer multiple
    of *factor* in each dimension, then rebinned; ``self.binfactor`` is
    scaled so later drawing can still use original pixel coordinates.
    Must be called before the PIL image is created.
    """
    # Rebinning must happen before any PIL rendering of the array.
    if self.pilimage != None:
        raise RuntimeError, "Cannot rebin anymore, PIL image already exists !"
    if type(factor) != type(0):
        raise RuntimeError, "Rebin factor must be an integer !"
    # Factors below 1 are silently ignored.
    if factor < 1:
        return
    origshape = np.asarray(self.numpyarray.shape)
    # Largest shape <= origshape that is an integer multiple of factor.
    neededshape = origshape - (origshape % factor)
    if not (origshape == neededshape).all():
        if self.verbose :
            print "Rebinning %ix%i : I have to crop from %s to %s" % (factor, factor, origshape, neededshape)
        self.crop(0, neededshape[0], 0, neededshape[1])
    else:
        if self.verbose :
            print "Rebinning %ix%i : I do not need to crop" % (factor, factor)
    # Delegate the actual downsampling to the module-level rebin() helper.
    self.numpyarray = rebin(self.numpyarray, neededshape/factor)
    # Track cumulative binning so drawing can map original pixel coords.
    self.binfactor = int(self.binfactor * factor)
I robustly rebin your image by a given factor. You simply specify a factor, and I will eventually take care of a crop to bring the image to integer-multiple-of-your-factor dimensions. Note that if you crop your image before, you must directly crop to compatible dimensions ! We update the binfactor, this allows you to draw on the image later, still using the original pixel coordinates. Here we work on the numpy array.
def _exec_cmd(self, args, shell, timeout, stderr):
    """Execute an adb command and return its stdout on success.

    Args:
        args: string or list of strings, program arguments.
        shell: bool, run through the system shell when True.
        timeout: float, seconds to wait before timing out; falsy disables.
        stderr: optional byte stream that receives the command's stderr.

    Returns:
        The command's stdout when the exit code is 0.

    Raises:
        ValueError: timeout value is invalid.
        AdbTimeoutError: the adb command timed out.
        AdbError: the adb command exited non-zero.
    """
    if timeout and timeout <= 0:
        raise ValueError('Timeout is not a positive value: %s' % timeout)
    try:
        (ret, out, err) = utils.run_command(
            args, shell=shell, timeout=timeout)
    except psutil.TimeoutExpired:
        raise AdbTimeoutError(
            cmd=args, timeout=timeout, serial=self.serial)
    if stderr:
        stderr.write(err)
    logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',
                  utils.cli_cmd_to_string(args), out, err, ret)
    if ret != 0:
        raise AdbError(
            cmd=args, stdout=out, stderr=err, ret_code=ret,
            serial=self.serial)
    return out
Executes adb commands. Args: args: string or list of strings, program arguments. See subprocess.Popen() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Popen() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. stderr: a Byte stream, like io.BytesIO, stderr of the command will be written to this object if provided. Returns: The output of the adb command run if exit code is 0. Raises: ValueError: timeout value is invalid. AdbError: The adb command exit code is not 0. AdbTimeoutError: The adb command timed out.
def subscribers(self, date="", page=1, page_size=1000, order_field="email",
                order_direction="asc", include_tracking_information=False):
    """Get the active subscribers in this segment."""
    query = {
        "date": date,
        "page": page,
        "pagesize": page_size,
        "orderfield": order_field,
        "orderdirection": order_direction,
        "includetrackinginformation": include_tracking_information,
    }
    raw_response = self._get(self.uri_for("active"), params=query)
    return json_to_py(raw_response)
Gets the active subscribers in this segment.
def dumps(obj, *args, **kwargs):
    """Serialize *obj* to a JSON string with the typeless SON encoder.

    ``ensure_ascii=False`` keeps non-ASCII text as-is rather than escaping
    it; extra positional/keyword arguments are forwarded to ``json.dumps``.
    """
    return json.dumps(obj, *args, cls=TypelessSONEncoder, ensure_ascii=False, **kwargs)
Typeless dump an object to json string
def add(user_id, resource_policy, admin, inactive, rate_limit):
    """Add a new keypair.

    USER_ID: User ID of a new key pair.
    RESOURCE_POLICY: resource policy for new key pair.
    """
    try:
        user_id = int(user_id)
    except ValueError:
        pass  # non-numeric IDs are passed through unchanged
    with Session() as session:
        try:
            data = session.KeyPair.create(
                user_id,
                is_active=not inactive,
                is_admin=admin,
                resource_policy=resource_policy,
                rate_limit=rate_limit)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if not data['ok']:
            print_fail('KeyPair creation has failed: {0}'.format(data['msg']))
            sys.exit(1)
        keypair = data['keypair']
        print('Access Key: {0}'.format(keypair['access_key']))
        print('Secret Key: {0}'.format(keypair['secret_key']))
Add a new keypair. USER_ID: User ID of a new key pair. RESOURCE_POLICY: resource policy for new key pair.
def _reload_config(self, reload_original_config):
    """Refresh the running config from the live device.

    Args:
        reload_original_config: when ``True`` the current running config
            is saved as the original config (renamed 'original') before
            the reload; when ``False`` the original config is untouched.
    """
    if reload_original_config:
        self.original_config = self.running_config
        self.original_config.set_name('original')
    config_paths = self.running_config.get_paths()
    self.running_config = FortiConfig('running', vdom=self.vdom)
    for config_path in config_paths:
        self.load_config(config_path, empty_candidate=True)
This command will update the running config from the live device. Args: * reload_original_config: * If ``True`` the original config will be loaded with the running config before reloading the\ original config. * If ``False`` the original config will remain untouched.
def get_relative_to_remote(self):
    """Return the commit count relative to the remote branch.

    Negative means behind, positive means ahead, zero means matched.
    """
    status_line = self.git("status", "--short", "-b")[0]
    bracketed = re.findall(r"\[([^\]]+)\]", status_line)
    if not bracketed:
        return 0
    try:
        direction, count = bracketed[-1].split()
        assert(direction in ("ahead", "behind"))
        count = int(count)
        return -count if direction == "behind" else count
    except Exception as e:
        raise ReleaseVCSError(
            ("Problem parsing first line of result of 'git status "
             "--short -b' (%s):\n%s") % (status_line, str(e)))
Return the number of commits we are relative to the remote. Negative is behind, positive in front, zero means we are matched to remote.
def _init():
    """Initialize the furious context and registry.

    NOTE: Do not directly run this method.  Returns None when the context
    is already initialized for the current request hash.
    """
    request_hash = os.environ.get('REQUEST_ID_HASH')
    already_initialized = (
        hasattr(_local_context, '_initialized')
        and _local_context._initialized == request_hash)
    if already_initialized:
        return
    _local_context.registry = []
    _local_context._executing_async_context = None
    _local_context._executing_async = []
    _local_context._initialized = request_hash
    return _local_context
Initialize the furious context and registry. NOTE: Do not directly run this method.
def process_response(self, request, response):
    """Persist the detected country on a cookie.

    Shares configuration with the language cookie as they serve a
    similar purpose.
    """
    if not hasattr(request, 'COUNTRY_CODE'):
        return response
    response.set_cookie(
        key=constants.COUNTRY_COOKIE_NAME,
        value=request.COUNTRY_CODE,
        max_age=settings.LANGUAGE_COOKIE_AGE,
        path=settings.LANGUAGE_COOKIE_PATH,
        domain=settings.LANGUAGE_COOKIE_DOMAIN
    )
    return response
Shares config with the language cookie as they serve a similar purpose
def compute_qkv(query_antecedent,
                memory_antecedent,
                total_key_depth,
                total_value_depth,
                q_filter_width=1,
                kv_filter_width=1,
                q_padding="VALID",
                kv_padding="VALID",
                vars_3d_num_heads=0,
                layer_collection=None):
    """Compute the query, key and value tensors.

    Self-attention is assumed when *memory_antecedent* is None, in which
    case the query antecedent also serves as the memory antecedent.

    Args:
      query_antecedent: a Tensor with shape [batch, length_q, channels]
      memory_antecedent: a Tensor with shape [batch, length_m, channels]
      total_key_depth: an integer
      total_value_depth: an integer
      q_filter_width / kv_filter_width: projection filter widths.
      q_padding / kv_padding: one of "VALID", "SAME" or "LEFT".
      vars_3d_num_heads: optional, for 3d variables.
      layer_collection: only used by the KFAC optimizer.

    Returns:
      q, k, v : [batch, length, depth] tensors
    """
    if memory_antecedent is None:
        memory_antecedent = query_antecedent
    shared_kwargs = dict(
        vars_3d_num_heads=vars_3d_num_heads, layer_collection=layer_collection)
    q = compute_attention_component(
        query_antecedent, total_key_depth, q_filter_width, q_padding, "q",
        **shared_kwargs)
    k = compute_attention_component(
        memory_antecedent, total_key_depth, kv_filter_width, kv_padding, "k",
        **shared_kwargs)
    v = compute_attention_component(
        memory_antecedent, total_value_depth, kv_filter_width, kv_padding, "v",
        **shared_kwargs)
    return q, k, v
Computes query, key and value. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] total_key_depth: an integer total_value_depth: an integer q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. vars_3d_num_heads: an optional (if we want to use 3d variables) layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: q, k, v : [batch, length, depth] tensors
def data(self):
    """Serialize the slice metadata consumed by the JS layer."""
    return {
        'form_data': self.form_data,
        'token': self.token,
        'viz_name': self.viz_type,
        'filter_select_enabled': self.datasource.filter_select_enabled,
    }
This is the data object serialized to the js layer
def log(self, time, message, level=None, attachment=None):
    """Queue a log entry (with optional attachment) for async delivery.

    The attachment is a dict of:
        name: name of attachment
        data: file content
        mime: content type for attachment
    """
    logger.debug("log queued")
    payload = {
        "time": time,
        "message": message,
        "level": level,
        "attachment": attachment,
    }
    self.queue.put_nowait(("log", payload))
Logs a message with attachment. The attachment is a dict of: name: name of attachment data: file content mime: content type for attachment