code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def pull_image(self, image_name, stream=None):
    """Ask Docker to pull the container image with the given name.

    Parameters
    ----------
    image_name : str
        Name of the image to pull.
    stream : samcli.lib.utils.stream_writer.StreamWriter, optional
        Stream writer to output progress to. Defaults to stderr.

    Raises
    ------
    DockerImagePullFailedException
        If the Docker image was not available on the server.
    """
    stream_writer = stream or StreamWriter(sys.stderr)
    try:
        # stream=True/decode=True yields one decoded JSON progress event per chunk
        result_itr = self.docker_client.api.pull(image_name, stream=True, decode=True)
    except docker.errors.APIError as ex:
        LOG.debug("Failed to download image with name %s", image_name)
        raise DockerImagePullFailedException(str(ex))
    stream_writer.write(u"\nFetching {} Docker container image...".format(image_name))
    # One dot per progress event; flush so the user sees live progress.
    for _ in result_itr:
        stream_writer.write(u'.')
        stream_writer.flush()
    stream_writer.write(u"\n")
Ask Docker to pull the container image with given name. Parameters ---------- image_name str Name of the image stream samcli.lib.utils.stream_writer.StreamWriter Optional stream writer to output to. Defaults to stderr Raises ------ DockerImagePullFailedException If the Docker image was not available in the server
def getall(self):
    """Return a dict of all VRFs found in the running-config.

    Keys are VRF names; values are the per-VRF attribute dicts returned
    by ``self.get``.
    """
    pattern = re.compile(r'(?<=^vrf definition\s)(\w+)', re.M)
    return {name: self.get(name) for name in pattern.findall(self.config)}
Returns a dict object of all VRFs in the running-config Returns: A dict object of VRF attributes
def artboards(src_path):
    """Return all artboards from every page of *src_path* as one flat list."""
    return [board
            for page in list_artboards(src_path)
            for board in page.artboards]
Return artboards as a flat list
def getHourTable(date, pos):
    """Return an HourTable object for *date* at geographic position *pos*.

    ``hourTable`` computes the raw planetary-hour table, which is then
    wrapped in an ``HourTable`` for structured access.
    """
    table = hourTable(date, pos)
    return HourTable(table, date)
Returns an HourTable object.
def get_query_str(query: Type[QueryDict], max_length: int = 1024) -> str:
    """Render a query dictionary as newline-separated ``key=value`` pairs.

    Fields named ``'password'`` or ``settings.AXES_PASSWORD_FORM_FIELD``
    are stripped before rendering. Output is truncated to *max_length*
    characters to guard against excessively large payloads.
    """
    sanitized = query.copy()
    for secret_field in ('password', settings.AXES_PASSWORD_FORM_FIELD):
        sanitized.pop(secret_field, None)
    rendered = '\n'.join(f'{key}={value}' for key, value in sanitized.items())
    return rendered[:max_length]
Turns a query dictionary into an easy-to-read list of key-value pairs. If a field is called either ``'password'`` or ``settings.AXES_PASSWORD_FORM_FIELD`` it will be excluded. The length of the output is limited to max_length to avoid a DoS attack via excessively large payloads.
def append_var_uint32(self, value):
    """Append an unsigned 32-bit integer to the internal buffer as a varint.

    Raises ``errors.EncodeError`` if *value* is outside the uint32 range.
    """
    in_range = 0 <= value <= wire_format.UINT32_MAX
    if not in_range:
        raise errors.EncodeError('Value out of range: %d' % value)
    # Varint encoding is width-agnostic, so delegate to the 64-bit writer.
    self.append_var_uint64(value)
Appends an unsigned 32-bit integer to the internal buffer, encoded as a varint.
def getTimeSinceLastVsync(self):
    """Return ``(result, seconds_since_last_vsync, frame_counter)``.

    Calls through the native function table with ctypes out-parameters.
    Per the upstream API docs, when no vsync times are available the call
    reports false and zeroed values.
    """
    fn = self.function_table.getTimeSinceLastVsync
    # Out-parameters filled in by the native call.
    pfSecondsSinceLastVsync = c_float()
    pulFrameCounter = c_uint64()
    result = fn(byref(pfSecondsSinceLastVsync), byref(pulFrameCounter))
    return result, pfSecondsSinceLastVsync.value, pulFrameCounter.value
Returns the number of elapsed seconds since the last recorded vsync event. This will come from a vsync timer event in the timer if possible or from the application-reported time if that is not available. If no vsync times are available the function will return zero for vsync time and frame counter and return false from the method.
def capture_packet(self):
    """Read one packet from the socket and write it to every capture
    handler's logger, applying each handler's pre-write transforms first.

    Also updates per-handler ``reads`` / ``data_read`` counters.
    """
    data = self.socket.recv(self._buffer_size)
    for handler in self.capture_handlers:
        handler['reads'] += 1
        handler['data_read'] += len(data)
        payload = data
        for transform in handler.get('pre_write_transforms', []):
            payload = transform(payload)
        handler['logger'].write(payload)
Write packet data to the logger's log file.
def getAllNodeUids(self):
    """Return a set of unique internal IDs for this tag and all children.

    Combines this node's own ``uid`` with everything reported by
    ``getAllChildNodeUids``.

    @return set<uuid.UUID> - set of uuid objects
    """
    return {self.uid}.union(self.getAllChildNodeUids())
getAllNodeUids - Returns all the unique internal IDs from getAllChildNodeUids, but also includes this tag's uid @return set<uuid.UUID> A set of uuid objects
def is_main_variation(self) -> bool:
    """Return True if this node is its parent's first variation.

    The root node (no parent) is always considered part of the main
    variation.
    """
    parent = self.parent
    if not parent:
        return True
    siblings = parent.variations
    return (not siblings) or siblings[0] == self
Checks if this node is the first variation from the point of view of its parent. The root node is also in the main variation.
def _read_until(infile=sys.stdin, maxchars=20, end=RS):
    """Read single characters from *infile* until a terminator is seen.

    Stops when a character in *end* (a single value or a tuple of
    values) is read, or after *maxchars* characters have been collected.
    Returns the collected characters (terminator excluded) as a string.
    """
    terminators = end if isinstance(end, tuple) else (end,)
    collected = []
    remaining = maxchars
    while remaining:
        ch = infile.read(1)
        if ch in terminators:
            break
        collected.append(ch)
        remaining -= 1
    return ''.join(collected)
Read a terminal response of up to a few characters from stdin.
def remove_callback(self, callback, msg_type=None):
    """Remove a per-message-type or global callback.

    Parameters
    ----------
    callback : fn
        Callback function to remove.
    msg_type : int | iterable | None
        Message type(s) to remove the callback from. ``None`` removes it
        from every registered message type; an iterable removes it from
        each listed type.
    """
    if msg_type is None:
        # No type given: target every registered message type.
        msg_type = self._callbacks.keys()
    cb_keys = self._to_iter(msg_type)
    if cb_keys is not None:
        for msg_type_ in cb_keys:
            try:
                self._callbacks[msg_type_].remove(callback)
            except KeyError:
                # Missing type or callback not registered there: ignore.
                pass
    else:
        # _to_iter signalled a scalar msg_type; remove directly
        # (here a missing callback raises, unlike the iterable path).
        self._callbacks[msg_type].remove(callback)
Remove per message type of global callback. Parameters ---------- callback : fn Callback function msg_type : int | iterable Message type to remove callback from. Default `None` means global callback. Iterable type removes the callback from all the message types.
def stonith_create(stonith_id, stonith_device_type, stonith_device_options=None, cibfile=None):
    """Create a stonith resource via the pcs command.

    stonith_id
        Name for the stonith resource.
    stonith_device_type
        Name of the stonith agent (e.g. fence_eps, fence_xvm).
    stonith_device_options
        Additional options for creating the stonith resource.
    cibfile
        Use this cibfile instead of the live CIB for manipulation.

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.stonith_create stonith_id='eps_fence' stonith_device_type='fence_eps'
    """
    # Thin wrapper: all work is delegated to the generic item_create helper.
    return item_create(item='stonith', item_id=stonith_id, item_type=stonith_device_type, extra_args=stonith_device_options, cibfile=cibfile)
Create a stonith resource via pcs command stonith_id name for the stonith resource stonith_device_type name of the stonith agent fence_eps, fence_xvm f.e. stonith_device_options additional options for creating the stonith resource cibfile use cibfile instead of the live CIB for manipulation CLI Example: .. code-block:: bash salt '*' pcs.stonith_create stonith_id='eps_fence' stonith_device_type='fence_eps' stonith_device_options="['pcmk_host_map=node1.example.org:01;node2.example.org:02', 'ipaddr=myepsdevice.example.org', 'action=reboot', 'power_wait=5', 'verbose=1', 'debug=/var/log/pcsd/eps_fence.log', 'login=hidden', 'passwd=hoonetorg']" cibfile='/tmp/cib_for_stonith.cib'
def v1_tag_associate(request, tags, tag):
    """Associate an HTML element with a tag.

    The association is read as a JSON object from the request body and
    stored in *tags* with the URL-supplied tag name merged in under the
    key ``'tag'``.

    NOTE(review): ``tag.decode`` assumes the tag arrives as bytes
    (Python 2 / raw URL segment) — confirm against the routing layer.
    """
    tag = tag.decode('utf-8').strip()
    # Merge the parsed association with the tag name; the explicit 'tag'
    # key wins over any same-named key in the request body.
    assoc = dict(json.loads(request.body.read()), **{'tag': tag})
    tags.add(assoc)
Associate an HTML element with a tag. The association should be a JSON serialized object on the request body. Here is an example association that should make the object's structure clear: .. code-block:: python { "url": "http://example.com/abc/xyz?foo=bar", "text": "The text the user highlighted.", "stream_id": "{unix timestamp}-{md5 of url}", "hash": "{nilsimsa hash of the HTML}", "timestamp": {unix timestamp}, "xpath": { "start_node": "/html/body/p[1]/text()[2]", "start_idx": 3, "end_node": "/html/body/p[1]/text()[3]", "end_idx": 9 } } All fields are required and cannot be empty or ``null``. The tag of the association should be specified in the URL and is delimited by ``//``.
def getedges(fname, iddfile):
    """Return the loop edges of the IDF file *fname*.

    Parses the IDF using the IDD dictionary *iddfile*, then derives the
    air/plant loop connectivity edges.
    """
    data, commdct, _idd_index = readidf.readdatacommdct(fname, iddfile=iddfile)
    edges = makeairplantloop(data, commdct)
    return edges
return the edges of the idf file fname
def gff3_to_recarray(path, attributes=None, region=None, score_fill=-1,
                     phase_fill=-1, attributes_fill='.', tabix='tabix',
                     dtype=None):
    """Load data from a GFF3 file into a NumPy recarray.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        Columns to extract from the "attributes" field; each becomes an
        extra object column in the output.
    region : string, optional
        Genome region to extract (requires a bgzipped, tabix-indexed file).
    score_fill : int, optional
        Fill value for missing scores.
    phase_fill : int, optional
        Fill value for missing phases.
    attributes_fill : object or list of objects, optional
        Fill value(s) for missing attribute fields.
    tabix : string, optional
        Tabix command.
    dtype : dtype, optional
        Override the default dtype (attribute columns are still appended).

    Returns
    -------
    np.recarray or None
        None when the file yields no records.
    """
    recs = list(iter_gff3(path, attributes=attributes, region=region,
                          score_fill=score_fill, phase_fill=phase_fill,
                          attributes_fill=attributes_fill, tabix=tabix))
    if not recs:
        return None
    if dtype is None:
        dtype = [('seqid', object), ('source', object), ('type', object),
                 ('start', int), ('end', int), ('score', float),
                 ('strand', object), ('phase', int)]
    else:
        # Copy so the attribute columns appended below never mutate a
        # caller-supplied dtype list (bug in the original).
        dtype = list(dtype)
    if attributes:
        for n in attributes:
            dtype.append((n, object))
    a = np.rec.fromrecords(recs, dtype=dtype)
    return a
Load data from a GFF3 into a NumPy recarray. Parameters ---------- path : string Path to input file. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. tabix : string, optional Tabix command. dtype : dtype, optional Override dtype. Returns ------- np.recarray
def update_workspace_attributes(namespace, workspace, attrs):
    """Update or remove workspace attributes via the FireCloud API.

    Args:
        namespace (str): project to which the workspace belongs
        workspace (str): workspace name
        attrs (list(dict)): list of update operations; build them with the
            helper functions _attr_set/_attr_rem/_attr_ladd/_attr_lrem.

    Swagger:
        https://api.firecloud.org/#!/Workspaces/updateAttributes
    """
    headers = _fiss_agent_header({"Content-type": "application/json"})
    uri = "{0}workspaces/{1}/{2}/updateAttributes".format(fcconfig.root_url, namespace, workspace)
    body = json.dumps(attrs)
    # PATCH applies the operation list atomically on the server side.
    return __SESSION.patch(uri, headers=headers, data=body)
Update or remove workspace attributes. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name attrs (list(dict)): List of update operations for workspace attributes. Use the helper dictionary construction functions to create these: _attr_set() : Set/Update attribute _attr_rem() : Remove attribute _attr_ladd() : Add list member to attribute _attr_lrem() : Remove list member from attribute Swagger: https://api.firecloud.org/#!/Workspaces/updateAttributes
def depth(sequence, func=max, _depth=0):
    """Return the nesting depth of a nested sequence.

    Dicts are descended through their values; *func* (default ``max``)
    aggregates the depths of the children. ``_depth`` is internal
    recursion state.
    """
    if isinstance(sequence, dict):
        sequence = list(sequence.values())
    child_depths = [
        depth(child, func=func, _depth=_depth + 1)
        for child in sequence
        if isinstance(child, dict) or util_type.is_listlike(child)
    ]
    return func(child_depths) if child_depths else _depth
Find the nesting depth of a nested sequence
def from_str(cls, string):
    """Create a clause from a string such as ``a+!b`` (a AND NOT b).

    Parameters
    ----------
    string: str
        ``+``-separated literals.

    Returns
    -------
    caspo.core.clause.Clause
        Created object instance.
    """
    literals = [Literal.from_str(token) for token in string.split('+')]
    return cls(literals)
Creates a clause from a given string. Parameters ---------- string: str A string of the form `a+!b` which translates to `a AND NOT b`. Returns ------- caspo.core.clause.Clause Created object instance
def get_automatic_parser(exim_id, infile):
    """Return the default results-file parser for the given instrument
    interface id and results file, or None if no parser is registered.

    If the registered adapter implements IInstrumentAutoImportInterface,
    parser resolution is delegated to the adapter itself. Otherwise the
    parser factory name is looked up in the PARSERS registry (pairs of
    ``(exim_id, factory_name)``) and resolved as an attribute of the
    adapter.
    """
    adapter = getExim(exim_id)
    if IInstrumentAutoImportInterface.providedBy(adapter):
        return adapter.get_automatic_parser(infile)
    # BUG FIX: the original indexed the result of filter(), which on
    # Python 3 is a lazy iterator (not subscriptable) and always truthy;
    # use next() with a default instead.
    parser_name = next((name for eid, name in PARSERS if eid == exim_id), None)
    if not parser_name or not hasattr(adapter, parser_name):
        return None
    return getattr(adapter, parser_name)(infile)
Returns the parser to be used by default for the instrument id interface and results file passed in.
def iter_issues(self, milestone=None, state=None, assignee=None, mentioned=None, labels=None, sort=None, direction=None, since=None, number=-1, etag=None):
    """Iterate over issues on this repository.

    :param milestone: (optional), int id, 'none', or '*'
    :param str state: (optional), 'all', 'open' or 'closed'
    :param str assignee: (optional), 'none', '*', or login name
    :param str mentioned: (optional), user's login name
    :param str labels: (optional), comma-separated label names
    :param sort: (optional), 'created', 'updated' or 'comments'
    :param str direction: (optional), 'asc' or 'desc'
    :param since: (optional), datetime or ISO8601 string lower bound
    :param int number: (optional), max issues to return; -1 for all
    :param str etag: (optional), ETag from a previous request
    :returns: generator of Issue objects
    """
    url = self._build_url('issues', base_url=self._api)
    params = {'assignee': assignee, 'mentioned': mentioned}
    # milestone is only forwarded when it is a sentinel or a numeric id.
    if milestone in ('*', 'none') or isinstance(milestone, int):
        params['milestone'] = milestone
    self._remove_none(params)
    params.update(
        issue_params(None, state, labels, sort, direction, since)
    )
    return self._iter(int(number), url, Issue, params, etag)
Iterate over issues on this repo based upon parameters passed. .. versionchanged:: 0.9.0 The ``state`` parameter now accepts 'all' in addition to 'open' and 'closed'. :param int milestone: (optional), 'none', or '*' :param str state: (optional), accepted values: ('all', 'open', 'closed') :param str assignee: (optional), 'none', '*', or login name :param str mentioned: (optional), user's login name :param str labels: (optional), comma-separated list of labels, e.g. 'bug,ui,@high' :param sort: (optional), accepted values: ('created', 'updated', 'comments') :param str direction: (optional), accepted values: ('asc', 'desc') :param since: (optional), Only issues after this date will be returned. This can be a `datetime` or an `ISO8601` formatted date string, e.g., 2012-05-20T23:10:27Z :type since: datetime or string :param int number: (optional), Number of issues to return. By default all issues are returned :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Issue <github3.issues.issue.Issue>`\ s
def change_directory(self, directory):
    """Change the shell subprocess's current directory to *directory*.

    On Windows, also switch the active drive and run ``clear``; on other
    platforms a form-feed is written to clear the terminal.

    :param directory: target directory path
    :return: None
    """
    self._process.write(('cd %s\n' % directory).encode())
    if sys.platform == 'win32':
        # cd alone does not switch drives on Windows; send the drive letter.
        self._process.write((os.path.splitdrive(directory)[0] + '\r\n').encode())
        self.clear()
    else:
        # Form feed (Ctrl-L) clears the terminal on POSIX shells.
        self._process.write(b'\x0C')
Changes the current directory. Change is made by running a "cd" command followed by a "clear" command. :param directory: :return:
def _sync_enter(self):
    """Synchronous entry helper for async context managers.

    Runs ``__aenter__`` on the owning event loop so callers can use a
    plain ``with`` statement when no loop is running.

    Raises
    ------
    RuntimeError
        If the loop is already running (caller must use ``async with``).
    """
    if hasattr(self, 'loop'):
        loop = self.loop
    else:
        # Fall back to the client's loop when this object has none.
        loop = self._client.loop
    if loop.is_running():
        raise RuntimeError(
            'You must use "async with" if the event loop '
            'is running (i.e. you are inside an "async def")'
        )
    return loop.run_until_complete(self.__aenter__())
Helps to cut boilerplate on async context managers that offer synchronous variants.
def max_or(a, b, c, d, w):
    """Upper bound of OR over two unsigned intervals (Hacker's Delight maxOR).

    :param a: lower bound of the first interval
    :param b: upper bound of the first interval
    :param c: lower bound of the second interval
    :param d: upper bound of the second interval
    :param w: bit width
    :return: maximum possible value of ``x | y`` for x in [a, b], y in [c, d]
    """
    # Scan from the most significant bit: where both upper bounds have a
    # set bit, try lowering one of them to free lower bits for the OR.
    for shift in range(w - 1, -1, -1):
        bit = 1 << shift
        if b & d & bit:
            lowered = (b - bit) | (bit - 1)
            if lowered >= a:
                b = lowered
                break
            lowered = (d - bit) | (bit - 1)
            if lowered >= c:
                d = lowered
                break
    return b | d
Upper bound of result of ORing 2-intervals. :param a: Lower bound of first interval :param b: Upper bound of first interval :param c: Lower bound of second interval :param d: Upper bound of second interval :param w: bit width :return: Upper bound of ORing 2-intervals
def netmask(mask):
    """Return True if *mask* looks like a valid dotted-quad netmask.

    The value must be a string, a valid IPv4 address, and have octets
    in non-increasing order.
    """
    if not isinstance(mask, string_types):
        return False
    octets = mask.split('.')
    if len(octets) != 4:
        return False
    # A netmask's octets never increase left-to-right.
    return ipv4_addr(mask) and octets == sorted(octets, reverse=True)
Returns True if the value passed is a valid netmask, otherwise return False
def complain(error):
    """Raise *error* in develop mode; log it as a warning in release.

    *error* may be an exception instance or a zero-argument callable
    producing one (instantiation is deferred so release builds never pay
    for construction).

    NOTE(review): a callable *error* in release mode is neither raised
    nor logged — confirm this silent drop is intended.
    """
    if callable(error):
        if DEVELOP:
            raise error()
    elif DEVELOP:
        raise error
    else:
        logger.warn_err(error)
Raises in develop; warns in release.
def generate_config(directory):
    """Copy the packaged default ``config.yml`` into *directory* and
    print the destination path."""
    source = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yml")
    destination = os.path.abspath(os.path.join(directory, 'config.yml'))
    shutil.copy(source, destination)
    six.print_("Config file has been generated in", destination)
Generate default config file
def is_in(self, search_list, pair):
    """Return the index of *pair* in *search_list*, or -1 if absent.

    Elements are compared element-wise with ``np.all``; the first match
    wins.
    """
    for position, candidate in enumerate(search_list):
        if np.all(candidate == pair):
            return position
    return -1
If pair is in search_list, return the index. Otherwise return -1
def tabFileNameChanged(self, tab):
    """Refresh window title, tab label/tooltip, working directory and
    reload/encoding actions after the current tab's filename changed.

    Only acts when *tab* is the currently active tab.
    """
    if tab == self.currentTab:
        if tab.fileName:
            # Reset first so setWindowFilePath below takes effect.
            self.setWindowTitle("")
            if globalSettings.windowTitleFullPath:
                self.setWindowTitle(tab.fileName + '[*]')
            self.setWindowFilePath(tab.fileName)
            self.tabWidget.setTabText(self.ind, tab.getBaseName())
            self.tabWidget.setTabToolTip(self.ind, tab.fileName)
            # Follow the file so relative paths resolve next to it.
            QDir.setCurrent(QFileInfo(tab.fileName).dir().path())
        else:
            self.setWindowFilePath('')
            self.setWindowTitle(self.tr('New document') + '[*]')
        # Reload/re-encode only make sense for saved files not being auto-saved.
        canReload = bool(tab.fileName) and not self.autoSaveActive(tab)
        self.actionSetEncoding.setEnabled(canReload)
        self.actionReload.setEnabled(canReload)
Perform all UI state changes that need to be done when the filename of the current tab has changed.
def get_subkey(self, name):
    """Retrieve the subkey with the specified name.

    Raises AttributeError if the named subkey does not exist, for
    consistency with attribute-based access notation.
    """
    subkey = Key(name, self)
    try:
        # Touching .hkey forces the key to actually open; failure here
        # means the registry key does not exist.
        hkey = subkey.hkey
    except WindowsError:
        raise AttributeError("subkey '%s' does not exist" % (name,))
    return subkey
Retrieve the subkey with the specified name. If the named subkey is not found, AttributeError is raised; this is for consistency with the attribute-based access notation.
def setColor(self, color):
    """Set this card's color and its normal/dark escape codes.

    'wild' additionally flags the card as wild. Unknown colors are
    ignored.
    """
    if color == 'wild':
        self.wild = True
        self.color = 'wild'
        self.colorCodeDark = self.colors['dwild']
        self.colorCode = self.colors['wild']
    elif color in ('blue', 'red', 'yellow', 'green'):
        # Dark variants are stored under a 'd'-prefixed key.
        self.color = color
        self.colorCode = self.colors[color]
        self.colorCodeDark = self.colors['d' + color]
Sets Card's color and escape code.
def flatten_spec(spec, prefix, joiner=" :: "):
    """Flatten a nested canonical specification into a single level.

    Nested test names are joined onto *prefix* with *joiner*; the
    leading ``"Test "`` tag is stripped from intermediate keys and
    re-applied once at each leaf.
    """
    has_test_keys = any(key.startswith("Test") for key in spec.keys())
    if not has_test_keys:
        return {"Test " + prefix: spec}
    flattened = {}
    for key, subspec in spec.items():
        flattened.update(flatten_spec(subspec, prefix + joiner + key[5:]))
    return flattened
Flatten a canonical specification with nesting into one without nesting. When building unique names, concatenate the given prefix to the local test name without the "Test " tag.
def All(*validators):
    """Compose *validators* into a single callable.

    The returned validator applies each validator in order, feeding the
    result of one into the next, and returns the final value.
    """
    @wraps(All)
    def composed(value):
        result = value
        for check in validators:
            result = check(result)
        return result
    return composed
Combines all the given validator callables into one, running all the validators in sequence on the given value.
def parse_partial(self, text):
    """Parse the longest possible prefix of *text*.

    Returns
    -------
    tuple
        ``(result_value, remainder_of_text)`` on success.

    Raises
    ------
    TypeError
        If *text* is not a string.
    ParseError
        If parsing fails.
    """
    if not isinstance(text, str):
        # Fixed the grammar of the original message ("Can only parsing string").
        raise TypeError(
            'Can only parse string but got {!r}'.format(text))
    res = self(text, 0)
    if res.status:
        return (res.value, text[res.index:])
    else:
        raise ParseError(res.expected, text, res.index)
Parse the longest possible prefix of a given string. Return a tuple of the result value and the rest of the string. If failed, raise a ParseError.
def FundamentalType(self, _type):
    """Return the ctypes class name for a fundamental type.

    For types ctypes cannot represent natively (None, c_long_double_t,
    c_uint128, c_int128) this enables generation of the wrapper headers
    and returns the bare name; otherwise returns ``ctypes.<name>``.
    """
    log.debug('HERE in FundamentalType for %s %s', _type, _type.name)
    if _type.name in ["None", "c_long_double_t", "c_uint128", "c_int128"]:
        # These need custom wrapper definitions emitted into the output.
        self.enable_fundamental_type_wrappers()
        return _type.name
    return "ctypes.%s" % (_type.name)
Returns the proper ctypes class name for a fundamental type 1) activates generation of appropriate headers for ## int128_t ## c_long_double_t 2) return appropriate name for type
def execute_with_retries(retryable_function, retryable_errors, logger,
                         human_readable_action_name='Action',
                         nonretryable_errors=None):
    """Run *retryable_function* with exponential backoff on failure.

    Errors in *nonretryable_errors* propagate immediately; errors in
    *retryable_errors* are retried up to 10 times with a delay of
    ``2**attempt + jitter`` seconds (about 34 minutes total).
    *human_readable_action_name* customizes the retry log message.
    """
    max_retries = 10
    fatal = tuple(nonretryable_errors) if nonretryable_errors else ()
    attempt = 0
    while True:
        try:
            return retryable_function()
        except fatal:
            raise
        except tuple(retryable_errors) as err:
            attempt += 1
            if attempt > max_retries:
                raise
            delay = 2 ** attempt + random.random()
            logger.info('"%s" failed with error "%s". '
                        'Retry number %s of %s in %s seconds'
                        % (human_readable_action_name, str(err),
                           attempt, max_retries, delay))
            time.sleep(delay)
This attempts to execute "retryable_function" with exponential backoff on delay time. 10 retries adds up to about 34 minutes total delay before the last attempt. "human_readable_action_name" is an option input to customize retry message.
def getPageImageList(self, pno):
    """Return the list of images used on page *pno*.

    Raises ValueError for closed or encrypted documents; returns an
    empty list for non-PDF documents.
    """
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    if self.isPDF:
        # Info-type 2 selects the image list in the native helper.
        return self._getPageInfo(pno, 2)
    return []
Retrieve a list of images used on a page.
def chgroups(name, groups, append=False):
    """Change the groups to which a user belongs.

    name
        Username to modify.
    groups
        Groups to set for the user, as a Python list or comma-separated
        string.
    append : False
        When True, add these groups to the user's existing groups
        instead of replacing them.

    Returns True if the groups were already correct or ``usermod``
    succeeded.

    CLI Example:

    .. code-block:: bash

        salt '*' user.chgroups foo wheel,root True
    """
    if isinstance(groups, six.string_types):
        groups = groups.split(',')
    ugrps = set(list_groups(name))
    if ugrps == set(groups):
        return True
    if append:
        # BUG FIX: ``groups`` is a list here and lists have no .update();
        # merge the requested and existing groups via a set union.
        groups = sorted(set(groups) | ugrps)
    cmd = ['usermod', '-G', ','.join(groups), name]
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
Change the groups to which a user belongs name Username to modify groups List of groups to set for the user. Can be passed as a comma-separated list or a Python list. append : False Set to ``True`` to append these groups to the user's existing list of groups. Otherwise, the specified groups will replace any existing groups for the user. CLI Example: .. code-block:: bash salt '*' user.chgroups foo wheel,root True
def get_cache_key(content, **kwargs):
    """Build a stable cache key from *content* and keyword arguments.

    Keyword arguments are serialized in sorted order, hashed with MD5
    together with *content*, and combined with the configured prefix,
    package version and active language.
    """
    suffix = ''.join(
        '.{0}:{1}'.format(key, kwargs[key]) for key in sorted(kwargs.keys())
    )
    digest_input = '{0}{1}'.format(content, suffix).encode('utf-8', 'ignore')
    digest = md5(digest_input).hexdigest()
    return '{prefix}.{version}.{language}.{cache_key}'.format(
        prefix=settings.ACTIVE_URL_CACHE_PREFIX,
        version=__version__,
        language=get_language(),
        cache_key=digest,
    )
generate cache key
def sync_remote_to_local(force="no"):
    """Replace the local WordPress database with the remote one.

    Exports the remote DB, downloads the dump, imports it locally, then
    removes both temporary files. Prompts for confirmation unless
    ``force`` is ``"yes"``.

    Example: sync_remote_to_local:force=yes
    """
    assert "local_wp_dir" in env, "Missing local_wp_dir in env"
    if force != "yes":
        message = "This will replace your local database with your "\
                  "remote, are you sure [y/n]"
        answer = prompt(message, "y")
        if answer != "y":
            logger.info("Sync stopped")
            return
    init_tasks()
    # Millisecond timestamp keeps concurrent syncs from colliding.
    remote_file = "sync_%s.sql" % int(time.time()*1000)
    remote_path = "/tmp/%s" % remote_file
    with env.cd(paths.get_current_path()):
        env.run("wp db export %s" % remote_path)
    local_wp_dir = env.local_wp_dir
    local_path = "/tmp/%s" % remote_file
    get(remote_path, local_path)
    with lcd(local_wp_dir):
        elocal("wp db import %s" % local_path)
    # Clean up the dump on both ends.
    env.run("rm %s" % remote_path)
    elocal("rm %s" % local_path)
Replace your local db with your remote Example: sync_remote_to_local:force=yes
def _hmm_command(self, input_pipe, pairs_to_run): r element = pairs_to_run.pop() hmmsearch_cmd = self._individual_hmm_command(element[0][0], element[0][1], element[1]) while len(pairs_to_run) > 0: element = pairs_to_run.pop() hmmsearch_cmd = "tee >(%s) | %s" % (self._individual_hmm_command(element[0][0], element[0][1], element[1]), hmmsearch_cmd) hmmsearch_cmd = "%s | %s" % (input_pipe, hmmsearch_cmd) return hmmsearch_cmd
r"""INTERNAL method for getting cmdline for running a batch of HMMs. Parameters ---------- input_pipe: as hmmsearch pairs_to_run: list list with 2 members: (1) list of hmm and output file, (2) number of CPUs to use when searching Returns ------- A string command to be run with bash
def _download_metadata_archive(self):
    """Download the full Project Gutenberg RDF meta-data catalog.

    Streams the remote catalog into a temporary file and yields its
    path; when the generator resumes (context exit), the temporary file
    is deleted.
    """
    # delete=False so the file survives past the with-block for the caller.
    with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
        shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive)
    yield metadata_archive.name
    remove(metadata_archive.name)
Makes a remote call to the Project Gutenberg servers and downloads the entire Project Gutenberg meta-data catalog. The catalog describes the texts on Project Gutenberg in RDF. The function returns a file-pointer to the catalog.
def identify_degenerate_nests(nest_spec):
    """Return positions of degenerate nests in *nest_spec*.

    Parameters
    ----------
    nest_spec : OrderedDict
        Maps nest names to lists of alternative ids.

    Returns
    -------
    list
        Positions (by key order) of nests containing exactly one
        alternative.
    """
    return [
        position
        for position, nest_name in enumerate(nest_spec)
        if len(nest_spec[nest_name]) == 1
    ]
Identify the nests within nest_spec that are degenerate, i.e. those nests with only a single alternative within the nest. Parameters ---------- nest_spec : OrderedDict. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Returns ------- list. Will contain the positions in the list of keys from `nest_spec` that are degenerate.
def _initialize(self):
    """Build the AlexNet TensorFlow graph from pre-loaded (Caffe) weights.

    Creates the input placeholder, the full-network output tensor and a
    tensor for the configured intermediate feature layer, then marks the
    model as initialized.
    """
    self._graph = tf.Graph()
    with self._graph.as_default():
        # NHWC placeholder sized from the model configuration.
        self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels))
        weights = self.build_alexnet_weights()
        self._output_tensor = self.build_alexnet(weights)
        # Second build shares weights but stops at the feature layer.
        self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer)
    self._initialized = True
Open from caffe weights
def bind_path_fallback(self, name, folder):
    """Register *folder* as a fallback for the virtual path *name*.

    *name* must start and end with ``'/'``; otherwise ValueError is
    raised.
    """
    if not name or not name.startswith('/') or not name.endswith('/'):
        raise ValueError(
            "name must start and end with '/': {0}".format(name))
    self._folder_masks.append((name, folder))
Adds a fallback for a given folder relative to `base_path`.
def toggle_sequential_download(self, infohash_list):
    """Toggle sequential download for the supplied torrents.

    :param infohash_list: a single infohash or a list of infohashes.
    """
    payload = self._process_infohash_list(infohash_list)
    return self._post('command/toggleSequentialDownload', data=payload)
Toggle sequential download in supplied torrents. :param infohash_list: Single or list() of infohashes.
def get_knowledge_category_id(self):
    """Return the grade ``Id`` for the knowledge dimension.

    return: (osid.id.Id) - the grade ``Id``
    raise: IllegalState - ``has_knowledge_category()`` is ``false``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Empty string / None both count as "no category" here.
    if not bool(self._my_map['knowledgeCategoryId']):
        raise errors.IllegalState('this Objective has no knowledge_category')
    else:
        return Id(self._my_map['knowledgeCategoryId'])
Gets the grade ``Id`` associated with the knowledge dimension. return: (osid.id.Id) - the grade ``Id`` raise: IllegalState - ``has_knowledge_category()`` is ``false`` *compliance: mandatory -- This method must be implemented.*
async def cleanup(self, app):
    """Close this instance's connections on application shutdown.

    Cancels the pub/sub reader task before closing its connection; the
    final zero-sleep yields control so close/cancel callbacks can run.
    """
    self.conn.close()
    if self.pubsub_conn:
        self.pubsub_reader.cancel()
        self.pubsub_conn.close()
    await asyncio.sleep(0)
Close self connections.
def _reverse_indexer(self):
    """Compute the inverse of a categorical.

    *This is an internal function.*

    Returns
    -------
    dict
        Maps each category to the integer positions (indexer array) at
        which it occurs in ``self.codes``.
    """
    categories = self.categories
    # groupsort_indexer returns positions grouped by code plus per-code counts.
    r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'), categories.size)
    counts = counts.cumsum()
    # Slice the grouped positions into one array per category.
    result = (r[start:end] for start, end in zip(counts, counts[1:]))
    result = dict(zip(categories, result))
    return result
Compute the inverse of a categorical, returning a dict of categories -> indexers. *This is an internal function* Returns ------- dict of categories -> indexers Example ------- In [1]: c = pd.Categorical(list('aabca')) In [2]: c Out[2]: [a, a, b, c, a] Categories (3, object): [a, b, c] In [3]: c.categories Out[3]: Index(['a', 'b', 'c'], dtype='object') In [4]: c.codes Out[4]: array([0, 0, 1, 2, 0], dtype=int8) In [5]: c._reverse_indexer() Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
def acquire(self, signal=True):
    """Lock the account.

    No-op if the constructor argument ``needs_lock`` was set to False.

    :type signal: bool
    :param signal: Whether to emit the acquired_event signal.
    """
    if not self.needs_lock:
        return
    with self.synclock:
        # Spin on the condition: wait() releases synclock while blocked,
        # so another thread's release can wake us.
        while not self.lock.acquire(False):
            self.synclock.wait()
        if signal:
            self.acquired_event(self)
        self.synclock.notify_all()
Locks the account. Method has no effect if the constructor argument `needs_lock` was set to False. :type signal: bool :param signal: Whether to emit the acquired_event signal.
def _get_split_tasks(args, split_fn, file_key, outfile_i=-1):
    """Split input files/arguments for parallel processing.

    ``outfile_i`` gives the position of the output file within each part's
    argument list (default: last item).

    Returns ``(split_args, combine_map, finished, extras)`` where
    split_args are per-part argument lists, combine_map maps part files
    to their final combined output, finished holds samples already done,
    and extras holds samples that need no processing.
    """
    split_args = []
    combine_map = {}
    finished_map = collections.OrderedDict()
    extras = []
    for data in args:
        out_final, out_parts = split_fn(data)
        for parts in out_parts:
            # deepish_copy so each part can mutate its own sample dict.
            split_args.append([utils.deepish_copy(data)] + list(parts))
        for part_file in [x[outfile_i] for x in out_parts]:
            combine_map[part_file] = out_final
        if len(out_parts) == 0:
            if out_final is not None:
                # Already-complete output: record once; duplicates go to extras.
                if out_final not in finished_map:
                    data[file_key] = out_final
                    finished_map[out_final] = [data]
                else:
                    extras.append([data])
            else:
                extras.append([data])
    return split_args, combine_map, list(finished_map.values()), extras
Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list.
def find_inherited_key_completions(rootpath, root_env):
    """Return completion entries for keys inherited from parent tuples.

    Evaluates the context tuple at *rootpath*; when it is a
    CompositeTuple, returns completions for every key contributed by
    tuples other than the rightmost one. Otherwise returns an empty dict.
    """
    tup = inflate_context_tuple(rootpath, root_env)
    if isinstance(tup, runtime.CompositeTuple):
        # All keys except those from the rightmost (most local) tuple.
        keys = set(k for t in tup.tuples[:-1] for k in t.keys())
        return {n: get_completion(tup, n) for n in keys}
    return {}
Return completion keys from INHERITED tuples. Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple, then enumerate the keys that are NOT in the rightmost tuple.
def getitem_in(obj, name):
    """Resolve a period-delimited key path against nested mappings.

    @obj: (#dict) mapping to search
    @name: (#str) ``.``-separated keys, e.g. ``'foo.bar.baz'``
    Returns the value at the end of the path.
    """
    current = obj
    for key in name.split('.'):
        current = current[key]
    return current
Finds a key in @obj via a period-delimited string @name. @obj: (#dict) @name: (#str) |.|-separated keys to search @obj in .. obj = {'foo': {'bar': {'baz': True}}} getitem_in(obj, 'foo.bar.baz') .. |True|
def share(self, auth, resource, options=None, defer=False):
    """Generate a share code for the given resource.

    Args:
        auth: <cik>
        resource: identifier of the resource to share.
        options: dictionary of share options (defaults to empty).
        defer: whether to defer the RPC call.
    """
    # Default to None instead of {} to avoid the mutable-default-argument
    # pitfall (one dict shared across all calls).
    if options is None:
        options = {}
    return self._call('share', auth, [resource, options], defer)
Generates a share code for the given resource. Args: auth: <cik> resource: The identifier of the resource. options: Dictonary of options.
def check_version(ctx, param, value):
    """Click callback: schedule a PyPI version check for renku.

    Skipped during resilient parsing (shell completion) and for the
    'run' subcommand; otherwise the check runs when the context closes.
    """
    if ctx.resilient_parsing:
        return
    if not value and ctx.invoked_subcommand != 'run':
        # Defer the network check until command completion.
        ctx.call_on_close(_check_version)
Check for latest version of renku on PyPI.
def list_roles():
    """Print every available role with its description in aligned columns."""
    for role in lib.get_roles():
        # get_margin pads the name column so descriptions line up.
        margin_left = lib.get_margin(len(role['fullname']))
        print("{0}{1}{2}".format(
            role['fullname'], margin_left,
            role.get('description', '(no description)')))
Show a list of all available roles
def remove(self):
    """Remove this file from the filesystem; missing files are ignored."""
    # Imported locally so the fs dependency is only needed when removing.
    from fs.errors import ResourceNotFoundError
    try:
        self._fs.remove(self.file_name)
    except ResourceNotFoundError:
        # Already gone: treat removal as idempotent.
        pass
Removes file from filesystem.
def fundamental_frequency(s, FS):
    """Estimate the fundamental frequency of signal *s*.

    Parameters
    ----------
    s : ndarray
        Input signal.
    FS : int
        Sampling frequency.

    Returns
    -------
    f0
        Frequency of the lowest significant spectral peak above 0.5 Hz,
        or 0 when no peak is found.
    """
    # Remove DC component before the FFT.
    s = s - mean(s)
    f, fs = plotfft(s, FS, doplot=False)
    # Keep the positive half of the spectrum, dropping the DC bin.
    fs = fs[1:int(len(fs) / 2)]
    f = f[1:int(len(f) / 2)]
    # First bin above 0.5 Hz; assumes `find` returns matching indices —
    # NOTE(review): confirm bigPeaks returns a plain list so bp==[] is safe.
    cond = find(f > 0.5)[0]
    bp = bigPeaks(fs[cond:], 0)
    if bp == []:
        f0 = 0
    else:
        # Shift peak indices back to absolute bin positions.
        bp = bp + cond
        f0 = f[min(bp)]
    return f0
Compute fundamental frequency along the specified axes. Parameters ---------- s: ndarray input from which fundamental frequency is computed. FS: int sampling frequency Returns ------- f0: int its integer multiple best explain the content of the signal spectrum.
def get_ip(request, real_ip_only=False, right_most_proxy=False):
    """Return the client's best-matched IP address, or None.

    Scans request.META headers in precedence order, preferring public
    addresses; private/loopback addresses are only returned (best-match)
    when ``real_ip_only`` is False.

    @deprecated - Do not edit; will be removed in 3.0.
    """
    best_matched_ip = None
    warnings.warn('get_ip is deprecated and will be removed in 3.0.', DeprecationWarning)
    for key in defs.IPWARE_META_PRECEDENCE_ORDER:
        # Try both underscore and dash header spellings.
        value = request.META.get(key, request.META.get(key.replace('_', '-'), '')).strip()
        if value is not None and value != '':
            ips = [ip.strip().lower() for ip in value.split(',')]
            if right_most_proxy and len(ips) > 1:
                ips = reversed(ips)
            for ip_str in ips:
                if ip_str and is_valid_ip(ip_str):
                    # First public IP wins outright.
                    if not ip_str.startswith(NON_PUBLIC_IP_PREFIX):
                        return ip_str
                    if not real_ip_only:
                        loopback = defs.IPWARE_LOOPBACK_PREFIX
                        if best_matched_ip is None:
                            best_matched_ip = ip_str
                        elif best_matched_ip.startswith(loopback) and not ip_str.startswith(loopback):
                            # Prefer any non-loopback private IP over loopback.
                            best_matched_ip = ip_str
    return best_matched_ip
Returns client's best-matched ip-address, or None @deprecated - Do not edit
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
    """Search for series belonging to a release id.

    Parameters
    ----------
    release_id : int
        Release id, e.g. 151.
    limit : int, optional
        Maximum results; 0 fetches all results.
    order_by : str, optional
        Ordering criterion (e.g. 'series_id', 'title', 'popularity').
    sort_order : str, optional
        'asc' or 'desc'.
    filter : tuple, optional
        ``(filter_variable, filter_value)`` pair.

    Returns
    -------
    DataFrame
        Information about the matching series.

    Raises
    ------
    ValueError
        If no series exists for the release id.
    """
    url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
    info = self.__get_search_results(url, limit, order_by, sort_order, filter)
    if info is None:
        raise ValueError('No series exists for release id: ' + str(release_id))
    return info
Search for series that belongs to a release id. Returns information about matching series in a DataFrame. Parameters ---------- release_id : int release id, e.g., 151 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series
def __set_values(self, values):
    """Set cell-range values from an iterable of row iterables.

    Each cell is normalized through ``_clean_value`` before the whole
    rectangular array is pushed to the backend in one call.
    """
    array = tuple(tuple(self._clean_value(col) for col in row) for row in values)
    self._get_target().setDataArray(array)
Sets values in this cell range from an iterable of iterables.
def __prepare_namespaces(self):
    """Build the XML namespace map used when processing the document.

    Starts from placeholder URNs, merges every tree root's own nsmap,
    drops the unusable default (None) namespace, and registers the
    Genshi and py3o prefixes.
    """
    self.namespaces = dict(
        text="urn:text",
        draw="urn:draw",
        table="urn:table",
        office="urn:office",
        xlink="urn:xlink",
        svg="urn:svg",
        manifest="urn:manifest",
    )
    for tree_root in self.tree_roots:
        self.namespaces.update(tree_root.nsmap)
    # lxml maps the default namespace to key None, which XPath cannot use.
    self.namespaces.pop(None, None)
    self.namespaces['py'] = GENSHI_URI
    self.namespaces['py3o'] = PY3O_URI
create proper namespaces for our document
def _write_sample_config(run_folder, ldetails):
    """Write a bcbio-nextgen YAML configuration for a sequencing run.

    The file is named after *run_folder* and contains the prepared,
    sorted sample details plus the flowcell name and date parsed from
    the folder name. Returns the path of the written file.
    """
    out_file = os.path.join(run_folder, "%s.yaml" % os.path.basename(run_folder))
    with open(out_file, "w") as out_handle:
        fc_name, fc_date = flowcell.parse_dirname(run_folder)
        # Stable ordering keeps the generated YAML diff-friendly.
        out = {"details": sorted([_prepare_sample(x, run_folder) for x in ldetails],
                                 key=operator.itemgetter("name", "description")),
               "fc_name": fc_name, "fc_date": fc_date}
        yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return out_file
Generate a bcbio-nextgen YAML configuration file for processing a sample.
def negotiate_encoding(self):
    """Probe whether the FTP server handles UTF-8 encoded filenames.

    Sends FEAT and, if " UTF-8" is advertised, switches the filename
    encoding to utf-8 (see RFC 2640). Permission errors are ignored.
    """
    try:
        features = self.url_connection.sendcmd("FEAT")
    except ftplib.error_perm as msg:
        # Servers may refuse FEAT entirely; that's fine, keep the default.
        log.debug(LOG_CHECK, "Ignoring error when getting FTP features: %s" % msg)
        pass
    else:
        log.debug(LOG_CHECK, "FTP features %s", features)
        if " UTF-8" in features.splitlines():
            self.filename_encoding = "utf-8"
Check if server can handle UTF-8 encoded filenames. See also RFC 2640.
def open_external_file(self, fname):
    """Open an externally-provided file in Spyder (Editor or variable
    explorer), resolving relative paths against the startup directory."""
    fname = encoding.to_unicode_from_fs(fname)
    if osp.isfile(fname):
        self.open_file(fname, external=True)
    elif osp.isfile(osp.join(CWD, fname)):
        # Relative path: retry against the directory Spyder started from.
        self.open_file(osp.join(CWD, fname), external=True)
Open external files that can be handled either by the Editor or the variable explorer inside Spyder.
def clear_cycle_mrkrs(self, test=False):
    """Remove all cycle markers.

    Parameters
    ----------
    test : bool
        If True, skip the confirmation dialog (used by automated tests).
    """
    if not test:
        msgBox = QMessageBox(QMessageBox.Question, 'Clear Cycle Markers',
                             'Are you sure you want to remove all cycle '
                             'markers for this rater?')
        msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        # NOTE(review): the destructive choice is the default button here —
        # confirm that is intended.
        msgBox.setDefaultButton(QMessageBox.Yes)
        response = msgBox.exec_()
        if response == QMessageBox.No:
            return
    self.annot.clear_cycles()
    # Refresh the overview trace and its annotation overlay.
    self.parent.overview.display()
    self.parent.overview.display_annotations()
def parameters_to_datetime(self, p):
    """Given a dictionary of parameters, extract the ranged task parameter
    value and return it truncated to midnight of that day."""
    value = p[self._param_name]
    return datetime(value.year, value.month, value.day)
def _cp_embeds_into(cp1, cp2):
    """Check that any state in ComplexPattern2 is matched in ComplexPattern1.

    Returns False for missing patterns or when cp2 has more than one
    monomer pattern (only the single-monomer case is supported).
    """
    if cp1 is None or cp2 is None:
        return False
    cp1 = as_complex_pattern(cp1)
    cp2 = as_complex_pattern(cp2)
    if len(cp2.monomer_patterns) != 1:
        return False
    target = cp2.monomer_patterns[0]
    return any(_mp_embeds_into(mp, target) for mp in cp1.monomer_patterns)
def cleanup(self):
    """Forcefully delete objects from memory

    In an ideal world, this shouldn't be necessary. Garbage
    collection guarantees that anything without reference
    is automatically removed.

    However, because this application is designed to be run
    multiple times from the same interpreter process, extra
    case must be taken to ensure there are no memory leaks.

    Explicitly deleting objects shines a light on where objects
    may still be referenced in the form of an error. No errors
    means this was uneccesary, but that's ok.
    """
    # NOTE(review): ``del instance`` / ``del plugin`` only unbind the loop
    # variable on each iteration; they do not remove the objects from
    # ``self.context`` / ``self.plugins`` and cannot raise for lingering
    # references. Confirm whether clearing those containers was the intent.
    for instance in self.context:
        del(instance)
    for plugin in self.plugins:
        del(plugin)
def main(filename):
    """Creates a PDF by embedding the first page from the given image and
    writes some text to it.

    @param[in] filename
        The source filename of the image to embed.
    """
    family = 'arial'
    font = Font(family, bold=True)
    if not font:
        raise RuntimeError('No font found for %r' % family)
    with Document('output.pdf') as document:
        with document.Page() as ctx:
            with Image(filename) as embed:
                # Size the page context to the embedded image.
                ctx.box = embed.box
                ctx.embed(embed)
                ctx.add(Text('Hello World', font, size=14, x=100, y=60))
def zlines(f=None, sep="\0", osep=None, size=8192):
    """File iterator that uses alternative line terminators.

    Reads *f* (default: stdin) in chunks of *size* characters and yields
    records terminated by *sep*, each re-terminated with *osep* (which
    defaults to *sep*). A trailing unterminated record is yielded as-is.
    """
    if f is None:
        f = sys.stdin
    if osep is None:
        osep = sep
    pending = ""
    while True:
        chunk = f.read(size)
        if not chunk:
            break
        parts = (pending + chunk).split(sep)
        # The last piece may be an incomplete record; carry it over.
        pending = parts.pop()
        for part in parts:
            yield part + osep
    if pending:
        yield pending
def get_order(self, order_id):
    """Lookup an order based on the order id returned from one of the
    order functions.

    Parameters
    ----------
    order_id : str
        The unique identifier for the order.

    Returns
    -------
    order : Order
        The order object, or None when the id is unknown.
    """
    orders = self.blotter.orders
    if order_id not in orders:
        return None
    return orders[order_id].to_api_obj()
def get_primary_domain(self):
    """Returns the primary domain of the tenant, or None when no domain
    is flagged as primary."""
    try:
        return self.domains.get(is_primary=True)
    except get_tenant_domain_model().DoesNotExist:
        return None
def shift_select(self, first_element, last_element):
    """Clicks a web element and shift clicks another web element,
    selecting the range between the two.

    :param first_element: WebElement instance
    :param last_element: WebElement instance
    :return: None
    """
    self.click(first_element)
    self.shift_click(last_element)
def get_anchor_contents(markup):
    """Given HTML markup, return a list of the inner html (first content
    node, stringified) of each anchor tag.

    Anchors with no content at all (e.g. ``<a></a>``) are skipped instead
    of raising IndexError, which the previous implementation did.
    """
    soup = BeautifulSoup(markup, 'lxml')
    return ['%s' % link.contents[0]
            for link in soup.find_all('a')
            if link.contents]
def _get_dispatches(filter_kwargs):
    """Simplified version. Not distributed friendly.

    Returns matching Dispatch rows (related message prefetched),
    newest message first.
    """
    queryset = Dispatch.objects.prefetch_related('message')
    queryset = queryset.filter(**filter_kwargs)
    queryset = queryset.order_by('-message__time_created')
    return list(queryset)
def get_value_prob(self, attr_name, value):
    """Returns the value probability of the given attribute at this node,
    or None when the attribute was never observed here."""
    totals = self._attr_value_count_totals
    if attr_name not in totals:
        return None
    count = self._attr_value_counts[attr_name][value]
    return count / float(totals[attr_name])
def add(self, filename):
    """Try to add a file.

    The episode number is parsed from the file's basename with
    ``self.regexp``; files that do not match are silently ignored.
    """
    match = self.regexp.search(os.path.basename(filename))
    if match is None:
        return
    episode = int(match.group('ep'))
    self.by_episode[episode].add(filename)
def create(self, re='brunel-py-ex-*.gdf', index=True):
    """Create db from list of gdf file glob

    Parameters
    ----------
    re : str
        File glob to load.
    index : bool
        Create index on neurons for speed.

    Returns
    -------
    None

    See also
    --------
    sqlite3.connect.cursor, sqlite3.connect
    """
    # NOTE(review): the parameter name ``re`` shadows the stdlib ``re``
    # module inside this method.
    self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)')
    tic = now()
    for f in glob.glob(re):
        print(f)
        # Retry the whole file until the inserts succeed.
        # NOTE(review): the bare ``except`` swallows *every* error
        # (including KeyboardInterrupt) and retries forever; a persistent
        # failure makes this loop spin indefinitely. Consider catching a
        # specific sqlite3 error and bounding the retries.
        while True:
            try:
                for data in self._blockread(f):
                    self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data)
                    self.conn.commit()
            except:
                continue
            break
    toc = now()
    if self.debug:
        print('Inserts took %g seconds.' % (toc-tic))
    if index:
        tic = now()
        self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)')
        toc = now()
        if self.debug:
            print('Indexed db in %g seconds.' % (toc-tic))
def _get_price(self, package):
    """Returns valid price for ordering a dedicated host.

    A price is considered valid when it is not restricted to a
    location group.

    :raises SoftLayer.SoftLayerError: when no unrestricted price exists.
    """
    for entry in package['prices']:
        if entry.get('locationGroupId'):
            continue
        return entry['id']
    raise SoftLayer.SoftLayerError("Could not find valid price")
def get_ancestors(self):
    """Returns a list of ancestors of the node. Ordered from the earliest.

    :return: node's ancestors, ordered from most recent
    :rtype: list(FenwickNode)
    """
    ancestors = []
    current = self.parent
    while current is not None:
        ancestors.append(current)
        current = current.parent
    return ancestors
def use_comparative_hierarchy_view(self):
    """Pass through to provider HierarchyLookupSession.use_comparative_hierarchy_view"""
    self._hierarchy_view = COMPARATIVE
    # Propagate the view setting to every provider session; sessions that
    # do not implement the method are skipped via the AttributeError catch.
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_hierarchy_view()
        except AttributeError:
            pass
def fill_main_goids(go2obj, goids):
    """Ensure main GO IDs are included in go2obj.

    For every alternate GO ID in *goids*, register its term under the
    term's main ID as well, unless that ID is already present.
    """
    for alt_id in goids:
        term = go2obj[alt_id]
        main_id = term.id
        if alt_id != main_id and main_id not in go2obj:
            go2obj[main_id] = term
def remove_bucket(self, bucket_name):
    """Remove a bucket.

    :param bucket_name: Bucket to remove
    """
    # Validate the name before any network call (presumably raises on an
    # invalid name — verify against is_valid_bucket_name's definition).
    is_valid_bucket_name(bucket_name)
    self._url_open('DELETE', bucket_name=bucket_name)
    # Drop the cached region entry for the now-deleted bucket.
    self._delete_bucket_region(bucket_name)
def annotation_path(cls, project, incident, annotation):
    """Return a fully-qualified annotation string.

    Expands ``projects/{project}/incidents/{incident}/annotations/{annotation}``
    with the given identifiers.
    """
    return google.api_core.path_template.expand(
        "projects/{project}/incidents/{incident}/annotations/{annotation}",
        project=project,
        incident=incident,
        annotation=annotation,
    )
def getlines(self, bufnr=None):
    """Get all lines of a buffer as a list.

    Args:
        bufnr (Optional[int]): A Vim buffer number, current if ``None``.

    Returns:
        List[str]
    """
    if bufnr:
        buf = self._vim.buffers[bufnr]
    else:
        buf = self._vim.current.buffer
    # Slice to return a plain list copy rather than the buffer object.
    return buf[:]
def get_time_now(self):
    """Return an HTML ``div`` with a creation time stamp (seconds
    precision) and the current user name."""
    import datetime
    import getpass
    username = getpass.getuser()
    # Seconds precision; identical to str(now()) with microseconds dropped.
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    return ('<div class="date">Created on ' + timestamp
            + ' by ' + username + '</div>')
def update_firewall_policy(self, firewall_policy, body=None):
    """Updates a firewall policy.

    :param firewall_policy: policy identifier interpolated into
        ``self.firewall_policy_path``.
    :param body: optional request body forwarded to ``self.put``.
    """
    path = self.firewall_policy_path % (firewall_policy,)
    return self.put(path, body=body)
def release():
    """Check release before upload to PyPI.

    Builds the egg, unzips the sdist, builds a second sdist from it and
    diffs the two archive listings; any difference fails the check.
    NOTE(review): uses Python 2 ``print`` statements — this task must run
    under Python 2.
    """
    sh("paver bdist_egg")
    print
    print "~" * 78
    print "TESTING SOURCE BUILD"
    # 'uniq -c' lines whose count is not exactly 2 mark files present in
    # only one of the two archives.
    sh(
        "{ cd dist/ && unzip -q %s-%s.zip && cd %s-%s/"
        " && /usr/bin/python setup.py sdist >/dev/null"
        " && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }"
        " | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then"
        " echo '^^^ Difference in file lists! ^^^'; false;"
        " else true; fi; } 2>&1" % tuple([project["name"], version] * 4)
    )
    # Clean up the unpacked sdist tree.
    path("dist/%s-%s" % (project["name"], version)).rmtree()
    print "~" * 78
    print
    print "Created", " ".join([str(i) for i in path("dist").listdir()])
    print "Use 'paver sdist bdist_egg upload' to upload to PyPI"
    print "Use 'paver dist_docs' to prepare an API documentation upload"
def get_journal_abstracts(self, refresh=True):
    """Return a list of ScopusAbstract objects using ScopusSearch,
    keeping only those whose aggregation type is 'Journal'."""
    journal_abstracts = []
    for abstract in self.get_abstracts(refresh=refresh):
        if abstract.aggregationType == 'Journal':
            journal_abstracts.append(abstract)
    return journal_abstracts
def filter_queryset(self, request, queryset, view):
    """Filter the queryset, applying the ordering.

    The `ordering_param` can be overwritten here. In DRF, the
    ordering_param is 'ordering', but we support changing it to allow the
    viewset to control the parameter.
    """
    # Let the view dictate which query parameter carries the sort spec.
    self.ordering_param = view.SORT
    ordering = self.get_ordering(request, queryset, view)
    if not ordering:
        return queryset
    return queryset.order_by(*ordering)
def list(self, params=None, headers=None):
    """List creditor bank accounts.

    Returns a [cursor-paginated](#api-usage-cursor-pagination) list of your
    creditor bank accounts.

    Args:
        params (dict, optional): Query string parameters.

    Returns:
        CreditorBankAccount
    """
    response = self._perform_request(
        'GET', '/creditor_bank_accounts', params, headers,
        retry_failures=True)
    return self._resource_for(response)
def generic_visit(self, node):
    """Other nodes are not known and range value neither.

    Falls back to the parent class traversal, then records an
    unknown range for *node*.
    """
    super(RangeValues, self).generic_visit(node)
    return self.add(node, UNKNOWN_RANGE)
def get_addresses_from_input_file(input_file_name):
    """Read addresses from input file into list of tuples.

    Only files with 'address' and 'zipcode' headers (case-insensitive,
    any column order) are supported.

    :param input_file_name: path to a CSV input file.
    :return: list of (address, zipcode) tuples.
    :raises Exception: when the file is empty or the required headers
        are missing.
    """
    mode = 'r'
    if sys.version_info[0] < 3:
        # The csv module wants binary mode on Python 2.
        mode = 'rb'
    with io.open(input_file_name, mode) as input_file:
        reader = csv.reader(input_file, delimiter=',', quotechar='"')
        addresses = list(map(tuple, reader))
        if len(addresses) == 0:
            raise Exception('No addresses found in input file')
        header_columns = list(column.lower() for column in addresses.pop(0))
        try:
            address_index = header_columns.index('address')
            zipcode_index = header_columns.index('zipcode')
        except ValueError:
            # Previously raised a message-less Exception, which made the
            # failure mode impossible to diagnose from the traceback.
            raise Exception(
                "Input file must contain 'address' and 'zipcode' columns")
        return list((row[address_index], row[zipcode_index]) for row in addresses)
def get_system_properties(server=None):
    """Get system properties as a name -> value dict.

    Returns an empty dict when the server reports no (truthy)
    system properties.
    """
    data = _api_get('system-properties', server)
    elements = data['extraProperties']['systemProperties']
    # any() mirrors the original behavior: a list of only-falsy entries
    # yields an empty result.
    if any(elements):
        return {element['name']: element['value'] for element in elements}
    return {}
def multiget(self, pairs, **params):
    """Fetches many keys in parallel via threads.

    :param pairs: list of bucket_type/bucket/key tuple triples
    :type pairs: list
    :param params: additional request flags, e.g. r, pr
    :type params: dict
    :rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
        :class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
        bucket_type, bucket, key, and the exception raised on fetch
    """
    if self._multiget_pool:
        # Reuse the client's shared worker pool when one is configured;
        # note this overrides any 'pool' the caller passed in params.
        params['pool'] = self._multiget_pool
    return riak.client.multi.multiget(self, pairs, **params)
def bundle(self, ref, capture_exceptions=False):
    """Return a bundle built on a dataset, with the given vid or id reference.

    Resolution order: *ref* may already be a Dataset; otherwise it is
    looked up as a dataset reference, and finally as a partition
    reference whose bundle's dataset is used.

    :param ref: a Dataset, or a dataset/partition reference.
    :param capture_exceptions: stored on the returned Bundle.
    :raises NotFoundError: when no dataset can be resolved for *ref*.
    """
    from ..orm.exc import NotFoundError
    if isinstance(ref, Dataset):
        ds = ref
    else:
        # First try: resolve ref directly as a dataset.
        try:
            ds = self._db.dataset(ref)
        except NotFoundError:
            ds = None
    if not ds:
        # Second try: resolve ref as a partition and use its dataset.
        try:
            p = self.partition(ref)
            ds = p._bundle.dataset
        except NotFoundError:
            ds = None
    if not ds:
        raise NotFoundError('Failed to find dataset for ref: {}'.format(ref))
    b = Bundle(ds, self)
    b.capture_exceptions = capture_exceptions
    return b
async def get_wallets(self, *args, **kwargs):
    """Get users wallets by uid

    Accepts:
        - uid [integer] (users id)
        - address [string] (used to resolve uid when uid is missing)
        - coinid [string]

    Returns a dict:
        {"wallets": [{"address", "uid", "amount_active", "amount_frozen"}, ...]}
        or an error payload from error_400 / get_uid_by_address.
    """
    logging.debug("\n [+] -- Get wallets debugging.")
    # When called with a serialized payload, the real kwargs live in the
    # JSON-encoded "message" field.
    if kwargs.get("message"):
        kwargs = json.loads(kwargs.get("message"))
    logging.debug(kwargs)
    uid = kwargs.get("uid",0)
    address = kwargs.get("address")
    coinid = kwargs.get("coinid")
    # Strip the test-network suffix from the coin id.
    # NOTE(review): the bare except silently ignores a missing/non-string
    # coinid; an explicit None-check would be clearer.
    try:
        coinid = coinid.replace("TEST", "")
    except:
        pass
    try:
        uid = int(uid)
    except:
        return await self.error_400("User id must be integer. ")
    # Fall back to resolving the uid from the wallet address.
    if not uid and address:
        uid = await self.get_uid_by_address(address=address, coinid=coinid)
        # A dict result is presumably an error payload; pass it through.
        if isinstance(uid, dict):
            return uid
    wallets = [i async for i in self.collect_wallets(uid)]
    return {"wallets":wallets}
def get_extra_kwargs(self):
    """Return a dictionary mapping field names to a dictionary of
    additional keyword arguments.

    Fields listed in ``Meta.read_only_fields`` get ``read_only=True``
    merged into their kwargs. The dictionaries declared on ``Meta`` are
    copied before modification: the previous implementation mutated
    ``Meta.extra_kwargs`` (a class attribute) in place, so repeated calls
    leaked ``read_only`` flags into the shared declaration.
    """
    # Shallow-copy the mapping so Meta.extra_kwargs itself is untouched.
    extra_kwargs = dict(getattr(self.Meta, 'extra_kwargs', {}))
    read_only_fields = getattr(self.Meta, 'read_only_fields', None)
    if read_only_fields is not None:
        for field_name in read_only_fields:
            # Copy the per-field dict too, so declared kwargs stay pristine.
            kwargs = dict(extra_kwargs.get(field_name, {}))
            kwargs['read_only'] = True
            extra_kwargs[field_name] = kwargs
    return extra_kwargs
def update(self, scopes=None, add_scopes=None, rm_scopes=None, note='', note_url=''):
    """Update this authorization.

    :param list scopes: (optional), replaces the authorization scopes with these
    :param list add_scopes: (optional), scopes to be added
    :param list rm_scopes: (optional), scopes to be removed
    :param str note: (optional), new note about authorization
    :param str note_url: (optional), new note URL about this authorization
    :returns: bool
    """
    # Mutable default arguments ([]) replaced with None; the falsy checks
    # below keep the original behavior for all callers.
    success = False
    json = None
    if scopes:
        json = self._json(self._post(self._api, data={'scopes': scopes}), 200)
    if add_scopes:
        json = self._json(self._post(self._api, data={'add_scopes': add_scopes}), 200)
    if rm_scopes:
        json = self._json(self._post(self._api, data={'remove_scopes': rm_scopes}), 200)
    if note or note_url:
        json = self._json(self._post(self._api, data={'note': note, 'note_url': note_url}), 200)
    # Only the last successful response is applied; success reflects
    # whether any request produced a truthy JSON payload.
    if json:
        self._update_(json)
        success = True
    return success