code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def read_csv(fname):
values = defaultdict(list)
with open(fname) as f:
reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
values[k].append(v)
npvalues = {k: np.array(values[k]) for k in values.keys()}
for k in npvalues.keys():
for ... | Read a csv file into a DataAccessObject
:param fname: filename |
def tags_newer(self, versions_file, majors):
highest = versions_file.highest_version_major(majors)
all = self.tags_get()
newer = _newer_tags_get(highest, all)
if len(newer) == 0:
raise RuntimeError("No new tags found.")
return newer | Checks this git repo tags for newer versions.
@param versions_file: a common.VersionsFile instance to
check against.
@param majors: a list of major branches to check. E.g. ['6', '7']
@raise RuntimeError: no newer tags were found.
@raise MissingMajorException: A new version fr... |
def open(self, title):
try:
properties = finditem(
lambda x: x['name'] == title,
self.list_spreadsheet_files()
)
properties['title'] = properties['name']
return Spreadsheet(self, properties)
except StopIteration:
... | Opens a spreadsheet.
:param title: A title of a spreadsheet.
:type title: str
:returns: a :class:`~gspread.models.Spreadsheet` instance.
If there's more than one spreadsheet with same title the first one
will be opened.
:raises gspread.SpreadsheetNotFound: if no sprea... |
def __x_product_aux (property_sets, seen_features):
assert is_iterable_typed(property_sets, property_set.PropertySet)
assert isinstance(seen_features, set)
if not property_sets:
return ([], set())
properties = property_sets[0].all()
these_features = set()
for p in property_sets[0].non_fr... | Returns non-conflicting combinations of property sets.
property_sets is a list of PropertySet instances. seen_features is a set of Property
instances.
Returns a tuple of:
- list of lists of Property instances, such that within each list, no two Property instance
have the same feature, and no Prope... |
def appname(path=None):
if path is None:
path = sys.argv[0]
name = os.path.basename(os.path.splitext(path)[0])
if name == 'mod_wsgi':
name = 'nvn_web'
return name | Return a useful application name based on the program argument.
A special case maps 'mod_wsgi' to a more appropriate name so
web applications show up as our own. |
def enu2ecef(e1: float, n1: float, u1: float,
lat0: float, lon0: float, h0: float,
ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]:
x0, y0, z0 = geodetic2ecef(lat0, lon0, h0, ell, deg=deg)
dx, dy, dz = enu2uvw(e1, n1, u1, lat0, lon0, deg=deg)
return x0 + dx, ... | ENU to ECEF
Parameters
----------
e1 : float or numpy.ndarray of float
target east ENU coordinate (meters)
n1 : float or numpy.ndarray of float
target north ENU coordinate (meters)
u1 : float or numpy.ndarray of float
target up ENU coordinate (meters)
lat0 : float
... |
def get_maybe_base_expanded_node_name(self, node_name, run_key, device_name):
device_name = tf.compat.as_str(device_name)
if run_key not in self._run_key_to_original_graphs:
raise ValueError('Unknown run_key: %s' % run_key)
if device_name not in self._run_key_to_original_graphs[run_key]:
raise V... | Obtain possibly base-expanded node name.
Base-expansion is the transformation of a node name which happens to be the
name scope of other nodes in the same graph. For example, if two nodes,
called 'a/b' and 'a/b/read' in a graph, the name of the first node will
be base-expanded to 'a/b/(b)'.
This m... |
def iris(display=False):
d = sklearn.datasets.load_iris()
df = pd.DataFrame(data=d.data, columns=d.feature_names)
if display:
return df, [d.target_names[v] for v in d.target]
else:
return df, d.target | Return the classic iris data in a nice package. |
def GetTopLevel(self, file_object):
try:
top_level_object = biplist.readPlist(file_object)
except (biplist.InvalidPlistException,
biplist.NotBinaryPlistException) as exception:
raise errors.UnableToParseFile(
'Unable to parse plist with error: {0!s}'.format(exception))
retu... | Returns the deserialized content of a plist as a dictionary object.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
Returns:
dict[str, object]: contents of the plist.
Raises:
UnableToParseFile: when the file cannot be parsed. |
def polyline(self, arr):
for i in range(0, len(arr) - 1):
self.line(arr[i][0], arr[i][1], arr[i + 1][0], arr[i + 1][1]) | Draw a set of lines |
def application_name(self):
if self._application_name is None and self.units:
self._application_name = self.units[0].unit_name.split('/')[0]
return self._application_name | The name of the remote application for this relation, or ``None``.
This is equivalent to::
relation.units[0].unit_name.split('/')[0] |
def visit_FunctionDef(self, node):
assert self.current_function is None
self.current_function = node
self.naming = dict()
self.in_cond = False
self.generic_visit(node)
self.current_function = None | Initialize variable for the current function to add edges from calls.
We compute variable to call dependencies and add edges when returns
are reach. |
def get_version(*args):
contents = get_contents(*args)
metadata = dict(re.findall('__([a-z]+)__ = [\'"]([^\'"]+)', contents))
return metadata['version'] | Extract the version number from a Python module. |
def unfinished_objects(self):
mask = self._end_isnull
if self._rbound is not None:
mask = mask | (self._end > self._rbound)
oids = set(self[mask]._oid.tolist())
return self[self._oid.apply(lambda oid: oid in oids)] | Leaves only versions of those objects that has some version with
`_end == None` or with `_end > right cutoff`. |
def get_subdomain_info(fqn, db_path=None, atlasdb_path=None, zonefiles_dir=None, check_pending=False, include_did=False):
opts = get_blockstack_opts()
if not is_subdomains_enabled(opts):
log.warn("Subdomain support is disabled")
return None
if db_path is None:
db_path = opts['subdoma... | Static method for getting the state of a subdomain, given its fully-qualified name.
Return the subdomain record on success.
Return None if not found. |
def enable_caching(self):
"Enable the cache of this object."
self.caching_enabled = True
for c in self.values():
c.enable_cacher() | Enable the cache of this object. |
def meld(*values):
values = [x for x in values if x is not None]
if not values:
return None
result = repeated(*values)
if isrepeating(result):
return result
return getvalue(result) | Return the repeated value, or the first value if there's only one.
This is a convenience function, equivalent to calling
getvalue(repeated(x)) to get x.
This function skips over instances of None in values (None is not allowed
in repeated variables).
Examples:
meld("foo", "bar") # => List... |
def wc_wrap(text, length):
line_words = []
line_len = 0
words = re.split(r"\s+", text.strip())
for word in words:
word_len = wcswidth(word)
if line_words and line_len + word_len > length:
line = " ".join(line_words)
if line_len <= length:
yield lin... | Wrap text to given length, breaking on whitespace and taking into account
character width.
Meant for use on a single line or paragraph. Will destroy spacing between
words and paragraphs and any indentation. |
def convert(input_file_name, **kwargs):
delimiter = kwargs["delimiter"] or ","
quotechar = kwargs["quotechar"] or "|"
if six.PY2:
delimiter = delimiter.encode("utf-8")
quotechar = quotechar.encode("utf-8")
with open(input_file_name, "rb") as input_file:
reader = csv.reader(input_... | Convert CSV file to HTML table |
def is_hex_string(string):
pattern = re.compile(r'[A-Fa-f0-9]+')
if isinstance(string, six.binary_type):
string = str(string)
return pattern.match(string) is not None | Check if the string is only composed of hex characters. |
def make_env(env_type, real_env, sim_env_kwargs):
return {
"real": lambda: real_env.new_like(
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames(
**sim_env_kwargs
),
}[env_ty... | Factory function for envs. |
def expr(s):
prog = re.compile('\{([^}]+)\}')
def repl(matchobj):
return "rec['%s']" % matchobj.group(1)
return eval("lambda rec: " + prog.sub(repl, s)) | Construct a function operating on a table record.
The expression string is converted into a lambda function by prepending
the string with ``'lambda rec: '``, then replacing anything enclosed in
curly braces (e.g., ``"{foo}"``) with a lookup on the record (e.g.,
``"rec['foo']"``), then finally calling :... |
def _api_group_for_type(cls):
_groups = {
(u"v1beta1", u"Deployment"): u"extensions",
(u"v1beta1", u"DeploymentList"): u"extensions",
(u"v1beta1", u"ReplicaSet"): u"extensions",
(u"v1beta1", u"ReplicaSetList"): u"extensions",
}
key = (
cls.apiVersion,
cls.__na... | Determine which Kubernetes API group a particular PClass is likely to
belong with.
This is basically nonsense. The question being asked is wrong. An
abstraction has failed somewhere. Fixing that will get rid of the need
for this. |
def _get_best_indexes(logits, n_best_size):
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes | Get the n-best logits from a list. |
def apply_published_filter(self, queryset, operation, value):
if operation not in ["after", "before"]:
raise ValueError()
return queryset.filter(Published(**{operation: value})) | Add the appropriate Published filter to a given elasticsearch query.
:param queryset: The DJES queryset object to be filtered.
:param operation: The type of filter (before/after).
:param value: The date or datetime value being applied to the filter. |
def get_corpus_path(name: str) -> [str, None]:
db = TinyDB(corpus_db_path())
temp = Query()
if len(db.search(temp.name == name)) > 0:
path = get_full_data_path(db.search(temp.name == name)[0]["file"])
db.close()
if not os.path.exists(path):
download(name)
return p... | Get corpus path
:param string name: corpus name |
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
is_fw_virt = self.is_device_virtual()
ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
fw_constants.RESULT_FW_CREATE_INIT)
if not ret:
LOG.error("Prepare Fab... | Prepares the Fabric and configures the device.
This routine calls the fabric class to prepare the fabric when
a firewall is created. It also calls the device manager to
configure the device. It updates the database with the final
result. |
def decode(symbol_string, checksum=False, strict=False):
symbol_string = normalize(symbol_string, strict=strict)
if checksum:
symbol_string, check_symbol = symbol_string[:-1], symbol_string[-1]
number = 0
for symbol in symbol_string:
number = number * base + decode_symbols[symbol]
if... | Decode an encoded symbol string.
If checksum is set to True, the string is assumed to have a
trailing check symbol which will be validated. If the
checksum validation fails, a ValueError is raised.
If strict is set to True, a ValueError is raised if the
normalization step requires changes to the s... |
def format_content(content):
paragraphs = parse_html(content)
first = True
for paragraph in paragraphs:
if not first:
yield ""
for line in paragraph:
yield line
first = False | Given a Status contents in HTML, converts it into lines of plain text.
Returns a generator yielding lines of content. |
def get_filters_params(self, params=None):
if not params:
params = self.params
lookup_params = params.copy()
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params | Returns all params except IGNORED_PARAMS |
def _simulate_installation_of(to_install, package_set):
installed = set()
for inst_req in to_install:
dist = make_abstract_dist(inst_req).dist()
name = canonicalize_name(dist.key)
package_set[name] = PackageDetails(dist.version, dist.requires())
installed.add(name)
return ins... | Computes the version of packages after installing to_install. |
def grant_local_roles_for(brain_or_object, roles, user=None):
user_id = get_user_id(user)
obj = api.get_object(brain_or_object)
if isinstance(roles, basestring):
roles = [roles]
obj.manage_addLocalRoles(user_id, roles)
return get_local_roles_for(brain_or_object) | Grant local roles for the object
Code extracted from `IRoleManager.manage_addLocalRoles`
:param brain_or_object: Catalog brain or object
:param user: A user ID, user object or None (for the current user)
:param roles: The local roles to grant for the current user |
def setupTxns(self, key, force: bool = False):
import data
dataDir = os.path.dirname(data.__file__)
allEnvs = {
"local": Environment("pool_transactions_local",
"domain_transactions_local"),
"test": Environment("pool_transactions_sandbox",
... | Create base transactions
:param key: ledger
:param force: replace existing transaction files |
def get_assembly_mapping_data(self, source_assembly, target_assembly):
return self._load_assembly_mapping_data(
self._get_path_assembly_mapping_data(source_assembly, target_assembly)
) | Get assembly mapping data.
Parameters
----------
source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap from
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap to
Returns
-------
dict
dict of json a... |
def lineage(self):
indexes = six.moves.range(1, len(self.parts))
return {FieldPath(*self.parts[:index]) for index in indexes} | Return field paths for all parents.
Returns: Set[:class:`FieldPath`] |
def operation(self, other, function, **kwargs):
result = TimeSeries(**kwargs)
if isinstance(other, TimeSeries):
for time, value in self:
result[time] = function(value, other[time])
for time, value in other:
result[time] = function(self[time], value... | Calculate "elementwise" operation either between this TimeSeries
and another one, i.e.
operation(t) = function(self(t), other(t))
or between this timeseries and a constant:
operation(t) = function(self(t), other)
If it's another time series, the measurement times in the
... |
def update_preferences_by_category(self, category, communication_channel_id, notification_preferences_frequency):
path = {}
data = {}
params = {}
path["communication_channel_id"] = communication_channel_id
path["category"] = category
data["notification_preferences[f... | Update preferences by category.
Change the preferences for multiple notifications based on the category for a single communication channel |
def get_layout():
tica_msm = TemplateDir(
'tica',
[
'tica/tica.py',
'tica/tica-plot.py',
'tica/tica-sample-coordinate.py',
'tica/tica-sample-coordinate-plot.py',
],
[
TemplateDir(
'cluster',
[... | Specify a hierarchy of our templates. |
def _build_dependent_model_list(self, obj_schema):
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
els... | Helper function to build the list of models the given object schema is referencing. |
def save_loop(self):
last_hash = hash(repr(self.hosts))
while self.running:
eventlet.sleep(self.save_interval)
next_hash = hash(repr(self.hosts))
if next_hash != last_hash:
self.save()
last_hash = next_hash | Saves the state if it has changed. |
async def subscribe(self, topic):
if self.socket_type not in {SUB, XSUB}:
raise AssertionError(
"A %s socket cannot subscribe." % self.socket_type.decode(),
)
self._subscriptions.append(topic)
tasks = [
asyncio.ensure_future(
pe... | Subscribe the socket to the specified topic.
:param topic: The topic to subscribe to. |
def host_info_getter(func, name=None):
name = name or func.__name__
host_info_gatherers[name] = func
return func | The decorated function is added to the process of collecting the host_info.
This just adds the decorated function to the global
``sacred.host_info.host_info_gatherers`` dictionary.
The functions from that dictionary are used when collecting the host info
using :py:func:`~sacred.host_info.get_host_info`... |
def install_python(name, version=None, install_args=None, override_args=False):
return install(name,
version=version,
source='python',
install_args=install_args,
override_args=override_args) | Instructs Chocolatey to install a package via Python's easy_install.
name
The name of the package to be installed. Only accepts a single argument.
version
Install a specific version of the package. Defaults to latest version
available.
install_args
A list of install argume... |
def is_user_profile_valid(user_profile):
if not user_profile:
return False
if not type(user_profile) is dict:
return False
if UserProfile.USER_ID_KEY not in user_profile:
return False
if UserProfile.EXPERIMENT_BUCKET_MAP_KEY not in user_profile:
return False
experiment_bucket_map = user_profil... | Determine if provided user profile is valid or not.
Args:
user_profile: User's profile which needs to be validated.
Returns:
Boolean depending upon whether profile is valid or not. |
def convert_entrez_to_uniprot(self, entrez):
server = "http://www.uniprot.org/uniprot/?query=%22GENEID+{0}%22&format=xml".format(entrez)
r = requests.get(server, headers={"Content-Type": "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
response = r.text
... | Convert Entrez Id to Uniprot Id |
def stem_word(self, word):
if self.is_plural(word):
return self.stem_plural_word(word)
else:
return self.stem_singular_word(word) | Stem a word to its common stem form. |
def get_all_attribute_value(
self, tag_name, attribute, format_value=True, **attribute_filter
):
tags = self.find_tags(tag_name, **attribute_filter)
for tag in tags:
value = tag.get(attribute) or tag.get(self._ns(attribute))
if value is not None:
if fo... | Yields all the attribute values in xml files which match with the tag name and the specific attribute
:param str tag_name: specify the tag name
:param str attribute: specify the attribute
:param bool format_value: specify if the value needs to be formatted with packagename |
def _at_least_x_are_true(a, b, x):
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x) | At least `x` of `a` and `b` `Tensors` are true. |
def _apply_post_render_hooks(self, data, obj, fmt):
hooks = self.post_render_hooks.get(fmt,[])
for hook in hooks:
try:
data = hook(data, obj)
except Exception as e:
self.param.warning("The post_render_hook %r could not "
... | Apply the post-render hooks to the data. |
def update(self, **kwargs):
svg_changed = False
for prop in kwargs:
if prop == "drawing_id":
pass
elif getattr(self, prop) != kwargs[prop]:
if prop == "svg":
svg_changed = True
setattr(self, prop, kwargs[prop])
... | Update the drawing
:param kwargs: Drawing properties |
def _auto(direction, name, value, source='auto', convert_to_human=True):
if direction not in ['to', 'from']:
return value
props = property_data_zpool()
if source == 'zfs':
props = property_data_zfs()
elif source == 'auto':
props.update(property_data_zfs())
value_type = props[... | Internal magic for from_auto and to_auto |
def _list_subnets_by_identifier(self, identifier):
identifier = identifier.split('/', 1)[0]
results = self.list_subnets(identifier=identifier, mask='id')
return [result['id'] for result in results] | Returns a list of IDs of the subnet matching the identifier.
:param string identifier: The identifier to look up
:returns: List of matching IDs |
def gfonts_repo_structure(fonts):
from fontbakery.utils import get_absolute_path
abspath = get_absolute_path(fonts[0])
return abspath.split(os.path.sep)[-3] in ["ufl", "ofl", "apache"] | The family at the given font path
follows the files and directory structure
typical of a font project hosted on
the Google Fonts repo on GitHub ? |
def __Post(self, path, request, body, headers):
return synchronized_request.SynchronizedRequest(self,
request,
self._global_endpoint_manager,
se... | Azure Cosmos 'POST' http request.
:params str url:
:params str path:
:params (str, unicode, dict) body:
:params dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict) |
def findspan(self, *words):
for span in self.select(AbstractSpanAnnotation,None,True):
if tuple(span.wrefs()) == words:
return span
raise NoSuchAnnotation | Returns the span element which spans over the specified words or morphemes.
See also:
:meth:`Word.findspans` |
def new_result(self, job, update_model=True):
if not job.exception is None:
self.logger.warning("job {} failed with exception\n{}".format(job.id, job.exception)) | registers finished runs
Every time a run has finished, this function should be called
to register it with the result logger. If overwritten, make
sure to call this method from the base class to ensure proper
logging.
Parameters
----------
job: instance of hpbandster.distributed.dispatcher.Job
contai... |
def __load_pst(self):
if self.pst_arg is None:
return None
if isinstance(self.pst_arg, Pst):
self.__pst = self.pst_arg
return self.pst
else:
try:
self.log("loading pst: " + str(self.pst_arg))
self.__pst = Pst(self.ps... | private method set the pst attribute |
def request_output(self, table, outtype):
job_types = ["CSV", "DataSet", "FITS", "VOTable"]
assert outtype in job_types
params = {"tableName": table, "type": outtype}
r = self._send_request("SubmitExtractJob", params=params)
job_id = int(self._parse_single(r.text, "long"))
... | Request the output for a given table.
## Arguments
* `table` (str): The name of the table to export.
* `outtype` (str): The type of output. Must be one of:
CSV - Comma Seperated Values
DataSet - XML DataSet
FITS - Flexible Image Transfer System (FITS ... |
def network_size(value, options=None, version=None):
ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
if not ipaddr_filter_out:
return
if not isinstance(value, (list, tuple, types.GeneratorType)):
return _network_size(ipaddr_filter_out[0])
return [
_net... | Get the size of a network. |
def check_aggregate(self, variable, components=None, exclude_on_fail=False,
multiplier=1, **kwargs):
df_components = self.aggregate(variable, components)
if df_components is None:
return
rows = self._apply_filters(variable=variable)
df_variable, df_com... | Check whether a timeseries matches the aggregation of its components
Parameters
----------
variable: str
variable to be checked for matching aggregation of sub-categories
components: list of str, default None
list of variables, defaults to all sub-categories of `... |
def alter_and_get(self, function):
check_not_none(function, "function can't be None")
return self._encode_invoke(atomic_reference_alter_and_get_codec, function=self._to_data(function)) | Alters the currently stored reference by applying a function on it and gets the result.
:param function: (Function), A stateful serializable object which represents the Function defined on
server side.
This object must have a serializable Function counter part registered on server side ... |
def v4_int_to_packed(address):
if address > _BaseV4._ALL_ONES:
raise ValueError('Address too large for IPv4')
return Bytes(struct.pack('!I', address)) | The binary representation of this address.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The binary representation of this address.
Raises:
ValueError: If the integer is too large to be an IPv4 IP
address. |
def submit(self,
workflow_uuid='',
experiment='',
image='',
cmd='',
prettified_cmd='',
workflow_workspace='',
job_name='',
cvmfs_mounts='false'):
job_spec = {
'experiment': experim... | Submit a job to RJC API.
:param name: Name of the job.
:param experiment: Experiment the job belongs to.
:param image: Identifier of the Docker image which will run the job.
:param cmd: String which represents the command to execute. It can be
modified by the workflow engine... |
def drawdown_recov(self, return_int=False):
td = self.recov_date() - self.drawdown_end()
if return_int:
return td.days
return td | Length of drawdown recovery in days.
This is the duration from trough to recovery date.
Parameters
----------
return_int : bool, default False
If True, return the number of days as an int.
If False, return a Pandas Timedelta object.
Returns
----... |
def start_response(self, status = 200, headers = [], clearheaders = True, disabletransferencoding = False):
"Start to send response"
if self._sendHeaders:
raise HttpProtocolException('Cannot modify response, headers already sent')
self.status = status
self.disabledeflate = di... | Start to send response |
def _to_bstr(l):
if isinstance(l, str):
l = l.encode('ascii', 'backslashreplace')
elif not isinstance(l, bytes):
l = str(l).encode('ascii', 'backslashreplace')
return l | Convert to byte string. |
def setCurrentIndex(self, y, x):
self.dataTable.selectionModel().setCurrentIndex(
self.dataTable.model().index(y, x),
QItemSelectionModel.ClearAndSelect) | Set current selection. |
def send_to_tsdb(self, realm, host, service, metrics, ts, path):
if ts is None:
ts = int(time.time())
data = {
"measurement": service,
"tags": {
"host": host,
"service": service,
"realm": '.'.join(realm) if isinstance(re... | Send performance data to time series database
Indeed this function stores metrics in the internal cache and checks if the flushing
is necessary and then flushes.
:param realm: concerned realm
:type: string
:param host: concerned host
:type: string
:param service... |
def translate(self, from_lang=None, to="de"):
if from_lang is None:
from_lang = self.translator.detect(self.string)
return self.translator.translate(self.string,
from_lang=from_lang, to_lang=to) | Translate the word to another language using Google's Translate API.
.. versionadded:: 0.5.0 (``textblob``) |
def clear(self):
with self._conn:
self._conn.execute('DELETE FROM results')
self._conn.execute('DELETE FROM work_items') | Clear all work items from the session.
This removes any associated results as well. |
def as_pyplot_figure(self, label=1, **kwargs):
import matplotlib.pyplot as plt
exp = self.as_list(label=label, **kwargs)
fig = plt.figure()
vals = [x[1] for x in exp]
names = [x[0] for x in exp]
vals.reverse()
names.reverse()
colors = ['green' if x > 0 els... | Returns the explanation as a pyplot figure.
Will throw an error if you don't have matplotlib installed
Args:
label: desired label. If you ask for a label for which an
explanation wasn't computed, will throw an exception.
Will be ignored for regression e... |
def on_init(app):
docs_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.abspath(os.path.join(docs_path, '..'))
apidoc_path = 'sphinx-apidoc'
swg2rst_path = 'swg2rst'
if hasattr(sys, 'real_prefix'):
bin_path = os.path.abspath(os.path.join(sys.prefix, 'bin'))
apido... | Run sphinx-apidoc and swg2rst after Sphinx initialization.
Read the Docs won't run tox or custom shell commands, so we need this to
avoid checking in the generated reStructuredText files. |
def toProtocolElement(self):
gaFeatureSet = protocol.FeatureSet()
gaFeatureSet.id = self.getId()
gaFeatureSet.dataset_id = self.getParentContainer().getId()
gaFeatureSet.reference_set_id = pb.string(self._referenceSet.getId())
gaFeatureSet.name = self._name
gaFeatureSet.s... | Returns the representation of this FeatureSet as the corresponding
ProtocolElement. |
def hide_zeroes(self):
for v in self.subset_labels:
if v is not None and v.get_text() == '0':
v.set_visible(False) | Sometimes it makes sense to hide the labels for subsets whose size is zero.
This utility method does this. |
def iteritems(self):
for (key, val) in six.iteritems(self.__dict__):
if key in self._printable_exclude:
continue
yield (key, val) | Wow this class is messed up. I had to overwrite items when
moving to python3, just because I haden't called it yet |
def print_examples(self, full=False):
msg = []
i = 1
for key in sorted(self.DEMOS.keys()):
example = self.DEMOS[key]
if full or example["show"]:
msg.append(u"Example %d (%s)" % (i, example[u"description"]))
msg.append(u" $ %s %s" % (self.i... | Print the examples and exit.
:param bool full: if ``True``, print all examples; otherwise,
print only selected ones |
def on_btn_metadata(self, event):
if not self.check_for_meas_file():
return
if not self.check_for_uncombined_files():
return
if self.data_model_num == 2:
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
self.... | Initiate the series of windows to add metadata
to the contribution. |
def _execute_select_commands(self, source, commands):
rows = {}
for tbl, command in tqdm(commands, total=len(commands), desc='Executing {0} select queries'.format(source)):
if tbl not in rows:
rows[tbl] = []
rows[tbl].extend(self.fetch(command, commit=True))
... | Execute select queries for all of the tables from a source database. |
def swear_word(self) -> str:
bad_words = self._data['words'].get('bad')
return self.random.choice(bad_words) | Get a random swear word.
:return: Swear word.
:Example:
Damn. |
def surface_projection_from_fault_data(cls, edges):
lons = []
lats = []
for edge in edges:
for point in edge:
lons.append(point.longitude)
lats.append(point.latitude)
lons = numpy.array(lons, dtype=float)
lats = numpy.array(lats, dtype=... | Get a surface projection of the complex fault surface.
:param edges:
A list of horizontal edges of the surface as instances
of :class:`openquake.hazardlib.geo.line.Line`.
:returns:
Instance of :class:`~openquake.hazardlib.geo.polygon.Polygon`
describing t... |
def retrieve_content(self):
path = self._construct_path_to_source_content()
res = self._http.get(path)
self._populated_fields['content'] = res['content']
return res['content'] | Retrieve the content of a resource. |
def get_report_details(self, report_id, id_type=None):
params = {'idType': id_type}
resp = self._client.get("reports/%s" % report_id, params=params)
return Report.from_dict(resp.json()) | Retrieves a report by its ID. Internal and external IDs are both allowed.
:param str report_id: The ID of the incident report.
:param str id_type: Indicates whether ID is internal or external.
:return: The retrieved |Report| object.
Example:
>>> report = ts.get_report_detail... |
def secure_boot(self):
return secure_boot.SecureBoot(
self._conn, utils.get_subresource_path_by(self, 'SecureBoot'),
redfish_version=self.redfish_version) | Property to provide reference to `SecureBoot` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. |
def _get_ilo_details(self):
manager_uri = '/rest/v1/Managers/1'
status, headers, manager = self._rest_get(manager_uri)
if status != 200:
msg = self._get_extended_error(manager)
raise exception.IloError(msg)
mtype = self._get_type(manager)
if (mtype not in ... | Gets iLO details
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if iLO is not up after reset.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. |
def _get_block_transaction_data(db: BaseDB, transaction_root: Hash32) -> Iterable[Hash32]:
transaction_db = HexaryTrie(db, root_hash=transaction_root)
for transaction_idx in itertools.count():
transaction_key = rlp.encode(transaction_idx)
if transaction_key in transaction_db:
... | Returns iterable of the encoded transactions for the given block header |
def find_group_differences(groups1, groups2):
r
import utool as ut
item_to_others1 = {item: set(_group) - {item}
for _group in groups1 for item in _group}
item_to_others2 = {item: set(_group) - {item}
for _group in groups2 for item in _group}
flat_items1... | r"""
Returns a measure of how disimilar two groupings are
Args:
groups1 (list): true grouping of items
groups2 (list): predicted grouping of items
CommandLine:
python -m utool.util_alg find_group_differences
SeeAlso:
vtool.group_indicies
vtool.apply_grouping
... |
def fetch_bug_details(self, bug_ids):
params = {'include_fields': 'product, component, priority, whiteboard, id'}
params['id'] = bug_ids
try:
response = self.session.get(settings.BZ_API_URL + '/rest/bug', headers=self.session.headers,
params=pa... | Fetches bug metadata from bugzilla and returns an encoded
dict if successful, otherwise returns None. |
def _highlight(string, color):
    """Return *string* wrapped in ANSI escape codes for terminal color.

    Colors 0-7 map to the standard SGR foreground codes (30-37); values
    8 and above map to the bright range (color + 82, i.e. 90+).  When
    color output is disabled via ``CONFIG['color']`` the string is
    returned unchanged.
    """
    if not CONFIG['color']:
        return string
    # 0-7 -> standard palette (30-37); 8+ -> bright palette (90+).
    offset = 30 if color < 8 else 82
    return '\033[{color}m{string}\033[0m'.format(
        string=string, color=color + offset)
def set_device_scale(self, x_scale, y_scale):
    """Set a scale multiplied into the device coordinates from the CTM.

    A common use is rendering to very high resolution displays at a
    scale factor, so code that assumes 1 pixel will be a certain size
    still works.

    :param x_scale: scale factor in the X direction.
    :param y_scale: scale factor in the Y direction.
    """
    cairo.cairo_surface_set_device_scale(self._pointer, x_scale, y_scale)
    self._check_status()  # surface may be in an error state after the call
def get_resources(self, ids, cache=True):
    """Retrieve DAX cluster descriptions for the given cluster names.

    :param ids: list of DAX cluster names to describe.
    :param cache: unused here; kept for interface compatibility.
    :return: list of cluster description dicts, or None if absent.
    """
    session = local_session(self.manager.session_factory)
    dax = session.client('dax')
    response = dax.describe_clusters(ClusterNames=ids)
    return response.get('Clusters')
def verify(password_hash, password):
ensure(len(password_hash) == PWHASH_SIZE,
"The password hash must be exactly %s bytes long" %
nacl.bindings.crypto_pwhash_scryptsalsa208sha256_STRBYTES,
raising=exc.ValueError)
return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str_verif... | Takes the output of scryptsalsa208sha256 and compares it against
a user provided password to see if they are the same
:param password_hash: bytes
:param password: bytes
:rtype: boolean
.. versionadded:: 1.2 |
def OnCopy(self, event):
focus = self.main_window.FindFocus()
if isinstance(focus, wx.TextCtrl):
focus.Copy()
else:
selection = self.main_window.grid.selection
data = self.main_window.actions.copy(selection)
self.main_window.clipboard.set_clipboard... | Clipboard copy event handler |
def parentItem(self, value):
    """Setter for the parent item.

    Stores the new parent, then rebuilds this node's path and pushes it
    down recursively so all descendant paths stay consistent.
    """
    self._parentItem = value
    self._recursiveSetNodePath(self._constructNodePath())
def __do_parse(self, pattern_str):
in_ = antlr4.InputStream(pattern_str)
lexer = STIXPatternLexer(in_)
lexer.removeErrorListeners()
token_stream = antlr4.CommonTokenStream(lexer)
parser = STIXPatternParser(token_stream)
parser.removeErrorListeners()
error_listener... | Parses the given pattern and returns the antlr parse tree.
:param pattern_str: The STIX pattern
:return: The parse tree
:raises ParseException: If there is a parse error |
async def create(cls, destination: Union[int, Subnet],
source: Union[int, Subnet], gateway_ip: str, metric: int):
params = {
"gateway_ip": gateway_ip,
"metric": metric,
}
if isinstance(source, Subnet):
params["source"] = source.id
... | Create a `StaticRoute` in MAAS.
:param name: The name of the `StaticRoute` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `StaticRoute` (optional).
:type description: `str`
:param class_type: The cla... |
def form_valid(self, form):
    """Record the submitted metric slugs before normal form handling.

    The cleaned ``metrics`` field is turned into a list of stripped
    slug strings stored on ``self.metric_slugs``, then processing is
    delegated to the parent class.
    """
    submitted = form.cleaned_data['metrics']
    self.metric_slugs = [slug.strip() for slug in submitted]
    return super(AggregateFormView, self).form_valid(form)
def uniqify(list_):
    """Return the items of *list_* with duplicates removed, preserving
    first-seen order.

    Membership tests are linear scans, so this is O(n^2): inefficient
    on long lists -- intended for short lists only.  The list-based
    scan also supports unhashable items.
    """
    unique = []
    for item in list_:
        if item not in unique:
            unique.append(item)
    return unique
def guarded(meth):
    """Decorator adding a connection sanity check to ConnectionResource
    methods.

    Before delegating to *meth*, the wrapper calls
    ``self._check_conn_validity`` with the wrapped method's name so an
    invalid connection fails fast with a clear origin.
    """
    @functools.wraps(meth)
    def wrapper(self, *args, **kwargs):
        self._check_conn_validity(meth.__name__)
        return meth(self, *args, **kwargs)
    return wrapper
def __sub(self, string: str = '') -> str:
    """Replace each run of whitespace in *string* with a randomly
    chosen separator ('_' or '-'), after stripping the ends.

    :param string: String to transform.
    :return: String without spaces.
    """
    separator = self.random.choice(['_', '-'])
    return re.sub(r'\s+', separator, string.strip())
def validate_serializer(serializer, _type):
    """Validate that *serializer* derives from the given type.

    :param serializer: (Serializer class), the serializer to be validated.
    :param _type: (Type), base class the serializer must subclass.
    :raises ValueError: if serializer is not a subclass of _type.
    """
    if not issubclass(serializer, _type):
        # Bug fix: the original message said "an instance of", but the
        # check is issubclass -- report the actual requirement.
        raise ValueError("Serializer should be a subclass of {}".format(_type.__name__))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.