Unnamed: 0 int64 0 389k | code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|---|
367,700 | def _set_widths(self, row, proc_group):
width_free = self.style["width_"] - sum(
[sum(self.fields[c].width for c in self.columns),
self.width_separtor])
if width_free < 0:
width_fixed = sum(
[sum(self.fields[c].width for c in self.columns
... | Update auto-width Fields based on `row`.
Parameters
----------
row : dict
proc_group : {'default', 'override'}
Whether to consider 'default' or 'override' key for pre- and
post-format processors.
Returns
-------
True if any widths require... |
def _handle_nodes(nodes: MaybeNodeList) -> List[BaseEntity]:
    """Normalize node input to a list of :class:`BaseEntity`.

    A single entity is wrapped in a list; within an iterable, any item
    that is not already an entity is converted via ``parse_result_to_dsl``.
    """
    if isinstance(nodes, BaseEntity):
        return [nodes]
    return [
        node if isinstance(node, BaseEntity) else parse_result_to_dsl(node)
        for node in nodes
    ]
def set_vf0(self, vf):
    """Set the field-voltage initial value.

    Stores ``vf`` on ``self.vf0`` and also writes it into the DAE state
    vector ``dae.y`` at this model's ``vf`` addresses (wrapped in a
    ``matrix`` — presumably cvxopt; TODO confirm).
    """
    self.vf0 = vf
self.system.dae.y[self.vf] = matrix(vf) | set value for self.vf0 and dae.y[self.vf] |
367,703 | def sharded_cluster_link(rel, cluster_id=None,
shard_id=None, router_id=None, self_rel=False):
clusters_href =
link = _SHARDED_CLUSTER_LINKS[rel].copy()
link[] = link[].format(**locals())
link[] = if self_rel else rel
return link | Helper for getting a ShardedCluster link document, given a rel. |
367,704 | def encode(self, delimiter=):
try:
return delimiter.join([str(f) for f in [
self.node_id,
self.child_id,
int(self.type),
self.ack,
int(self.sub_type),
self.payload,
]]) +
exc... | Encode a command string from message. |
367,705 | def token_required(view_func):
def _parse_auth_header(auth_header):
reg = re.compile()
header_dict = dict(reg.findall(auth_header))
return header_dict[]
def _get_passed_token(request):
try:
auth_header = r... | Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.
WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
customized. |
367,706 | def read_csi_node(self, name, **kwargs):
kwargs[] = True
if kwargs.get():
return self.read_csi_node_with_http_info(name, **kwargs)
else:
(data) = self.read_csi_node_with_http_info(name, **kwargs)
return data | read the specified CSINode
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_csi_node(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name... |
367,707 | def store_sm(smodel, filename, monitor):
h5 = monitor.hdf5
with monitor():
sources = h5[]
source_geom = h5[]
gid = len(source_geom)
for sg in smodel:
if filename:
with hdf5.File(filename, ) as hdf5cache:
hdf5cache[ % sg.id] = s... | :param smodel: a :class:`openquake.hazardlib.nrml.SourceModel` instance
:param filename: path to an hdf5 file (cache_XXX.hdf5)
:param monitor: a Monitor instance with an .hdf5 attribute |
def set_share_path(self, share_path):
    """Set the application location (share path) for this resource provider.

    Accepts "" or an absolute path; a bare "/" is normalized to "", and a
    trailing slash on any longer path is rejected.
    """
    assert share_path == "" or share_path.startswith("/")
    # A bare root maps to the empty share path.
    normalized = "" if share_path == "/" else share_path
    assert normalized in ("", "/") or not normalized.endswith("/")
    self.share_path = normalized
@param share_path: a UTF-8 encoded, unquoted byte string. |
367,709 | def find_this(search, source=SOURCE):
print("Searching for {what}.".format(what=search))
if not search or not source:
print("Not found on source: {what}.".format(what=search))
return ""
return str(re.compile(r.format(
what=search), re.S).match(source).group(1)).strip() | Take a string and a filename path string and return the found value. |
367,710 | def _cutadapt_trim_cmd(fastq_files, quality_format, adapters, out_files, data):
if all([utils.file_exists(x) for x in out_files]):
return out_files
if quality_format == "illumina":
quality_base = "64"
else:
quality_base = "33"
cutadapt = os.path.join(os.... | Trimming with cutadapt, using version installed with bcbio-nextgen. |
367,711 | def disk_vmag(hemi, retinotopy=, to=None, **kw):
mdat = mag_data(hemi, retinotopy=retinotopy, **kw)
if pimms.is_vector(mdat): return tuple([face_vmag(m, to=to) for m in mdat])
elif pimms.is_vector(mdat.keys(), ):
return pimms.lazy_map({k: curry(lambda k: face_vmag(mdat[k], to=to), k)
... | disk_vmag(mesh) yields the visual magnification based on the projection of disks on the cortical
surface into the visual field.
All options accepted by mag_data() are accepted by disk_vmag(). |
367,712 | def dispatch_hook(cls, _pkt=None, *args, **kargs):
cls = conf.raw_layer
if _pkt is not None:
ptype = orb(_pkt[0])
return globals().get(_param_set_cls.get(ptype), conf.raw_layer)
return cls | Returns the right parameter set class. |
def literal_matches_objectliteral(v1: Literal, v2: ShExJ.ObjectLiteral) -> bool:
    """Compare an ``rdflib.Literal`` with a ``ShExJ.ObjectLiteral`` for equality."""
    lang = str(v2.language) if v2.language else None
    equivalent = Literal(str(v2.value), datatype=iriref_to_uriref(v2.type), lang=lang)
    return v1 == equivalent
367,714 | def preserve_builtin_query_params(url, request=None):
if request is None:
return url
overrides = [
api_settings.URL_FORMAT_OVERRIDE,
]
for param in overrides:
if param and (param in request.GET):
value = request.GET[param]
url = replace_query_param(... | Given an incoming request, and an outgoing URL representation,
append the value of any built-in query parameters. |
367,715 | def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \
-> Tuple[bool, str, dict]:
if not self._url_filter:
return True, , None
test_info = self._url_filter.test_info(url_info, url_record)
verdict = test_info[]
if... | Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, fil... |
367,716 | def set_value(self, attribute, section, value):
if not self.section_exists(section):
LOGGER.debug("> Adding section.".format(section))
self.__sections[section] = OrderedDict() if self.__preserve_order else dict()
self.__sections[section][attribute] = value
re... | Sets requested attribute value.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = conten... |
367,717 | def get_assets_by_repository(self, repository_id):
mgr = self._get_provider_manager(, local=True)
lookup_session = mgr.get_asset_lookup_session_for_repository(repository_id, proxy=self._proxy)
lookup_session.use_isolated_repository_view()
return lookup_session.... | Gets the list of ``Assets`` associated with a ``Repository``.
arg: repository_id (osid.id.Id): ``Id`` of the ``Repository``
return: (osid.repository.AssetList) - list of related assets
raise: NotFound - ``repository_id`` is not found
raise: NullArgument - ``repository_id`` is ``nul... |
def get_schemas():
    """Return a dict mapping schema names to ``Schema`` objects.

    Names listed in ``NO_SCHEMA`` are excluded from the result.
    """
    return {
        name: Schema(name)
        for name in os.listdir(JSON_PATH)
        if name not in NO_SCHEMA
    }
The schema is of type schul_cloud_resources_api_v1.schema.Schema |
367,719 | def to_unicode(s, encoding=None, errors=):
encoding = encoding or
if is_unicode(s):
return s
elif is_strlike(s):
return s.decode(encoding, errors)
else:
if six.PY2:
return str(s).decode(encoding, errors)
else:
return str(s) | Make unicode string from any value
:param s:
:param encoding:
:param errors:
:return: unicode |
367,720 | def _pseudoinverse(self, A, tol=1.0e-10):
[M, N] = A.shape
if N != M:
raise DataError("pseudoinverse can only be computed for square matrices: dimensions were %d x %d" % (
M, N))
if(np.any(np.isnan(A))):
... | Compute the Moore-Penrose pseudoinverse.
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to largest magnitude singl... |
367,721 | def render_entry(entry_id, slug_text=, category=):
path_redirect = get_redirect()
if path_redirect:
return path_redirect
raise http_error.NotFound("No such entry")
if record.status == model.PublishStatus.DRAFT.value:
raise http_error.Forbidden("Ent... | Render an entry page.
Arguments:
entry_id -- The numeric ID of the entry to render
slug_text -- The expected URL slug text
category -- The expected category |
367,722 | def check_version(mod, required):
vers = tuple(int(v) for v in mod.__version__.split()[:3])
if vers < required:
req = .join(str(v) for v in required)
raise ImproperlyConfigured(
"Module \"%s\" version (%s) must be >= %s." %
(mod.__name__, mod.__version__, req)) | Require minimum version of module using ``__version__`` member. |
367,723 | def get_lead(self, lead_id):
params = self.base_params
endpoint = self.base_endpoint.format( + str(lead_id))
return self._query_hunter(endpoint, params) | Get a specific lead saved on your account.
:param lead_id: Id of the lead to search. Must be defined.
:return: Lead found as a dict. |
def visit_decorators(self, node, parent):
    """Visit a Decorators node by returning a fresh instance of it."""
    decorators_node = nodes.Decorators(node.lineno, node.col_offset, parent)
    children = [self.visit(child, decorators_node) for child in node.decorator_list]
    decorators_node.postinit(children)
    return decorators_node
367,725 | def check_webhook_validation(app_configs=None, **kwargs):
from . import settings as djstripe_settings
messages = []
validation_options = ("verify_signature", "retrieve_event")
if djstripe_settings.WEBHOOK_VALIDATION is None:
messages.append(
checks.Warning(
"Webhook validation is disabled, this is a s... | Check that DJSTRIPE_WEBHOOK_VALIDATION is valid |
367,726 | def _recursive_split(self, bbox, zoom_level, column, row):
if zoom_level == self.zoom_level:
self.bbox_list.append(bbox)
self.info_list.append({: zoom_level,
: column,
: row})
return
bbox_... | Method that recursively creates bounding boxes of OSM grid that intersect the area.
:param bbox: Bounding box
:type bbox: BBox
:param zoom_level: OSM zoom level
:type zoom_level: int
:param column: Column in the OSM grid
:type column: int
:param row: Row in the O... |
367,727 | def dump_normals(dataset_dir, data_dir, dataset, root=None, compress=True):
if root is None:
root = {}
normals = dataset.GetPointData().GetNormals()
if normals:
dumped_array = dump_data_array(dataset_dir, data_dir, normals, {}, compress)
root[][] = len(root[][])
root[][]... | dump vtkjs normal vectors |
367,728 | def calculateDatasets(self, scene, axes, datasets):
items = self.calculateDatasetItems(scene, datasets)
if not items:
scene.clear()
return
rect = self.buildData()
half_size = self.maximumBarSize() / 2.0
for dataset, ite... | Builds the datasets for this renderer. Each renderer will need to
subclass and implemenent this method, otherwise, no data will be
shown in the chart.
:param scene | <XChartScene>
axes | [<
datasets | [<XChartDataset>, ..] |
367,729 | def add_missing_children(required_children, element_children):
element_tags = [element.tag for element in element_children]
for contained_element in required_children:
except:
added_child = PYUNTL_DISPATCH[contained_element]()
element_children.append(ad... | Determine if there are elements not in the children
that need to be included as blank elements in the form. |
367,730 | def _get_metadata_for_galaxies(
self):
self.log.debug()
total, batches = self._count_galaxies_requiring_metadata()
print "%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals()
totalBatches = self.batches
thisCount ... | get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any use... |
367,731 | def parse_dformat(dformat, check=True):
if check and dformat not in [, ]:
raise IOError(
"{} is a bad features format, please choose or "
.format(dformat))
return dformat | Return `dformat` or raise if it is not 'dense' or 'sparse |
367,732 | def login_with_google(self, email, oauth2_token, **kwargs):
params = {
: email,
: oauth2_token
}
req_func = self._get
if kwargs.get(, 0) == 1: | Login to Todoist using Google's oauth2 authentication.
:param email: The user's Google email address.
:type email: str
:param oauth2_token: The user's Google oauth2 token.
:type oauth2_token: str
:param auto_signup: If ``1`` register an account automatically.
:type auto_... |
367,733 | def check_alive_instances(self):
for instance in self.instances:
if instance in self.to_restart:
continue
if instance.is_external and instance.process and not instance.process.is_alive():
logger.error("The external module %s died unexpec... | Check alive instances.
If not, log error and try to restart it
:return: None |
def relabel_groups_masked(group_idx, keep_group):
    """Remove dropped groups and relabel the survivors to a dense range.

    Example::

        group_idx:  [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
                     0 1 2 3 4 5
        keep_group: [0 1 0 1 1 1]
        ret:        [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]

    Group 2 is removed, and groups 3, 4 and 5 are relabeled to 2, 3 and 4
    respectively to fill the gap.  Dropped items are folded into group 0,
    which is always kept.

    :param group_idx: integer ndarray of group labels per item
    :param keep_group: boolean-ish ndarray, one flag per group label
    :return: relabeled ndarray, same shape as ``group_idx``
    """
    # Copy only when group 0 would be flipped below, so the caller's
    # array is never mutated.
    keep_group = keep_group.astype(bool, copy=not keep_group[0])
    if not keep_group[0]:
        # Group 0 acts as the catch-all for dropped items; force-keep it.
        keep_group[0] = True
    relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
    relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
    # NOTE: the source snippet's final return was truncated ("return r...");
    # reconstructed from the worked example above.
    return relabel[group_idx]
0 1 2 3 4 5
keep_group: [0 1 0 1 1 1]
ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]
Description of above in words: remove group 2, and relabel group 3,4, and 5
to be 2, 3 and 4 respecitvely, in order to fill the gap. Note that group... |
367,735 | def resolve_image_as_pil(self, image_url, coords=None):
files = self.mets.find_files(url=image_url)
if files:
image_filename = self.download_file(files[0]).local_filename
else:
image_filename = self.download_url(image_url)
if image_url not in self.image_... | Resolve an image URL to a PIL image.
Args:
coords (list) : Coordinates of the bounding box to cut from the image
Returns:
Image or region in image as PIL.Image |
def transform_inverse(im_tensor, mean, std):
    """Transform an mxnet im_tensor back into an ordinary RGB image.

    Limited to a single image (channel-first layout).

    :param im_tensor: array of shape [channel, height, width], channel == 3
    :param mean: per-channel RGB pixel mean
    :param std: per-channel RGB pixel std
    :return: uint8 image of shape [height, width, channel(RGB)]
    """
    assert im_tensor.shape[0] == 3
    # channel-first -> channel-last, then undo the normalization.
    image = im_tensor.transpose((1, 2, 0))
    image = image * std + mean
    return image.astype(np.uint8)
im_tensor is limited to one image
:param im_tensor: [batch, channel, height, width]
:param mean: [RGB pixel mean]
:param std: [RGB pixel std var]
:return: im [height, width, channel(RGB)] |
def isopen(self) -> bool:
    """Return True when a backing file is attached and has a truthy id."""
    backing = self._file
    return backing is not None and bool(backing.id)
367,738 | def Call(method,url,payload=None,session=None,debug=False):
if session is not None:
token = session[]
http_session = session[]
else:
if not clc._LOGIN_TOKEN_V2:
API._Login()
token = clc._LOGIN_TOKEN_V2
http_session = clc._REQUESTS_SESSION
if payload is None:
payload = {}
if ... | Execute v2 API call.
:param url: URL paths associated with the API call
:param payload: dict containing all parameters to submit with POST call
:returns: decoded API json result |
def rdlevenshtein_norm(source, target):
    """Calculate the normalized restricted Damerau-Levenshtein distance.

    (a.k.a. the normalized optimal string alignment distance) between two
    string arguments.  The result is a float in [0.0, 1.0], with 1.0
    signifying the maximum distance between strings of these lengths.

    :param source: first string
    :param target: second string
    :return: normalized distance as a float
    """
    # Two empty strings are identical; the original divided by
    # max(len, len) == 0 here and raised ZeroDivisionError.
    if not source and not target:
        return 0.0
    distance = _levenshtein_compute(source, target, True)
    return float(distance) / max(len(source), len(target))
(a.k.a. the normalized optimal string alignment distance) between two
string arguments. The result will be a float in the range [0.0, 1.0], with
1.0 signifying the maximum distance between strings with these lengths |
367,740 | def preprocess_cell(
self, cell: "NotebookNode", resources: dict, cell_index: int
) -> Tuple["NotebookNode", dict]:
output_files_dir = resources.get("output_files_dir", None)
if not isinstance(resources["outputs"], dict):
resources["outputs"] = {}
... | Apply a transformation on each cell.
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine... |
367,741 | def decode(geohash):
lat, lon, lat_err, lon_err = decode_exactly(geohash)
lats = "%.*f" % (max(1, int(round(-log10(lat_err)))) - 1, lat)
lons = "%.*f" % (max(1, int(round(-log10(lon_err)))) - 1, lon)
if in lats: lats = lats.rstrip()
if in lons: lons = lons.rstrip()
return lats, lons | Decode geohash, returning two strings with latitude and longitude
containing only relevant digits and with trailing zeroes removed. |
def _as_dict(self):
    """Return a map of column names to cleaned (database-ready) values.

    NOTE(review): when ``self._dynamic_columns`` is a non-empty dict it is
    used directly (not copied), so this method also mutates it in place by
    adding one entry per declared column — confirm callers expect that.
    """
    values = self._dynamic_columns or {}
    for name, col in self._columns.items():
        # Columns never set on the instance serialize as None (getattr default).
        values[name] = col.to_database(getattr(self, name, None))
return values | Returns a map of column names to cleaned values |
367,743 | def module_imports_on_top_of_file(
logical_line, indent_level, checker_state, noqa):
r
def is_string_literal(line):
if line[0] in :
line = line[1:]
if line and line[0] in :
line = line[1:]
return line and (line[0] == or line[0] == "tryexceptelsefinallyimp... | r"""Place imports at the top of the file.
Always put imports at the top of the file, just after any module comments
and docstrings, and before module globals and constants.
Okay: import os
Okay: # this is a comment\nimport os
Okay: '''this is a module docstring'''\nimport os
Okay: r'''this is ... |
367,744 | def weather(self, latitude=None, longitude=None, date=None):
if latitude is None:
if self.latitude is None:
raise TypeError("latitude must be type str is None")
else:
self.latitude = latitude
if longitude is None:
... | :param float latitude: Locations latitude
:param float longitude: Locations longitude
:param datetime or str or int date: Date/time for historical weather data
:raises requests.exceptions.HTTPError: Raises on bad http response
:raises TypeError: Raises on invalid param types
:r... |
367,745 | def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
main=None, xtitle=None, xrange=None, ylabels=None, chain_spacing=0.05, vline=0):
if not gridspec:
print_(
)
return
qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
if quarti... | Forest plot (model summary plot)
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
vars: list
... |
367,746 | def bfs_depth(self, U):
bfs_queue = [[U, 0]]
visited = set()
max_depth = 0
while bfs_queue:
[V, depth] = bfs_queue.pop()
if max_depth < depth:
max_depth = depth
visited.add(V)
adj_set = self.edges[V]
for W in adj_set:
if W not in visited:
bf... | Returns the maximum distance between any vertex and U in the connected
component containing U
:param U:
:return: |
367,747 | def set_topic_config(self, topic, value, kafka_version=(0, 10, )):
config_data = dump_json(value)
try:
return_value = self.set(
"/config/topics/{topic}".format(topic=topic),
config_data
)
version = ka... | Set configuration information for specified topic.
:topic : topic whose configuration needs to be changed
:value : config value with which the topic needs to be
updated with. This would be of the form key=value.
Example 'cleanup.policy=compact'
:kafka_version :tuple kaf... |
def restrict_args(func, *args, **kwargs):
    """Restrict the positional arguments passed to ``func`` to those it accepts.

    Extra positional arguments are silently dropped unless ``func`` takes
    ``*args``; keyword arguments are always passed through.

    Example::

        restrict_args(lambda a: a, 1, 2)
        # => 1

    :param func: the callable to invoke
    :return: the result of calling ``func`` with the restricted arguments
    """
    # inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec exposes the same .args / .varargs attributes.
    from inspect import getfullargspec
    spec = getfullargspec(func)
    if not spec.varargs:
        args = args[:len(spec.args)]
    return func(*args, **kwargs)
restrict_args(lambda a: a, 1, 2)
# => 1 |
367,749 | def paginate(parser, token, paginator_class=None):
try:
tag_name, tag_args = token.contents.split(None, 1)
except ValueError:
msg = % token.contents.split()[0]
raise template.TemplateSyntaxError(msg)
match = PAGINATE_EXPRESSION.match(tag_args)
if match is None:
... | Paginate objects.
Usage:
.. code-block:: html+django
{% paginate entries %}
After this call, the *entries* variable in the template context is replaced
by only the entries of the current page.
You can also keep your *entries* original variable (usually a queryset)
and add to the con... |
def add_locals(self, locals):
    """Return a copy of this variable proxy that also contains ``locals``.

    When ``locals`` is None there is nothing to add and self is returned
    unchanged.
    """
    if locals is None:
        return self
    return _jinja2_vars(
        self.basedir, self.vars, self.globals, locals, *self.extras
    )
locals in addition to what is already in this variable proxy. |
def color(self, *args):
    """Build a Color object from ``args`` using this context's mode and range.

    :param args: color in a supported format.
    :return: Color object containing the color.
    """
    return self.Color(*args, mode=self.color_mode, color_range=self.color_range)
:return: Color object containing the color. |
367,752 | def _register_simple(self, endpoint, scheme, f):
assert scheme in DEFAULT_NAMES, ("Unsupported arg scheme %s" % scheme)
if scheme == JSON:
req_serializer = JsonSerializer()
resp_serializer = JsonSerializer()
else:
req_serializer = RawSerializer()
... | Register a simple endpoint with this TChannel.
:param endpoint:
Name of the endpoint being registered.
:param scheme:
Name of the arg scheme under which the endpoint will be
registered.
:param f:
Callable handler for the endpoint. |
367,753 | def kafka_kip(enrich):
def extract_vote_and_binding(body):
vote = 0
binding = 0
nlines = 0
for line in body.split("\n"):
if nlines > MAX_LINES_FOR_VOTE:
break
if line.startswith(">"):
... | Kafka Improvement Proposals process study |
def get_outcome_for_state_id(self, state_id):
    """Return the final outcome of the child state with the given id.

    :param state_id: id of the state to look up
    :return: the outcome stored for that state, or None when the id is unknown
    """
    for sid, name_outcome in self.final_outcomes_dict.items():
        if sid == state_id:
            # The stored tuple is (name, outcome); only the outcome is returned.
            return name_outcome[1]
    return None
:param state_id: The id of the state to get the final outcome for.
:return: |
367,755 | def _get_torrent_category(self, tag, result=None):
hrefs = ["/movies/", "/tv/", "/music/", "/games/", "/applications/", "/anime/",
"/books/", "/xxx/"]
category = None
if not result is None:
category = hrefs[result / 10].strip("/")
return category
for item in hrefs:
if tag.select("a[href=... | Given a tag containing torrent details try to find category
of torrent. In search pages the category is found in links of
the form <a href='/tv/'>TV</a> with TV replaced with movies, books
etc. For the home page I will use the result number to
decide the category |
367,756 | def _is_bst(root, min_value=float(), max_value=float()):
if root is None:
return True
return (
min_value < root.value < max_value and
_is_bst(root.left, min_value, root.value) and
_is_bst(root.right, root.value, max_value)
) | Check if the binary tree is a BST (binary search tree).
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:param min_value: Minimum node value seen.
:type min_value: int | float
:param max_value: Maximum node value seen.
:type max_value: int | float
:return: True... |
367,757 | def managed(name, users=None, defaults=None):
edge01.kix01netusers_|-netusers_example_|-netusers_example_|-managedcommentConfiguration updated!namenetusers_examplestart_time10:57:08.678811__id__netusers_exampleduration__run_num__changesupdatedadminlevelrestrictedlevelmartinsshkeysssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3... | Manages the configuration of the users on the device, as specified in the state SLS file. Users not defined in that
file will be remove whilst users not configured on the device, will be added.
SLS Example:
.. code-block:: yaml
netusers_example:
netusers.managed:
- us... |
367,758 | def main():
parser = argparse.ArgumentParser(description=)
parser.add_argument(,
help=)
parser.add_argument(,
nargs=,
dest=,
help=)
parser.add_argument(,
nargs=,
dest=,
help=)
parser.add_argument(,
nargs=,
dest=,
help=)
parser.add_argument(, ,
... | Parse command line arguments and then run the test suite |
367,759 | def _ParseDateTimeValue(self, parser_mediator, date_time_value):
if date_time_value[14] != :
parser_mediator.ProduceExtractionWarning(
.format(date_time_value))
return None
try:
year = int(date_time_value[0:4], 10)
month = int(date_time_value[4:6], 10)
day_of_month ... | Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
... |
367,760 | def validate(self, columns=None):
schema = self.schema()
if not columns:
ignore_flags = orb.Column.Flags.Virtual | orb.Column.Flags.ReadOnly
columns = schema.columns(flags=~ignore_flags).values()
use_indexes = True
else:
use_indexes = Fals... | Validates the current record object to make sure it is ok to commit to the database. If
the optional override dictionary is passed in, then it will use the given values vs. the one
stored with this record object which can be useful to check to see if the record will be valid before
it is commit... |
367,761 | def check_monophyly(self,
values,
target_attr,
ignore_missing=False,
unrooted=False):
if type(values) != set:
values = set(values)
n2leaves = self.get_cached_content()
if ignore_missing:
found_value... | Returns True if a given target attribute is monophyletic under
this node for the provided set of values.
If not all values are represented in the current tree
structure, a ValueError exception will be raised to warn that
strict monophyly could never be reached (this behaviour can be
... |
367,762 | def _handle_version(self, data):
_, version_string = data.split()
version_parts = version_string.split()
self.serial_number = version_parts[0]
self.version_number = version_parts[1]
self.version_flags = version_parts[2] | Handles received version data.
:param data: Version string to parse
:type data: string |
367,763 | def _grp_store_group(self, traj_group, store_data=pypetconstants.STORE_DATA,
with_links=True, recursive=False, max_depth=None,
_hdf5_group=None, _newly_created=False):
if store_data == pypetconstants.STORE_NOTHING:
return
elif store_... | Stores a group node.
For group nodes only annotations and comments need to be stored. |
367,764 | def additions_version():
*
try:
d = _additions_dir()
except EnvironmentError:
return False
if d and os.listdir(d):
return re.sub(r.format(_additions_dir_prefix), ,
os.path.basename(d))
return False | Check VirtualBox Guest Additions version.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_version
:return: version of VirtualBox Guest Additions or False if they are not installed |
367,765 | def addPath(rel_path, prepend=False):
path = lambda *paths: os.path.abspath(
os.path.join(os.path.dirname(__file__), *paths)) +
if prepend:
return sys.path.insert(0, path(rel_path))
return sys.path.append(path(rel_path)) | Adds a directory to the system python path, either by append (doesn't
override default or globally installed package names) or by prepend
(overrides default/global package names). |
367,766 | def handle_namespace_pattern(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
namespace = tokens[]
self.raise_for_redefined_namespace(line, position, namespace)
self.namespace_to_pattern[namespace] = re.compile(tokens[])
return tokens | Handle statements like ``DEFINE NAMESPACE X AS PATTERN "Y"``.
:raises: RedefinedNamespaceError |
367,767 | def linkify(text, shorten=False, extra_params="",
require_protocol=False, permitted_protocols=["http", "https"]):
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m):
url = m.group(1)
proto = m.group(2)
... | Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to incl... |
367,768 | def bayesian_hmm(observations, estimated_hmm, nsample=100, reversible=True, stationary=False,
p0_prior=, transition_matrix_prior=, store_hidden=False, call_back=None):
r
from bhmm.estimators.bayesian_sampling import BayesianHMMSampler as _BHMM
sampler = _BHMM(observations, estimated_hm... | r""" Bayesian HMM based on sampling the posterior
Generic maximum-likelihood estimation of HMMs
Parameters
----------
observations : list of numpy arrays representing temporal data
`observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i`
estimated_hmm : HMM
... |
def textmetrics(self, txt, width=None, height=None, **kwargs):
    """Return the (width, height) of a string of text as a tuple,
    according to the current font settings.  Rendering is disabled:
    the text object is only built to measure it.
    """
    return self.Text(
        txt, 0, 0, width, height, enableRendering=False, **kwargs
    ).metrics
(according to current font settings). |
367,770 | def cublasZtpmv(handle, uplo, trans, diag, n, AP, x, incx):
status = _libcublas.cublasZtpmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
... | Matrix-vector product for complex triangular-packed matrix. |
367,771 | def create_from_other( Class, other, values=None ):
m = Class()
m.alphabet = other.alphabet
m.sorted_alphabet = other.sorted_alphabet
m.char_to_index = other.char_to_index
if values is not None:
m.values = values
else:
m.values = other.val... | Create a new Matrix with attributes taken from `other` but with the
values taken from `values` if provided |
367,772 | def full_signature(self):
if self.kind == "function":
return "{template}{return_type} {name}({parameters})".format(
template="template <{0}> ".format(", ".join(self.template)) if self.template else "",
return_type=self.return_type,
name=self.n... | The full signature of a ``"function"`` node.
**Return**
:class:`python:str`
The full signature of the function, including template, return type,
name, and parameter types.
**Raises**
:class:`python:RuntimeError`
If ``self.kind != ... |
def index(self, alias):
    """Select which database/index this QuerySet executes its query against.

    Returns a clone so the original QuerySet is left untouched.
    """
    qs = self._clone()
    qs._index = alias
    return qs
367,774 | def _maybe_clear_confirmation_futures(self):
for name in self._connections.keys():
self._connections[name].clear_confirmation_futures() | Invoked when the message has finished processing, ensuring there
are no confirmation futures pending. |
367,775 | def _add32(ins):
op1, op2 = tuple(ins.quad[2:])
if _int_ops(op1, op2) is not None:
o1, o2 = _int_ops(op1, op2)
if int(o2) == 0:
output = _32bit_oper(o1)
output.append()
output.append()
return output
if op1[0] == and op2[0] != :
... | Pops last 2 bytes from the stack and adds them.
Then push the result onto the stack.
Optimizations:
* If any of the operands is ZERO,
then do NOTHING: A + 0 = 0 + A = A |
367,776 | def do_copy(self,args):
parser = CommandArgumentParser("copy")
parser.add_argument(,,dest=,nargs=,required=False,default=[],help=)
parser.add_argument(,,dest=,nargs=,required=False,default=[],help=)
args = vars(parser.parse_args(args))
values = []
if args... | Copy specified id to stack. copy -h for detailed help. |
367,777 | def add_text_img(img, text, pos, box=None, color=None, thickness=1, scale=1, vertical=False):
if color is None:
color = COL_WHITE
text = str(text)
top_left = pos
if box is not None:
top_left = box.move(pos).to_int().top_left()
if top_left[0] > img.shape[1]:
retu... | Adds the given text in the image.
:param img: Input image
:param text: String text
:param pos: (x, y) in the image or relative to the given Box object
:param box: Box object. If not None, the text is placed inside the box.
:param color: Color of the text.
:param thickness: Thickness of the font... |
def intersection(self, another_moc, delta_t=DEFAULT_OBSERVATION_TIME):
    """Intersection between self and another (T)MOC.

    ``delta_t`` lets the caller choose the time resolution at which the
    intersection is performed: both operands are first degraded to the
    order corresponding to ``delta_t``, then the parent-class
    intersection is applied to the degraded pair.
    """
    # Translate the requested time resolution into a MOC order.
    order_op = TimeMOC.time_resolution_to_order(delta_t)
    # Bring both operands down to that common resolution before intersecting.
    self_degraded, moc_degraded = self._process_degradation(another_moc, order_op)
return super(TimeMOC, self_degraded).intersection(moc_degraded) | Intersection between self and moc. ``delta_t`` gives the possibility to the user
to set a time resolution for performing the tmoc intersection
Parameters
----------
another_moc : `~mocpy.abstract_moc.AbstractMOC`
the MOC/TimeMOC used for performing the intersection with self... |
367,779 | def print_email(message, app):
invenio_mail = app.extensions[]
with invenio_mail._lock:
invenio_mail.stream.write(
.format(message.as_string(), * 79))
invenio_mail.stream.flush() | Print mail to stream.
Signal handler for email_dispatched signal. Prints by default the output
to the stream specified in the constructor of InvenioMail.
:param message: Message object.
:param app: Flask application object. |
367,780 | def new_geom(geom_type, size, pos=(0, 0, 0), rgba=RED, group=0, **kwargs):
kwargs["type"] = str(geom_type)
kwargs["size"] = array_to_string(size)
kwargs["rgba"] = array_to_string(rgba)
kwargs["group"] = str(group)
kwargs["pos"] = array_to_string(pos)
element = ET.Element("geom", attrib=kwar... | Creates a geom element with attributes specified by @**kwargs.
Args:
geom_type (str): type of the geom.
see all types here: http://mujoco.org/book/modeling.html#geom
size: geom size parameters.
pos: 3d position of the geom frame.
rgba: color and transparency. Defaults to... |
367,781 | def read_cell(self, x, y):
cell = self._sheet.row(x)[y]
if self._file.xf_list[
cell.xf_index].background.pattern_colour_index == 64:
self._file.xf_list[
cell.xf_index].background.pattern_colour_index = 9
if self._file.xf_list[
cell.xf_... | reads the cell at position x and y; puts the default styles in xlwt |
367,782 | def bna_config_cmd_status_input_session_id(self, **kwargs):
config = ET.Element("config")
bna_config_cmd_status = ET.Element("bna_config_cmd_status")
config = bna_config_cmd_status
input = ET.SubElement(bna_config_cmd_status, "input")
session_id = ET.SubElement(input, "s... | Auto Generated Code |
367,783 | def _get_template(self, event, ctype, fields):
from os import path
template = path.join(self.server.dirname, "templates", "{}.{}".format(event, ctype))
contents = None
if path.isfile(template):
with open(template) as f:
... | Gets the contents of the template for the specified event and type
with all the fields replaced.
:arg event: one of ['start', 'error', 'success', 'timeout', 'failure'].
:arg ctype: one of ["txt", "html"] specifying which template to use.
:arg fields: a dictionary of fields and t... |
367,784 | def _build_gui(self):
self._central_widget.deleteLater()
self._sliders = []
self._buttons_top_color = []
self._buttons_bottom_color = []
self._checkboxes = []
self._buttons_plus = []
self._buttons_... | Removes all existing sliders and rebuilds them based on the colormap. |
367,785 | def plot_forward_models(self, maglim=None, phalim=None, **kwargs):
return_dict = {}
N = len(self.frequencies)
nrx = min(N, 4)
nrz = int(np.ceil(N / nrx))
for index, key, limits in zip(
(0, 1), (, ), (maglim, phalim)):
if limits is None:
... | Create plots of the forward models
Returns
-------
mag_fig: dict
Dictionary containing the figure and axes objects of the magnitude
plots |
367,786 | def _arrays_to_sections(self, arrays):
sections = []
sections_to_resize_later = {}
show_all = self.config[]
image_width = self._determine_image_width(arrays, show_all)
for array_number, array in enumerate(arrays):
rank = len(array.shape)
section_height = self._determine_section_hei... | input: unprocessed numpy arrays.
returns: columns of the size that they will appear in the image, not scaled
for display. That needs to wait until after variance is computed. |
367,787 | def AddClientKeywords(self, client_id, keywords):
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
for kw in keywords:
self.keywords.setdefault(kw, {})
self.keywords[kw][client_id] = rdfvalue.RDFDatetime.Now() | Associates the provided keywords with the client. |
367,788 | def data_slice(self, slice_ind):
if self.height is None:
return self.data[slice_ind]
return self.data[slice_ind, ...] | Returns a slice of datapoints |
367,789 | def pfopen(self, event=None):
fname = self._openMenuChoice.get()
self.updateTitle(fname)
self._taskParsObj.filename = fname
self.freshenFocus()
self.showStatus("Loaded values from: "+fname, keep=2)
... | Load the parameter settings from a user-specified file. |
367,790 | def add_poisson(image, exp_time):
if isinstance(exp_time, int) or isinstance(exp_time, float):
if exp_time <= 0:
exp_time = 1
else:
mean_exp_time = np.mean(exp_time)
exp_time[exp_time < mean_exp_time/10] = mean_exp_time/10
sigma = np.sqrt(np.abs(image)/exp_time)... | adds a poison (or Gaussian) distributed noise with mean given by surface brightness
:param image: pixel values (photon counts per unit exposure time)
:param exp_time: exposure time
:return: Poisson noise realization of input image |
367,791 | def ParseZeitgeistEventRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = ZeitgeistActivityEventData()
event_data.offset = self._GetRowValue(query_hash, row, )
event_data.query = query
event_data.subject_uri = self._GetRowValue(query_hash, row... | Parses a zeitgeist event row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. |
367,792 | def _parse_key(stream):
logger.debug("parsing key")
key = stream.advance_past_chars(["="])
logger.debug("parsed key:")
logger.debug("%s", fmt_green(key))
return key | Parse key, value combination
returns :
Parsed key (string) |
367,793 | def setShapeClass(self, vehID, clazz):
self._connection._sendStringCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_SHAPECLASS, vehID, clazz) | setShapeClass(string, string) -> None
Sets the shape class for this vehicle. |
367,794 | def embed(self, title=):
if self.embed_disabled:
self.warning_log("Embed is disabled when runned from the grid runner because of the multithreading")
return False
from IPython.terminal.embed import InteractiveShellEmbed
if BROME_CONFIG[][]:
say(BR... | Start an IPython embed
Calling embed won't do anything in a multithread context
The stack_depth will be found automatically |
367,795 | def ack(self):
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.ack(self.delivery_tag)
self._state = "ACK" | Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected. |
367,796 | def _merge_variables(new, cur):
new_added = set([])
out = []
for cur_var in cur:
updated = False
for new_var in new:
if get_base_id(new_var["id"]) == get_base_id(cur_var["id"]):
out.append(new_var)
new_added.add(new_var["id"])
... | Add any new variables to the world representation in cur.
Replaces any variables adjusted by previous steps. |
367,797 | def create_entity_type(project_id, display_name, kind):
import dialogflow_v2 as dialogflow
entity_types_client = dialogflow.EntityTypesClient()
parent = entity_types_client.project_agent_path(project_id)
entity_type = dialogflow.types.EntityType(
display_name=display_name, kind=kind)
... | Create an entity type with the given display name. |
367,798 | def coords_by_cutoff(self, cutoff=0.80):
i = np.where(self.cve >= cutoff)[0][0]
coords_matrix = self.vecs[:, :i + 1]
return coords_matrix, self.cve[i] | Returns fitted coordinates in as many dimensions as are needed to
explain a given amount of variance (specified in the cutoff) |
367,799 | def is_integer(value, min=None, max=None):
(min_val, max_val) = _is_num_param((, ), (min, max))
if not isinstance(value, int_or_string_types):
raise VdtTypeError(value)
if isinstance(value, string_types):
try:
value = int(value)
except ValueError:
... | A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor = Validator()
>>> vtor.check('... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.