code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def user_default_loader(self, pk):
try:
obj = User.objects.get(pk=pk)
except User.DoesNotExist:
return None
else:
self.user_default_add_related_pks(obj)
return obj | Load a User from the database. |
def columns(self):
fields = [f.label for f in self.form_fields
if self.cleaned_data["field_%s_export" % f.id]]
if self.cleaned_data["field_0_export"]:
fields.append(self.entry_time_name)
return fields | Returns the list of selected column names. |
def readPlistFromString(data):
try:
plistData = buffer(data)
except TypeError, err:
raise NSPropertyListSerializationException(err)
dataObject, dummy_plistFormat, error = (
NSPropertyListSerialization.
propertyListFromData_mutabilityOption_format_errorDescription_(
plistData, NSPropertyListMutableContainers, None, None))
if dataObject is None:
if error:
error = error.encode('ascii', 'ignore')
else:
error = "Unknown error"
raise NSPropertyListSerializationException(error)
else:
return dataObject | Read a plist data from a string. Return the root object. |
def new_bundle(name, scriptmap, filemap=None):
if name in BUNDLEMAP:
logger.warn('overwriting bundle %s' % name)
BUNDLEMAP[name] = Bundle(scriptmap, filemap) | Create a bundle and add to available bundles |
def as_txt(self):
s = "IIIF Image Server Error\n\n"
s += self.text if (self.text) else 'UNKNOWN_ERROR'
s += "\n\n"
if (self.parameter):
s += "parameter=%s\n" % self.parameter
if (self.code):
s += "code=%d\n\n" % self.code
for header in sorted(self.headers):
s += "header %s=%s\n" % (header, self.headers[header])
return s | Text rendering of error response.
Designed for use with Image API version 1.1 and above where the
error response is suggested to be text or html but not otherwise
specified. Intended to provide useful information for debugging. |
def heating_values(self):
heating_dict = {
'level': self.heating_level,
'target': self.target_heating_level,
'active': self.now_heating,
'remaining': self.heating_remaining,
'last_seen': self.last_seen,
}
return heating_dict | Return a dict of all the current heating values. |
def _update_redundancy_router_interfaces(self, context, router,
port, modified_port_data,
redundancy_router_ids=None,
ha_settings_db=None):
router_id = router['id']
if ha_settings_db is None:
ha_settings_db = self._get_ha_settings_by_router_id(context,
router_id)
if ha_settings_db is None:
return
e_context = context.elevated()
rr_ids = self._get_redundancy_router_ids(e_context, router_id)
port_info_list = self._core_plugin.get_ports(
e_context, filters={'device_id': rr_ids,
'network_id': [port['network_id']]},
fields=['device_id', 'id'])
for port_info in port_info_list:
self._core_plugin.update_port(e_context, port_info['id'],
modified_port_data)
self._update_hidden_port(e_context, port['id'], modified_port_data) | To be called when the router interfaces are updated,
like in the case of change in port admin_state_up status |
def _get_page_elements(self):
page_elements = []
for attribute, value in list(self.__dict__.items()) + list(self.__class__.__dict__.items()):
if attribute != 'parent' and isinstance(value, CommonObject):
page_elements.append(value)
return page_elements | Return page elements and page objects of this page object
:returns: list of page elements and page objects |
def remove(self, expr):
self._assert_supports_contents()
index = self._contents.index(expr)
self._contents.remove(expr)
return index | Remove a provided expression from its list of contents.
:param Union[TexExpr,str] expr: Content to add
:return: index of the expression removed
:rtype: int
>>> expr = TexExpr('textbf', ('hello',))
>>> expr.remove('hello')
0
>>> expr
TexExpr('textbf', []) |
def handle_symbol_search(self, call_id, payload):
self.log.debug('handle_symbol_search: in %s', Pretty(payload))
syms = payload["syms"]
qfList = []
for sym in syms:
p = sym.get("pos")
if p:
item = self.editor.to_quickfix_item(str(p["file"]),
p["line"],
str(sym["name"]),
"info")
qfList.append(item)
self.editor.write_quickfix_list(qfList, "Symbol Search") | Handler for symbol search results |
def _compute_valid(self):
r
if self._dimension != 2:
raise NotImplementedError("Validity check only implemented in R^2")
poly_sign = None
if self._degree == 1:
first_deriv = self._nodes[:, 1:] - self._nodes[:, :-1]
poly_sign = _SIGN(np.linalg.det(first_deriv))
elif self._degree == 2:
bernstein = _surface_helpers.quadratic_jacobian_polynomial(
self._nodes
)
poly_sign = _surface_helpers.polynomial_sign(bernstein, 2)
elif self._degree == 3:
bernstein = _surface_helpers.cubic_jacobian_polynomial(self._nodes)
poly_sign = _surface_helpers.polynomial_sign(bernstein, 4)
else:
raise _helpers.UnsupportedDegree(self._degree, supported=(1, 2, 3))
return poly_sign == 1 | r"""Determines if the current surface is "valid".
Does this by checking if the Jacobian of the map from the
reference triangle is everywhere positive.
Returns:
bool: Flag indicating if the current surface is valid.
Raises:
NotImplementedError: If the surface is in a dimension other
than :math:`\mathbf{R}^2`.
.UnsupportedDegree: If the degree is not 1, 2 or 3. |
def convert_cmus_output(self, cmus_output):
cmus_output = cmus_output.split('\n')
cmus_output = [x.replace('tag ', '') for x in cmus_output if not x in '']
cmus_output = [x.replace('set ', '') for x in cmus_output]
status = {}
partitioned = (item.partition(' ') for item in cmus_output)
status = {item[0]: item[2] for item in partitioned}
status['duration'] = self.convert_time(status['duration'])
status['position'] = self.convert_time(status['position'])
return status | Change the newline separated string of output data into
a dictionary which can then be used to replace the strings in the config
format.
cmus_output: A string with information about cmus that is newline
seperated. Running cmus-remote -Q in a terminal will show you what
you're dealing with. |
def metadata(self):
resp = self.r_session.get(self.database_url)
resp.raise_for_status()
return response_to_json_dict(resp) | Retrieves the remote database metadata dictionary.
:returns: Dictionary containing database metadata details |
def obfn_fvarf(self):
return self.Xf if self.opt['fEvalX'] else \
sl.rfftn(self.Y, None, self.cri.axisN) | Variable to be evaluated in computing data fidelity term,
depending on 'fEvalX' option value. |
def rhochange(self):
self.lu, self.piv = sl.lu_factor(self.Z, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype) | Re-factorise matrix when rho changes |
def connect_service(service, credentials, region_name = None, config = None, silent = False):
api_client = None
try:
client_params = {}
client_params['service_name'] = service.lower()
session_params = {}
session_params['aws_access_key_id'] = credentials['AccessKeyId']
session_params['aws_secret_access_key'] = credentials['SecretAccessKey']
session_params['aws_session_token'] = credentials['SessionToken']
if region_name:
client_params['region_name'] = region_name
session_params['region_name'] = region_name
if config:
client_params['config'] = config
aws_session = boto3.session.Session(**session_params)
if not silent:
infoMessage = 'Connecting to AWS %s' % service
if region_name:
infoMessage = infoMessage + ' in %s' % region_name
printInfo('%s...' % infoMessage)
api_client = aws_session.client(**client_params)
except Exception as e:
printException(e)
return api_client | Instantiates an AWS API client
:param service:
:param credentials:
:param region_name:
:param config:
:param silent:
:return: |
def seed(vault_client, opt):
if opt.thaw_from:
opt.secrets = tempfile.mkdtemp('aomi-thaw')
auto_thaw(vault_client, opt)
Context.load(get_secretfile(opt), opt) \
.fetch(vault_client) \
.sync(vault_client, opt)
if opt.thaw_from:
rmtree(opt.secrets) | Will provision vault based on the definition within a Secretfile |
def find_anomalies(errors, index, z_range=(0, 10)):
threshold = find_threshold(errors, z_range)
sequences = find_sequences(errors, threshold)
anomalies = list()
denominator = errors.mean() + errors.std()
for start, stop in sequences:
max_error = errors[start:stop + 1].max()
score = (max_error - threshold) / denominator
anomalies.append([index[start], index[stop], score])
return np.asarray(anomalies) | Find sequences of values that are anomalous.
We first find the ideal threshold for the set of errors that we have,
and then find the sequences of values that are above this threshold.
Lastly, we compute a score proportional to the maximum error in the
sequence, and finally return the index pairs that correspond to
each sequence, along with its score. |
def contains_bad_glyph(glyph_data, data):
def check_glyph(char):
for cmap in glyph_data["cmap"].tables:
if cmap.isUnicode():
if char in cmap.cmap:
return True
return False
for part in data:
text = part.get("full_text", "")
try:
text = text.decode("utf8")
except AttributeError:
pass
for char in text:
if not check_glyph(ord(char)):
print(u"%s (%s) missing" % (char, ord(char)))
return True
return False | Pillow only looks for glyphs in the font used so we need to make sure our
font has the glygh. Although we could substitute a glyph from another font
eg symbola but this adds more complexity and is of limited value. |
def set_cols_valign(self, array):
self._check_row_size(array)
self._valign = array
return self | Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell |
def stringize(
self,
rnf_profile=RnfProfile(),
):
sorted_segments = sorted(self.segments,
key=lambda x: (
x.genome_id * (10 ** 23) +
x.chr_id * (10 ** 21) +
(x.left + (int(x.left == 0) * x.right - 1)) * (10 ** 11) +
x.right * (10 ** 1) +
int(x.direction == "F")
)
)
segments_strings = [x.stringize(rnf_profile) for x in sorted_segments]
read_tuple_name = "__".join(
[
self.prefix,
format(self.read_tuple_id, 'x').zfill(rnf_profile.read_tuple_id_width),
",".join(segments_strings),
self.suffix,
]
)
return read_tuple_name | Create RNF representation of this read.
Args:
read_tuple_id_width (int): Maximal expected string length of read tuple ID.
genome_id_width (int): Maximal expected string length of genome ID.
chr_id_width (int): Maximal expected string length of chromosome ID.
coor_width (int): Maximal expected string length of a coordinate. |
def server_receives_binary_from(self, name=None, timeout=None, connection=None, label=None):
server, name = self._servers.get_with_name(name)
msg, ip, port = server.receive_from(timeout=timeout, alias=connection)
self._register_receive(server, label, name, connection=connection)
return msg, ip, port | Receive raw binary message. Returns message, ip, and port.
If server `name` is not given, uses the latest server. Optional message
`label` is shown on logs.
Examples:
| ${binary} | ${ip} | ${port} = | Server receives binary from |
| ${binary} | ${ip} | ${port} = | Server receives binary from | Server1 | connection=my_connection | timeout=5 | |
def pull(self, arm_id, success, failure):
self.__beta_dist_dict[arm_id].observe(success, failure) | Pull arms.
Args:
arm_id: Arms master id.
success: The number of success.
failure: The number of failure. |
def _new_page(self):
self._current_page = Drawing(*self._pagesize)
if self._bgimage:
self._current_page.add(self._bgimage)
self._pages.append(self._current_page)
self.page_count += 1
self._position = [1, 0] | Helper function to start a new page. Not intended for external use. |
def pull_release(self, name, version, destfolder=".", force=False):
unique_id = name.replace('/', '_')
depdict = {
'name': name,
'unique_id': unique_id,
'required_version': version,
'required_version_string': str(version)
}
destdir = os.path.join(destfolder, unique_id)
if os.path.exists(destdir):
if not force:
raise ExternalError("Output directory exists and force was not specified, aborting",
output_directory=destdir)
shutil.rmtree(destdir)
result = self.update_dependency(None, depdict, destdir)
if result != "installed":
raise ArgumentError("Could not find component to satisfy name/version combination") | Download and unpack a released iotile component by name and version range
If the folder that would be created already exists, this command fails unless
you pass force=True
Args:
name (string): The name of the component to download
version (SemanticVersionRange): The valid versions of the component to fetch
destfolder (string): The folder into which to unpack the result, defaults to
the current working directory
force (bool): Forcibly overwrite whatever is currently in the folder that would
be fetched.
Raises:
ExternalError: If the destination folder exists and force is not specified
ArgumentError: If the specified component could not be found with the required version |
def getVersion(data):
data = data.splitlines()
return next((
v
for v, u in zip(data, data[1:])
if len(v) == len(u) and allSame(u) and hasDigit(v) and "." in v
)) | Parse version from changelog written in RST format. |
def consume_socket_output(frames, demux=False):
if demux is False:
return six.binary_type().join(frames)
out = [None, None]
for frame in frames:
assert frame != (None, None)
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1]
return tuple(out) | Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream. |
def _req(self, method="get", verb=None, headers={}, params={}, data={}):
url = self.BASE_URL.format(verb=verb)
request_headers = {"content-type": "application/json"}
request_params = {"api_key": self.API_KEY}
request_headers.update(headers)
request_params.update(params)
return getattr(requests, method)(
url, params=request_params, headers=request_headers, data=data
) | Method to wrap all request building
:return: a Response object based on the specified method and request values. |
def is_org_admin(self, organisation_id):
return (self._has_role(organisation_id, self.roles.administrator) or
self.is_admin()) | Is the user authorized to administrate the organisation |
def decipher_all(decipher, objid, genno, x):
if isinstance(x, str):
return decipher(objid, genno, x)
if isinstance(x, list):
x = [decipher_all(decipher, objid, genno, v) for v in x]
elif isinstance(x, dict):
for (k, v) in x.iteritems():
x[k] = decipher_all(decipher, objid, genno, v)
return x | Recursively deciphers the given object. |
def clear_jobs(self, recursive=True):
if recursive:
for link in self._links.values():
link.clear_jobs(recursive)
self.jobs.clear() | Clear a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link` |
def _match_serializers_by_query_arg(self, serializers):
arg_name = current_app.config.get('REST_MIMETYPE_QUERY_ARG_NAME')
if arg_name:
arg_value = request.args.get(arg_name, None)
if arg_value is None:
return None
try:
return serializers[
self.serializers_query_aliases[arg_value]]
except KeyError:
return None
return None | Match serializer by query arg. |
def copy_user_agent_from_driver(self):
selenium_user_agent = self.driver.execute_script("return navigator.userAgent;")
self.headers.update({"user-agent": selenium_user_agent}) | Updates requests' session user-agent with the driver's user agent
This method will start the browser process if its not already running. |
def install(source,
venv=None,
requirement_files=None,
upgrade=False,
ignore_platform=False,
install_args=''):
requirement_files = requirement_files or []
logger.info('Installing %s', source)
processed_source = get_source(source)
metadata = _get_metadata(processed_source)
def raise_unsupported_platform(machine_platform):
raise WagonError(
'Platform unsupported for wagon ({0})'.format(
machine_platform))
try:
supported_platform = metadata['supported_platform']
if not ignore_platform and supported_platform != ALL_PLATFORMS_TAG:
logger.debug(
'Validating Platform %s is supported...', supported_platform)
machine_platform = get_platform()
if not _is_platform_supported(
supported_platform, machine_platform):
raise_unsupported_platform(machine_platform)
wheels_path = os.path.join(processed_source, DEFAULT_WHEELS_PATH)
install_package(
metadata['package_name'],
wheels_path,
venv,
requirement_files,
upgrade,
install_args)
finally:
if processed_source != source:
shutil.rmtree(os.path.dirname(
processed_source), ignore_errors=True) | Install a Wagon archive.
This can install in a provided `venv` or in the current
virtualenv in case one is currently active.
`upgrade` is merely pip's upgrade.
`ignore_platform` will allow to ignore the platform check, meaning
that if an archive was created for a specific platform (e.g. win32),
and the current platform is different, it will still attempt to
install it.
Platform check will fail on the following:
If not linux and no platform match (e.g. win32 vs. darwin)
If linux and:
architecture doesn't match (e.g. manylinux1_x86_64 vs. linux_i686)
wheel not manylinux and no platform match (linux_x86_64 vs. linux_i686) |
def station(self, station_id, *, num_songs=25, recently_played=None):
station_info = {
'station_id': station_id,
'num_entries': num_songs,
'library_content_only': False
}
if recently_played is not None:
station_info['recently_played'] = recently_played
response = self._call(
mc_calls.RadioStationFeed,
station_infos=[station_info]
)
station_feed = response.body.get('data', {}).get('stations', [])
try:
station = station_feed[0]
except IndexError:
station = {}
return station | Get information about a station.
Parameters:
station_id (str): A station ID. Use 'IFL' for I'm Feeling Lucky.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type'}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
dict: Station information. |
def trace(self, urls=None, **overrides):
if urls is not None:
overrides['urls'] = urls
return self.where(accept='TRACE', **overrides) | Sets the acceptable HTTP method to TRACE |
def rng_annotation(self, stmt, p_elem):
ext = stmt.i_extension
prf, extkw = stmt.raw_keyword
(modname,rev)=stmt.i_module.i_prefixes[prf]
prefix = self.add_namespace(
statements.modulename_to_module(self.module,modname,rev))
eel = SchemaNode(prefix + ":" + extkw, p_elem)
argst = ext.search_one("argument")
if argst:
if argst.search_one("yin-element", "true"):
SchemaNode(prefix + ":" + argst.arg, eel, stmt.arg)
else:
eel.attr[argst.arg] = stmt.arg
self.handle_substmts(stmt, eel) | Append YIN representation of extension statement `stmt`. |
def FilterRange(self, start_time=None, stop_time=None):
start_time = self._NormalizeTime(start_time)
stop_time = self._NormalizeTime(stop_time)
self.data = [
p for p in self.data
if (start_time is None or p[1] >= start_time) and
(stop_time is None or p[1] < stop_time)
] | Filter the series to lie between start_time and stop_time.
Removes all values of the series which are outside of some time range.
Args:
start_time: If set, timestamps before start_time will be dropped.
stop_time: If set, timestamps at or past stop_time will be dropped. |
def quic_graph_lasso(X, num_folds, metric):
print("QuicGraphicalLasso + GridSearchCV with:")
print(" metric: {}".format(metric))
search_grid = {
"lam": np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
"init_method": ["cov"],
"score_metric": [metric],
}
model = GridSearchCV(QuicGraphicalLasso(), search_grid, cv=num_folds, refit=True)
model.fit(X)
bmodel = model.best_estimator_
print(" len(cv_lams): {}".format(len(search_grid["lam"])))
print(" cv-lam: {}".format(model.best_params_["lam"]))
print(" lam_scale_: {}".format(bmodel.lam_scale_))
print(" lam_: {}".format(bmodel.lam_))
return bmodel.covariance_, bmodel.precision_, bmodel.lam_ | Run QuicGraphicalLasso with mode='default' and use standard scikit
GridSearchCV to find the best lambda.
Primarily demonstrates compatibility with existing scikit tooling. |
def load_calibration(labware: Labware):
calibration_path = CONFIG['labware_calibration_offsets_dir_v4']
labware_offset_path = calibration_path/'{}.json'.format(labware._id)
if labware_offset_path.exists():
calibration_data = _read_file(str(labware_offset_path))
offset_array = calibration_data['default']['offset']
offset = Point(x=offset_array[0], y=offset_array[1], z=offset_array[2])
labware.set_calibration(offset)
if 'tipLength' in calibration_data.keys():
tip_length = calibration_data['tipLength']['length']
labware.tip_length = tip_length | Look up a calibration if it exists and apply it to the given labware. |
def _detect_categorical_columns(self,data):
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat | Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical. |
def delete_genelist(list_id, case_id=None):
if case_id:
case_obj = app.db.case(case_id)
app.db.remove_genelist(list_id, case_obj=case_obj)
return redirect(request.referrer)
else:
app.db.remove_genelist(list_id)
return redirect(url_for('.index')) | Delete a whole gene list with links to cases or a link. |
def parse_string(self, xmlstr, initialize=True):
try:
domtree = minidom.parseString(xmlstr)
except xml.parsers.expat.ExpatError, e:
raise ManifestXMLParseError(e)
self.load_dom(domtree, initialize) | Load manifest from XML string |
def get_items_by_id(self, jid, node, ids):
iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.GET)
iq.payload = pubsub_xso.Request(
pubsub_xso.Items(node)
)
iq.payload.payload.items = [
pubsub_xso.Item(id_)
for id_ in ids
]
if not iq.payload.payload.items:
raise ValueError("ids must not be empty")
return (yield from self.client.send(iq)) | Request specific items by their IDs from a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to query.
:type node: :class:`str`
:param ids: The item IDs to return.
:type ids: :class:`~collections.abc.Iterable` of :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The response from the service
:rtype: :class:`.xso.Request`
`ids` must be an iterable of :class:`str` of the IDs of the items to
request from the pubsub node. If the iterable is empty,
:class:`ValueError` is raised (as otherwise, the request would be
identical to calling :meth:`get_items` without `max_items`).
Return the :class:`.xso.Request` object, which has a
:class:`~.xso.Items` :attr:`~.xso.Request.payload`. |
def parse_key_curve(value=None):
if isinstance(value, ec.EllipticCurve):
return value
if value is None:
return ca_settings.CA_DEFAULT_ECC_CURVE
curve = getattr(ec, value.strip(), type)
if not issubclass(curve, ec.EllipticCurve):
raise ValueError('%s: Not a known Eliptic Curve' % value)
return curve() | Parse an elliptic curve value.
This function uses a value identifying an elliptic curve to return an
:py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance. The name must match a
class name of one of the classes named under "Elliptic Curves" in
:any:`cg:hazmat/primitives/asymmetric/ec`.
For convenience, passing ``None`` will return the value of :ref:`CA_DEFAULT_ECC_CURVE
<settings-ca-default-ecc-curve>`, and passing an
:py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` will return that instance
unchanged.
Example usage::
>>> parse_key_curve('SECP256R1') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...>
>>> parse_key_curve('SECP384R1') # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.asymmetric.ec.SECP384R1 object at ...>
>>> parse_key_curve(ec.SECP256R1()) # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...>
>>> parse_key_curve() # doctest: +ELLIPSIS
<cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...>
Parameters
----------
value : str, otional
The name of the curve or ``None`` to return the default curve.
Returns
-------
curve
An :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance.
Raises
------
ValueError
If the named curve is not supported. |
def _get_snapshot(name, suffix, array):
snapshot = name + '.' + suffix
try:
for snap in array.get_volume(name, snap=True):
if snap['name'] == snapshot:
return snapshot
except purestorage.PureError:
return None | Private function to check snapshot |
def corr_dw_v1(self):
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
old = self.sequences.states.fastaccess_old
new = self.sequences.states.fastaccess_new
idx = der.toy[self.idx_sim]
if (con.maxdw[idx] > 0.) and ((old.w-new.w) > con.maxdw[idx]):
new.w = old.w-con.maxdw[idx]
self.interp_v()
flu.qa = flu.qz+(old.v-new.v)/der.seconds | Adjust the water stage drop to the highest value allowed and correct
the associated fluxes.
Note that method |corr_dw_v1| calls the method `interp_v` of the
respective application model. Hence the requirements of the actual
`interp_v` need to be considered additionally.
Required control parameter:
|MaxDW|
Required derived parameters:
|llake_derived.TOY|
|Seconds|
Required flux sequence:
|QZ|
Updated flux sequence:
|llake_fluxes.QA|
Updated state sequences:
|llake_states.W|
|llake_states.V|
Basic Restriction:
:math:`W_{old} - W_{new} \\leq MaxDW`
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> derived.toy.update()
>>> derived.seconds.update()
Select the first half of the second day of January as the simulation
step relevant for the following examples:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
The following tests are based on method |interp_v_v1| for the
interpolation of the stored water volume based on the corrected
water stage:
>>> model.interp_v = model.interp_v_v1
For the sake of simplicity, the underlying `w`-`v` relationship is
assumed to be linear:
>>> n(2.)
>>> w(0., 1.)
>>> v(0., 1e6)
The maximum drop in water stage for the first half of the second
day of January is set to 0.4 m/d. Note that, due to the difference
between the parameter step size and the simulation step size, the
actual value used for calculation is 0.2 m/12h:
>>> maxdw(_1_1_18=.1,
... _1_2_6=.4,
... _1_2_18=.1)
>>> maxdw
maxdw(toy_1_1_18_0_0=0.1,
toy_1_2_6_0_0=0.4,
toy_1_2_18_0_0=0.1)
>>> from hydpy import round_
>>> round_(maxdw.value[2])
0.2
Define old and new water stages and volumes in agreement with the
given linear relationship:
>>> states.w.old = 1.
>>> states.v.old = 1e6
>>> states.w.new = .9
>>> states.v.new = 9e5
Also define an inflow and an outflow value. Note the that the latter
is set to zero, which is inconsistent with the actual water stage drop
defined above, but done for didactic reasons:
>>> fluxes.qz = 1.
>>> fluxes.qa = 0.
Calling the |corr_dw_v1| method does not change the values of
either of following sequences, as the actual drop (0.1 m/12h) is
smaller than the allowed drop (0.2 m/12h):
>>> model.corr_dw_v1()
>>> states.w
w(0.9)
>>> states.v
v(900000.0)
>>> fluxes.qa
qa(0.0)
Note that the values given above are not recalculated, which can
clearly be seen for the lake outflow, which is still zero.
Through setting the new value of the water stage to 0.6 m, the actual
drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h). Hence the
water stage is trimmed and the other values are recalculated:
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.8)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
Through setting the maximum water stage drop to zero, method
|corr_dw_v1| is effectively disabled. Regardless of the actual
change in water stage, no trimming or recalculating is performed:
>>> maxdw.toy_01_02_06 = 0.
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.6)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963) |
def get_initial_centroids(self):
if self.seed is not None:
np.random.seed(self.seed)
n = self.data.shape[0]
rand_indices = np.random.randint(0, n, self.k)
centroids = self.data[rand_indices,:].toarray()
self.centroids=centroids
return centroids | Randomly choose k data points as initial centroids |
def bind(self, func: Callable[[Any], 'Writer']) -> 'Writer':
a, w = self.run()
b, w_ = func(a).run()
if isinstance(w_, Monoid):
w__ = cast(Monoid, w).append(w_)
else:
w__ = w + w_
return Writer(b, w__) | Flat is better than nested.
Haskell:
(Writer (x, v)) >>= f = let
(Writer (y, v')) = f x in Writer (y, v `append` v') |
def _reset (self):
self.entries = []
self.default_entry = None
self.disallow_all = False
self.allow_all = False
self.last_checked = 0
self.sitemap_urls = [] | Reset internal flags and entry lists. |
def cosine_similarity(evaluated_model, reference_model):
if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)):
raise ValueError(
"Arguments has to be instances of 'sumy.models.TfDocumentModel'")
terms = frozenset(evaluated_model.terms) | frozenset(reference_model.terms)
numerator = 0.0
for term in terms:
numerator += evaluated_model.term_frequency(term) * reference_model.term_frequency(term)
denominator = evaluated_model.magnitude * reference_model.magnitude
if denominator == 0.0:
raise ValueError("Document model can't be empty. Given %r & %r" % (
evaluated_model, reference_model))
return numerator / denominator | Computes cosine similarity of two text documents. Each document
has to be represented as TF model of non-empty document.
:returns float:
0 <= cos <= 1, where 0 means independence and 1 means
exactly the same. |
def make_regression(func, n_samples=100, n_features=1, bias=0.0, noise=0.0,
random_state=None):
generator = check_random_state(random_state)
X = generator.randn(n_samples, n_features)
y = func(*X.T) + bias
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
return X, y | Make dataset for a regression problem.
Examples
--------
>>> f = lambda x: 0.5*x + np.sin(2*x)
>>> X, y = make_regression(f, bias=.5, noise=1., random_state=1)
>>> X.shape
(100, 1)
>>> y.shape
(100,)
>>> X[:5].round(2)
array([[ 1.62],
[-0.61],
[-0.53],
[-1.07],
[ 0.87]])
>>> y[:5].round(2)
array([ 0.76, 0.48, -0.23, -0.28, 0.83]) |
def download(url, target_file, chunk_size=4096):
r = requests.get(url, stream=True)
with open(target_file, 'w+') as out:
with click.progressbar(r.iter_content(chunk_size=chunk_size),
int(r.headers['Content-Length'])/chunk_size,
label='Downloading...') as chunks:
for chunk in chunks:
out.write(chunk) | Simple requests downloader |
def numlistbetween(num1, num2, option='list', listoption='string'):
if option == 'list':
if listoption == 'string':
output = ''
output += str(num1)
for currentnum in range(num1 + 1, num2 + 1):
output += ','
output += str(currentnum)
elif listoption == 'list':
output = []
for currentnum in range(num1, num2 + 1):
output.append(str(currentnum))
return output
elif option == 'count':
return num2 - num1 | List Or Count The Numbers Between Two Numbers |
def diffusion_mds(means, weights, d, diffusion_rounds=10):
for i in range(diffusion_rounds):
weights = weights*weights
weights = weights/weights.sum(0)
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells) |
def pkdecrypt(self, conn):
for msg in [b'S INQUIRE_MAXLEN 4096', b'INQUIRE CIPHERTEXT']:
keyring.sendline(conn, msg)
line = keyring.recvline(conn)
assert keyring.recvline(conn) == b'END'
remote_pubkey = parse_ecdh(line)
identity = self.get_identity(keygrip=self.keygrip)
ec_point = self.client.ecdh(identity=identity, pubkey=remote_pubkey)
keyring.sendline(conn, b'D ' + _serialize_point(ec_point)) | Handle decryption using ECDH. |
def _get_go2nt(self, goids):
go2nt_all = self.grprobj.go2nt
return {go:go2nt_all[go] for go in goids} | Get go2nt for given goids. |
def import_obj(clsname, default_module=None):
if default_module is not None:
if not clsname.startswith(default_module + '.'):
clsname = '{0}.{1}'.format(default_module, clsname)
mod, clsname = clsname.rsplit('.', 1)
mod = importlib.import_module(mod)
try:
obj = getattr(mod, clsname)
except AttributeError:
raise ImportError('Cannot import {0} from {1}'.format(clsname, mod))
return obj | Import the object given by clsname.
If default_module is specified, import from this module. |
def schema_from_table(table, schema=None):
schema = schema if schema is not None else {}
pairs = []
for name, column in table.columns.items():
if name in schema:
dtype = dt.dtype(schema[name])
else:
dtype = dt.dtype(
getattr(table.bind, 'dialect', SQLAlchemyDialect()),
column.type,
nullable=column.nullable,
)
pairs.append((name, dtype))
return sch.schema(pairs) | Retrieve an ibis schema from a SQLAlchemy ``Table``.
Parameters
----------
table : sa.Table
Returns
-------
schema : ibis.expr.datatypes.Schema
An ibis schema corresponding to the types of the columns in `table`. |
def _record(self):
return struct.pack(self.FMT, 1, self.platform_id, 0, self.id_string,
self.checksum, 0x55, 0xaa) | An internal method to generate a string representing this El Torito
Validation Entry.
Parameters:
None.
Returns:
String representing this El Torito Validation Entry. |
def _get_retrier(self, receiver: Address) -> _RetryQueue:
if receiver not in self._address_to_retrier:
retrier = _RetryQueue(transport=self, receiver=receiver)
self._address_to_retrier[receiver] = retrier
retrier.start()
return self._address_to_retrier[receiver] | Construct and return a _RetryQueue for receiver |
def _limited_iterator(self):
i = 0
while True:
for crash_id in self._basic_iterator():
if self._filter_disallowed_values(crash_id):
continue
if crash_id is None:
yield crash_id
continue
if i == int(self.config.number_of_submissions):
break
i += 1
yield crash_id
if i == int(self.config.number_of_submissions):
break | this is the iterator for the case when "number_of_submissions" is
set to an integer. It goes through the innermost iterator exactly the
number of times specified by "number_of_submissions" To do that, it
might run the innermost iterator to exhaustion. If that happens, that
innermost iterator is called again to start over. It is up to the
implementation of the innermost iteration to define what starting
over means. Some iterators may repeat exactly what they did before,
while others may iterate over new values |
def get_hash(self):
depencency_hashes = [dep.get_hash() for dep in self.dep()]
sl = inspect.getsourcelines
hash_sources = [sl(self.__class__), self.args,
self.kwargs, *depencency_hashes]
hash_input = pickle.dumps(hash_sources)
return hashlib.md5(hash_input).hexdigest() | Retruns a hash based on the the current table code and kwargs.
Also changes based on dependent tables. |
def _serialize_items(self, serializer, kind, items):
if self.request and self.request.query_params.get('hydrate_{}'.format(kind), False):
serializer = serializer(items, many=True, read_only=True)
serializer.bind(kind, self)
return serializer.data
else:
return [item.id for item in items] | Return serialized items or list of ids, depending on `hydrate_XXX` query param. |
def render_configuration(self, configuration=None):
if configuration is None:
configuration = self.environment
if isinstance(configuration, dict):
return {k: self.render_configuration(v) for k, v in configuration.items()}
elif isinstance(configuration, list):
return [self.render_configuration(x) for x in configuration]
elif isinstance(configuration, Variable):
return configuration.resolve(self.parameters)
else:
return configuration | Render variables in configuration object but don't instantiate anything |
def packages(self, login=None, platform=None, package_type=None,
type_=None, access=None):
logger.debug('')
method = self._anaconda_client_api.user_packages
return self._create_worker(method, login=login, platform=platform,
package_type=package_type,
type_=type_, access=access) | Return all the available packages for a given user.
Parameters
----------
type_: Optional[str]
Only find packages that have this conda `type`, (i.e. 'app').
access : Optional[str]
Only find packages that have this access level (e.g. 'private',
'authenticated', 'public'). |
def check_errors(self, response, data):
if "error_id" in data:
error_id = data["error_id"]
if error_id in self.error_ids:
raise self.error_ids[error_id](response)
if "error_code" in data:
error_code = data["error_code"]
if error_code in self.error_codes:
raise self.error_codes[error_code](response)
if "error_code" in data or "error_id" in data:
raise AppNexusException(response) | Check for errors and raise an appropriate error if needed |
def update(self):
from .link import Link
diff = self.diff()
status = {
'added': 'active',
'removed': 'disconnected',
'changed': 'active'
}
for section in ['added', 'removed', 'changed']:
if not diff[section]:
continue
for link_dict in diff[section]['links']:
try:
link = Link.get_or_create(source=link_dict['source'],
target=link_dict['target'],
cost=link_dict['cost'],
topology=self)
except (LinkDataNotFound, ValidationError) as e:
msg = 'Exception while updating {0}'.format(self.__repr__())
logger.exception(msg)
print('{0}\n{1}\n'.format(msg, e))
continue
link.ensure(status=status[section],
cost=link_dict['cost']) | Updates topology
Links are not deleted straightaway but set as "disconnected" |
def auto_detect(self, args):
suffixes = [
".tgz",
".txz",
".tbz",
".tlz"
]
if (not args[0].startswith("-") and args[0] not in self.commands and
args[0].endswith(tuple(suffixes))):
packages, not_found = [], []
for pkg in args:
if pkg.endswith(tuple(suffixes)):
if os.path.isfile(pkg):
packages.append(pkg)
else:
not_found.append(pkg)
if packages:
Auto(packages).select()
if not_found:
for ntf in not_found:
self.msg.pkg_not_found("", ntf, "Not installed", "")
raise SystemExit() | Check for already Slackware binary packages exist |
def find_spec(self, fullname, path, target=None):
if fullname.startswith(self.package_prefix):
for path in self._get_paths(fullname):
if os.path.exists(path):
return ModuleSpec(
name=fullname,
loader=self.loader_class(fullname, path),
origin=path,
is_package=(path.endswith('__init__.ipynb') or path.endswith('__init__.py')),
) | Claims modules that are under ipynb.fs |
def parse_numtuple(s,intype,length=2,scale=1):
if intype == int:
numrx = intrx_s;
elif intype == float:
numrx = fltrx_s;
else:
raise NotImplementedError("Not implemented for type: {}".format(
intype));
if parse_utuple(s, numrx, length=length) is None:
raise ValueError("{} is not a valid number tuple.".format(s));
return [x*scale for x in evalt(s)]; | parse a string into a list of numbers of a type |
def get_choices(self):
if 'choiceInfo' not in self.dto[self.name]:
raise GPException('not a choice parameter')
if self.get_choice_status()[1] == "NOT_INITIALIZED":
print(self.get_choice_status())
print("choice status not initialized")
request = urllib.request.Request(self.get_choice_href())
if self.task.server_data.authorization_header() is not None:
request.add_header('Authorization', self.task.server_data.authorization_header())
request.add_header('User-Agent', 'GenePatternRest')
response = urllib.request.urlopen(request)
self.dto[self.name]['choiceInfo'] = json.loads(response.read().decode('utf-8'))
return self.dto[self.name]['choiceInfo']['choices'] | Returns a list of dictionary objects, one dictionary object per choice.
Each object has two keys defined: 'value', 'label'.
The 'label' entry is what should be displayed on the UI, the 'value' entry
is what is written into GPJobSpec. |
def run_parallel(self):
try:
self.start_parallel()
result = self.empty_result(*self.context)
while self.num_processes > 0:
r = self.result_queue.get()
self.maybe_put_task()
if r is POISON_PILL:
self.num_processes -= 1
elif isinstance(r, ExceptionWrapper):
r.reraise()
else:
result = self.process_result(r, result)
self.progress.update(1)
if self.done:
self.complete.set()
self.finish_parallel()
except Exception:
raise
finally:
log.debug('Removing progress bar')
self.progress.close()
return result | Perform the computation in parallel, reading results from the output
queue and passing them to ``process_result``. |
def parent(self):
family = self.repository.get_parent_package_family(self.resource)
return PackageFamily(family) if family else None | Get the parent package family.
Returns:
`PackageFamily`. |
def _get_application(self, subdomain):
with self.lock:
app = self.instances.get(subdomain)
if app is None:
app = self.create_application(subdomain=subdomain)
self.instances[subdomain] = app
return app | Return a WSGI application for subdomain. The subdomain is
passed to the create_application constructor as a keyword argument.
:param subdomain: Subdomain to get or create an application with |
def add_ui(self, klass, *args, **kwargs):
ui = klass(self.widget, *args, **kwargs)
self.widget.uis.append(ui)
return ui | Add an UI element for the current scene. The approach is
the same as renderers.
.. warning:: The UI api is not yet finalized |
def i2c_monitor_read(self):
data = array.array('H', (0,) * self.BUFFER_SIZE)
ret = api.py_aa_i2c_monitor_read(self.handle, self.BUFFER_SIZE,
data)
_raise_error_if_negative(ret)
del data[ret:]
return data.tolist() | Retrieved any data fetched by the monitor.
This function has an integrated timeout mechanism. You should use
:func:`poll` to determine if there is any data available.
Returns a list of data bytes and special symbols. There are three
special symbols: `I2C_MONITOR_NACK`, I2C_MONITOR_START and
I2C_MONITOR_STOP. |
def command_exists(command, noop_invocation, exc_msg):
try:
found = bool(shutil.which(command))
except AttributeError:
try:
p = subprocess.Popen(noop_invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
found = False
else:
stdout, stderr = p.communicate()
found = p.returncode == 0
if not found:
logger.error("`%s` exited with a non-zero return code (%s)",
noop_invocation, p.returncode)
logger.error("command stdout = %s", stdout)
logger.error("command stderr = %s", stderr)
if not found:
raise CommandDoesNotExistException(exc_msg)
return True | Verify that the provided command exists. Raise CommandDoesNotExistException in case of an
error or if the command does not exist.
:param command: str, command to check (python 3 only)
:param noop_invocation: list of str, command to check (python 2 only)
:param exc_msg: str, message of exception when command does not exist
:return: bool, True if everything's all right (otherwise exception is thrown) |
def createLinkToSelf(self, new_zone, callback=None, errback=None,
**kwargs):
zone = Zone(self.config, new_zone)
kwargs['link'] = self.data['zone']
return zone.create(callback=callback, errback=errback, **kwargs) | Create a new linked zone, linking to ourselves. All records in this
zone will then be available as "linked records" in the new zone.
:param str new_zone: the new zone name to link to this one
:return: new Zone |
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed | Strip underscores to make sure the number is correct after join |
def linkify(self, timeperiods):
new_exclude = []
if hasattr(self, 'exclude') and self.exclude != []:
logger.debug("[timeentry::%s] have excluded %s", self.get_name(), self.exclude)
excluded_tps = self.exclude
for tp_name in excluded_tps:
timepriod = timeperiods.find_by_name(tp_name.strip())
if timepriod is not None:
new_exclude.append(timepriod.uuid)
else:
msg = "[timeentry::%s] unknown %s timeperiod" % (self.get_name(), tp_name)
self.add_error(msg)
self.exclude = new_exclude | Will make timeperiod in exclude with id of the timeperiods
:param timeperiods: Timeperiods object
:type timeperiods:
:return: None |
def is_read_only(cls,
db: DATABASE_SUPPORTER_FWD_REF,
logger: logging.Logger = None) -> bool:
def convert_enums(row_):
return [True if x == 'Y' else (False if x == 'N' else None)
for x in row_]
try:
sql =
rows = db.fetchall(sql)
for row in rows:
dbname = row[0]
prohibited = convert_enums(row[1:])
if any(prohibited):
if logger:
logger.debug(
"MySQL.is_read_only(): FAIL: database privileges "
"wrong: dbname={}, prohibited={}".format(
dbname, prohibited
)
)
return False
except mysql.OperationalError:
pass
try:
sql =
rows = db.fetchall(sql)
if not rows or len(rows) > 1:
return False
prohibited = convert_enums(rows[0])
if any(prohibited):
if logger:
logger.debug(
"MySQL.is_read_only(): FAIL: GLOBAL privileges "
"wrong: prohibited={}".format(prohibited))
return False
except mysql.OperationalError:
pass
return True | Do we have read-only access? |
def get_disk_quota(username, machine_name=None):
try:
ua = Account.objects.get(
username=username,
date_deleted__isnull=True)
except Account.DoesNotExist:
return 'Account not found'
result = ua.get_disk_quota()
if result is None:
return False
return result * 1048576 | Returns disk quota for username in KB |
def get_file_name(url):
return os.path.basename(urllib.parse.urlparse(url).path) or 'unknown_name' | Returns file name of file at given url. |
def ls(self):
if self.isfile():
raise NotDirectoryError('Cannot ls() on non-directory node: {path}'.format(path=self._pyerarchy_path))
return os.listdir(self._pyerarchy_path) | List the children entities of the directory.
Raises exception if the object is a file.
:return: |
def set_all_attribute_values(self, value):
for attribute_name, type_instance in inspect.getmembers(self):
if attribute_name.startswith('__') or inspect.ismethod(type_instance):
continue
if isinstance(type_instance, bool):
self.__dict__[attribute_name] = value
elif isinstance(type_instance, self.__class__):
type_instance.set_all_attribute_values(value) | sets all the attribute values to the value and propagate to any children |
def query_unbound_ong(self, base58_address: str) -> int:
contract_address = self.get_asset_address('ont')
unbound_ong = self.__sdk.rpc.get_allowance("ong", Address(contract_address).b58encode(), base58_address)
return int(unbound_ong) | This interface is used to query the amount of account's unbound ong.
:param base58_address: a base58 encode address which indicate which account's unbound ong we want to query.
:return: the amount of unbound ong in the form of int. |
def parse_networking_file():
pairs = dict()
allocated_subnets = []
try:
with open(VMWARE_NETWORKING_FILE, "r", encoding="utf-8") as f:
version = f.readline()
for line in f.read().splitlines():
try:
_, key, value = line.split(' ', 3)
key = key.strip()
value = value.strip()
pairs[key] = value
if key.endswith("HOSTONLY_SUBNET"):
allocated_subnets.append(value)
except ValueError:
raise SystemExit("Error while parsing {}".format(VMWARE_NETWORKING_FILE))
except OSError as e:
raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
return version, pairs, allocated_subnets | Parse the VMware networking file. |
def pprint_tree_differences(self, missing_pys, missing_docs):
if missing_pys:
print('The following Python files appear to be missing:')
for pyfile in missing_pys:
print(pyfile)
print('\n')
if missing_docs:
print('The following documentation files appear to be missing:')
for docfiile in missing_docs:
print(docfiile)
print('\n') | Pprint the missing files of each given set.
:param set missing_pys: The set of missing Python files.
:param set missing_docs: The set of missing documentation files.
:rtype: None |
def circular_gaussian_kernel(sd,radius):
i,j = np.mgrid[-radius:radius+1,-radius:radius+1].astype(float) / radius
mask = i**2 + j**2 <= 1
i = i * radius / sd
j = j * radius / sd
kernel = np.zeros((2*radius+1,2*radius+1))
kernel[mask] = np.e ** (-(i[mask]**2+j[mask]**2) /
(2 * sd **2))
kernel = kernel / np.sum(kernel)
return kernel | Create a 2-d Gaussian convolution kernel
sd - standard deviation of the gaussian in pixels
radius - build a circular kernel that convolves all points in the circle
bounded by this radius |
def get_edge_schema_element_or_raise(self, edge_classname):
schema_element = self.get_element_by_class_name_or_raise(edge_classname)
if not schema_element.is_edge:
raise InvalidClassError(u'Non-edge class provided: {}'.format(edge_classname))
return schema_element | Return the schema element with the given name, asserting that it's of edge type. |
def _write(self, text):
spaces = ' ' * (self.indent * self.indentlevel)
t = spaces + text.strip() + '\n'
if hasattr(t, 'encode'):
t = t.encode(self.encoding, 'xmlcharrefreplace')
self.stream.write(t) | Write text by respecting the current indentlevel |
def source_lines(self, filename):
with self.filesystem.open(filename) as f:
return f.readlines() | Return a list for source lines of file `filename`. |
def label_const(self, const:Any=0, label_cls:Callable=None, **kwargs)->'LabelList':
"Label every item with `const`."
return self.label_from_func(func=lambda o: const, label_cls=label_cls, **kwargs) | Label every item with `const`. |
def calc_avr_uvr_v1(self):
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= (con.hm+der.hv[i]):
flu.avr[i] = 0.
flu.uvr[i] = 0.
else:
flu.avr[i] = (flu.h-(con.hm+der.hv[i]))**2*con.bnvr[i]/2.
flu.uvr[i] = (flu.h-(con.hm+der.hv[i]))*(1.+con.bnvr[i]**2)**.5 | Calculate the flown through area and the wetted perimeter of both
outer embankments.
Note that each outer embankment lies beyond its foreland and that all
water flowing exactly above the a embankment is added to |AVR|.
The theoretical surface seperating water above the foreland from water
above its embankment is not contributing to |UVR|.
Required control parameters:
|HM|
|BNVR|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AVR|
|UVR|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
hava a length of 2 meters, their non-vertical legs show an inclination
of 1 meter per 4 meters, and their height (depths) is 1 meter. Both
forelands lie 1 meter above the main channels bottom.
Generally, a triangles are assumed, with the vertical side
seperating the foreland from its outer embankment. Here, for
simplicity, both forelands are assumed to be symmetrical. Their
inclinations are 1 meter per 4 meters and their lowest point is
1 meter above the forelands bottom and 2 meters above the main
channels bottom:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bnvr(4.0)
>>> derived.hv(1.0)
The first example deals with moderate high flow conditions, where
water flows over the forelands, but not over their outer embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.0, 0.0)
>>> fluxes.uvr
uvr(0.0, 0.0)
The second example deals with extreme high flow conditions, where
water flows over the both foreland and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.5, 0.5)
>>> fluxes.uvr
uvr(2.061553, 2.061553) |
def is_user_valid(self, userID):
cur = self.conn.cursor()
cur.execute('SELECT * FROM users WHERE id=? LIMIT 1', [userID])
results = cur.fetchall()
cur.close()
return len(results) > 0 | Check if this User ID is valid. |
def pre_validate(self, form):
for preprocessor in self._preprocessors:
preprocessor(form, self)
super(FieldHelper, self).pre_validate(form) | Calls preprocessors before pre_validation |
def identify(file_elements):
if not file_elements:
return
_validate_file_elements(file_elements)
iterator = PeekableIterator((element_i, element) for (element_i, element) in enumerate(file_elements)
if element.type != elements.TYPE_METADATA)
try:
_, first_element = iterator.peek()
if isinstance(first_element, TableElement):
iterator.next()
yield AnonymousTable(first_element)
except KeyError:
pass
except StopIteration:
return
for element_i, element in iterator:
if not isinstance(element, TableHeaderElement):
continue
if not element.is_array_of_tables:
table_element_i, table_element = next(iterator)
yield Table(names=element.names, table_element=table_element)
else:
table_element_i, table_element = next(iterator)
yield ArrayOfTables(names=element.names, table_element=table_element) | Outputs an ordered sequence of instances of TopLevel types.
Elements start with an optional TableElement, followed by zero or more pairs of (TableHeaderElement, TableElement). |
def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'):
if not path_specification:
return None
file_entry = self._file_system.GetFileEntryByPathSpec(path_specification)
if file_entry is None:
return None
file_object = file_entry.GetFileObject()
if file_object is None:
return None
registry_file = dfwinreg_regf.REGFWinRegistryFile(
ascii_codepage=ascii_codepage)
try:
registry_file.Open(file_object)
except IOError as exception:
logger.warning(
'Unable to open Windows Registry file with error: {0!s}'.format(
exception))
file_object.close()
return None
return registry_file | Opens the Windows Registry file specified by the path specification.
Args:
path_specification (dfvfs.PathSpec): path specification.
ascii_codepage (Optional[str]): ASCII string codepage.
Returns:
WinRegistryFile: Windows Registry file or None. |
def get_help_msg(self,
dotspace_ending=False,
**kwargs):
context = self.get_context_for_help_msgs(kwargs)
if self.help_msg is not None and len(self.help_msg) > 0:
context = copy(context)
try:
help_msg = self.help_msg
variables = re.findall("{\S+}", help_msg)
for v in set(variables):
v = v[1:-1]
if v in context and len(str(context[v])) > self.__max_str_length_displayed__:
new_name = '@@@@' + v + '@@@@'
help_msg = help_msg.replace('{' + v + '}', '{' + new_name + '}')
context[new_name] = "(too big for display)"
help_msg = help_msg.format(**context)
except KeyError as e:
raise HelpMsgFormattingException(self.help_msg, e, context)
if dotspace_ending:
return end_with_dot_space(help_msg)
else:
return help_msg
else:
return '' | The method used to get the formatted help message according to kwargs. By default it returns the 'help_msg'
attribute, whether it is defined at the instance level or at the class level.
The help message is formatted according to help_msg.format(**kwargs), and may be terminated with a dot
and a space if dotspace_ending is set to True.
:param dotspace_ending: True will append a dot and a space at the end of the message if it is not
empty (default is False)
:param kwargs: keyword arguments to format the help message
:return: the formatted help message |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.