code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def details(self):
    """Returns details for given category.

    :returns: category response object

    Example Usage::

        >>> import muddle
        >>> muddle.category(10).details()
    """
    # Moodle web-service call filtered to this category's id.
    params = {'wsfunction': 'core_course_get_categories',
              'criteria[0][key]': 'id',
              'criteria[0][value]': self.category_id}
    params.update(self.request_params)
    # NOTE(review): verify=False disables TLS certificate verification --
    # confirm this is intentional before use against untrusted hosts.
    return requests.post(self.api_url, params=params, verify=False)
def next(self):
    """Handles the iteration by pulling the next line out of the stream,
    attempting to convert the response to JSON if necessary.

    :returns: Data representing what was seen in the feed
    """
    while True:
        # Lazily start the stream on first use.
        if not self._resp:
            self._start()
        if self._stop:
            raise StopIteration
        # next_ is presumably a py2/py3-compatible alias of builtin next
        # -- TODO confirm against the module imports.
        skip, data = self._process_data(next_(self._lines))
        # Lines flagged as skippable are consumed silently.
        if not skip:
            break
    return data
def add_input(self, name, value=None):
    """Create a new input variable called ``name`` for this process and
    initialize it with ``value``.

    The quantity is then accessible both as a process attribute
    (``proc.name``) and through the tracked input list; use the attribute
    to set values, e.g. ``proc.name = value``.

    :param str name: name of diagnostic quantity to be initialized
    :param value: initial value for the quantity [default: None]
    """
    self._input_vars.append(name)
    setattr(self, name, value)
def _load_wurlitzer(self):
    """Load wurlitzer extension (skipped on Windows)."""
    if not os.name == 'nt':
        from IPython.core.getipython import get_ipython
        try:
            get_ipython().run_line_magic('reload_ext', 'wurlitzer')
        except Exception:
            # Best effort: a missing extension or absent IPython kernel
            # is not an error worth surfacing.
            pass
def create_model(self,
                 base_model_id,
                 forced_glossary=None,
                 parallel_corpus=None,
                 name=None,
                 **kwargs):
    """Create model.

    Uploads Translation Memory eXchange (TMX) files to customize a
    translation model (forced glossary and/or parallel corpus).

    :param str base_model_id: model ID to use as the customization base.
    :param file forced_glossary: TMX file with glossary customizations.
    :param file parallel_corpus: TMX file with parallel sentences.
    :param str name: optional model name.
    :param dict headers: request headers (passed via ``kwargs``).
    :return: A `DetailedResponse` containing result, headers and status.
    :rtype: DetailedResponse
    """
    if base_model_id is None:
        raise ValueError('base_model_id must be provided')
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    # SDK analytics headers identifying service, version and operation.
    sdk_headers = get_sdk_headers('language_translator', 'V3',
                                  'create_model')
    headers.update(sdk_headers)
    params = {
        'version': self.version,
        'base_model_id': base_model_id,
        'name': name
    }
    # Multipart tuples are (filename, content, content-type); filename is
    # None so the form field name is used.
    form_data = {}
    if forced_glossary:
        form_data['forced_glossary'] = (None, forced_glossary,
                                        'application/octet-stream')
    if parallel_corpus:
        form_data['parallel_corpus'] = (None, parallel_corpus,
                                        'application/octet-stream')
    url = '/v3/models'
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        files=form_data,
        accept_json=True)
    return response
Uploads Translation Memory eXchange (TMX) files to customize a translation model.
You can either customize a model with a forced glossary or with a corpus that
contains parallel sentences. To create a model that is customized with a parallel
corpus <b>and</b> a forced glossary, proceed in two steps: customize with a
parallel corpus first and then customize the resulting model with a glossary.
Depending on the type of customization and the size of the uploaded corpora,
training can range from minutes for a glossary to several hours for a large
parallel corpus. You can upload a single forced glossary file and this file must
be less than <b>10 MB</b>. You can upload multiple parallel corpora tmx files. The
cumulative file size of all uploaded files is limited to <b>250 MB</b>. To
successfully train with a parallel corpus you must have at least <b>5,000 parallel
sentences</b> in your corpus.
You can have a <b>maxium of 10 custom models per language pair</b>.
:param str base_model_id: The model ID of the model to use as the base for
customization. To see available models, use the `List models` method. Usually all
IBM provided models are customizable. In addition, all your models that have been
created via parallel corpus customization, can be further customized with a forced
glossary.
:param file forced_glossary: A TMX file with your customizations. The
customizations in the file completely overwrite the domain translaton data,
including high frequency or high confidence phrase translations. You can upload
only one glossary with a file size less than 10 MB per call. A forced glossary
should contain single words or short phrases.
:param file parallel_corpus: A TMX file with parallel sentences for source and
target language. You can upload multiple parallel_corpus files in one request. All
uploaded parallel_corpus files combined, your parallel corpus must contain at
least 5,000 parallel sentences to train successfully.
:param str name: An optional model name that you can use to identify the model.
Valid characters are letters, numbers, dashes, underscores, spaces and
apostrophes. The maximum length is 32 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse |
def annotation_id(self, value):
    """Set the ID for this Annotation, rejecting None/empty/blank values."""
    if value is None or value == '' or not str(value).strip():
        raise AttributeError("Invalid ID value supplied")
    self._id = value
def render_booleanfield(field, attrs):
    """Render BooleanField with label next to instead of above."""
    attrs.setdefault("_no_label", True)
    attrs.setdefault("_inline", True)
    # NOTE(review): "display:hidden" is not a valid CSS value ("display:none"
    # or "visibility:hidden" would be) -- confirm the intended effect.
    field.field.widget.attrs["style"] = "display:hidden"
    return wrappers.CHECKBOX_WRAPPER % {
        "style": pad(attrs.get("_style", "")),
        "field": field,
        # Label text is escaped via format_html; mark_safe preserves any
        # intentional HTML already present in field.label.
        "label": format_html(
            wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
        )
    }
def dump(self, script, file=None):
    """Write a compressed representation of script to the Pickler's file object."""
    target = self._file if file is None else file
    self._dump(script, target, self._protocol, self._version)
def set_dimensional_calibrations(self, dimensional_calibrations: typing.List[CalibrationModule.Calibration]) -> None:
    """Set the dimensional calibrations.

    :param dimensional_calibrations: A list of calibrations, must match
        the dimensions of the data.

    .. versionadded:: 1.0
    Scriptable: Yes
    """
    # Thin delegation to the wrapped data item.
    self.__data_item.set_dimensional_calibrations(dimensional_calibrations)
def get_managed_zone(self, zone):
    """Build the GDNS managed zone name for a DNS zone.

    Forward zones drop the trailing dot and join labels with dashes;
    reverse (in-addr.arpa) zones keep only the two most significant octets.
    """
    labels = zone.split('.')
    if zone.endswith('.in-addr.arpa.'):
        # The two most significant octets sit just before 'in-addr.arpa.'.
        return self.reverse_prefix + '-'.join(labels[-5:-3])
    return self.forward_prefix + '-'.join(labels[:-1])
Google uses custom string names with specific `requirements
<https://cloud.google.com/dns/api/v1/managedZones#resource>`_
for storing records. The scheme implemented here chooses a
managed zone name which removes the trailing dot and replaces
other dots with dashes, and in the case of reverse records,
uses only the two most significant octets, prepended with
'reverse'. At least two octets are required for reverse DNS zones.
Example:
get_managed_zone('example.com.') = 'example-com'
get_managed_zone('20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('30.20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('40.30.20.10.in-addr.arpa.) = 'reverse-20-10'
Args:
zone (str): DNS zone.
Returns:
str of managed zone name. |
def _populate_alternate_kwargs(kwargs):
    """Translate the parsed arguments into the format used by generic ARM
    commands such as the resource and lock commands."""
    last = kwargs['last_child_num']
    namespace = kwargs['namespace']
    # The deepest child (if any) wins over the top-level type/name.
    rtype = kwargs.get('child_type_{}'.format(last)) or kwargs['type']
    rname = kwargs.get('child_name_{}'.format(last)) or kwargs['name']
    _get_parents_from_parts(kwargs)
    kwargs['resource_namespace'] = namespace
    kwargs['resource_type'] = rtype
    kwargs['resource_name'] = rname
    return kwargs
def listen_loop(self):
    """Starts the listen loop and executes the receive_datagram method
    whenever a packet is received.

    Args:
        None
    Returns:
        None
    """
    # errno.WSAECONNRESET exists only on Windows; fall back to its numeric
    # value (10054) so the comparison cannot raise AttributeError on POSIX.
    wsaeconnreset = getattr(errno, 'WSAECONNRESET', 10054)
    while self.listening:
        try:
            data, address = self.sock.recvfrom(self.bufsize)
            self.receive_datagram(data, address)
            if self.stats_enabled:
                self.stats['bytes_recieved'] += len(data)
        except socket.error as error:
            if error.errno == wsaeconnreset:
                # A remote peer vanished; keep listening.
                logger.info("connection reset")
            else:
                raise
    logger.info("Shutting down the listener...")
def clean_ns(tag):
    """Return a tag and its namespace separately.

    Tags without a namespace yield an empty namespace string.
    """
    if '}' not in tag:
        return '', tag
    pieces = tag.split('}')
    return pieces[0].strip('{'), pieces[-1]
def listTables(self,walkTrace=tuple(),case=None,element=None):
    """List section tables."""
    if case == 'sectionmain': print(walkTrace,self.title)
    if case == 'table':
        caption,tab = element
        try:
            print(walkTrace,tab._leopardref,caption)
        except AttributeError:
            # First visit: assign the table its number from the section's
            # running counter, then print as usual.
            tab._leopardref = next(self._reportSection._tabnr)
            print(walkTrace,tab._leopardref,caption)
def _get_part_reader(self, headers: 'CIMultiDictProxy[str]') -> Any:
    """Dispatches the response by the `Content-Type` header, returning
    suitable reader instance.

    :param dict headers: Response headers
    """
    ctype = headers.get(CONTENT_TYPE, '')
    mimetype = parse_mimetype(ctype)
    if mimetype.type == 'multipart':
        # Nested multipart: recurse with a reader of our own class unless
        # a specific multipart reader class was configured.
        if self.multipart_reader_cls is None:
            return type(self)(headers, self._content)
        return self.multipart_reader_cls(
            headers, self._content, _newline=self._newline
        )
    else:
        return self.part_reader_cls(
            self._boundary, headers, self._content, _newline=self._newline
        )
def _update_response_body(self, resource):
    """Create a representer for ``resource`` and write its byte
    representation into the response body."""
    representer = self._get_response_representer(resource)
    response = self.request.response
    response.content_type = representer.content_type.mime_type_string
    response.body = representer.to_bytes(resource)
def _find_realname(self, post_input):
if "lis_person_name_full" in post_input:
return post_input["lis_person_name_full"]
if "lis_person_name_given" in post_input and "lis_person_name_family" in post_input:
return post_input["lis_person_name_given"] + post_input["lis_person_name_family"]
if "lis_person_contact_email_primary" in post_input:
return post_input["lis_person_contact_email_primary"]
if "lis_person_name_family" in post_input:
return post_input["lis_person_name_family"]
if "lis_person_name_given" in post_input:
return post_input["lis_person_name_given"]
return post_input["user_id"] | Returns the most appropriate name to identify the user |
def serializeTransform(transformObj):
    """Reserializes the transform data with some cleanups."""
    parts = []
    for command, numbers in transformObj:
        args = ' '.join(scourUnitlessLength(number) for number in numbers)
        parts.append(command + '(' + args + ')')
    return ' '.join(parts)
def repository_url_part(distro):
    """Determine the repository URL part for a distro.

    RHEL 6+ maps to ``rhelN``; CentOS, Scientific, Oracle and Virtuozzo 6+
    map to ``elN``; everything else falls back to ``el6``.
    """
    release = distro.normalized_release
    if release.int_major >= 6:
        name = distro.normalized_name
        if name == 'redhat':
            return 'rhel' + release.major
        if name in ('centos', 'scientific', 'oracle', 'virtuozzo'):
            return 'el' + release.major
    return 'el6'
def getCatalogDefinitions():
    """Returns a dictionary merging all catalog listing definitions."""
    definitions = {}
    for definition in (bika_catalog_analysisrequest_listing_definition,
                       bika_catalog_analysis_listing_definition,
                       bika_catalog_autoimportlogs_listing_definition,
                       bika_catalog_worksheet_listing_definition,
                       bika_catalog_report_definition):
        definitions.update(definition)
    return definitions
def joliet_vd_factory(joliet, sys_ident, vol_ident, set_size, seqnum,
                      log_block_size, vol_set_ident, pub_ident_str,
                      preparer_ident_str, app_ident_str, copyright_file,
                      abstract_file, bibli_file, vol_expire_date, app_use, xa):
    """An internal function to create a Joliet Volume Descriptor for the
    given Joliet level (1, 2 or 3) and identification strings."""
    # Each Joliet level has a fixed UCS-2 escape sequence.
    escape_sequences = {1: b'%/@', 2: b'%/C', 3: b'%/E'}
    if joliet not in escape_sequences:
        raise pycdlibexception.PyCdlibInvalidInput('Invalid Joliet level; must be 1, 2, or 3')
    svd = PrimaryOrSupplementaryVD(VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY)
    svd.new(0, sys_ident, vol_ident, set_size, seqnum, log_block_size,
            vol_set_ident, pub_ident_str, preparer_ident_str, app_ident_str,
            copyright_file, abstract_file, bibli_file, vol_expire_date,
            app_use, xa, 1, escape_sequences[joliet])
    return svd
Parameters:
joliet - The joliet version to use, one of 1, 2, or 3.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
Returns:
The newly created Joliet Volume Descriptor. |
def zcross(seq, hysteresis=0, first_sign=0):
    """Zero-crossing stream: yields 1 at each detected crossing, else 0.

    ``hysteresis`` widens the threshold band to [-hysteresis, hysteresis];
    ``first_sign`` optionally seeds the sign memory (0 means "detect it
    from the data").
    """
    lower = -hysteresis
    items = iter(seq)
    if first_sign == 0:
        # Warm-up: consume samples (emitting 0) until one clears the
        # hysteresis band, which fixes the initial sign.
        sign = 0
        for sample in items:
            yield 0
            if sample > hysteresis or sample < lower:
                sign = 1 if sample >= 0 else -1
                break
    else:
        sign = 1 if first_sign > 0 else -1
    for sample in items:
        if sample * sign < lower:
            sign = 1 if sample >= 0 else -1
            yield 1
        else:
            yield 0
Parameters
----------
seq :
Any iterable to be used as input for the zero crossing analysis
hysteresis :
Crossing exactly zero might happen many times too fast due to high
frequency oscilations near zero. To avoid this, you can make two
threshold limits for the zero crossing detection: ``hysteresis`` and
``-hysteresis``. Defaults to zero (0), which means no hysteresis and only
one threshold.
first_sign :
Optional argument with the sign memory from past. Gets the sig from any
signed number. Defaults to zero (0), which means "any", and the first sign
will be the first one found in data.
Returns
-------
A Stream instance that outputs 1 for each crossing detected, 0 otherwise. |
def get_albums(self, limit=None):
    """Return a list of the user's albums.

    Secret and hidden albums are only returned if this is the logged-in
    user.
    """
    # The trailing '{}' is a placeholder presumably filled in later by
    # pagination inside _send_request -- TODO confirm.
    url = (self._imgur._base_url + "/3/account/{0}/albums/{1}".format(self.name,
                                                                      '{}'))
    resp = self._imgur._send_request(url, limit=limit)
    return [Album(alb, self._imgur, False) for alb in resp]
def get_types(self):
    """Gets all the known Types.

    return: (osid.type.TypeList) - the list of all known Types
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    from . import types
    url_path = '/handcar/services/learning/types/'
    type_list = self._get_request(url_path)
    # Merge the service-provided types with the locally defined ones.
    type_list += types.TYPES
    return objects.TypeList(type_list)
def _copy_params(self):
    """Copy all params defined on the class to the current object."""
    cls = type(self)
    for attr_name in dir(cls):
        attr = getattr(cls, attr_name)
        if isinstance(attr, Param):
            # Re-parent each class-level Param onto this instance.
            setattr(self, attr_name, attr._copy_new_parent(self))
def _setup_trunk(self, trunk, vlan_id=None):
    """Sets up VLAN trunk and updates the trunk status."""
    LOG.info('Binding trunk port: %s.', trunk)
    try:
        self._trunk_rpc.update_subport_bindings(self._context,
                                                trunk.sub_ports)
        vlan_trunk = [s.segmentation_id for s in trunk.sub_ports]
        self._set_port_vlan(trunk.port_id, vlan_id, vlan_trunk)
        self._trunk_rpc.update_trunk_status(self._context, trunk.id,
                                            t_const.ACTIVE_STATUS)
    except Exception:
        # Any failure is reported as DEGRADED rather than raised, so the
        # agent keeps running.
        LOG.exception("Failure setting up subports for %s", trunk.port_id)
        self._trunk_rpc.update_trunk_status(self._context, trunk.id,
                                            t_const.DEGRADED_STATUS)
def is_valid_py_file(path):
    """Checks whether the file can be read by the coverage module.

    This is especially needed for .pyx files and .py files with syntax
    errors: only an existing, non-.pyx file that compiles cleanly is valid.
    """
    import os
    if not os.path.isfile(path) or os.path.splitext(path)[1] == '.pyx':
        return False
    try:
        with open(path, 'rb') as f:
            compile(f.read(), path, 'exec')
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any compile/read failure means "not valid".
        return False
    return True
def parse_member_from_rsvp(data):
    """Parse a ``MeetupMember`` from the given RSVP response data.

    Returns
    -------
    A ``pythonkc_meetups.types.MeetupMember``.
    """
    member = data['member']
    photo = None
    if 'member_photo' in data:
        photo = parse_photo(data['member_photo'])
    return MeetupMember(
        id=member.get('member_id'),
        name=member.get('name'),
        photo=photo,
    )
def chown_r(path: str, user: str, group: str) -> None:
    """Performs a recursive ``chown``.

    Note that ``path`` itself is not changed, only its contents.

    Args:
        path: path to walk down
        user: user name or ID
        group: group name or ID

    As per http://stackoverflow.com/questions/2853723
    """
    for root, dirnames, filenames in os.walk(path):
        for name in dirnames + filenames:
            shutil.chown(os.path.join(root, name), user, group)
def get_open_orders(self, market=None):
    """Get all orders that you currently have opened.

    A specific market can be requested.

    Endpoint:
        1.1 /market/getopenorders
        2.0 /key/market/getopenorders

    :param market: String literal for the market (ie. BTC-LTC)
    :type market: str
    :return: Open orders info in JSON
    :rtype: dict
    """
    # Both parameter spellings are sent because v1.1 expects 'market'
    # while v2.0 expects 'marketname'.
    return self._api_query(path_dict={
        API_V1_1: '/market/getopenorders',
        API_V2_0: '/key/market/getopenorders'
    }, options={'market': market, 'marketname': market} if market else None, protection=PROTECTION_PRV)
def cas2mach(cas, h):
    """CAS to Mach conversion: convert calibrated airspeed to true
    airspeed at altitude h, then to Mach number."""
    return tas2mach(cas2tas(cas, h), h)
def connectionLostForKey(self, key):
    """Remove lost connection from cache and fire any pending shutdown
    deferred registered for that key.

    @param key: key of connection that was lost
    @type key: L{tuple} of L{IAddress} and C{extraHash}
    """
    self.cachedConnections.pop(key, None)
    waiter = self._shuttingDown and self._shuttingDown.get(key)
    if waiter:
        # Clear the slot before firing so re-entrant calls see no waiter.
        self._shuttingDown[key] = None
        waiter.callback(None)
def add_epoch_number(batch: Batch, epoch: int) -> Batch:
    """Add the epoch number to the batch instances as a MetadataField."""
    # A fresh MetadataField per instance so fields are not shared objects.
    for instance in batch.instances:
        instance.fields['epoch_num'] = MetadataField(epoch)
    return batch
def _sync_hooks(self, repos, asynchronous=True):
    """Check if a hooks sync task needs to be started.

    When ``asynchronous`` the work is delegated to the ``sync_hooks``
    Celery task; otherwise each repo is synced inline inside a nested
    transaction.
    """
    if not asynchronous:
        for repo_id in repos:
            try:
                with db.session.begin_nested():
                    self.sync_repo_hook(repo_id)
                db.session.commit()
            except RepositoryAccessError as e:
                # Exception.message does not exist on Python 3; str(e)
                # yields the same text portably.
                current_app.logger.warning(str(e), exc_info=True)
            except NoResultFound:
                # Repository is gone; nothing to sync.
                pass
    else:
        db.session.commit()
        sync_hooks.delay(self.user_id, repos)
def selected_option(self):
    """Return the currently selected option."""
    target = self.selected_option_index
    position = 0
    # Walk options category by category until the flat index matches.
    for category in self.options:
        for option in category.options:
            if position == target:
                return option
            position += 1
def dump_relation(api, rel_cfg, pid, data):
    """Dump a specific relation to a data dict."""
    schema_class = rel_cfg.schema
    if schema_class is not None:
        schema = schema_class()
        schema.context['pid'] = pid
        # marshmallow 2.x style: dump() returns (result, errors); errors
        # are deliberately ignored here.
        result, errors = schema.dump(api)
        data.setdefault(rel_cfg.name, []).append(result)
def mmGetPlotUnionSDRActivity(self, title="Union SDR Activity Raster",
                              showReset=False, resetShading=0.25):
    """Returns plot of the activity of union SDR bits.

    @param title an optional title for the figure
    @param showReset if true, the first set of activities after a reset
        will have a gray background
    @param resetShading if showReset is true, intensity of the reset
        background with 0.0 being white and 1.0 being black
    @return (Plot) plot
    """
    unionSDRTrace = self.mmGetTraceUnionSDR().data
    columnCount = self.getNumColumns()
    activityType = "Union SDR Activity"
    # Delegate rendering to the generic cell-trace plotter.
    return self.mmGetCellTracePlot(unionSDRTrace, columnCount, activityType,
                                   title=title, showReset=showReset,
                                   resetShading=resetShading)
def __verify_job_has_started(self):
    """Asserts that the job has successfully started.

    Returns False while containers are still becoming ready; raises
    AssertionError when no pod was scheduled, a container terminated with
    a non-zero exit code, or a pod condition reports failure.
    """
    self.__get_job()
    pods = self.__get_pods()
    assert len(pods) > 0, "No pod scheduled by " + self.uu_name
    for pod in pods:
        status = pod.obj['status']
        for cont_stats in status.get('containerStatuses', []):
            if 'terminated' in cont_stats['state']:
                # A terminated container must have exited cleanly.
                t = cont_stats['state']['terminated']
                err_msg = "Pod %s %s (exit code %d). Logs: `kubectl logs pod/%s`" % (
                    pod.name, t['reason'], t['exitCode'], pod.name)
                assert t['exitCode'] == 0, err_msg
            if 'waiting' in cont_stats['state']:
                # Only ContainerCreating is an acceptable waiting reason.
                wr = cont_stats['state']['waiting']['reason']
                assert wr == 'ContainerCreating', "Pod %s %s. Logs: `kubectl logs pod/%s`" % (
                    pod.name, wr, pod.name)
        for cond in status.get('conditions', []):
            if 'message' in cond:
                if cond['reason'] == 'ContainersNotReady':
                    # Not an error yet: containers may still come up.
                    return False
                assert cond['status'] != 'False', \
                    "[ERROR] %s - %s" % (cond['reason'], cond['message'])
    return True
def split_limits_heads(self):
    """Return first parts of dot-separated strings, and rest of strings.

    Returns:
        (list of str, list of str): the heads and rest of the strings.
    """
    heads = []
    remainders = []
    for limit in self.limit_to:
        head, sep, rest = limit.partition('.')
        heads.append(head)
        # Entries without a dot contribute a head but no remainder.
        if sep:
            remainders.append(rest)
    return heads, remainders
def time_afterwards_preceding(
        self, when: datetime.datetime) -> Optional[datetime.timedelta]:
    """Returns the time after our last interval, but before ``when``.

    If ``self`` is an empty list, returns ``None``; if ``when`` does not
    fall after the final interval, returns a zero timedelta.
    """
    if self.is_empty():
        return None
    last_end = self.end_datetime()
    if when > last_end:
        return when - last_end
    return datetime.timedelta()
def _apply_conv(self, inputs, w):
    """Apply a convolution operation on `inputs` using variable `w`.

    Args:
        inputs: A Tensor of shape `data_format` and of type `tf.float16`,
            `tf.bfloat16` or `tf.float32`.
        w: A weight matrix of the same type as `inputs`.

    Returns:
        outputs: The result of the convolution operation on `inputs`.
    """
    # Stride/padding/dilation/layout all come from module configuration.
    outputs = tf.nn.convolution(inputs, w, strides=self._stride,
                                padding=self._conv_op_padding,
                                dilation_rate=self._rate,
                                data_format=self._data_format)
    return outputs
def feed_interval_get(feed_id, parameters):
    """Get adaptive interval between checks for a feed."""
    cache_key = getkey(T_INTERVAL, key=feed_interval_key(feed_id, parameters))
    value = cache.get(cache_key)
    # Legacy cache entries stored a bare value; normalize to a 2-tuple.
    if isinstance(value, tuple):
        return value
    return (value, None)
def check_positive(**params):
    """Check that parameters are positive as expected.

    Raises
    ------
    ValueError : unacceptable choice of parameters (non-numeric or <= 0)
    """
    # Iterate items() instead of keys + re-lookup (one pass, clearer).
    for name, value in params.items():
        if not isinstance(value, numbers.Number) or value <= 0:
            raise ValueError(
                "Expected {} > 0, got {}".format(name, value))
def add(self, post_id):
    """Adding reply to a post."""
    post_data = self.get_post_data()
    post_data['user_name'] = self.userinfo.user_name
    post_data['user_id'] = self.userinfo.uid
    post_data['post_id'] = post_id
    replyid = MReply.create_reply(post_data)
    if replyid:
        # NOTE(review): assumes the submitted form included 'cnt_reply';
        # a KeyError is raised otherwise -- confirm get_post_data().
        out_dic = {'pinglun': post_data['cnt_reply'],
                   'uid': replyid}
        logger.info('add reply result dic: {0}'.format(out_dic))
        # NOTE(review): json.dump writes through self (treated as a
        # file-like object via its write()); confirm this is intended
        # rather than self.write(json.dumps(out_dic)).
        return json.dump(out_dic, self)
def opensearch(self, query, results=10, redirect=True):
    """Execute a MediaWiki opensearch request, similar to search box
    suggestions and conforming to the OpenSearch specification.

    Args:
        query (str): Title to search for
        results (int): Number of pages within the radius to return
        redirect (bool): If **False** return the redirect itself,
            otherwise resolve redirects
    Returns:
        List: List of results that are stored in a tuple
        (Title, Summary, URL)
    """
    self._check_query(query, "Query must be specified")
    query_params = {
        "action": "opensearch",
        "search": query,
        "limit": (100 if results > 100 else results),  # API caps at 100
        "redirects": ("resolve" if redirect else "return"),
        "warningsaserror": True,
        "namespace": "",
    }
    results = self.wiki_request(query_params)
    self._check_error_response(results, query)
    # Response is positional: [query, titles, summaries, urls].
    res = list()
    for i, item in enumerate(results[1]):
        res.append((item, results[2][i], results[3][i]))
    return res
def phenotype_data(self):
    """Get the phenotype data for each of the sample.

    Builds (and caches) a DataFrame with one row per GSM sample, expanding
    'characteristics_*' metadata entries of the form "<type>: <value>"
    into separate columns.
    """
    if self._phenotype_data is None:
        pheno_data = {}
        for gsm_name, gsm in iteritems(self.gsms):
            tmp = {}
            for key, value in iteritems(gsm.metadata):
                if len(value) == 0:
                    tmp[key] = np.nan
                elif key.startswith("characteristics_"):
                    # Each characteristic becomes key.<i>.<type> = <value>.
                    for i, char in enumerate(value):
                        char = re.split(":\s+", char)
                        char_type, char_value = [char[0],
                                                 ": ".join(char[1:])]
                        tmp[key + "." + str(
                            i) + "." + char_type] = char_value
                else:
                    tmp[key] = ",".join(value)
            pheno_data[gsm_name] = tmp
        # Transpose so samples are rows and metadata keys are columns.
        self._phenotype_data = DataFrame(pheno_data).T
    return self._phenotype_data
def caller_path(steps=1):
    """Return the path to the source file of the current frames' caller."""
    frame = sys._getframe(steps + 1)
    try:
        directory = os.path.dirname(frame.f_code.co_filename)
    finally:
        # Drop the frame reference promptly to avoid reference cycles.
        del frame
    return os.path.realpath(directory or os.getcwd())
def reindex(self, kdims=[], force=False):
    """Reorders key dimensions on DynamicMap.

    Create a new object with a reordered set of key dimensions.
    Dropping dimensions is not allowed on a DynamicMap.

    Args:
        kdims: List of dimensions to reindex the mapping with
        force: Not applicable to a DynamicMap

    Returns:
        Reindexed DynamicMap
    """
    # Note: the mutable default is safe here -- kdims is only rebound,
    # never mutated in place.
    if not isinstance(kdims, list):
        kdims = [kdims]
    kdims = [self.get_dimension(kd, strict=True) for kd in kdims]
    dropped = [kd for kd in self.kdims if kd not in kdims]
    if dropped:
        raise ValueError("DynamicMap does not allow dropping dimensions, "
                         "reindex may only be used to reorder dimensions.")
    return super(DynamicMap, self).reindex(kdims, force)
def identify_names(filename):
    """Builds a codeobj summary by identifying and resolving used names."""
    node, _ = parse_source_file(filename)
    if node is None:
        return {}
    finder = NameFinder()
    finder.visit(node)
    names = list(finder.get_mapping())
    names += extract_object_names_from_docs(filename)
    example_code_obj = collections.OrderedDict()
    for name, full_name in names:
        # First occurrence of a name wins.
        if name in example_code_obj:
            continue
        splitted = full_name.rsplit('.', 1)
        if len(splitted) == 1:
            # Plain local name with no module part; nothing to resolve.
            continue
        module, attribute = splitted
        module_short = get_short_module_name(module, attribute)
        cobj = {'name': attribute, 'module': module,
                'module_short': module_short}
        example_code_obj[name] = cobj
    return example_code_obj
def np2model_tensor(a):
    """Transform numpy array `a` to a tensor of the same type."""
    tensor = as_tensor(a)
    dtype = model_type(a.dtype)
    # Only cast when a model dtype mapping exists for this numpy dtype.
    return tensor.type(dtype) if dtype else tensor
def iiif_info_handler(prefix=None, identifier=None,
                      config=None, klass=None, auth=None, **args):
    """Handler for IIIF Image Information requests."""
    if (not auth or degraded_request(identifier) or auth.info_authz()):
        # Authorized (or no auth configured): serve info.json directly.
        if (auth):
            logging.debug("Authorized for image %s" % identifier)
        i = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return i.image_information_response()
        except IIIFError as e:
            return i.error_response(e)
    elif (auth.info_authn()):
        # Authenticated but not authorized for this image.
        abort(401)
    else:
        # Not authenticated: redirect to the degraded image's info.json.
        response = redirect(host_port_prefix(
            config.host, config.port, prefix) + '/' + identifier + '-deg/info.json')
        response.headers['Access-control-allow-origin'] = '*'
        return response
def _linear_inverse_kamb(cos_dist, sigma=3):
    """Kernel function from Vollmer for linear smoothing."""
    n = float(cos_dist.size)
    radius = _kamb_radius(n, sigma)
    scale = 2 / (1 - radius)
    # Only points inside the counting radius contribute.
    inside = cos_dist[cos_dist >= radius]
    return scale * (inside - radius), _kamb_units(n, radius)
def new(cls, store_type, store_entries):
    """Helper function to create a new KeyStore.

    :param string store_type: kind of keystore; valid options are
        'jks' or 'jceks'.
    :param list store_entries: existing entries to add to the keystore.
    :returns: A loaded :class:`KeyStore` instance with the given entries.
    :raises DuplicateAliasException: if entries share an alias.
    :raises UnsupportedKeystoreTypeException: for an unknown store type.
    :raises UnsupportedKeystoreEntryTypeException: for entries that are
        unsupported (in this keystore type).
    """
    if store_type not in ('jks', 'jceks'):
        raise UnsupportedKeystoreTypeException("The Keystore Type '%s' is not supported" % store_type)
    entries = {}
    for entry in store_entries:
        if not isinstance(entry, AbstractKeystoreEntry):
            raise UnsupportedKeystoreEntryTypeException("Entries must be a KeyStore Entry")
        # Secret keys are a JCEKS-only feature.
        if store_type != 'jceks' and isinstance(entry, SecretKeyEntry):
            raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
        if entry.alias in entries:
            raise DuplicateAliasException("Found duplicate alias '%s'" % entry.alias)
        entries[entry.alias] = entry
    return cls(store_type, entries)
def StreamMedia(self, callback=None, finish_callback=None,
                additional_headers=None, use_chunks=True):
    """Stream the entire download.

    Args:
        callback: (default: None) Callback to call as each chunk is
            completed.
        finish_callback: (default: None) Callback to call when the
            download is complete.
        additional_headers: (default: None) Additional headers to
            include in fetching bytes.
        use_chunks: (bool, default: True) If False, ignore self.chunksize
            and stream this download in a single request.

    Returns:
        None. Streams bytes into self.stream.
    """
    callback = callback or self.progress_callback
    finish_callback = finish_callback or self.finish_callback
    self.EnsureInitialized()
    while True:
        if self.__initial_response is not None:
            # Consume the response cached at initialization exactly once.
            response = self.__initial_response
            self.__initial_response = None
        else:
            end_byte = self.__ComputeEndByte(self.progress,
                                             use_chunks=use_chunks)
            response = self.__GetChunk(
                self.progress, end_byte,
                additional_headers=additional_headers)
        if self.total_size is None:
            self.__SetTotal(response.info)
        response = self.__ProcessResponse(response)
        self._ExecuteCallback(callback, response)
        # A plain 200 means the server sent everything in one response.
        if (response.status_code == http_client.OK or
                self.progress >= self.total_size):
            break
    self._ExecuteCallback(finish_callback, response)
def run(self, *args):
    """Print model summary."""
    if self.source is None:
        self.model.summary()
    else:
        # Derive the input size (minus the batch dim) from one batch.
        x_data, y_data = next(iter(self.source.train_loader()))
        self.model.summary(input_size=x_data.shape[1:])
def parseMemory(buffer, size):
    """Parse an XML in-memory block and build a tree."""
    result = libxml2mod.xmlParseMemory(buffer, size)
    if result is None:
        raise parserError('xmlParseMemory() failed')
    return xmlDoc(_obj=result)
def request(self, arg=None):
    """Deal with requests: serialize the current status to JSON,
    optionally drilling into it along a '/'-separated key path."""
    if not self.status:
        return '{"result": "No message"}'
    try:
        status_dict = json.loads(mpstatus_to_json(self.status))
    except Exception as e:
        # NOTE(review): returns None on conversion failure -- callers
        # must tolerate a missing body; confirm this is intended.
        print(e)
        return
    if not arg:
        return json.dumps(status_dict)
    new_dict = status_dict
    args = arg.split('/')
    for key in args:
        if key in new_dict:
            new_dict = new_dict[key]
        else:
            # Unknown key: report it along with the last dict reached.
            return '{"key": "%s", "last_dict": %s}' % (key, json.dumps(new_dict))
    return json.dumps(new_dict)
def wait_for_zone_op(access_token, project, zone, name, interval=1.0):
    """Wait until a GCE zone operation is finished.

    Args:
        access_token: OAuth token object exposing an ``access_token``
            attribute.
        project: GCP project id.
        zone: zone containing the operation.
        name: name of the operation to wait for.
        interval: seconds between polls (must be at least 0.1).
    """
    assert isinstance(interval, (int, float))
    assert interval >= 0.1
    status = 'RUNNING'
    LOGGER.info('Waiting for zone operation "%s" to finish...', name)
    # Removed the unused 'progress' tracking that was never read.
    while status != 'DONE':
        # NOTE(review): passing the token as a query parameter can leak it
        # into server logs; an Authorization header would be safer --
        # confirm against the Compute API usage elsewhere.
        r = requests.get('https://www.googleapis.com/compute/v1/'
                         'projects/%s/zones/%s/operations/%s?access_token=%s'
                         % (project, zone, name, access_token.access_token))
        r.raise_for_status()
        result = r.json()
        status = result['status']
        time.sleep(interval)
    LOGGER.info('Zone operation "%s" done!', name)
def polynomial_reduce_mod(poly, polymod, p):
    """Reduce poly by polymod, integer arithmetic modulo p.

    Polynomials are represented as lists of coefficients
    of increasing powers of x. *polymod* must be monic.

    The caller's lists are left untouched; a new list is returned.
    (The original implementation mutated *poly* in place via the
    ``poly[-i] = ...`` assignments before rebinding the name.)
    """
    assert polymod[-1] == 1
    assert len(polymod) > 1
    poly = list(poly)  # work on a copy so the argument is not clobbered
    while len(poly) >= len(polymod):
        if poly[-1] != 0:
            for i in range(2, len(polymod) + 1):
                poly[-i] = (poly[-i] - poly[-1] * polymod[-i]) % p
        poly = poly[0:-1]
    return poly
def grok_filter_name(element):
    """Extract the name, which may be embedded, for a Jinja2 filter node.

    Returns None for nodes whose name is not 'default'.
    """
    if element.name != 'default':
        return None
    node = element.node
    if isinstance(node, jinja2.nodes.Getattr):
        return node.node.name
    return node.name
def send_keys_to_element(self, element, *keys_to_send):
    """Click *element*, then send *keys_to_send* to it.

    :Args:
     - element: The element to send keys to.
     - keys_to_send: The keys to send; modifier key constants live in
       the 'Keys' class.
    :returns: self, so calls can be chained.
    """
    self.click(element)
    self.send_keys(*keys_to_send)
    return self
def get_alpha_or_number(number, template):
    """Return an Alphanumber representing *number* as described by
    *template*; when the template carries no ``{alpha:NaNd}`` suffix the
    number is returned unchanged.
    """
    match = re.match(r".*\{alpha:(\d+a\d+d)\}$", template.strip())
    if not (match and match.groups()):
        return number
    return to_alpha(number, match.groups()[0])
def clean(self):
    """Remove all extracted MOF files by deleting `schema_mof_dir`.

    The downloaded schema zip expands into thousands of MOF files that
    take considerable space; they are re-extracted automatically the
    next time the DMTFCIMSchema object for this schema_version and
    schema_root_dir is created, so this only reclaims disk space.
    """
    if not os.path.isdir(self.schema_mof_dir):
        return
    shutil.rmtree(self.schema_mof_dir)
def event(self, event):
    """Qt override: swallow Tab key presses (useful in line-edit mode);
    everything else goes to the default QWidget handler."""
    is_tab_press = (event.type() == QEvent.KeyPress
                    and event.key() == Qt.Key_Tab)
    if is_tab_press:
        return False
    return QWidget.event(self, event)
def getIndxOps(self, valu, cmpr='='):
    """Return a list of index operation tuples to lift values in a table.

    Valid index operations include::

        ('eq', <indx>)
        ('pref', <indx>)
        ('range', (<minindx>, <maxindx>))

    :raises s_exc.NoSuchCmpr: if *cmpr* has no registered index function.
    """
    func = self.indxcmpr.get(cmpr)
    if func is not None:
        return func(valu)
    raise s_exc.NoSuchCmpr(name=self.name, cmpr=cmpr)
def query_by_post(postid):
    """Return the replies to the given post, newest first."""
    replies = TabReply.select().where(TabReply.post_id == postid)
    return replies.order_by(TabReply.timestamp.desc())
def from_learners(cls, learn_gen:Learner, learn_crit:Learner, switcher:Callback=None,
                  weights_gen:Tuple[float,float]=None, **learn_kwargs):
    "Create a GAN from `learn_gen` and `learn_crit`."
    losses = gan_loss_from_func(learn_gen.loss_func, learn_crit.loss_func,
                                weights_gen=weights_gen)
    return cls(learn_gen.data, learn_gen.model, learn_crit.model, *losses,
               switcher=switcher, **learn_kwargs)
def update_nodes(self, char, patch, backdate=False):
    """Change the stats of nodes in character *char* according to *patch*,
    a mapping of node name -> stat-patch dict.

    :param backdate: when True, temporarily rewind the engine's tick to
        the parent branch's revision so the changes land in the past,
        then restore the current tick afterwards.
    """
    if backdate:
        parbranch, parrev = self._real._parentbranch_rev.get(
            self._real.branch, ('trunk', 0)
        )
        tick_now = self._real.tick
        self._real.tick = parrev
    # The original enumerated the items but never used the counter;
    # iterate the mapping directly.
    for n, npatch in patch.items():
        self.update_node(char, n, npatch)
    if backdate:
        self._real.tick = tick_now
def _linefeed(self):
    """Perform a line feed: insert a newline when at the end of the text
    or on the last line, otherwise move the cursor to the start of the
    next line."""
    on_last_line = (self._cursor.blockNumber()
                    == self._text_edit.blockCount() - 1)
    if self._cursor.atEnd() or on_last_line:
        if on_last_line:
            # jump to the end of the block before inserting
            self._cursor.movePosition(self._cursor.EndOfBlock)
        self._cursor.insertText('\n')
    else:
        self._cursor.movePosition(self._cursor.Down)
        self._cursor.movePosition(self._cursor.StartOfBlock)
    self._text_edit.setTextCursor(self._cursor)
def validate(self, instance, value):
    """Check that *value* is a datetime object or a JSON datetime string.

    Datetimes pass through unchanged; strings are parsed via
    ``self.from_json``. Anything else is reported through ``self.error``
    (which is expected to raise).
    """
    if isinstance(value, datetime.datetime):
        return value
    if not isinstance(value, string_types):
        self.error(
            instance=instance,
            value=value,
            extra='Cannot convert non-strings to datetime.',
        )
    try:
        return self.from_json(value)
    except ValueError:
        self.error(
            instance=instance,
            value=value,
            extra='Invalid format for converting to datetime.',
        )
def on_peer_down(self, peer):
    """Peer-down handler.

    Drops the now-stale paths in the global tables that were received
    from *peer*.
    """
    LOG.debug('Cleaning obsolete paths whose source/version: %s/%s',
              peer.ip_address, peer.version_num)
    self._table_manager.clean_stale_routes(peer)
def get_tx_amount(cls, txid, txindex):
    """Get the amount of a given transaction output.

    Each API in ``cls.GET_TX_AMOUNT_MAIN`` is tried in order; errors
    listed in ``cls.IGNORED_ERRORS`` cause fallback to the next one.

    :param txid: The transaction id in question.
    :type txid: ``str``
    :param txindex: The transaction index in question.
    :type txindex: ``int``
    :raises ConnectionError: If all API services fail.
    :rtype: ``Decimal``
    """
    for api_call in cls.GET_TX_AMOUNT_MAIN:
        try:
            return api_call(txid, txindex)
        except cls.IGNORED_ERRORS:
            continue
    raise ConnectionError('All APIs are unreachable.')
def get(self, device_id: int) -> Optional[Device]:
    """Return the device with *device_id*, or None when not found."""
    return self._devices.get(device_id)
def import_sip04_data_all(data_filename):
    """Import ALL data from a SIP-04 result file.

    Parameters
    ----------
    data_filename : str
        Path to a .mat or .csv file containing SIP-04 measurement
        results. Note that the .csv file does not contain all data
        contained in the .mat file!

    Returns
    -------
    df_all : :py:class:`pandas.DataFrame` or None
        The imported data, or None for an unsupported file extension.
    """
    ext = os.path.splitext(data_filename)[1]
    if ext == '.csv':
        print('Import SIP04 data from .csv file')
        return _import_csv_file(data_filename)
    if ext == '.mat':
        print('Import SIP04 data from .mat file')
        return _import_mat_file(data_filename)
    print('Please use .csv or .mat format.')
    return None
def hydrate_time(nanoseconds, tz=None):
    """Hydrator for `Time` and `LocalTime` values.

    :param nanoseconds: nanoseconds since midnight
    :param tz: optional timezone offset in seconds
    :return: Time
    """
    total_seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))
    total_minutes, whole_seconds = map(int, divmod(total_seconds, 60))
    hours, minutes = map(int, divmod(total_minutes, 60))
    # re-attach the sub-second part as a float seconds value
    seconds = (1000000000 * whole_seconds + nanoseconds) / 1000000000
    t = Time(hours, minutes, seconds)
    if tz is None:
        return t
    tz_offset_minutes, _ = divmod(tz, 60)
    return FixedOffset(tz_offset_minutes).localize(t)
def extend_array(a, n):
    """Increase the resolution of an array by repeating each value *n*
    times along every axis.

    Parameters
    ----------
    a: array, shape (a1, a2, a3, ...)
    n: integer
        Factor by which to expand the array.

    Returns
    -------
    ae: array, shape (n * a1, n * a2, n * a3, ...)
    """
    expanded = a.copy()
    for axis in range(a.ndim):
        expanded = np.repeat(expanded, n, axis=axis)
    return expanded
def set_topics(self, topics):
    """Set specific topics to track for metadata.

    Arguments:
        topics (list of str): topics to check for metadata

    Returns:
        Future: resolves after metadata request/response
    """
    new_topics = set(topics)
    if new_topics - self._topics:
        # new topics appeared: a cluster metadata refresh is required
        future = self.cluster.request_update()
    else:
        future = Future().success(new_topics)
    self._topics = new_topics
    return future
def assert_hashable(*args, **kw):
    """Verify that each argument is hashable.

    Passes silently if successful. Raises descriptive TypeError otherwise.

    Example::

        >>> assert_hashable(1, 'foo', bar='baz')
        >>> assert_hashable(1, [], baz='baz')
        Traceback (most recent call last):
        ...
        TypeError: Argument in position 1 is not hashable: []
        >>> assert_hashable(1, 'foo', bar=[])
        Traceback (most recent call last):
        ...
        TypeError: Keyword argument 'bar' is not hashable: []
    """
    try:
        for idx, value in enumerate(args):
            hash(value)
    except TypeError:
        raise TypeError('Argument in position %d is not hashable: %r' % (idx, value))
    try:
        for name, value in iterate_items(kw):
            hash(value)
    except TypeError:
        raise TypeError('Keyword argument %r is not hashable: %r' % (name, value))
def _only_main(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.is_main:
return getattr(self.main, func.__name__)(*args, **kwargs)
return func(self, *args, **kwargs)
return wrapper | Call the given `func` only from the main project |
def zip(self, *items):
    """Zip the collection together with one or more arrays.

    :param items: The items to zip
    :type items: list

    :rtype: Collection
    """
    zipped = zip(self.items, *items)
    return self.__class__(list(zipped))
def set_wake_on_modem(enabled):
    """Set whether or not the computer will wake from sleep when modem
    activity is detected.

    :param bool enabled: True to enable, False to disable. "On"/"Off" and
        1/0 are also accepted.

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' power.set_wake_on_modem True
    """
    state = salt.utils.mac_utils.validate_enabled(enabled)
    salt.utils.mac_utils.execute_return_success(
        'systemsetup -setwakeonmodem {0}'.format(state))
    return salt.utils.mac_utils.confirm_updated(state, get_wake_on_modem)
def set_state(self, state):
    """Set the state of this device to on (truthy) or off (falsy)."""
    value = int(state)
    self.basicevent.SetBinaryState(BinaryState=value)
    self._state = value
def get_domain_config(self, domain):
    """Discover the domain name and resolve the DNS provider configuration.

    :param domain: str
        domain name
    :return: DomainConnectConfig
        domain connect config
    :raises: NoDomainConnectRecordException
        when no _domainconnect record found
    :raises: NoDomainConnectSettingsException
        when settings are not found
    """
    domain_root = self.identify_domain_root(domain)
    if len(domain_root) != len(domain):
        host = domain.replace('.' + domain_root, '')
    else:
        host = ''
    api = self._identify_domain_connect_api(domain_root)
    config = self._get_domain_config_for_root(domain_root, api)
    return DomainConnectConfig(domain, domain_root, host, config)
def _send_to_kafka(self, master):
appid_topic = "{prefix}.outbound_{appid}".format(
prefix=self.topic_prefix,
appid=master['appid'])
firehose_topic = "{prefix}.outbound_firehose".format(
prefix=self.topic_prefix)
try:
if self.use_appid_topics:
f1 = self.producer.send(appid_topic, master)
f1.add_callback(self._kafka_success)
f1.add_errback(self._kafka_failure)
f2 = self.producer.send(firehose_topic, master)
f2.add_callback(self._kafka_success)
f2.add_errback(self._kafka_failure)
return True
except Exception as ex:
message = "An exception '{0}' occured while sending a message " \
"to kafka. Arguments:\n{1!r}" \
.format(type(ex).__name__, ex.args)
self.logger.error(message)
return False | Sends the message back to Kafka
@param master: the final dict to send
@returns: True if successfully sent to kafka |
def create_version(self, service_id, inherit_service_id=None, comment=None):
    """Create a version for a particular service."""
    fields = {
        "service_id": service_id,
        "inherit_service_id": inherit_service_id,
        "comment": comment,
    }
    body = self._formdata(fields, FastlyVersion.FIELDS)
    content = self._fetch("/service/%s/version" % service_id,
                          method="POST", body=body)
    return FastlyVersion(self, content)
def _count_counters(self, counter):
if getattr(self, 'as_set', False):
return len(set(counter))
else:
return sum(counter.values()) | Return all elements count from Counter |
def get_accessibility(self, plugin_override=True):
    """Return the AccessibleTime object for this course, preferring the
    first value supplied by the 'course_accessibility' hook when
    *plugin_override* is True."""
    hook_vals = self._hook_manager.call_hook(
        'course_accessibility', course=self, default=self._accessible)
    if hook_vals and plugin_override:
        return hook_vals[0]
    return self._accessible
def remove(self, item):
    """Remove either an unparsed argument string or an argument object.

    :param Union[str,Arg] item: Item to remove

    >>> arguments = TexArgs([RArg('arg0'), '[arg2]', '{arg3}'])
    >>> arguments.remove('{arg0}')
    >>> len(arguments)
    2
    >>> arguments[0]
    OArg('arg2')
    """
    coerced = self.__coerce(item)
    self.all.remove(coerced)
    super().remove(coerced)
def run(ctx, commandline):
    """Run command with environment variables present."""
    dotenv_as_dict = dotenv_values(ctx.obj['FILE'])
    if not commandline:
        click.echo('No command given.')
        exit(1)
    exit(run_command(commandline, dotenv_as_dict))
def kwargs(self):
    """The dictionary of keyword-only arguments for the instantiation of
    the Expression.

    :raises NotImplementedError: when ``_has_kwargs`` is set truthy on
        the class, signalling that it opted out of this property.
    """
    if getattr(self, '_has_kwargs', False):
        raise NotImplementedError(
            "Class %s does not provide a kwargs property"
            % str(self.__class__.__name__))
    return {}
def _get_methods_that_calculate_outputs(inputs, outputs, methods):
    """Return the subset of *methods* that can help derive *outputs*.

    Given iterables of input variable names, output variable names, and a
    methods dictionary (quantity -> {arg tuple -> function}), returns the
    subset of the methods dictionary that can be calculated, doesn't
    calculate something we already have, and only contains equations that
    might help calculate the outputs from the inputs. Needed intermediate
    quantities are added to the output set until a fixed point is reached.
    """
    intermediates = get_calculatable_quantities(inputs, methods)
    return_methods = {}
    outputs = list(outputs)
    keep_going = True
    while keep_going:
        keep_going = False
        for output in outputs:
            try:
                output_dict = return_methods[output]
            except KeyError:
                # Bug fix: this was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; only a missing key is expected.
                output_dict = {}
            for args, func in methods[output].items():
                if args not in output_dict.keys():
                    needed = []
                    for arg in args:
                        if arg in inputs:
                            pass
                        elif arg in outputs:
                            pass
                        elif arg in intermediates:
                            if arg not in outputs:
                                needed.append(arg)
                        else:
                            # arg cannot be derived at all: skip this method
                            break
                    else:
                        output_dict[args] = func
                        if len(needed) > 0:
                            # newly required intermediates trigger another pass
                            outputs.extend(needed)
                            keep_going = True
            if len(output_dict) > 0:
                return_methods[output] = output_dict
    return return_methods
def _check_reply_pending(self, option):
if not self.telnet_opt_dict.has_key(option):
self.telnet_opt_dict[option] = TelnetOption()
return self.telnet_opt_dict[option].reply_pending | Test the status of requested Telnet options. |
def eigenvector_sensitivity(T, k, j, right=True):
    r"""Sensitivity matrix of a selected eigenvector element.

    Parameters
    ----------
    T : (M, M) ndarray
        Transition matrix (stochastic matrix).
    k : int
        Eigenvector index
    j : int
        Element index
    right : bool
        If True compute for right eigenvector, otherwise compute for
        left eigenvector.

    Returns
    -------
    S : (M, M) ndarray
        Sensitivity matrix for the j-th element of the k-th eigenvector.
    """
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    if _issparse(T):
        _showSparseConversionWarning()
        # Bug fix: the recursive call's result was previously discarded,
        # so sparse inputs always returned None.
        return eigenvector_sensitivity(T.todense(), k, j, right=right)
    else:
        return dense.sensitivity.eigenvector_sensitivity(T, k, j, right=right)
def is_token_valid(self, token):
    """Check the validity of a given token.

    :param token: Access or refresh token
    :return: False when the token is unknown, expired, black-listed, or
        was minted by a handler inconsistent with the session's oauth
        state; True otherwise.
    """
    try:
        info = self.handler.info(token)
    except KeyError:
        return False
    if is_expired(int(info['exp'])) or info['black_listed']:
        return False
    session_info = self[info['sid']]
    state = session_info["oauth_state"]
    if state == "authz" and info['handler'] != self.handler['code']:
        return False
    if state == "token" and info['handler'] != self.handler['access_token']:
        return False
    return True
def should_sample(self):
    """Determine whether to sample this request or not.

    If the context enables tracing, return True; otherwise follow the
    sampler's decision.

    :rtype: bool
    :returns: Whether to trace the request or not.
    """
    ctx = self.span_context
    return ctx.trace_options.enabled \
        or self.sampler.should_sample(ctx.trace_id)
def kill(self, wait=True, sig=None):
    """Kill the process (group) if running.

    :param bool wait: set to `True` to wait for the process to end,
        or False, to simply proceed after sending signal.
    :param int sig: signal used to kill the process; defaults to
        ``self._sig_kill``.
    :returns: itself
    :rtype: SimpleExecutor
    """
    if sig is None:
        sig = self._sig_kill
    if self.running():
        os.killpg(self.process.pid, sig)
        if wait:
            self.process.wait()
    self._kill_all_kids(sig)
    self._clear_process()
    return self
def _put_bucket_cors(self):
    """Apply the configured S3 CORS rules, or delete the bucket's CORS
    configuration when CORS or website hosting is disabled."""
    cors_active = (self.s3props['cors']['enabled']
                   and self.s3props['website']['enabled'])
    if cors_active:
        cors_rules = [{
            'AllowedHeaders': rule['cors_headers'],
            'AllowedMethods': rule['cors_methods'],
            'AllowedOrigins': rule['cors_origins'],
            'ExposeHeaders': rule['cors_expose_headers'],
            'MaxAgeSeconds': rule['cors_max_age'],
        } for rule in self.s3props['cors']['cors_rules']]
        cors_config = {'CORSRules': cors_rules}
        LOG.debug(cors_config)
        _response = self.s3client.put_bucket_cors(
            Bucket=self.bucket, CORSConfiguration=cors_config)
    else:
        _response = self.s3client.delete_bucket_cors(Bucket=self.bucket)
    LOG.debug('Response setting up S3 CORS: %s', _response)
    LOG.info('S3 CORS configuration updated')
def density_matrix(self):
    """Return the density matrix at this step in the simulation.

    The matrix is expressed in the computational basis defined by the
    qubit_map: the value in the qubit_map is the qubit's index, and row
    and column indices are the big-endian binary encodings of those
    basis states (the last qubit is the 1s bit). The result is a
    ``2 ** num_qubits`` square matrix.
    """
    dim = 2 ** len(self._qubit_map)
    return np.reshape(self._density_matrix, (dim, dim))
def convert_complexFaultSource(self, node):
    """Convert the given node into a complex fault object.

    :param node: a node with tag areaGeometry
    :returns: a :class:`openquake.hazardlib.source.ComplexFaultSource`
              instance
    """
    edges = self.geo_lines(node.complexFaultGeometry)
    mfd = self.convert_mfdist(node)
    msr = valid.SCALEREL[~node.magScaleRel]()
    with context(self.fname, node):
        return source.ComplexFaultSource(
            source_id=node['id'],
            name=node['name'],
            tectonic_region_type=node.attrib.get('tectonicRegion'),
            mfd=mfd,
            rupture_mesh_spacing=self.complex_fault_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            edges=edges,
            rake=~node.rake,
            temporal_occurrence_model=self.get_tom(node))
def decode_csr(b64der):
    """Decode JOSE Base-64 DER-encoded CSR.

    :param str b64der: The encoded CSR.

    :rtype: `cryptography.x509.CertificateSigningRequest`
    :return: The decoded CSR.
    """
    try:
        der = decode_b64jose(b64der)
        return x509.load_der_x509_csr(der, default_backend())
    except ValueError as error:
        raise DeserializationError(error)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.