code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def rectify_pgroups(self):
    """Synchronize the parameter groups section with the parameter data section.

    Adds a default group row for every group named in ``parameter_data`` that
    is missing from ``parameter_groups``, then restricts ``parameter_groups``
    to exactly the groups referenced by ``parameter_data``.
    """
    # All group names actually referenced by the parameter data section.
    pdata_groups = list(self.parameter_data.loc[:,"pargp"].\
        value_counts().keys())
    need_groups = []
    existing_groups = list(self.parameter_groups.pargpnme)
    for pg in pdata_groups:
        if pg not in existing_groups:
            need_groups.append(pg)
    if len(need_groups) > 0:
        # Copy so the shared defaults template is not mutated.
        defaults = copy.copy(pst_utils.pst_config["pargp_defaults"])
        for grp in need_groups:
            defaults["pargpnme"] = grp
            # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
            # consider pd.concat — left unchanged here.
            self.parameter_groups = \
                self.parameter_groups.append(defaults,ignore_index=True)
    # Also keep pre-existing groups that are still referenced.
    for gp in self.parameter_groups.loc[:,"pargpnme"]:
        if gp in pdata_groups and gp not in need_groups:
            need_groups.append(gp)
    self.parameter_groups.index = self.parameter_groups.pargpnme
    # Drop group rows not referenced by any parameter.
    self.parameter_groups = self.parameter_groups.loc[need_groups,:] | private method to synchronize parameter groups section with
the parameter data section |
def pathFromHere_explore(self, astr_startPath = '/'):
    """Return a list of paths from "here" in the stree, via child-explore access.

    Walks the tree from ``astr_startPath`` with :meth:`treeExplore`, letting the
    ``lwd`` callback accumulate each visited path into ``self.l_lwd``.

    :param astr_startPath: path from which to start
    :return: a list of paths from "here"
    """
    self.l_lwd = []
    self.treeExplore(startPath = astr_startPath, f=self.lwd)
    return self.l_lwd | Return a list of paths from "here" in the stree, using the
child explore access.
:param astr_startPath: path from which to start
:return: a list of paths from "here" |
def installSite(self):
    """Install this item by powering it up on its store for each powerup interface.

    Deliberately bypasses the dependency system: this class is only installed
    via the command line, and multiple instances can be installed.
    """
    for iface, priority in self.__getPowerupInterfaces__([]):
        self.store.powerUp(self, iface, priority) | Not using the dependency system for this class because it's only
installed via the command line, and multiple instances can be
installed. |
def Load(cls, file_input, client=None):
if client is None:
client = AdWordsClient.LoadFromStorage()
try:
data = yaml.safe_load(file_input)
except yaml.YAMLError as e:
raise googleads.errors.GoogleAdsError(
'Error loading IncrementalUploadHelper from file: %s' % str(e))
try:
request_builder = BatchJobHelper.GetRequestBuilder(
client, version=data['version'], server=data['server']
)
return cls(request_builder, data['upload_url'],
current_content_length=data['current_content_length'],
is_last=data['is_last'])
except KeyError as e:
raise googleads.errors.GoogleAdsValueError(
'Can\'t parse IncrementalUploadHelper from file. Required field '
'"%s" is missing.' % e.message) | Loads an IncrementalUploadHelper from the given file-like object.
Args:
file_input: a file-like object containing a serialized
IncrementalUploadHelper.
client: an AdWordsClient instance. If not specified, an AdWordsClient will
be instantiated using the default configuration file.
Returns:
An IncrementalUploadHelper instance initialized using the contents of the
serialized input file.
Raises:
GoogleAdsError: If there is an error reading the input file containing the
serialized IncrementalUploadHelper.
GoogleAdsValueError: If the contents of the input file can't be parsed to
produce an IncrementalUploadHelper. |
def prune_by_work_count(self, minimum=None, maximum=None, label=None):
self._logger.info('Pruning results by work count')
count_fieldname = 'tmp_count'
matches = self._matches
if label is not None:
matches = matches[matches[constants.LABEL_FIELDNAME] == label]
filtered = matches[matches[constants.COUNT_FIELDNAME] > 0]
grouped = filtered.groupby(constants.NGRAM_FIELDNAME, sort=False)
counts = pd.DataFrame(grouped[constants.WORK_FIELDNAME].nunique())
counts.rename(columns={constants.WORK_FIELDNAME: count_fieldname},
inplace=True)
if minimum:
counts = counts[counts[count_fieldname] >= minimum]
if maximum:
counts = counts[counts[count_fieldname] <= maximum]
self._matches = pd.merge(self._matches, counts,
left_on=constants.NGRAM_FIELDNAME,
right_index=True)
del self._matches[count_fieldname] | Removes results rows for n-grams that are not attested in a
number of works in the range specified by `minimum` and
`maximum`.
Work here encompasses all witnesses, so that the same n-gram
appearing in multiple witnesses of the same work are counted
as a single work.
If `label` is specified, the works counted are restricted to
those associated with `label`.
:param minimum: minimum number of works
:type minimum: `int`
:param maximum: maximum number of works
:type maximum: `int`
:param label: optional label to restrict requirement to
:type label: `str` |
def fetch(self, from_time, until_time=None):
    """Fetch data from the whisper database for the given period.

    :param from_time: a datetime marking the start of the period
    :param until_time: a datetime marking the end; defaults to now
    :return: zip of (timestamp, value) pairs for the returned window

    NOTE(review): ``strftime('%s')`` is a platform-specific (glibc) extension
    and ignores timezone handling — confirm behavior on the target platform.
    """
    until_time = until_time or datetime.now()
    time_info, values = whisper.fetch(self.path,
                                      from_time.strftime('%s'),
                                      until_time.strftime('%s'))
    start_time, end_time, step = time_info
    # Reconstruct a timestamp for every step of the returned window.
    current = start_time
    times = []
    while current <= end_time:
        times.append(current)
        current += step
    return zip(times, values) | This method fetch data from the database according to the period
given
fetch(path, fromTime, untilTime=None)
fromTime is an datetime
untilTime is also an datetime, but defaults to now.
Returns a tuple of (timeInfo, valueList)
where timeInfo is itself a tuple of (fromTime, untilTime, step)
Returns None if no data can be returned |
def _preprocess_input(self, input):
    """Preprocess the input text before it is split into a list.

    If none of the characters matched by ``preprocess_chars`` occur, the text
    is returned unchanged; otherwise punctuation spacing is added.
    NOTE(review): the parameter name shadows the ``input`` builtin.
    """
    if not re.search(preprocess_chars, input):
        return input
    input = self._add_punctuation_spacing(input)
    return input | Preprocesses the input before it's split into a list.
def peek(self, lpBaseAddress, nSize):
data = ''
if nSize > 0:
try:
hProcess = self.get_handle( win32.PROCESS_VM_READ |
win32.PROCESS_QUERY_INFORMATION )
for mbi in self.get_memory_map(lpBaseAddress,
lpBaseAddress + nSize):
if not mbi.is_readable():
nSize = mbi.BaseAddress - lpBaseAddress
break
if nSize > 0:
data = win32.ReadProcessMemory(
hProcess, lpBaseAddress, nSize)
except WindowsError:
e = sys.exc_info()[1]
msg = "Error reading process %d address %s: %s"
msg %= (self.get_pid(),
HexDump.address(lpBaseAddress),
e.strerror)
warnings.warn(msg)
return data | Reads the memory of the process.
@see: L{read}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type nSize: int
@param nSize: Number of bytes to read.
@rtype: str
@return: Bytes read from the process memory.
Returns an empty string on error. |
def from_netcdf(filename):
    """Initialize an InferenceData object from a netcdf file.

    Expects that the file has groups, each of which can be loaded by xarray.

    Parameters
    ----------
    filename : str
        location of netcdf file

    Returns
    -------
    InferenceData object
    """
    groups = {}
    # First pass: only enumerate group names (``data`` is reused below).
    with nc.Dataset(filename, mode="r") as data:
        data_groups = list(data.groups)
    for group in data_groups:
        # One xarray.Dataset per netcdf group.
        with xr.open_dataset(filename, group=group) as data:
            groups[group] = data
    return InferenceData(**groups) | Initialize object from a netcdf file.
Expects that the file will have groups, each of which can be loaded by xarray.
Parameters
----------
filename : str
location of netcdf file
Returns
-------
InferenceData object |
def _recursive_overwrite(self, src, dest):
    """Copy ``src`` to ``dest`` recursively, overwriting existing files.

    Destination directories are created as needed; symlinked files are
    copied without following the link.
    """
    if os.path.isdir(src):
        if not os.path.isdir(dest):
            os.makedirs(dest)
        files = os.listdir(src)
        for f in files:
            self._recursive_overwrite(os.path.join(src, f),
                                      os.path.join(dest, f))
    else:
        shutil.copyfile(src, dest, follow_symlinks=False) | Copy src to dest, recursively and with file overwrite. |
def vault_relation_complete(backend=None):
    """Determine whether the relation to vault is complete.

    :param backend: name of the secrets backend requested (defaults to
        VAULTLOCKER_BACKEND)
    :returns: whether the relation to vault is complete
    :rtype: bool
    """
    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
    # Evaluating the context populates its ``complete`` flag.
    vault_kv()
    return vault_kv.complete | Determine whether vault relation is complete
:param backend: Name of secrets backend requested
:ptype backend: string
:returns: whether the relation to vault is complete
:rtype: bool |
def help(self, command=None):
from spython.utils import check_install
check_install()
cmd = ['singularity','--help']
if command != None:
cmd.append(command)
help = self._run_command(cmd)
return help | help prints the general function help, or help for a specific command
Parameters
==========
command: the command to get help for, if none, prints general help |
def _unbind(cls, boundname):
try:
fs = CPE2_3_FS(boundname)
except:
try:
uri = CPE2_3_URI(boundname)
except:
return CPE2_3_WFN(boundname)
else:
return CPE2_3_WFN(uri.as_wfn())
else:
return CPE2_3_WFN(fs.as_wfn()) | Unbinds a bound form to a WFN.
:param string boundname: CPE name
:returns: WFN object associated with boundname.
:rtype: CPE2_3_WFN |
def get_command(self, name):
    """Wrap the named controller command in a closure that builds its client.

    The returned ``command(options)`` callable constructs a ZookeeperClient
    from the parsed options, instantiates a Command controller and invokes
    its ``cmd_<name>`` method with the remaining options.
    """
    def command(options):
        client = ZookeeperClient(
            "%s:%d" % (options.pop('host'), options.pop('port')),
            session_timeout=1000
        )
        path = options.pop('path_prefix')
        force = options.pop('force')
        extra = options.pop('extra')
        # Fold any extra key/values into the method keyword arguments.
        options.update(extra)
        controller = Command(client, path, self.services, force)
        method = getattr(controller, "cmd_%s" % name)
        return method(**options)
    return command | Wrap command class in constructor.
def copy_non_reserved(props, target):
    """Copy every property with a non-reserved name from ``props`` to ``target``.

    :param props: A dictionary of properties
    :param target: Another dictionary, mutated in place
    """
    for key, value in props.items():
        if not is_reserved_property(key):
            target[key] = value
return target | Copies all properties with non-reserved names from ``props`` to ``target``
:param props: A dictionary of properties
:param target: Another dictionary
:return: The target dictionary |
def _wrap_client(self, region_name, method, *args, **kwargs):
    """Proxy a call to a KMS client method, evicting misbehaving clients.

    On BotoCoreError the cached regional client is dropped so a fresh one is
    built on next use, then the error is re-raised to the caller.

    :param str region_name: AWS Region ID (ex: us-east-1)
    :param callable method: a method on the KMS client to proxy
    :param tuple args: arguments to pass to the provided ``method``
    :param dict kwargs: keyword arguments to pass to the provided ``method``
    """
    try:
        return method(*args, **kwargs)
    except botocore.exceptions.BotoCoreError:
        # Evict the regional client so subsequent calls rebuild it.
        self._regional_clients.pop(region_name)
        _LOGGER.error(
            'Removing regional client "%s" from cache due to BotoCoreError on %s call', region_name, method.__name__
        )
        raise | Proxies all calls to a kms clients methods and removes misbehaving clients
def get_available_types_for_scene(self, element):
available = []
for typ, inter in self.types.items():
if inter(self).is_available_for_scene(element):
available.append(typ)
return available | Return a list of types that can be used in combination with the given element
to add new reftracks to the scene.
This allows for example the user, to add new reftracks (aliens) to the scene.
So e.g. for a shader, it wouldn't make sense to make it available to be added to the scene, because
one would use them only as children of let's say an asset or cache.
Some types might only be available for shots or assets etc.
:param element: the element that could be used in conjuction with the returned types to create new reftracks.
:type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`
:returns: a list of types
:rtype: :class:`list`
:raises: None |
def cublasCher2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
status = _libcublas.cublasCher2k_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
n, k, ctypes.byref(cuda.cuFloatComplex(alpha.real,
alpha.imag)),
int(A), lda, int(B), ldb,
ctypes.byref(cuda.cuFloatComplex(beta.real,
beta.imag)),
int(C), ldc)
cublasCheckStatus(status) | Rank-2k operation on Hermitian matrix. |
def _getArrays(items, attr, defaultValue):
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue))
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key])
return arrays | Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...} |
def list_vms(access_token, subscription_id, resource_group):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachines',
'?api-version=', COMP_API])
return do_get(endpoint, access_token) | List VMs in a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. JSON body of a list of VM model views. |
def _login(self):
self.logger.debug("Logging into " + "{}/{}".format(self._im_api_url, "j_spring_security_check"))
self._im_session.headers.update({'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'})
self.j_username = self._username
self.j_password = self._password
requests.packages.urllib3.disable_warnings()
payload = {'j_username': self.j_username, 'j_password': self.j_password, 'submit':'Login'}
r = self._im_session.post(
"{}/{}".format(self._im_api_url,"j_spring_security_check"),
verify=self._im_verify_ssl,
data=payload)
self.logger.debug("Login POST response: " + "{}".format(r.text))
self._im_logged_in = True | LOGIN CAN ONLY BE DONE BY POSTING TO A HTTP FORM.
A COOKIE IS THEN USED FOR INTERACTING WITH THE API |
def _to_string(self, fmt, locale=None):
    """Format the instance using a named common string format.

    :param fmt: the name of the string format (a key of ``_FORMATS``)
    :type fmt: string
    :param locale: the locale to use
    :type locale: str or None
    :rtype: str
    :raises ValueError: if ``fmt`` is not a supported format name
    """
    if fmt not in self._FORMATS:
        raise ValueError("Format [{}] is not supported".format(fmt))
    fmt = self._FORMATS[fmt]
    # A registered format may be a callable on the instance or a format string.
    if callable(fmt):
        return fmt(self)
    return self.format(fmt, locale=locale) | Format the instance to a common string format.
:param fmt: The name of the string format
:type fmt: string
:param locale: The locale to use
:type locale: str or None
:rtype: str |
def linkify_s_by_hst(self, hosts):
for serv in self:
if not hasattr(serv, 'host_name'):
serv.host = None
continue
try:
hst_name = serv.host_name
hst = hosts.find_by_name(hst_name)
if hst is not None:
serv.host = hst.uuid
hst.add_service_link(serv.uuid)
else:
err = "Warning: the service '%s' got an invalid host_name '%s'" % \
(serv.get_name(), hst_name)
serv.configuration_warnings.append(err)
continue
except AttributeError:
pass | Link services with their parent host
:param hosts: Hosts to look for simple host
:type hosts: alignak.objects.host.Hosts
:return: None |
def sendMessage(self,chat_id,text,parse_mode=None,disable_web=None,reply_msg_id=None,markup=None):
    """Send a message to ``chat_id`` via the sendMessage API call.

    On failure returns False; on success returns a Message object.
    """
    payload={'chat_id' : chat_id, 'text' : text, 'parse_mode': parse_mode , 'disable_web_page_preview' : disable_web , 'reply_to_message_id' : reply_msg_id}
    if(markup):
        # reply_markup must be JSON-encoded for the API.
        payload['reply_markup']=json.dumps(markup)
    response_str = self._command('sendMessage',payload,method='post')
    return _validate_response_msg(response_str) | On failure returns False
On success returns Message Object |
def process_cpp(self, path, suffix):
    """Run cpplint on a C++ file and record its per-category error counts.

    Errors are stored in ``cpp_header_map`` for headers (suffix 'h') or in
    ``cpp_src_map`` for sources, keyed by the file path.
    """
    _cpplint_state.ResetErrorCounts()
    cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
    _cpplint_state.PrintErrorCounts()
    # Copy so later resets don't clobber the stored counts.
    errors = _cpplint_state.errors_by_category.copy()
    if suffix == 'h':
        self.cpp_header_map[str(path)] = errors
    else:
        self.cpp_src_map[str(path)] = errors | Process a cpp file.
def delayed_unpacking(self, container, fun, *args, **kwargs):
    """Unpack a mutable value, deferring when a circular reference is hit.

    Runs ``fun`` inside a transaction; if it raises DelayPacking, the
    transaction is rolled back and the call is queued in ``self._pending``
    to be retried later, which allows circular references to resolve.
    Returns ``container`` in either case.
    """
    try:
        # Track the nesting depth of delayed unpacking.
        self._delayed += 1
        blob = self._begin()
        try:
            fun(*args, **kwargs)
            self._commit(blob)
            return container
        except DelayPacking:
            # Not resolvable yet: undo and queue a continuation for retry.
            self._rollback(blob)
            continuation = (fun, args, kwargs)
            self._pending.append(continuation)
            return container
    finally:
        self._delayed -= 1 | Should be used when unpacking mutable values.
This allows circular references resolution by pausing serialization. |
def get_way(self, way_id, resolve_missing=False):
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing way is disabled")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"out body;\n"
)
query = query.format(
way_id=way_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
raise exception.DataIncomplete("Unable to resolve requested way")
return ways[0] | Get a way by its ID.
:param way_id: The way ID
:type way_id: Integer
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:rtype: overpy.Way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved. |
def _check_types(self) -> None:
all_instance_fields_and_types: List[Dict[str, str]] = [{k: v.__class__.__name__
for k, v in x.fields.items()}
for x in self.instances]
if not all([all_instance_fields_and_types[0] == x for x in all_instance_fields_and_types]):
raise ConfigurationError("You cannot construct a Batch with non-homogeneous Instances.") | Check that all the instances have the same types. |
def usergroup_exists(name=None, node=None, nodeids=None, **kwargs):
conn_args = _login(**kwargs)
zabbix_version = apiinfo_version(**kwargs)
ret = {}
try:
if conn_args:
if _LooseVersion(zabbix_version) > _LooseVersion("2.5"):
if not name:
name = ''
ret = usergroup_get(name, None, **kwargs)
return bool(ret)
else:
method = 'usergroup.exists'
params = {}
if not name and not node and not nodeids:
return {'result': False, 'comment': 'Please submit name, node or nodeids parameter to check if '
'at least one user group exists.'}
if name:
params['name'] = name
if _LooseVersion(zabbix_version) < _LooseVersion("2.4"):
if node:
params['node'] = node
if nodeids:
params['nodeids'] = nodeids
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']
else:
raise KeyError
except KeyError:
return ret | Checks if at least one user group that matches the given filter criteria exists
.. versionadded:: 2016.3.0
:param name: names of the user groups
:param node: name of the node the user groups must belong to (This will override the nodeids parameter.)
:param nodeids: IDs of the nodes the user groups must belong to
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: True if at least one user group that matches the given filter criteria exists, else False.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usergroup_exists Guests |
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd) | Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments. |
def lower_unsupported_metafield_expressions(ir_blocks):
def visitor_fn(expression):
if not isinstance(expression, expressions.LocalField):
return expression
if expression.field_name not in constants.UNSUPPORTED_META_FIELDS:
return expression
raise NotImplementedError(
u'Encountered unsupported metafield {} in LocalField {} during construction of '
u'SQL query tree for IR blocks {}.'.format(
constants.UNSUPPORTED_META_FIELDS[expression.field_name], expression, ir_blocks))
new_ir_blocks = [
block.visit_and_update_expressions(visitor_fn)
for block in ir_blocks
]
return new_ir_blocks | Raise exception if an unsupported metafield is encountered in any LocalField expression. |
def aggregate(self, index):
if isinstance(index, string_types):
col_df_grouped = self.col_df.groupby(self.df[index])
else:
self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])
col_df_grouped = self.col_df.groupby(level=index)
self.col_df.index = self.df.index
self.reduced_df = pd.DataFrame({
colred: col_df_grouped[colred.column].agg(colred.agg_func)
for colred in self.column_reductions
})
reduced_dfs = []
for cf in self.column_functions:
reduced_dfs.append(cf.apply_and_name(self))
return pd.concat(reduced_dfs, axis=1) | Performs a groupby of the unique Columns by index, as constructed from self.df.
Args:
index (str, or pd.Index): Index or column name of self.df.
Returns:
pd.DataFrame: A dataframe, aggregated by index, that contains the result
of the various ColumnFunctions, and named accordingly. |
def define_objective_with_I(I, *args):
objective = I[0][0]
if len(args) > 2 or len(args) == 0:
raise Exception("Wrong number of arguments!")
elif len(args) == 1:
A = args[0].parties[0]
B = args[0].parties[1]
else:
A = args[0]
B = args[1]
i, j = 0, 1
for m_Bj in B:
for Bj in m_Bj:
objective += I[i][j] * Bj
j += 1
i += 1
for m_Ai in A:
for Ai in m_Ai:
objective += I[i][0] * Ai
j = 1
for m_Bj in B:
for Bj in m_Bj:
objective += I[i][j] * Ai * Bj
j += 1
i += 1
return -objective | Define a polynomial using measurements and an I matrix describing a Bell
inequality.
:param I: The I matrix of a Bell inequality in the Collins-Gisin notation.
:type I: list of list of int.
:param args: Either the measurements of Alice and Bob or a `Probability`
class describing their measurement operators.
:type A: tuple of list of list of
:class:`sympy.physics.quantum.operator.HermitianOperator` or
:class:`ncpol2sdpa.Probability`
:returns: :class:`sympy.core.expr.Expr` -- the objective function to be
solved as a minimization problem to find the maximum quantum
violation. Note that the sign is flipped compared to the Bell
inequality. |
def get_assessments_offered_by_banks(self, bank_ids):
    """Gets the ``AssessmentOffered`` objects corresponding to a list of ``Banks``.

    Concatenates the per-bank results into a single AssessmentOfferedList.

    arg: bank_ids (osid.id.IdList): list of bank ``Ids``
    return: (osid.assessment.AssessmentOfferedList) - assessments offered
    """
    assessment_offered_list = []
    for bank_id in bank_ids:
        assessment_offered_list += list(
            self.get_assessments_offered_by_bank(bank_id))
    return objects.AssessmentOfferedList(assessment_offered_list) | Gets the list of ``AssessmentOffered`` objects corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.AssessmentOfferedList) - list of
assessments offered
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* |
def _add_genotypes(self, variant_obj, gemini_variant, case_id,
individual_objs):
for ind in individual_objs:
index = ind.ind_index
variant_obj.add_individual(Genotype(
sample_id=ind.ind_id,
genotype=gemini_variant['gts'][index],
case_id=case_id,
phenotype=ind.phenotype,
ref_depth=gemini_variant['gt_ref_depths'][index],
alt_depth=gemini_variant['gt_alt_depths'][index],
depth=gemini_variant['gt_depths'][index],
genotype_quality=gemini_variant['gt_quals'][index]
)) | Add the genotypes for a variant for all individuals
Args:
variant_obj (puzzle.models.Variant)
gemini_variant (GeminiQueryRow): The gemini variant
case_id (str): related case id
individual_objs (list(dict)): A list of Individuals |
def at_css(self, css, timeout = DEFAULT_AT_TIMEOUT, **kw):
    """Return the first node matching the given CSSv3 expression, waiting
    safely for it to appear, or ``None`` if a timeout occurs."""
    return self.wait_for_safe(lambda: super(WaitMixin, self).at_css(css),
                              timeout = timeout,
                              **kw) | Returns the first node matching the given CSSv3 expression or ``None``
if a timeout occurs. |
def extract_relations(dgtree, relations=None):
if hasattr(dgtree, 'reltypes'):
return dgtree.reltypes
if relations is None:
relations = {}
if is_leaf(dgtree):
return relations
root_label = dgtree.label()
if root_label == '':
assert dgtree == DGParentedTree('', []), \
"The tree has no root label, but isn't empty: {}".format(dgtree)
return relations
elif root_label in NUCLEARITY_LABELS:
for child in dgtree:
relations.update(extract_relations(child, relations))
else:
child_labels = [child.label() for child in dgtree]
assert all(label in NUCLEARITY_LABELS for label in child_labels)
if 'S' in child_labels:
relations[root_label] = 'rst'
else:
relations[root_label] = 'multinuc'
for child in dgtree:
relations.update(extract_relations(child, relations))
return relations | Extracts relations from a DGParentedTree.
Given a DGParentedTree, returns a (relation name, relation type) dict
of all the RST relations occurring in that tree. |
def deserialize(serialized_material_description):
try:
_raw_material_description = serialized_material_description[Tag.BINARY.dynamodb_tag]
material_description_bytes = io.BytesIO(_raw_material_description)
total_bytes = len(_raw_material_description)
except (TypeError, KeyError):
message = "Invalid material description"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
_read_version(material_description_bytes)
material_description = {}
try:
while material_description_bytes.tell() < total_bytes:
name = to_str(decode_value(material_description_bytes))
value = to_str(decode_value(material_description_bytes))
material_description[name] = value
except struct.error:
message = "Invalid material description"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
return material_description | Deserialize a serialized material description attribute into a material description dictionary.
:param dict serialized_material_description: DynamoDB attribute value containing serialized material description.
:returns: Material description dictionary
:rtype: dict
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found |
def generate_ctrlptsw2d_file(file_in='', file_out='ctrlptsw.txt'):
    """Generate a weighted 2-D control points file from an unweighted one.

    Reads (x, y, z, w) control points from ``file_in``, converts them to
    (x*w, y*w, z*w, w) form, and saves the result to ``file_out``.

    :param file_in: name of the input file (to be read)
    :type file_in: str
    :param file_out: name of the output file (to be saved)
    :type file_out: str
    :raises IOError: an error occurred reading or writing the file
    """
    ctrlpts2d, size_u, size_v = _read_ctrltps2d_file(file_in)
    new_ctrlpts2d = generate_ctrlptsw2d(ctrlpts2d)
    _save_ctrlpts2d_file(new_ctrlpts2d, size_u, size_v, file_out) | Generates weighted control points from unweighted ones in 2-D.
This function
#. Takes in a 2-D control points file whose coordinates are organized in (x, y, z, w) format
#. Converts into (x*w, y*w, z*w, w) format
#. Saves the result to a file
Therefore, the resultant file could be a direct input of the NURBS.Surface class.
:param file_in: name of the input file (to be read)
:type file_in: str
:param file_out: name of the output file (to be saved)
:type file_out: str
:raises IOError: an error occurred reading or writing the file |
def alpha_blend(self, other):
    """Alpha-blend this color on top of ``other`` and return the result.

    NOTE(review): raises ZeroDivisionError when ``other`` is fully
    transparent (alpha == 0) while this color is not — confirm whether
    callers guarantee a non-zero destination alpha.
    """
    # Composite alpha of the blended color.
    fa = self.__a + other.__a - (self.__a * other.__a)
    if fa==0: sa = 0
    else: sa = min(1.0, self.__a/other.__a)
    da = 1.0 - sa
    # Source and destination components weighted by their blend factors.
    sr, sg, sb = [v * sa for v in self.__rgb]
    dr, dg, db = [v * da for v in other.__rgb]
    return Color((sr+dr, sg+dg, sb+db), 'rgb', fa, self.__wref) | Alpha-blend this color on the other one.
Args:
:other:
The grapefruit.Color to alpha-blend with this one.
Returns:
A grapefruit.Color instance which is the result of alpha-blending
this color on the other one.
>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)
>>> c2 = Color.from_rgb(1, 1, 1, 0.8)
>>> c3 = c1.alpha_blend(c2)
>>> c3
Color(1.0, 0.875, 0.75, 0.84) |
def vinet_v_single(p, v0, k0, k0p, min_strain=0.01):
if p <= 1.e-5:
return v0
def f_diff(v, v0, k0, k0p, p):
return vinet_p(v, v0, k0, k0p) - p
v = brenth(f_diff, v0, v0 * min_strain, args=(v0, k0, k0p, p))
return v | find volume at given pressure using brenth in scipy.optimize
this is for single p value, not vectorized
:param p: pressure in GPa
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param min_strain: defining minimum v/v0 value to search volume for
:return: unit cell volume at high pressure in A^3 |
def authenticate(self):
    """Acquire an authorization token for the thetvdb APIs.

    If a token already exists, try to refresh it; on a 401 response fall
    back to a full login. Otherwise log in from scratch. Stores the new
    token and the time it was obtained.
    """
    if self.__token:
        try:
            resp = self._refresh_token()
        except exceptions.TVDBRequestException as err:
            # 401 means the token expired: re-login. Anything else bubbles up.
            if getattr(err.response, 'status_code', 0) == 401:
                resp = self._login()
            else:
                raise
    else:
        resp = self._login()
    self.__token = resp.get('token')
    self._token_timer = timeutil.utcnow() | Aquire authorization token for using thetvdb apis.
def _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam, disambig_out_file, data):
out_file = disambig_out_file
fusiondict = {}
with open(star_junction_file, "r") as in_handle:
for my_line in in_handle:
my_line_split = my_line.strip().split("\t")
if len(my_line_split) < 10:
continue
fusiondict[my_line_split[9]] = my_line.strip("\n")
with pysam.Samfile(contamination_bam, "rb") as samfile:
for my_read in samfile:
if my_read.is_unmapped or my_read.is_secondary:
continue
if my_read.qname in fusiondict:
fusiondict.pop(my_read.qname)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, 'w') as myhandle:
for my_key in fusiondict:
print(fusiondict[my_key], file=myhandle)
return out_file | Disambiguate detected fusions based on alignments to another species. |
def service(
state, host,
*args, **kwargs
):
if host.fact.which('systemctl'):
yield systemd(state, host, *args, **kwargs)
return
if host.fact.which('initctl'):
yield upstart(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/init.d'):
yield d(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/rc.d'):
yield rc(state, host, *args, **kwargs)
return
raise OperationError((
'No init system found '
'(no systemctl, initctl, /etc/init.d or /etc/rc.d found)'
)) | Manage the state of services. This command checks for the presence of all the
init systems pyinfra can handle and executes the relevant operation. See init
system sepcific operation for arguments. |
def _set_alternates(self, alts):
alternates_path = osp.join(self.common_dir, 'objects', 'info', 'alternates')
if not alts:
if osp.isfile(alternates_path):
os.remove(alternates_path)
else:
with open(alternates_path, 'wb') as f:
f.write("\n".join(alts).encode(defenc)) | Sets the alternates
:param alts:
is the array of string paths representing the alternates at which
git should look for objects, i.e. /home/user/repo/.git/objects
:raise NoSuchPathError:
:note:
The method does not check for the existence of the paths in alts
as the caller is responsible. |
def ned_to_use(tensor):
    """Convert a tensor from the NED coordinate system to USE.

    Applies the similarity transform R * T * R^T using the NED->USE
    rotation matrix and returns the result as a plain ndarray.
    """
    return np.array(ROT_NED_USE * np.matrix(tensor) * ROT_NED_USE.T) | Converts a tensor in NED coordinate sytem to USE
def map_package(shutit_pexpect_session, package, install_type):
    """Map a generic package name to its install-type-specific name.

    If PACKAGE_MAP has an entry for ``package`` under ``install_type`` and
    the mapped value is a string, return it; if the mapped value is a
    callable, invoke it with the session and return ''. Otherwise return
    the package name unchanged.
    """
    if package in PACKAGE_MAP.keys():
        for itype in PACKAGE_MAP[package].keys():
            if itype == install_type:
                ret = PACKAGE_MAP[package][install_type]
                if isinstance(ret,str):
                    return ret
                if callable(ret):
                    # Callables perform the mapping as a side effect.
                    ret(shutit_pexpect_session)
                    return ''
    return package | If package mapping exists, then return it, else return package.
def compile_query(query):
if isinstance(query, dict):
expressions = []
for key, value in query.items():
if key.startswith('$'):
if key not in query_funcs:
raise AttributeError('Invalid operator: {}'.format(key))
expressions.append(query_funcs[key](value))
else:
expressions.append(filter_query(key, value))
if len(expressions) > 1:
return boolean_operator_query(operator.and_)(expressions)
else:
return (
expressions[0]
if len(expressions)
else lambda query_function: query_function(None, None)
)
else:
return query | Compile each expression in query recursively. |
def purge_obsolete_samples(self, config, now):
    """Reset any sample windows that have expired in the absence of events.

    A sample is obsolete once its last window is older than
    ``config.samples * config.time_window_ms``.
    """
    expire_age = config.samples * config.time_window_ms
    for sample in self._samples:
        if now - sample.last_window_ms >= expire_age:
            sample.reset(now) | Timeout any windows that have expired in the absence of any events
def size(self, value):
    """Set the size parameter and regenerate the thumbnail link."""
    self._size = value
    self._thumb = self._link_to_img() | Set the size parameter and regenerate the thumbnail link.
def getmetadata(self, key=None):
if self.metadata:
d = self.doc.submetadata[self.metadata]
elif self.parent:
d = self.parent.getmetadata()
elif self.doc:
d = self.doc.metadata
else:
return None
if key:
return d[key]
else:
return d | Get the metadata that applies to this element, automatically inherited from parent elements |
def read(self, input_file):
key, value = None, None
import sys
for line in input_file:
if line == '\n':
break
if line[-1:] == '\n':
line = line[:-1]
item = line.split(':', 1)
if len(item) == 2:
self._update(key, value)
key, value = item[0], urllib.unquote(item[1])
elif key is not None:
value = '\n'.join([value, urllib.unquote(line)])
self._update(key, value)
return | Reads an InputHeader from `input_file`.
The input header is read as a sequence of *<key>***:***<value>* pairs
separated by a newline. The end of the input header is signalled by an
empty line or an end-of-file.
:param input_file: File-like object that supports iteration over lines |
def read(self, pin, is_differential=False):
    """I2C read for ADS1x15-based ADCs.

    :param pin: individual or differential pin.
    :param bool is_differential: single-ended or differential read.
    """
    # Single-ended channels are offset by 0x04 in the mux configuration.
    channel = pin + 0x04 if not is_differential else pin
    return self._read(channel)
params:
:param pin: individual or differential pin.
:param bool is_differential: single-ended or differential read. |
def clean(self):
    """Disallow creating slots that overlap existing slots for this
    instructor within the submitted date/time window."""
    super(SlotCreationForm, self).clean()
    data = self.cleaned_data
    window_start = ensure_localtime(
        datetime.combine(data.get('startDate'), data.get('startTime'))
    )
    window_end = ensure_localtime(
        datetime.combine(data.get('endDate'), data.get('endTime'))
    )
    # Slots beginning up to one lesson-interval before the window would
    # still overlap it, so widen the lower bound accordingly.
    interval = timedelta(
        minutes=getConstant('privateLessons__lessonLengthInterval')
    )
    overlapping = InstructorAvailabilitySlot.objects.filter(
        instructor=data.get('instructorId'),
        startTime__gt=window_start - interval,
        startTime__lt=window_end,
    )
    if overlapping.exists():
        raise ValidationError(
            _('Newly created slots cannot overlap existing slots for this instructor.'),
            code='invalid',
        )
and only allow rooms associated with the chosen location. |
def getOverlayTextureSize(self, ulOverlayHandle):
    """Get the size of the overlay texture.

    Returns a (error_code, width, height) tuple.
    """
    width = c_uint32()
    height = c_uint32()
    fn = self.function_table.getOverlayTextureSize
    error = fn(ulOverlayHandle, byref(width), byref(height))
    return error, width.value, height.value
def set_data(self, frames):
    """Prepare the input of the model.

    Each frame is transposed and, when 2-D, promoted to 3-D with a single
    channel axis; the stack is then reordered into the axis layout the
    model expects. Sets ``self.data`` and ``self.length``.
    """
    prepared = []
    for frame in frames:
        frame = frame.swapaxes(0, 1)
        if frame.ndim < 3:
            # Add a singleton channel axis and move it last.
            frame = np.array([frame]).swapaxes(0, 2).swapaxes(0, 1)
        prepared.append(frame)
    stacked = np.array(prepared)
    stacked = np.rollaxis(stacked, 3)
    self.data = stacked.swapaxes(2, 3)
    self.length = len(prepared)
def return_feature_list_base(dbpath, set_object):
    """Return the names of all available features on *set_object*.

    Parameters
    ----------
    dbpath : str
        Path to the SQLite database file.
    set_object : object
        Mapped class (either TestSet or TrainSet) stored in the database.

    Returns
    -------
    list
        Feature entries of the first stored object.
    """
    engine = create_engine('sqlite:////' + dbpath)
    session = sessionmaker(bind=engine)()
    # Fix: close the session even if the query raises, so the connection
    # is not leaked on error.
    try:
        first_row = session.query(set_object).get(1)
        return list(first_row.features)
    finally:
        session.close()
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
Returns
-------
return_list : list of strings corresponding to all available features |
def get_deliveryserver(self, domainid, serverid):
    """Get a delivery server for the given domain."""
    params = dict(domainid=domainid, serverid=serverid)
    return self.api_call(ENDPOINTS['deliveryservers']['get'], params)
def add_scm_info(self):
    """Add SCM-related info (revision and branch) to the collected infos."""
    scm = get_scm()
    if not scm:
        # No SCM detected: record placeholder values.
        revision = branch = 'none'
    else:
        revision = scm.commit_id
        branch = scm.branch_name or revision
    self.add_infos(('revision', revision), ('branch', branch))
def get_k8s_metadata():
    """Get kubernetes container metadata, as on GCP GKE."""
    metadata = {}
    cluster = (gcp_metadata_config.GcpMetadataConfig
               .get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY))
    if cluster is not None:
        metadata[CLUSTER_NAME_KEY] = cluster
    # The remaining attributes come from environment variables injected
    # into the container by kubernetes.
    for attribute_key, env_name in _K8S_ENV_ATTRIBUTES.items():
        value = os.environ.get(env_name)
        if value is not None:
            metadata[attribute_key] = value
    return metadata
def mate_top(self):
    """Mate point at the top of the stator."""
    top_origin = (0, 0, self.length / 2)
    return Mate(self, CoordSystem(
        origin=top_origin,
        xDir=(0, 1, 0),
        normal=(0, 0, 1),
    ))
def long2str(l):
    """Convert a non-negative integer to a little-endian byte string.

    :param l: non-negative integer to convert
    :raises ValueError: if *l* is not an integer or is negative
    :return: string with the integer's bytes, least-significant first
    """
    # Fix: the original used py2-only `types.IntType`/`types.LongType`,
    # which fails on Python 3; also replaced quadratic `s = s + chr(...)`
    # with a join. `bool` is excluded to mirror the original's exact
    # type() check, which rejected True/False.
    if not isinstance(l, int) or isinstance(l, bool):
        raise ValueError('the input must be an integer')
    if l < 0:
        raise ValueError('the input must be greater than 0')
    chars = []
    while l:
        chars.append(chr(l & 255))
        l >>= 8
    return ''.join(chars)
def get_firmware_version(self, cached=True):
    """Return the SK8 device firmware version.

    Args:
        cached (bool): if True, return the locally cached copy of the
            firmware version when available. If False (or nothing is
            cached yet), read it from the device instead.

    Returns:
        str. The current firmware version string, or ``None`` if the
        handle for the firmware-revision characteristic cannot be found.
    """
    if cached and self.firmware_version != 'unknown':
        return self.firmware_version
    handle = self.get_characteristic_handle_from_uuid(UUID_FIRMWARE_REVISION)
    if handle is None:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning('Failed to find handle for firmware version')
        return None
    self.firmware_version = self.dongle._read_attribute(self.conn_handle, handle)
    return self.firmware_version
Args:
cached (bool): if True, returns the locally cached copy of the firmware version.
If this is set to False, or the version is not cached, it will read from
the device instead.
Returns:
str. The current firmware version string. May be `None` if an error occurs. |
def bind_client(self, new):
    """Bind a new client to the hub, keeping the current context."""
    current_context = self._stack[-1][1]
    self._stack[-1] = (new, current_context)
def linsys(x0, rho, P, q):
    """Proximal operator for the linear approximation Ax = b.

    Solves (rho*I + P) theta = rho*x0 + q, where P = A^T A and q = A^T b.

    Parameters
    ----------
    x0 : array_like
        Starting point for the proximal update step.
    rho : float
        Momentum parameter (larger -> stays closer to x0).
    P : array_like
        Symmetric matrix A^T A.
    q : array_like
        Vector A^T b.

    Returns
    -------
    theta : array_like
        Result of the proximal update step.
    """
    n = q.shape[0]
    lhs = rho * np.eye(n) + P
    rhs = rho * x0.copy() + q
    return np.linalg.solve(lhs, rhs)
Minimizes the function:
.. math:: f(x) = (1/2)||Ax-b||_2^2 = (1/2)x^TA^TAx - (b^TA)x + b^Tb
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
P : array_like
The symmetric matrix A^TA, where we are trying to approximate Ax=b
q : array_like
The vector A^Tb, where we are trying to approximate Ax=b
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step |
def compile(self):
    """Return the Hip string, compiling (and caching) it on first use."""
    compiled = self.buffer
    if compiled is None:
        compiled = self._compile_value(self.data, 0)
        self.buffer = compiled
    return compiled.strip()
def coerce_str_to_bool(val: t.Union[str, int, bool, None], strict: bool = False) -> bool:
    """Convert *val* into a boolean.

    :param val: any string representation of boolean
    :param strict: raise ``ValueError`` if ``val`` does not look like a
        boolean-like object
    :return: ``True`` if *val* is truthy, ``False`` otherwise
    :raises ValueError: under ``strict`` for unrecognised values
    """
    if isinstance(val, str):
        val = val.lower()
        mapped = ENV_STR_BOOL_COERCE_MAP.get(val)
        if mapped is not None:
            return mapped
    if strict:
        raise ValueError('Unsupported value for boolean flag: `%s`' % val)
    return bool(val)
:param val: any string representation of boolean
:param strict: raise ``ValueError`` if ``val`` does not look like a boolean-like object
:return: ``True`` if ``val`` is thruthy, ``False`` otherwise.
:raises ValueError: if ``strict`` specified and ``val`` got anything except
``['', 0, 1, true, false, on, off, True, False]`` |
def _request(self, func, url, version=1, *args, **kwargs):
return_json = kwargs.pop('return_json', False)
url = self.api_url[version] + url
response = func(url, *args, **kwargs)
if 'proxies' not in kwargs:
kwargs['proxies'] = self.proxydict
response.raise_for_status()
try:
json_response = response.json()
except ValueError:
json_response = None
if isinstance(json_response, dict):
error = json_response.get('error')
if error:
raise BitstampError(error)
elif json_response.get('status') == "error":
raise BitstampError(json_response.get('reason'))
if return_json:
if json_response is None:
raise BitstampError(
"Could not decode json for: " + response.text)
return json_response
return response | Make a generic request, adding in any proxy defined by the instance.
Raises a ``requests.HTTPError`` if the response status isn't 200, and
raises a :class:`BitstampError` if the response contains a json encoded
error message. |
def uri_path(self, path):
    """Set the Uri-Path (and optionally Uri-Query) of a request.

    :param path: the Uri-Path, optionally followed by ``?query``
    """
    parts = path.strip("/").split("?")
    for segment in parts[0].split("/"):
        option = Option()
        option.number = defines.OptionRegistry.URI_PATH.number
        option.value = segment
        self.add_option(option)
    if len(parts) > 1:
        # Everything after the first '?' up to the next '?' is the query.
        self.uri_query = parts[1]
:param path: the Uri-Path |
def show_log(self):
    """Show the log page, toggling the report/log actions accordingly."""
    # While the log is visible, allow switching back to the report.
    self.action_show_report.setEnabled(True)
    self.action_show_log.setEnabled(False)
    self.load_html_file(self.log_path)
def most_confused(self, min_val:int=1, slice_size:int=1)->Collection[Tuple[str,str,int]]:
    "Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
    cm = self.confusion_matrix(slice_size=slice_size)
    # Correct predictions (the diagonal) are not confusions.
    np.fill_diagonal(cm, 0)
    classes = self.data.classes
    entries = [(classes[actual], classes[pred], cm[actual, pred])
               for actual, pred in zip(*np.where(cm >= min_val))]
    entries.sort(key=itemgetter(2), reverse=True)
    return entries
def validate_packet(self, data):
    """Validate a packet against this packet handler.

    The packet is valid when all of the following hold:
    - its length equals the first byte + 1,
    - the second byte is in this class's PACKET_TYPES (when defined),
    - the third byte is in this class's PACKET_SUBTYPES (when defined).

    :param data: bytearray to be verified
    :type data: bytearray
    :raises InvalidPacketLength: if the number of bytes doesn't match the
        declared length
    :raises MalformedPacket: if the packet is shorter than 4 bytes
    :raises UnknownPacketType: if the packet type byte is unknown
    :raises UnknownPacketSubtype: if the packet sub-type byte is unknown
    :return: True when validation passes
    :rtype: boolean
    """
    # NOTE(review): the "recieved" typo below is in user-facing messages;
    # fixing it would change runtime strings, so it is only flagged here.
    expected_length = data[0] + 1
    if len(data) != expected_length:
        raise InvalidPacketLength(
            "Expected packet length to be %s bytes but it was %s bytes"
            % (expected_length, len(data))
        )
    if expected_length < 4:
        raise MalformedPacket(
            "Expected packet length to be larger than 4 bytes but \
            it was %s bytes"
            % (len(data))
        )
    packet_type = data[1]
    if self.PACKET_TYPES and packet_type not in self.PACKET_TYPES:
        types = ",".join("0x{:02x}".format(pt) for pt in self.PACKET_TYPES)
        raise UnknownPacketType(
            "Expected packet type to be one of [%s] but recieved %s"
            % (types, packet_type)
        )
    sub_type = data[2]
    if self.PACKET_SUBTYPES and sub_type not in self.PACKET_SUBTYPES:
        types = \
            ",".join("0x{:02x}".format(pt) for pt in self.PACKET_SUBTYPES)
        raise UnknownPacketSubtype(
            "Expected packet type to be one of [%s] but recieved %s"
            % (types, sub_type))
    return True
meets the requirements. This is done by checking the following
conditions are true.
- The length of the packet is equal to the first byte.
- The second byte is in the set of defined PACKET_TYPES for this class.
- The third byte is in the set of this class defined PACKET_SUBTYPES.
If one or more of these conditions isn't met then we have a packet that
isn't valid or at least isn't understood by this handler.
:param data: bytearray to be verified
:type data: bytearray
:raises: :py:class:`rfxcom.exceptions.InvalidPacketLength`: If the
number of bytes in the packet doesn't match the expected length.
:raises: :py:class:`rfxcom.exceptions.UnknownPacketType`: If the packet
type is unknown to this packet handler
:raises: :py:class:`rfxcom.exceptions.UnknownPacketSubtype`: If the
packet sub type is unknown to this packet handler
:return: true is returned if validation passes.
:rtype: boolean |
def save_session(zap_helper, file_path):
    """Save the ZAP session to *file_path*, overwriting any existing file."""
    console.debug('Saving the session to "{0}"'.format(file_path))
    zap_helper.zap.core.save_session(file_path, overwrite='true')
def getter_(self, fget) -> 'BaseProperty':
    """Attach *fget* as this property's getter, adopt its docstring, and
    return the property itself to allow chaining."""
    self.fget = fget
    self.set_doc(fget.__doc__)
    return self
property and return it. |
def to_swagger(self):
    """Generate a swagger operation dictionary for documentation generation."""
    # Empty collections collapse to None so dict_filter drops them.
    description = (self.callback.__doc__ or '').strip() or None
    parameters = [p.to_swagger(self.resource) for p in self.parameters] or None
    responses = dict(r.to_swagger(self.resource) for r in self.responses) or None
    return dict_filter(
        operationId=self.operation_id,
        description=description,
        summary=self.summary or None,
        tags=list(self.tags) or None,
        deprecated=self.deprecated or None,
        consumes=list(self.consumes) or None,
        parameters=parameters,
        produces=list(self.produces) or None,
        responses=responses,
        security=self.security.to_swagger() if self.security else None,
    )
def npix_to_nside(npix):
    """Find nside for the 12 'top-level' HEALPix tiles from a pixel count.

    Parameters
    ----------
    npix : int
        The number of pixels in the HEALPix map; must equal 12 * nside**2.

    Returns
    -------
    nside : int
        The number of pixels on the side of one of the 12 top-level tiles.
    """
    npix = np.asanyarray(npix, dtype=np.int64)
    if np.any(npix % 12 != 0):
        raise ValueError('Number of pixels must be divisible by 12')
    root = np.sqrt(npix / 12)
    if np.any(root ** 2 != npix / 12):
        raise ValueError('Number of pixels is not of the form 12 * nside ** 2')
    return np.round(root).astype(int)
tiles given a total number of pixels.
Parameters
----------
npix : int
The number of pixels in the HEALPix map.
Returns
-------
nside : int
The number of pixels on the side of one of the 12 'top-level' HEALPix tiles. |
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns None when no known suffix matches *path*.
    """
    filename = os.path.basename(path)
    # Sort longest suffix first so e.g. '.pyc' matches before '.c'.
    # Fix: the original used a py2-only tuple-parameter lambda
    # (`lambda (suffix, mode, mtype): ...`), which is a SyntaxError on
    # Python 3, and called .sort() on a map() iterator.
    suffixes = sorted(
        (-len(suffix), suffix, mode, mtype)
        for suffix, mode, mtype in imp.get_suffixes()
    )
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return filename[:neglen], suffix, mode, mtype
def get_generation_code(self):
    """Return python code that generates all drawn gates."""
    if len(self.gates) < 1:
        code = ''
    else:
        # Deduplicate the gate classes needed for the import line.
        class_names = set(gate._gencode_gate_class for gate in self.gates)
        import_line = 'from FlowCytometryTools import ' + ', '.join(class_names)
        gate_code = sorted(gate.get_generation_code() for gate in self.gates)
        code = import_line + 2 * '\n' + '\n'.join(gate_code)
    self.callback(Event('generated_code',
                        {'code': code}))
    return code
def reset(self, source):
    """Reset the scanner's state for a new *source*.

    :param source: Source for parsing
    """
    self.tokens = []
    self.pos = 0
    self.source = source
:param source: Source for parsing |
def get_unique_groups(input_list):
    """Return the unique items of *input_list*, preserving first-seen order.

    :param input_list: iterable of group entries
    :return: list with duplicates removed
    """
    # Fix: the original did an O(n) membership scan of the output list per
    # item (O(n^2) total). Use a set for O(1) lookups, falling back to the
    # list scan only for unhashable items so behavior is unchanged.
    out_list = []
    seen = set()
    for item in input_list:
        try:
            is_new = item not in seen
            if is_new:
                seen.add(item)
        except TypeError:
            is_new = item not in out_list
        if is_new:
            out_list.append(item)
    return out_list
def __parse_main(self, args):
    """Parse the main arguments only.

    Work around for Python 2.7: argparse refuses to parse when
    subparsers are defined but no subcommand is supplied, so a dummy
    subparser is registered and appended to the argument list.
    """
    if six.PY2:
        self._subparsers_action.add_parser("__dummy")
        return super(FuncArgParser, self).parse_known_args(
            list(args) + ['__dummy'])
    return super(FuncArgParser, self).parse_known_args(args)
because argparse does not allow to parse arguments without subparsers |
def _isinstance(expr, classname):
for cls in type(expr).__mro__:
if cls.__name__ == classname:
return True
return False | Check whether `expr` is an instance of the class with name
`classname`
This is like the builtin `isinstance`, but it take the `classname` a
string, instead of the class directly. Useful for when we don't want to
import the class for which we want to check (also, remember that
printer choose rendering method based on the class name, so this is
totally ok) |
def wncond(left, right, window):
    """Contract each of the intervals of a double precision window.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wncond_c.html

    :param left: Amount added to each left endpoint.
    :param right: Amount subtracted from each right endpoint.
    :param window: Window to be contracted (modified in place).
    :return: Contracted Window.
    """
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    libspice.wncond_c(ctypes.c_double(left), ctypes.c_double(right),
                      ctypes.byref(window))
    return window
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wncond_c.html
:param left: Amount added to each left endpoint.
:type left: float
:param right: Amount subtracted from each right endpoint.
:type right: float
:param window: Window to be contracted
:type window: spiceypy.utils.support_types.SpiceCell
:return: Contracted Window.
:rtype: spiceypy.utils.support_types.SpiceCell |
async def jsk_vc_stop(self, ctx: commands.Context):
    """Stop the currently-running audio source, if there is one."""
    voice_client = ctx.guild.voice_client
    voice_client.stop()
    await ctx.send(f"Stopped playing audio in {voice_client.channel.name}.")
def _represent_match_traversal(match_traversal):
    """Emit MATCH query code for an entire MATCH traversal sequence."""
    # The first step is rendered differently from every subsequent one.
    parts = [_first_step_to_match(match_traversal[0])]
    parts.extend(_subsequent_step_to_match(step)
                 for step in match_traversal[1:])
    return u''.join(parts)
def update_plot_limits(ax, white_space):
    """Pad the axes limits of a matplotlib plot by *white_space*.

    Args:
        ax: matplotlib axes
        white_space (float): whitespace added around the tight data limits

    Note: relies on ax.dataLim (2-D) or ax.xy_dataLim / ax.zz_dataLim (3-D).
    """
    def _span(start, extent):
        # bounds are (origin, extent); pad both ends by white_space.
        return start - white_space, start + extent + white_space

    if hasattr(ax, 'zz_dataLim'):
        x0, y0, width, height = ax.xy_dataLim.bounds
        ax.set_xlim(*_span(x0, width))
        ax.set_ylim(*_span(y0, height))
        z0, _, depth, _ = ax.zz_dataLim.bounds
        ax.set_zlim(*_span(z0, depth))
    else:
        x0, y0, width, height = ax.dataLim.bounds
        assert not any(map(np.isinf, (x0, y0, width, height))), 'Cannot set bounds if dataLim has infinite elements'
        ax.set_xlim(*_span(x0, width))
        ax.set_ylim(*_span(y0, height))
Args:
ax: matplotlib axes
white_space(float): whitespace added to surround the tight limit of the data
Note: This relies on ax.dataLim (in 2d) and ax.[xy, zz]_dataLim being set in 3d |
def SaveGDAL(filename, rda):
    """Save a RichDEM array to a raster file in GeoTIFF format.

    Args:
        filename (str): Name of the raster file to be created.
        rda (rdarray): Data to save.

    Returns:
        No Return
    """
    if type(rda) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    if not GDAL_AVAILABLE:
        raise Exception("richdem.SaveGDAL() requires GDAL.")
    driver = gdal.GetDriverByName('GTiff')
    data_set = driver.Create(
        filename,
        xsize=rda.shape[1],
        ysize=rda.shape[0],
        bands=1,
        eType=gdal.GDT_Float32,
    )
    data_set.SetGeoTransform(rda.geotransform)
    data_set.SetProjection(rda.projection)
    band = data_set.GetRasterBand(1)
    band.SetNoDataValue(rda.no_data)
    band.WriteArray(np.array(rda))
    # Copy user metadata onto the dataset as string key/value pairs.
    for key, value in rda.metadata.items():
        data_set.SetMetadataItem(str(key), str(value))
Saves a RichDEM array to a data file in GeoTIFF format.
If you need to do something more complicated, look at the source of this
function.
Args:
filename (str): Name of the raster file to be created
rda (rdarray): Data to save.
Returns:
No Return |
def handle_data(self, data):
    """handle_data - Internal for parsing.

    Appends character data to the innermost open tag, collapsing
    whitespace unless the tag is one that preserves its contents.
    Non-blank text outside any tag signals a second root node.
    """
    if data:
        inTag = self._inTag
        if len(inTag) > 0:
            if inTag[-1].tagName not in PRESERVE_CONTENTS_TAGS:
                # Collapse tabs to spaces and drop surrounding newlines,
                # keeping at most one significant leading/trailing space.
                data = data.replace('\t', ' ').strip('\r\n')
                if data.startswith(' '):
                    data = ' ' + data.lstrip()
                if data.endswith(' '):
                    data = data.rstrip() + ' '
            inTag[-1].appendText(data)
        elif data.strip():
            # Text with no open tag: a second root node is starting.
            raise MultipleRootNodeException()
def _kill_process(self, pid, cgroups=None, sig=signal.SIGKILL):
    """Try to send *sig* to the given process, either directly or with sudo.

    Because we cannot send signals to the sudo process itself, this
    checks whether the target is the sudo process and redirects the
    signal to sudo's child in that case.
    """
    if self._user is not None:
        # Running under sudo: if the given pid is the first task in the
        # freezer cgroup (the sudo wrapper), target the next task instead.
        if not cgroups:
            cgroups = find_cgroups_of_process(pid)
        pids = cgroups.get_all_tasks(FREEZER)
        try:
            if pid == next(pids):
                pid = next(pids)
        except StopIteration:
            # Fewer than two tasks: keep the original pid.
            pass
        finally:
            # Close the task generator to release its resources.
            pids.close()
    self._kill_process0(pid, sig)
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case. |
def precheck():
    """Pre-run dependency check.

    Verifies that all required binaries are locatable on PATH and that we
    are running as root. Returns True only when all checks pass.
    """
    binaries = ['make']
    for binary in binaries:  # renamed from `bin`, which shadows a builtin
        if not which(binary):
            msg = 'Dependency fail -- Unable to locate required binary: '
            stdout_message('%s: %s' % (msg, ACCENT + binary + RESET))
            return False
    # Fix: the root() check was previously an `elif` inside the loop, so
    # it ran once per located binary instead of once after all binaries
    # were verified. (Also fixed the 'rquired' typo in the message above.)
    if not root():
        return False
    return True
def invert_map(map):
    """Return a dictionary with the keys and values of *map* switched.

    Raises ValueError if any two values resolve to the same key.

    >>> invert_map(dict(a=1, b=2))[1]
    'a'
    """
    inverted = {value: key for key, value in map.items()}
    # A size mismatch means two original values collided as keys.
    if len(inverted) != len(map):
        raise ValueError('Key conflict in inverted mapping')
    return inverted
switched. If any of the values resolve to the same key, raises
a ValueError.
>>> numbers = dict(a=1, b=2, c=3)
>>> letters = invert_map(numbers)
>>> letters[1]
'a'
>>> numbers['d'] = 3
>>> invert_map(numbers)
Traceback (most recent call last):
...
ValueError: Key conflict in inverted mapping |
def _push_broks(self):
    """Push the provided broks objects to the broker daemon.

    Only used on a Broker daemon by the Arbiter.
    """
    broks = cherrypy.request.json['broks']
    with self.app.arbiter_broks_lock:
        logger.debug("Pushing %d broks", len(broks))
        self.app.arbiter_broks.extend(
            unserialize(brok, True) for brok in broks)
Only used on a Broker daemon by the Arbiter
:param: broks
:type: list
:return: None |
def get_user(user, driver):
    """Retrieve a user through the named driver.

    :param user: username to look up
    :type user: str
    :param driver: the driver to use for the request, e.g. github
    :type driver: str
    :rtype: Response
    """
    response = ApitaxResponse()
    loaded_driver: Driver = LoadedDrivers.getDriver(driver)
    found: User = loaded_driver.getApitaxUser(User(username=user))
    response.body.add({'user': {'username': found.username, 'role': found.role}})
    return Response(status=200, body=response.getResponseBody())
Retrieve a user # noqa: E501
:param user: Get user with this name
:type user: str
:param driver: The driver to use for the request. ie. github
:type driver: str
:rtype: Response |
def shiftAccent(self, shiftAmount):
    """Move the whole accent earlier (negative) or later (positive)."""
    if shiftAmount == 0:
        return
    self.pointList = [(time + shiftAmount, pitch)
                      for time, pitch in self.pointList]
    # Track the cumulative shift in each direction separately.
    if shiftAmount < 0:
        self.netLeftShift += shiftAmount
    else:
        self.netRightShift += shiftAmount
def require_root(fn):
    """Decorator asserting that the wrapped function is run as root."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        assert os.geteuid() == 0, \
            "You have to be root to run function '%s'." % fn.__name__
        return fn(*args, **kwargs)
    return wrapper
def idfn(fixture_params: Iterable[Any]) -> str:
    """Produce a uniform ':'-joined id string for pytest fixtures."""
    return ":".join(map(str, fixture_params))
def execute_until_in_scope(
    expr, scope, aggcontext=None, clients=None, post_execute_=None, **kwargs
):
    """Execute until our op is in `scope`.

    Recursively executes *expr* bottom-up, merging each round's
    pre-execution scope, until the expression's op has a value in
    `scope`, which is then returned.

    Parameters
    ----------
    expr : ibis.expr.types.Expr
    scope : Mapping
    aggcontext : Optional[AggregationContext]
    clients : List[ibis.client.Client]
    kwargs : Mapping
    """
    # These arguments are required; they default to None only so callers
    # must pass them by keyword.
    assert aggcontext is not None, 'aggcontext is None'
    assert clients is not None, 'clients is None'
    assert post_execute_ is not None, 'post_execute_ is None'
    op = expr.op()
    # Base case: our op has already been computed, return its value.
    if op in scope:
        return scope[op]
    new_scope = execute_bottom_up(
        expr,
        scope,
        aggcontext=aggcontext,
        post_execute_=post_execute_,
        clients=clients,
        **kwargs,
    )
    # Merge in anything pre_execute contributes for this op.
    new_scope = toolz.merge(
        new_scope, pre_execute(op, *clients, scope=scope, **kwargs)
    )
    # Recurse until `op` shows up in the accumulated scope.
    return execute_until_in_scope(
        expr,
        new_scope,
        aggcontext=aggcontext,
        clients=clients,
        post_execute_=post_execute_,
        **kwargs,
    )
Parameters
----------
expr : ibis.expr.types.Expr
scope : Mapping
aggcontext : Optional[AggregationContext]
clients : List[ibis.client.Client]
kwargs : Mapping |
def close(self, silent=False):
    """Close the database and interactively offer to save and delete the file.

    Parameters
    ----------
    silent: bool
        Close quietly without saving or deleting (Default: False).
    """
    if not silent:
        saveme = get_input("Save database contents to '{}/'? (y, [n]) \n"
                "To save elsewhere, run db.save() before closing. ".format(self.directory))
        if saveme.lower() == 'y':
            self.save()
        delete = get_input("Do you want to delete {0}? (y,[n]) \n"
                "Don't worry, a new one will be generated if you run astrodb.Database('{1}') "
                .format(self.dbpath, self.sqlpath))
        if delete.lower() == 'y':
            print("Deleting {}".format(self.dbpath))
            # NOTE(review): shells out with an unquoted path; paths with
            # spaces/metacharacters would misbehave — consider os.remove.
            os.system("rm {}".format(self.dbpath))
    print('Closing connection')
    self.conn.close()
Parameters
----------
silent: bool
Close quietly without saving or deleting (Default: False). |
def serialize_artifact_json_blobs(artifacts):
    """Ensure JSON artifact blobs passed as dicts are serialized to strings.

    Modifies the artifacts in place and returns the same list.
    """
    for artifact in artifacts:
        if artifact['type'].lower() == 'json':
            blob = artifact['blob']
            if not isinstance(blob, str):
                artifact['blob'] = json.dumps(blob)
    return artifacts
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """Save as a *new* configuration entry and clear the cached value(s)."""
    # Forcing pk to None always inserts a fresh row rather than updating.
    self.pk = None
    super(ConfigurationModel, self).save(
        force_insert,
        force_update,
        using,
        update_fields
    )
    key_values = [getattr(self, key) for key in self.KEY_FIELDS]
    cache.delete(self.cache_key_name(*key_values))
    if self.KEY_FIELDS:
        cache.delete(self.key_values_cache_key_name())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.