code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def word_frame_pos(self, _id):
left = int(self.words[_id][0]/1000)
right = max(left+1, int(self.words[_id][1]/1000))
return (left, right) | Get the position of words |
def iterative_overlap_assembly(
        variant_sequences,
        min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
    """Assemble longer sequences from reads centered on a variant.

    First collapses sequences that are substrings of longer ones, then
    greedily merges overlapping pairs. Returns variant sequences sorted
    by decreasing read support.
    """
    # Nothing to assemble from zero or one sequence.
    if len(variant_sequences) <= 1:
        return variant_sequences
    n_before_collapse = len(variant_sequences)
    # Drop sequences fully contained in longer ones.
    variant_sequences = collapse_substrings(variant_sequences)
    n_after_collapse = len(variant_sequences)
    logger.info(
        "Collapsed %d -> %d sequences",
        n_before_collapse,
        n_after_collapse)
    merged_variant_sequences = greedy_merge(variant_sequences, min_overlap_size)
    # Sort by decreasing number of supporting reads.
    return list(sorted(
        merged_variant_sequences,
        key=lambda seq: -len(seq.reads))) | Assembles longer sequences from reads centered on a variant by
between merging all pairs of overlapping sequences and collapsing
shorter sequences onto every longer sequence which contains them.
Returns a list of variant sequences, sorted by decreasing read support. |
def get_attached_preparation_wait_kwargs(self, action, container_name, kwargs=None):
c_kwargs = dict(container=container_name)
client_config = action.client_config
c_kwargs = dict(container=container_name)
wait_timeout = client_config.get('wait_timeout')
if wait_timeout is not None:
c_kwargs['timeout'] = wait_timeout
update_kwargs(c_kwargs, kwargs)
return c_kwargs | Generates keyword arguments for waiting for a container when preparing a volume. The container name may be
the container being prepared, or the id of the container calling preparation commands.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
:type container_name: unicode | str | NoneType
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict |
def _input_optional(inp):
if 'default' in inp.keys():
return True
typ = inp.get('type')
if isinstance(typ, six.string_types):
return typ.endswith('?')
elif isinstance(typ, dict):
return False
elif isinstance(typ, list):
return bool(u'null' in typ)
else:
raise ValueError('Invalid input "{}"'.format(inp.get['id'])) | Returns True if a step input parameter is optional.
Args:
inp (dict): a dictionary representation of an input.
Raises:
ValueError: The inp provided is not valid. |
def parse_subprotocol_item(
header: str, pos: int, header_name: str
) -> Tuple[Subprotocol, int]:
item, pos = parse_token(header, pos, header_name)
return cast(Subprotocol, item), pos | Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. |
def build_filter_from_kwargs(self, **kwargs):
    """Convert Django-style ``field__lookup=value`` kwargs into a query tree.

    Each kwarg becomes a QueryNode (or an equality comparison) and all of
    them are ANDed together. Returns None when no kwargs are given.
    """
    query = None
    for path_to_convert, value in kwargs.items():
        path_parts = path_to_convert.split('__')
        lookup_class = None
        try:
            # The last '__' segment may name a registered lookup (e.g. 'gte').
            lookup_class = lookups.registry[path_parts[-1]]
            path_to_convert = '__'.join(path_parts[:-1])
        except KeyError:
            # Not a lookup suffix: treat the whole string as a field path.
            pass
        path = lookup_to_path(path_to_convert)
        if lookup_class:
            q = QueryNode(path, lookup=lookup_class(value))
        else:
            # Default to an equality comparison on the resolved path.
            q = path == value
        if query:
            query = query & q
        else:
            query = q
    return query | Convert django-s like lookup to SQLAlchemy ones |
def can_update_topics_to_announces(self, forum, user):
return (
self._perform_basic_permission_check(forum, user, 'can_edit_posts') and
self._perform_basic_permission_check(forum, user, 'can_post_announcements')
) | Given a forum, checks whether the user can change its topic types to announces. |
def resize(self, new_size):
    """Resize self to ``new_size`` elements, zero-padding or truncating.

    Also resets the internal cache of saved intermediate results.
    """
    if new_size == len(self):
        return
    else:
        # Invalidate cached results; the array contents are about to change.
        self._saved = LimitedSizeDict(size_limit=2**5)
        # NOTE(review): `zeros` appears to be a project-level array factory
        # (its result exposes a ._data attribute) rather than numpy.zeros —
        # confirm against the enclosing module's imports.
        new_arr = zeros(new_size, dtype=self.dtype)
        if len(self) <= new_size:
            # Growing: copy existing data; the tail stays zero-filled.
            new_arr[0:len(self)] = self
        else:
            # Shrinking: keep only the first new_size entries.
            new_arr[:] = self[0:new_size]
        self._data = new_arr._data | Resize self to new_size |
def getPageSizeByName(self, pageSizeName):
pageSize = None
lowerCaseNames = {pageSize.lower(): pageSize for pageSize in
self.availablePageSizes()}
if pageSizeName.lower() in lowerCaseNames:
pageSize = getattr(QPagedPaintDevice, lowerCaseNames[pageSizeName.lower()])
return pageSize | Returns a validated PageSize instance corresponding to the given
name. Returns None if the name is not a valid PageSize. |
def after_connect(self):
    """Run after connect: detect a local Calvados session and exit it.

    Returns True when a locally-connected session was found and exited,
    False otherwise.
    """
    show_users = self.device.send("show users", timeout=120)
    # Platform-specific regex identifying a local console connection.
    result = re.search(pattern_manager.pattern(self.platform, 'connected_locally'), show_users)
    if result:
        self.log('Locally connected to Calvados. Exiting.')
        self.device.send('exit')
        return True
    return False | Execute after connect. |
def write_publication(self, values):
    """Write publication info to the db; return the new row id.

    ``values`` is a dict with keys: pub_id, title, authors (list),
    journal, volume, number, pages, year, publisher, doi, tags (list).
    """
    # Reuse a persistent connection if present, else open a throwaway one.
    con = self.connection or self._connect()
    self._initialize(con)
    cur = con.cursor()
    # Tuple order must match the publication table's column order;
    # list-valued fields are stored as JSON strings.
    values = (values['pub_id'],
              values['title'],
              json.dumps(values['authors']),
              values['journal'],
              values['volume'],
              values['number'],
              values['pages'],
              values['year'],
              values['publisher'],
              values['doi'],
              json.dumps(values['tags']))
    # Build the '?' placeholder list after the default leading columns.
    q = self.default + ',' + ', '.join('?' * len(values))
    cur.execute('INSERT OR IGNORE INTO publication VALUES ({})'.format(q),
                values)
    pid = self.get_last_id(cur, table='publication')
    # Only commit/close connections this call opened itself.
    if self.connection is None:
        con.commit()
        con.close()
    return pid | Write publication info to db
Parameters
----------
values: dict with entries
{'pub_id': str (short name for publication),
'authors': list of str ()
'journal': str,
'volume': str,
'number': str,
'pages': 'str'
'year': int,
'publisher': str,
'doi': str,
'tags': list of str} |
def to_networkx(self):
    """Return a NetworkX DiGraph representing the condensed tree.

    Edge weights are the lambda values at which child nodes leave the
    parent cluster; each node gets a 'size' attribute with the number of
    points in the cluster at creation time.
    """
    try:
        from networkx import DiGraph, set_node_attributes
    except ImportError:
        raise ImportError('You must have networkx installed to export networkx graphs')
    result = DiGraph()
    for row in self._raw_tree:
        result.add_edge(row['parent'], row['child'], weight=row['lambda_val'])
    # _raw_tree is a structured array; selecting two fields yields
    # (child, child_size) pairs usable as a dict.
    set_node_attributes(result, dict(self._raw_tree[['child', 'child_size']]), 'size')
    return result | Return a NetworkX DiGraph object representing the condensed tree.
Edge weights in the graph are the lamba values at which child nodes
'leave' the parent cluster.
Nodes have a `size` attribute attached giving the number of points
that are in the cluster (or 1 if it is a singleton point) at the
point of cluster creation (fewer points may be in the cluster at
larger lambda values). |
def post_build(self, pkt, pay):
if conf.contribs['CAN']['swap-bytes']:
return CAN.inv_endianness(pkt) + pay
return pkt + pay | Implements the swap-bytes functionality when building
this is based on a copy of the Packet.self_build default method.
The goal is to affect only the CAN layer data and keep
under layers (e.g LinuxCooked) unchanged |
def get_default_ref(repo):
    """Return a `github.GitRef` for the HEAD of the repo's default branch.

    Raises github.RateLimitExceededException unchanged, and wraps any
    other GitHub API error in CaughtRepositoryError.
    """
    assert isinstance(repo, github.Repository.Repository), type(repo)
    default_branch = repo.default_branch
    default_branch_ref = "heads/{ref}".format(ref=default_branch)
    try:
        head = repo.get_git_ref(default_branch_ref)
    except github.RateLimitExceededException:
        # Let rate-limit errors propagate unchanged so callers can back off.
        raise
    except github.GithubException as e:
        msg = "error getting ref: {ref}".format(ref=default_branch_ref)
        # `from None` suppresses the noisy chained-traceback context.
        raise CaughtRepositoryError(repo, e, msg) from None
    return head | Return a `github.GitRef` object for the HEAD of the default branch.
Parameters
----------
repo: github.Repository.Repository
repo to get default branch head ref from
Returns
-------
head : :class:`github.GitRef` instance
Raises
------
github.RateLimitExceededException
codekit.pygithub.CaughtRepositoryError |
def _take_values(self, item: Node) -> DictBasicType:
    """Take a snapshot of ``item`` with its '_parent' entry cleared.

    Nulling _parent avoids infinite recursion when traversing the
    GPflow tree.

    :param item: GPflow node object.
    :return: dictionary snapshot of the node object.
    """
    values = super()._take_values(item)
    values['_parent'] = None
    return values | Takes snapshot of the object and replaces _parent property value on None to avoid
infitinite recursion in GPflow tree traversing.
:param item: GPflow node object.
:return: dictionary snapshot of the node object. |
def list_privileges(name, **client_args):
client = _client(**client_args)
res = {}
for item in client.get_list_privileges(name):
res[item['database']] = item['privilege'].split()[0].lower()
return res | List privileges from a user.
name
Name of the user from whom privileges will be listed.
CLI Example:
.. code-block:: bash
salt '*' influxdb.list_privileges <name> |
def wind(direction: Number,
         speed: Number,
         gust: Number,
         vardir: typing.List[Number] = None,
         unit: str = 'kt',
         cardinals: bool = True,
         spoken: bool = False) -> str:
    """Format wind elements into a readable sentence.

    Ex: NNE-020 (variable 010 to 040) at 14kt gusting to 20kt
    """
    ret = ''
    # Pick which rendering of each Number value to use.
    target = 'spoken' if spoken else 'repr'
    if direction:
        if direction.repr in WIND_DIR_REPR:
            # Special direction codes (e.g. variable) have fixed translations.
            ret += WIND_DIR_REPR[direction.repr]
        elif direction.value is None:
            ret += direction.repr
        else:
            if cardinals:
                # Prefix a compass point, e.g. 'NNE-'.
                ret += get_cardinal_direction(direction.value) + '-'
            ret += getattr(direction, target)
    if vardir and isinstance(vardir, list):
        # Variable wind direction range, e.g. ' (variable 010 to 040)'.
        vardir = [getattr(var, target) for var in vardir]
        ret += ' (variable {} to {})'.format(*vardir)
    if speed and speed.value:
        ret += f' at {speed.value}{unit}'
    if gust and gust.value:
        ret += f' gusting to {gust.value}{unit}'
    return ret | Format wind elements into a readable sentence
Returns the translation string
Ex: NNE-020 (variable 010 to 040) at 14kt gusting to 20kt |
def set_group_status(group_id, status, **kwargs):
    """Set the status of a resource group (e.g. 'X').

    Requires write permission on the group's network; raises
    ResourceNotFoundError when the group does not exist.
    """
    user_id = kwargs.get('user_id')
    try:
        group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id))
    group_i.network.check_write_permission(user_id)
    group_i.status = status
    # Flush (not commit): the surrounding transaction decides persistence.
    db.DBSession.flush()
    return group_i | Set the status of a group to 'X' |
def load_config_file(self, path, profile=None):
config_cls = self.get_config_reader()
return config_cls.load_config(self, path, profile=profile) | Load the standard config file. |
def _is_in_try_again(self, x, y):
if self.won == 1:
x1, y1, x2, y2 = self._won_try_again
return x1 <= x < x2 and y1 <= y < y2
elif self.lost:
x1, y1, x2, y2 = self._lost_try_again
return x1 <= x < x2 and y1 <= y < y2
return False | Checks if the game is to be restarted. |
def _write(self, text, x, y, colour=Screen.COLOUR_WHITE,
           attr=Screen.A_NORMAL, bg=Screen.COLOUR_BLACK):
    """Write text into the current image at (x, y) with the given attributes.

    :param text: The text to be added.
    :param x: The X coordinate in the image.
    :param y: The Y coordinate in the image.
    :param colour: The colour of the text to add.
    :param attr: The attribute of the image.
    :param bg: The background colour of the text to add.
    """
    # Off-screen writes are silently dropped.
    if y >= self._height or x >= self._width:
        return
    # Clip text that would run past the right edge.
    if len(text) + x > self._width:
        text = text[:self._width - x]
    # Splice text into the row: row[:x] + text + row[x+len(text):].
    self._plain_image[y] = text.join(
        [self._plain_image[y][:x], self._plain_image[y][x + len(text):]])
    for i, _ in enumerate(text):
        self._colour_map[y][x + i] = (colour, attr, bg) | Write some text to the specified location in the current image.
:param text: The text to be added.
:param x: The X coordinate in the image.
:param y: The Y coordinate in the image.
:param colour: The colour of the text to add.
:param attr: The attribute of the image.
:param bg: The background colour of the text to add. |
def is_dir(self, pathobj):
try:
stat = self.stat(pathobj)
return stat.is_dir
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
return False | Returns True if given path is a directory |
def update_fw_local_router(self, net_id, subnet_id, router_id, os_result):
    """Update the local FW dict with router attributes and record the result."""
    fw_dict = self.get_fw_dict()
    fw_dict.update({'router_id': router_id, 'router_net_id': net_id,
                    'router_subnet_id': subnet_id})
    # Persist the dummy router/network association for later lookups.
    self.store_dummy_router_net(net_id, subnet_id, router_id)
    self.update_fw_local_result(os_result=os_result) | Update the FW with router attributes. |
def import_all(path):
plist = []
fid = 0
while True:
try:
p = PolygonFilter(filename=path, fileid=fid)
plist.append(p)
fid += 1
except IndexError:
break
return plist | Import all polygons from a .poly file.
Returns a list of the imported polygon filters |
def get(self, sid):
    """Construct an EngagementContext for the given sid.

    :param sid: Engagement Sid.
    :rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
    """
    return EngagementContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid, ) | Constructs a EngagementContext
:param sid: Engagement Sid.
:returns: twilio.rest.studio.v1.flow.engagement.EngagementContext
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext |
def add_gene_ids(self, genes_list):
    """Add gene IDs manually into the GEM-PRO project.

    Args:
        genes_list (list): List of gene IDs as strings; duplicates and
            IDs already present are skipped.
    """
    orig_num_genes = len(self.genes)
    # set() removes duplicates from the input list before iterating.
    for g in list(set(genes_list)):
        if not self.genes.has_id(g):
            new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)
            if self.model:
                # With a COBRA model loaded, new genes live on the model.
                self.model.genes.append(new_gene)
            else:
                self.genes.append(new_gene)
    log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes)) | Add gene IDs manually into the GEM-PRO project.
Args:
genes_list (list): List of gene IDs as strings. |
def sample(program: Union[circuits.Circuit, schedules.Schedule],
           *,
           noise: devices.NoiseModel = devices.NO_NOISE,
           param_resolver: Optional[study.ParamResolver] = None,
           repetitions: int = 1,
           dtype: Type[np.number] = np.complex64) -> study.TrialResult:
    """Simulate sampling from the given circuit or schedule.

    Uses the fast sparse (wavefunction) simulator when the program is
    noiseless and has a unitary effect; otherwise falls back to the
    density-matrix simulator.
    """
    if noise == devices.NO_NOISE and protocols.has_unitary(program):
        return sparse_simulator.Simulator(dtype=dtype).run(
            program=program,
            param_resolver=param_resolver,
            repetitions=repetitions)
    return density_matrix_simulator.DensityMatrixSimulator(
        dtype=dtype, noise=noise).run(program=program,
                                      param_resolver=param_resolver,
                                      repetitions=repetitions) | Simulates sampling from the given circuit or schedule.
Args:
program: The circuit or schedule to sample from.
noise: Noise model to use while running the simulation.
param_resolver: Parameters to run with the program.
repetitions: The number of samples to take.
dtype: The `numpy.dtype` used by the simulation. Typically one of
`numpy.complex64` or `numpy.complex128`.
Favors speed over precision by default, i.e. uses `numpy.complex64`. |
def pymmh3_hash64(key: Union[bytes, bytearray],
seed: int = 0,
x64arch: bool = True) -> Tuple[int, int]:
hash_128 = pymmh3_hash128(key, seed, x64arch)
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -((unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1)
unsigned_val2 = (hash_128 >> 64) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -((unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1)
return signed_val1, signed_val2 | Implements 64bit murmur3 hash, as per ``pymmh3``. Returns a tuple.
Args:
key: data to hash
seed: seed
x64arch: is a 64-bit architecture available?
Returns:
tuple: tuple of integers, ``(signed_val1, signed_val2)`` |
def _header_string(basis_dict):
    """Create a banner comment header describing a basis set.

    Includes the name, description, role and version, but not references.
    """
    # Wrap long description/version lines, indenting continuation lines.
    tw = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 20)
    header = '-' * 70 + '\n'
    header += ' Basis Set Exchange\n'
    header += ' Version ' + version() + '\n'
    header += ' ' + _main_url + '\n'
    header += '-' * 70 + '\n'
    header += ' Basis set: ' + basis_dict['name'] + '\n'
    header += tw.fill(' Description: ' + basis_dict['description']) + '\n'
    header += ' Role: ' + basis_dict['role'] + '\n'
    header += tw.fill(' Version: {} ({})'.format(basis_dict['version'],
                                                 basis_dict['revision_description'])) + '\n'
    header += '-' * 70 + '\n'
    return header | Creates a header with information about a basis set
Information includes description, revision, etc, but not references |
def sum_stats(stats_data):
t_bounces = 0
t_complaints = 0
t_delivery_attempts = 0
t_rejects = 0
for dp in stats_data:
t_bounces += int(dp['Bounces'])
t_complaints += int(dp['Complaints'])
t_delivery_attempts += int(dp['DeliveryAttempts'])
t_rejects += int(dp['Rejects'])
return {
'Bounces': t_bounces,
'Complaints': t_complaints,
'DeliveryAttempts': t_delivery_attempts,
'Rejects': t_rejects,
} | Summarize the bounces, complaints, delivery attempts and rejects from a
list of datapoints. |
def handler(self):
'Parametrized handler function'
return ft.partial(self.base.handler, parameter=self.parameter)\
if self.parameter else self.base.handler | Parametrized handler function |
def _delete_security_groups(self):
    """Delete the security group of every role in the cluster, plus the
    cluster-wide group."""
    group_names = self._get_all_group_names_for_cluster()
    for group in group_names:
        self.ec2.delete_security_group(group) | Delete the security groups for each role in the cluster, and the group for
the cluster. |
def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None,
profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'exists': bool(conn)}
rds = conn.describe_db_subnet_groups(DBSubnetGroupName=name)
return {'exists': bool(rds)}
except ClientError as e:
if "DBSubnetGroupNotFoundFault" in e.message:
return {'exists': False}
else:
return {'error': __utils__['boto3.get_error'](e)} | Check to see if an RDS subnet group exists.
CLI example::
salt myminion boto_rds.subnet_group_exists my-param-group \
region=us-east-1 |
def visit_versions(self, func):
    """Apply ``func`` to every version in the range, in place.

    If ``func`` returns a `Version`, it replaces the visited version,
    mutating this range. Advanced usage only: nonsensical replacements
    (e.g. an upper bound below the lower bound) are not prevented.
    """
    for bound in self.bounds:
        # Sentinel bounds (-inf lower / +inf upper) carry no version to visit.
        if bound.lower is not _LowerBound.min:
            result = func(bound.lower.version)
            if isinstance(result, Version):
                bound.lower.version = result
        if bound.upper is not _UpperBound.inf:
            result = func(bound.upper.version)
            if isinstance(result, Version):
                bound.upper.version = result | Visit each version in the range, and apply a function to each.
This is for advanced usage only.
If `func` returns a `Version`, this call will change the versions in
place.
It is possible to change versions in a way that is nonsensical - for
example setting an upper bound to a smaller version than the lower bound.
Use at your own risk.
Args:
func (callable): Takes a `Version` instance arg, and is applied to
every version in the range. If `func` returns a `Version`, it
will replace the existing version, updating this `VersionRange`
instance in place. |
def get_canonical_encoding_name(name):
import codecs
try:
codec = codecs.lookup(name)
except LookupError:
return name
else:
return codec.name | Given an encoding name, get the canonical name from a codec lookup.
:param str name: The name of the codec to lookup
:return: The canonical version of the codec name
:rtype: str |
def dependencies(self, tkn: str) -> Set[str]:
return set(self.dependency_list(tkn)) | Return all the items that tkn depends on as a set
:param tkn:
:return: |
def add_audio(self,
              customization_id,
              audio_name,
              audio_resource,
              contained_content_type=None,
              allow_overwrite=None,
              content_type=None,
              **kwargs):
    """Add an audio resource to a custom acoustic model (asynchronous).

    POSTs the raw ``audio_resource`` to the acoustic-customization audio
    endpoint; see the service documentation for supported audio formats,
    size limits, and naming restrictions.
    """
    # All three identifiers/payloads are mandatory for the API call.
    if customization_id is None:
        raise ValueError('customization_id must be provided')
    if audio_name is None:
        raise ValueError('audio_name must be provided')
    if audio_resource is None:
        raise ValueError('audio_resource must be provided')
    headers = {
        'Contained-Content-Type': contained_content_type,
        'Content-Type': content_type
    }
    # Caller-supplied headers override the defaults above.
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_audio')
    headers.update(sdk_headers)
    params = {'allow_overwrite': allow_overwrite}
    data = audio_resource
    # Path variables must be URL-encoded before substitution.
    url = '/v1/acoustic_customizations/{0}/audio/{1}'.format(
        *self._encode_path_vars(customization_id, audio_name))
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        data=data,
        accept_json=True)
    return response | Add an audio resource.
Adds an audio resource to a custom acoustic model. Add audio content that reflects
the acoustic characteristics of the audio that you plan to transcribe. You must
use credentials for the instance of the service that owns a model to add an audio
resource to it. Adding audio data does not affect the custom acoustic model until
you train the model for the new data by using the **Train a custom acoustic
model** method.
You can add individual audio files or an archive file that contains multiple audio
files. Adding multiple audio files via a single archive file is significantly more
efficient than adding each file individually. You can add audio resources in any
format that the service supports for speech recognition.
You can use this method to add any number of audio resources to a custom model by
calling the method once for each audio or archive file. But the addition of one
audio resource must be fully complete before you can add another. You must add a
minimum of 10 minutes and a maximum of 100 hours of audio that includes speech,
not just silence, to a custom acoustic model before you can train it. No audio
resource, audio- or archive-type, can be larger than 100 MB. To add an audio
resource that has the same name as an existing audio resource, set the
`allow_overwrite` parameter to `true`; otherwise, the request fails.
The method is asynchronous. It can take several seconds to complete depending on
the duration of the audio and, in the case of an archive file, the total number of
audio files being processed. The service returns a 201 response code if the audio
is valid. It then asynchronously analyzes the contents of the audio file or files
and automatically extracts information about the audio such as its length,
sampling rate, and encoding. You cannot submit requests to add additional audio
resources to a custom acoustic model, or to train the model, until the service's
analysis of all audio files for the current request completes.
To determine the status of the service's analysis of the audio, use the **Get an
audio resource** method to poll the status of the audio. The method accepts the
customization ID of the custom model and the name of the audio resource, and it
returns the status of the resource. Use a loop to check the status of the audio
every few seconds until it becomes `ok`.
**See also:** [Add audio to the custom acoustic
model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-create.html#addAudio).
### Content types for audio-type resources
You can add an individual audio file in any format that the service supports for
speech recognition. For an audio-type resource, use the `Content-Type` parameter
to specify the audio format (MIME type) of the audio file, including specifying
the sampling rate, channels, and endianness where indicated.
* `audio/alaw` (Specify the sampling rate (`rate`) of the audio.)
* `audio/basic` (Use only with narrowband models.)
* `audio/flac`
* `audio/g729` (Use only with narrowband models.)
* `audio/l16` (Specify the sampling rate (`rate`) and optionally the number of
channels (`channels`) and endianness (`endianness`) of the audio.)
* `audio/mp3`
* `audio/mpeg`
* `audio/mulaw` (Specify the sampling rate (`rate`) of the audio.)
* `audio/ogg` (The service automatically detects the codec of the input audio.)
* `audio/ogg;codecs=opus`
* `audio/ogg;codecs=vorbis`
* `audio/wav` (Provide audio with a maximum of nine channels.)
* `audio/webm` (The service automatically detects the codec of the input audio.)
* `audio/webm;codecs=opus`
* `audio/webm;codecs=vorbis`
The sampling rate of an audio file must match the sampling rate of the base model
for the custom model: for broadband models, at least 16 kHz; for narrowband
models, at least 8 kHz. If the sampling rate of the audio is higher than the
minimum required rate, the service down-samples the audio to the appropriate rate.
If the sampling rate of the audio is lower than the minimum required rate, the
service labels the audio file as `invalid`.
**See also:** [Audio
formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html).
### Content types for archive-type resources
You can add an archive file (**.zip** or **.tar.gz** file) that contains audio
files in any format that the service supports for speech recognition. For an
archive-type resource, use the `Content-Type` parameter to specify the media type
of the archive file:
* `application/zip` for a **.zip** file
* `application/gzip` for a **.tar.gz** file.
When you add an archive-type resource, the `Contained-Content-Type` header is
optional depending on the format of the files that you are adding:
* For audio files of type `audio/alaw`, `audio/basic`, `audio/l16`, or
`audio/mulaw`, you must use the `Contained-Content-Type` header to specify the
format of the contained audio files. Include the `rate`, `channels`, and
`endianness` parameters where necessary. In this case, all audio files contained
in the archive file must have the same audio format.
* For audio files of all other types, you can omit the `Contained-Content-Type`
header. In this case, the audio files contained in the archive file can have any
of the formats not listed in the previous bullet. The audio files do not need to
have the same format.
Do not use the `Contained-Content-Type` header when adding an audio-type resource.
### Naming restrictions for embedded audio files
The name of an audio file that is embedded within an archive-type resource must
meet the following restrictions:
* Include a maximum of 128 characters in the file name; this includes the file
extension.
* Do not include spaces, slashes, or backslashes in the file name.
* Do not use the name of an audio file that has already been added to the custom
model as part of an archive-type resource.
:param str customization_id: The customization ID (GUID) of the custom acoustic
model that is to be used for the request. You must make the request with
credentials for the instance of the service that owns the custom model.
:param str audio_name: The name of the new audio resource for the custom acoustic
model. Use a localized name that matches the language of the custom model and
reflects the contents of the resource.
* Include a maximum of 128 characters in the name.
* Do not include spaces, slashes, or backslashes in the name.
* Do not use the name of an audio resource that has already been added to the
custom model.
:param file audio_resource: The audio resource that is to be added to the custom
acoustic model, an individual audio file or an archive file.
:param str contained_content_type: **For an archive-type resource,** specify the
format of the audio files that are contained in the archive file if they are of
type `audio/alaw`, `audio/basic`, `audio/l16`, or `audio/mulaw`. Include the
`rate`, `channels`, and `endianness` parameters where necessary. In this case, all
audio files that are contained in the archive file must be of the indicated type.
For all other audio formats, you can omit the header. In this case, the audio
files can be of multiple types as long as they are not of the types listed in the
previous paragraph.
The parameter accepts all of the audio formats that are supported for use with
speech recognition. For more information, see **Content types for audio-type
resources** in the method description.
**For an audio-type resource,** omit the header.
:param bool allow_overwrite: If `true`, the specified audio resource overwrites an
existing audio resource with the same name. If `false`, the request fails if an
audio resource with the same name already exists. The parameter has no effect if
an audio resource with the same name does not already exist.
:param str content_type: For an audio-type resource, the format (MIME type) of the
audio. For more information, see **Content types for audio-type resources** in the
method description.
For an archive-type resource, the media type of the archive file. For more
information, see **Content types for archive-type resources** in the method
description.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse |
def delegated_login(self, login, admin_zc, duration=0):
selector = zobjects.Account(name=login).to_selector()
delegate_args = {'account': selector}
if duration:
delegate_args['duration': duration]
resp = admin_zc.request('DelegateAuth', delegate_args)
lifetime = resp['lifetime']
authToken = resp['authToken']
self.login_account = login
self.login_with_authToken(authToken, lifetime) | Use another client to get logged in via delegated_auth mechanism by an
already logged in admin.
:param admin_zc: An already logged-in admin client
:type admin_zc: ZimbraAdminClient
:param login: the user login (or email) you want to log as |
def re_authenticate(self, notify: bool=False) -> bool:
    """Re-authenticate after a failure, retrying until success or timeout.

    When called from the notify thread (``notify=True``) there is no
    timeout; the loop ends only if the notify thread has been cancelled.
    Returns True when authentication eventually succeeds.
    """
    timeout = datetime.now() + \
        timedelta(seconds=self.reauthenticatetimeout)
    while True:
        if self.authenticate():
            return True
        if notify:
            # Notify-thread mode: give up only when the thread is cancelled.
            if not self._notifyrunning:
                return False
        else:
            # NOTE(review): `timeout` is a datetime and always truthy here,
            # so the `timeout and` guard is redundant.
            if timeout and datetime.now() > timeout:
                return False
        time.sleep(self.retryinterval) | Authenticate again after failure.
Keep trying with 10 sec interval. If called from the notify thread
we will not have a timeout, but will end if the notify thread has
been cancled.
Will return True if authentication was successful. |
def set_timeout(self, network_timeout):
if network_timeout == self._network_timeout:
return
self._network_timeout = network_timeout
self._disconnect() | Set the timeout for existing and future Clients.
Close all current connections. This will cause future operations to
create new Clients with the network_timeout passed through
socketTimeoutMS optional parameter.
Args:
network_timeout: The new value in milliseconds for the timeout. |
def convert_dict_to_params(src_dict):
return "&".join([
"{}={}".format(key, value)
for key, value in src_dict.items()
]) | convert dict to params string
Args:
src_dict (dict): source mapping data structure
Returns:
str: string params data
Examples:
>>> src_dict = {
"a": 1,
"b": 2
}
>>> convert_dict_to_params(src_dict)
>>> "a=1&b=2" |
def format_errors(self, errors, many):
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors} | Format validation errors as JSON Error objects. |
def _package(self, task, *args, **kw):
    """Wrap the task and its arguments in a list and encode it with the codec.

    Used internally.
    """
    return self.codec.encode([task, args, kw]) | Used internally. Simply wraps the arguments up in a list and encodes
the list. |
def search(cls, args):
    """Search the NApps server for NApps matching ``args['<pattern>']``.

    Shell-style '*' wildcards are supported; matching is case-insensitive.
    """
    # Escape regex metacharacters, then restore '*' as a regex wildcard.
    safe_shell_pat = re.escape(args['<pattern>']).replace(r'\*', '.*')
    pat_str = '.*{}.*'.format(safe_shell_pat)
    pattern = re.compile(pat_str, re.IGNORECASE)
    remote_json = NAppsManager.search(pattern)
    remote = set()
    for napp in remote_json:
        # Older server payloads used 'author' instead of 'username'.
        username = napp.get('username', napp.get('author'))
        remote.add(((username, napp.get('name')), napp.get('description')))
    cls._print_napps(remote) | Search for NApps in NApps server matching a pattern. |
def _path_importer_cache(cls, path):
    """Get the finder for ``path`` from sys.path_importer_cache.

    On a cache miss, find the appropriate finder and cache it; None is
    cached (and returned) when no finder is available.
    """
    if path == '':
        # '' means the current working directory; resolve it now.
        try:
            path = os.getcwd()
        except FileNotFoundError:
            # cwd was deleted out from under us — nothing sensible to cache.
            return None
    try:
        finder = sys.path_importer_cache[path]
    except KeyError:
        finder = cls._path_hooks(path)
        sys.path_importer_cache[path] = finder
    return finder | Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None. |
def click_signal(target_usage, target_vendor_id):
    """Find ``target_usage`` among the output reports of devices with the
    given vendor id and pulse it 1 -> 0 to simulate a 'click' event."""
    all_devices = hid.HidDeviceFilter(vendor_id = target_vendor_id).get_devices()
    if not all_devices:
        print("Can't find target device (vendor_id = 0x%04x)!" % target_vendor_id)
    else:
        for device in all_devices:
            try:
                device.open()
                for report in device.find_output_reports():
                    if target_usage in report:
                        # Pulse the usage: set then clear, sending each state.
                        report[target_usage] = 1
                        report.send()
                        report[target_usage] = 0
                        report.send()
                        print("\nUsage clicked!\n")
                        return
            finally:
                # Always release the device handle, even on early return.
                device.close()
        print("The target device was found, but the requested usage does not exist!\n") | This function will find a particular target_usage over output reports on
target_vendor_id related devices, then it will flip the signal to simulate
a 'click' event |
def decode(self, value):
if self.encoding:
value = value.decode(self.encoding)
return self.deserialize(value) | Decode value. |
def fetch_partial(self, container, obj, size):
    """Return the first ``size`` bytes of an object (the whole object when
    it is smaller); delegates to the manager."""
    return self._manager.fetch_partial(container, obj, size) | Returns the first 'size' bytes of an object. If the object is smaller
than the specified 'size' value, the entire object is returned. |
def diag_sparse(A):
    """Return a diagonal.

    If A is a sparse matrix (e.g. csr_matrix or csc_matrix), return its
    diagonal as an array; otherwise build a csr_matrix with the 1d
    array A on its diagonal.

    Parameters
    ----------
    A : sparse matrix or 1d array
        General sparse matrix or array of diagonal entries

    Returns
    -------
    B : array or sparse matrix
        Array of the diagonal when A is sparse, csr_matrix otherwise
    """
    if isspmatrix(A):
        return A.diagonal()
    if np.ndim(A) != 1:
        raise ValueError('input diagonal array expected to be 1d')
    n = len(A)
    # CSR with one entry per row: row i holds A[i] at column i.
    return csr_matrix((np.asarray(A), np.arange(n), np.arange(n + 1)),
                      (n, n))
def hashed_download(url, temp, digest):
    """Download ``url`` into directory ``temp``, verify its SHA-256
    against ``digest``, and return the downloaded file's path.

    :raises HashError: when the actual digest does not match ``digest``
    """
    def opener():
        # HTTPS-only opener: drop the plain-HTTP handler so insecure
        # requests cannot succeed.
        result = build_opener(HTTPSHandler())
        # Bug fix: iterate over a copy -- the original removed entries
        # from the very list it was iterating, which can skip handlers.
        for handler in list(result.handlers):
            if isinstance(handler, HTTPHandler):
                result.handlers.remove(handler)
        return result

    def read_chunks(response, chunk_size):
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                return
            yield chunk

    response = opener().open(url)
    path = join(temp, urlparse(url).path.split('/')[-1])
    hasher = sha256()
    with open(path, 'wb') as outfile:
        for chunk in read_chunks(response, 4096):
            outfile.write(chunk)
            hasher.update(chunk)
    actual_digest = hasher.hexdigest()
    if actual_digest != digest:
        raise HashError(url, path, actual_digest, digest)
    return path
def friendly_type_name(raw_type: typing.Type) -> str:
    """Return a user-friendly name for *raw_type* (str, int, ...).

    Unknown types are logged as errors and rendered via ``str``.
    """
    if raw_type in _TRANSLATE_TYPE:
        return _TRANSLATE_TYPE[raw_type]
    LOGGER.error('unmanaged value type: %s', raw_type)
    return str(raw_type)
def _define_array_view(data_type):
    """Define a new view class for an `Array` data type."""
    element_type = data_type.element_type
    element_view = _resolve_view(element_type)
    if element_view is None:
        # Elements need no dedicated view: direct access.
        attributes = _get_mixin_attributes((_DirectArrayViewMixin,))
    else:
        if isinstance(element_type, _ATOMIC):
            mixin = _IndirectAtomicArrayViewMixin
        else:
            mixin = _IndirectCompositeArrayViewMixin
        attributes = _get_mixin_attributes((mixin,))
        attributes['_element_view'] = element_view
    name = data_type.name if data_type.name else 'ArrayView'
    return type(name, (), attributes)
def get_online_symbol_data(database_id):
    """Fetch a single formula record from the online MySQL database.

    :param database_id: primary key (int) of the ``wm_formula`` row
    :return: dict with the row's fields, or None when not exactly one match
    """
    import pymysql
    import pymysql.cursors
    cfg = get_database_configuration()
    mysql = cfg['mysql_online']
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            # Parameterized query instead of %-string interpolation:
            # avoids SQL injection and quoting mistakes.
            sql = ("SELECT `id`, `formula_in_latex`, `unicode_dec`, `font`, "
                   "`font_style` FROM `wm_formula` WHERE `id` = %s")
            cursor.execute(sql, (database_id,))
            datasets = cursor.fetchall()
    finally:
        # Bug fix: the connection (and cursor) were never closed before.
        connection.close()
    if len(datasets) == 1:
        return datasets[0]
    return None
def is_app(command, *app_names, **kwargs):
    """Return True if *command* is a call to one of *app_names*.

    Accepts an optional ``at_least`` keyword: the command must have more
    than ``at_least`` script parts to qualify (default 0).
    """
    at_least = kwargs.pop('at_least', 0)
    if kwargs:
        raise TypeError("got an unexpected keyword argument '{}'".format(kwargs.keys()))
    parts = command.script_parts
    return len(parts) > at_least and parts[0] in app_names
def _longest_val_in_column(self, col):
try:
return max([len(x[col]) for x in self.table if x[col]]) + 2
except KeyError:
logger.error("there is no column %r", col)
raise | get size of longest value in specific column
:param col: str, column name
:return int |
def fit_predict(self, features, labels):
    """Fit the pipeline on the data, then predict on the same features.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    labels: array-like {n_samples}
        Class labels

    Returns
    -------
    array-like {n_samples}: predicted labels for *features*
    """
    self.fit(features, labels)
    return self.predict(features)
def report_and_raise(probe_name, probe_result, failure_msg):
    """Log the probe result; return True on success, raise on failure."""
    log.info('%s? %s' % (probe_name, probe_result))
    if probe_result:
        return True
    raise exceptions.ProbeException(failure_msg)
def get_company_user(self, email):
    """Return the company user whose address equals *email*.

    :param email: address of contact
    :rtype: dict with contact information
    :raises FMBaseError: when no such user exists
    """
    for user in self.get_company_users():
        if user['email'] == email:
            return user
    msg = 'No user with email: "{email}" associated with this company.'
    raise FMBaseError(msg.format(email=email))
def isuncertainties(arg_list):
    """Check whether any argument contains uncertainties-class elements.

    :param arg_list: list of arguments (scalars, sequences, or ndarrays)
    :return: True if any argument is (or contains) a ``uct.UFloat``
    """
    for arg in arg_list:
        # Direct UFloat scalar (the original's third branch reduced to
        # exactly this check -- its float test was redundant).
        if isinstance(arg, uct.UFloat):
            return True
        if isinstance(arg, (list, tuple)):
            # Bug fix: the original only inspected element [0], missing
            # UFloats elsewhere and crashing on empty sequences.
            if any(isinstance(item, uct.UFloat) for item in arg):
                return True
        elif isinstance(arg, np.ndarray):
            if any(isinstance(item, uct.UFloat) for item in np.ravel(arg)):
                return True
    return False
def ListOutputModules(self):
    """Lists the available output modules, then any disabled ones."""
    def _write_table(title, entries):
        # Build and emit one two-column table of (name, class) rows.
        view = views.ViewsFactory.GetTableView(
            self._views_format_type, column_names=['Name', 'Description'],
            title=title)
        for name, output_class in entries:
            view.AddRow([name, output_class.DESCRIPTION])
        view.Write(self._output_writer)

    _write_table(
        'Output Modules',
        output_manager.OutputManager.GetOutputClasses())
    disabled = list(output_manager.OutputManager.GetDisabledOutputClasses())
    if disabled:
        _write_table('Disabled Output Modules', disabled)
def validator(func, *args, **kwargs):
    """A decorator that makes the given function a validator.

    Whenever the decorated function returns a falsy value, the decorator
    returns a :class:`ValidationFailure` object instead; otherwise True.

    Example::

        >>> @validator
        ... def even(value):
        ...     return not (value % 2)
        >>> even(4)
        True
        >>> even(5)
        ValidationFailure(func=even, args={'value': 5})
    """
    def wrapper(func, *args, **kwargs):
        if func(*args, **kwargs):
            return True
        return ValidationFailure(func, func_args_as_dict(func, args, kwargs))
    return decorator(wrapper, func)
def ensure_object_is_string(item, title):
    """Check that *item* is a string; raise TypeError otherwise.

    Doc fix: the original docstring claimed ValueError, but the code has
    always raised TypeError -- the documentation now matches behavior.

    :param item: object to check
    :param title: name used in the error message (must itself be a str)
    :return: None
    :raises TypeError: when *item* is not a string
    """
    assert isinstance(title, str)
    if not isinstance(item, str):
        msg = "{} must be a string. {} passed instead."
        raise TypeError(msg.format(title, type(item)))
    return None
def OnCellText(self, event):
    """Text entry event handler: store the entered code at the cursor cell."""
    row, col, _tab = self.grid.actions.cursor
    self.grid.GetTable().SetValue(row, col, event.code)
    event.Skip()
def st_ctime(self):
    """Return the creation time in seconds (float when ``use_float`` is
    set, truncated int otherwise)."""
    seconds = self._st_ctime_ns / 1e9
    if self.use_float:
        return seconds
    return int(seconds)
def owned_by(self, owner, also_check_group=False):
    """Check whether *owner* owns the file.

    Args:
        owner (str): user (or group) name to check ownership against
        also_check_group (bool): when True, require both the user owner
            and the group owner to match; otherwise only the user owner.

    Returns:
        bool: True if the file is owned by *owner*
    """
    if not also_check_group:
        return self.owner == owner
    return self.owner == owner and self.group == owner
def db_aws_list_regions(self):
    """Print the available AWS DB regions, marking the selected one.

    Consistency fix: converted the block's Python-2 print statements to
    Python-3 print() calls to match the rest of the file.
    """
    regions = self.db_services.list_regions()
    if regions != []:
        print("Avaliable AWS regions:")
        for reg in regions:
            # end='' keeps the region and its marker on one line,
            # mirroring the old trailing-comma print statement.
            print('\t' + reg, end='')
            if reg == self.db_services.get_region():
                print(" (currently selected)")
            else:
                print('')
def add_email_grant(self, permission, email_address, headers=None):
    """Retrieve this key's ACL, add an email grant, and store the
    updated ACL back to S3.

    :type permission: string
    :param permission: The permission being granted; one of READ, WRITE,
        READ_ACP, WRITE_ACP, FULL_CONTROL.
    :type email_address: string
    :param email_address: The email address associated with the AWS
        account being granted the permission.
    :param headers: optional extra HTTP headers
    """
    acl_policy = self.get_acl(headers=headers)
    acl_policy.acl.add_email_grant(permission, email_address)
    self.set_acl(acl_policy, headers=headers)
def search_reads(
        self, read_group_ids, reference_id=None, start=None, end=None):
    """Return an iterator over reads from the given read groups.

    :param str read_group_ids: IDs of the read groups of interest.
    :param str reference_id: reference the returned reads are mapped to.
    :param int start: 0-based start position of the query (defaults to 0
        when a reference is given).
    :param int end: 0-based, exclusive end position of the query
        (defaults to the reference's length when a reference is given).
    :return: iterator over the read alignments matching the query.
    :rtype: iter
    """
    request = protocol.SearchReadsRequest()
    request.read_group_ids.extend(read_group_ids)
    request.reference_id = pb.string(reference_id)
    request.start = pb.int(start)
    request.end = pb.int(end)
    request.page_size = pb.int(self._page_size)
    return self._run_search_request(
        request, "reads", protocol.SearchReadsResponse)
def jing(rng_filepath, *xml_filepaths):
    """Run jing.jar with the given RNG schema against the XML files and
    return the parsed validation output."""
    cmd = ['java', '-jar', str(JING_JAR), str(rng_filepath)]
    cmd.extend(str(path) for path in xml_filepaths)
    proc = subprocess.Popen(cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    out, _err = proc.communicate()
    return _parse_jing_output(out.decode('utf-8'))
def generate_key(key_length=64):
    """Generate a random secret key of *key_length* alphanumeric chars.

    Uses random.SystemRandom when the platform provides it; otherwise
    falls back to random.choice, which is NOT cryptographically secure
    (and logs a warning saying so).
    """
    if hasattr(random, 'SystemRandom'):
        logging.info('Generating a secure random key using SystemRandom.')
        choice = random.SystemRandom().choice
    else:
        msg = "WARNING: SystemRandom not present. Generating a random "\
              "key using random.choice (NOT CRYPTOGRAPHICALLY SECURE)."
        logging.warning(msg)
        choice = random.choice
    alphabet = string.digits + string.ascii_letters
    return ''.join(choice(alphabet) for _ in range(key_length))
def _lookup_online(word):
    """Look up *word* on diki.pl.

    Parameters
    ----------
    word : str
        Word to look up.

    Returns
    -------
    str
        Website HTML content (entity-unescaped).
    """
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; "
            "Trident/7.0; rv:11.0) like Gecko"
        )
    }
    logger.debug("Looking up online: %s", word)
    url = "https://www.diki.pl/{word}".format(word=urllib.parse.quote(word))
    request = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(request) as response:
        page = response.read().decode()
    return html.unescape(page)
def read_from_hdx(identifier, configuration=None):
    """Read the organization given by *identifier* from HDX.

    Args:
        identifier (str): Identifier of organization
        configuration (Optional[Configuration]): HDX configuration;
            defaults to the global configuration.

    Returns:
        Optional[Organization]: the organization, or None on failed read
    """
    organization = Organization(configuration=configuration)
    if organization._load_from_hdx('organization', identifier):
        return organization
    return None
def PYTHON_VERSION(stats, info):
    """Append the Python interpreter version to *info*.

    This is a flag you can pass to `Stats.submit()`.
    """
    version = sys.version.replace(' \n', ' ').replace('\n', ' ')
    fields = [str(component) for component in sys.version_info]
    fields.append(version)
    info.append(('python', ';'.join(fields)))
def get(self, key, default=miss):
    """Return self[key] when the key exists, else *default*."""
    if key in self._dict:
        return self[key]
    return default
def construct(self, request_args=None, **kwargs):
    """Instantiate the request as a message class instance.

    Attribute values are gathered by the pre_construct methods and
    gather_request_args, then post-processed by do_post_construct.

    :param request_args: initial request arguments
    :param kwargs: extra keyword arguments
    :return: message class instance
    """
    request_args = {} if request_args is None else request_args
    request_args, post_args = self.do_pre_construct(request_args,
                                                    **kwargs)
    # Carry 'state' through when the message type supports it and the
    # caller supplied one that pre_construct did not already set.
    supports_state = 'state' in self.msg_type.c_param
    if supports_state and 'state' in kwargs and 'state' not in request_args:
        request_args['state'] = kwargs['state']
    gathered = self.gather_request_args(**request_args)
    return self.do_post_construct(self.msg_type(**gathered), **post_args)
def comment(self, body):
    """Post a comment with *body* on this clip.

    Parameters:
    - body (Required)
    """
    data = json.dumps({'body': body})
    # Bug fix: the original wrote  "..." (self.id)  -- calling the URL
    # string -- instead of using the '%' formatting operator, which
    # raised "TypeError: 'str' object is not callable" at runtime.
    r = requests.post(
        "https://kippt.com/api/clips/%s/comments" % (self.id,),
        headers=self.kippt.header,
        data=data
    )
    return r.json()
def ismatch(a, b):
    """Smart comparison between classes, instances, and string
    representations of units. For internal use only.

    Two values match when they are equal, when either is an instance of
    the other (or of the other's type), or when their lowercased string
    representations are equal.
    """
    if a == b:
        return True
    # Bug fix: the original fell through (implicitly returning None)
    # whenever an isinstance() check was False without raising TypeError,
    # so later checks -- including the documented string-representation
    # fallback -- were unreachable. Every check is now attempted and
    # every path returns an explicit bool.
    for obj, cls in ((a, b), (b, a)):
        try:
            if isinstance(obj, cls):
                return True
        except TypeError:
            pass
    try:
        if isinstance(a, type(b)) or isinstance(b, type(a)):
            return True
    except TypeError:
        pass
    return str(a).lower() == str(b).lower()
def destructuring_stmt_handle(self, original, loc, tokens):
    """Process match assign blocks."""
    internal_assert(len(tokens) == 2, "invalid destructuring assignment tokens", tokens)
    matches, item = tokens
    result = match_handle(loc, [matches, "in", item, None])
    result += self.pattern_error(original, loc, match_to_var, match_check_var)
    return result
def _seconds_have_elapsed(token, num_seconds):
    """Test whether *num_seconds* have passed since *token* was last
    requested; always True on the first call for a given token.

    Not strictly thread-safe -- may report the wrong frequency when
    called concurrently; accuracy depends on timeit.default_timer().

    Args:
        token: The token for which to look up the last-request time.
        num_seconds: The number of seconds to test for.

    Returns:
        Whether it has been >= num_seconds since token was last requested.
    """
    now = timeit.default_timer()
    last = _log_timer_per_token.get(token)
    if last is not None and (now - last) < num_seconds:
        return False
    _log_timer_per_token[token] = now
    return True
def _FormatSocketUnixToken(self, token_data):
    """Format a Unix socket token as a dictionary of values.

    Args:
        token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.

    Returns:
        dict[str, str]: token values.
    """
    family = token_data.socket_family
    return {
        'protocols': bsmtoken.BSM_PROTOCOLS.get(family, 'UNKNOWN'),
        'family': family,
        'path': token_data.socket_path}
def _node_has_namespace_helper(node: BaseEntity, namespace: str) -> bool:
    """Check that the node's namespace equals *namespace*.

    Might include cross references in the future.
    """
    return node.get(NAMESPACE) == namespace
def genUserCert(self, name, signas=None, outp=None, csr=None):
    """Generate a user keypair (and client certificate) named *name*.

    Args:
        name (str): The name of the user keypair.
        signas (str): CA keypair to sign the new user keypair with;
            when None, the certificate is self-signed.
        outp (synapse.lib.output.Output): The output buffer.
        csr (OpenSSL.crypto.PKey): CSR public key when generating from a CSR.

    Returns:
        (OpenSSL.crypto.PKey, OpenSSL.crypto.X509): key and certificate.
    """
    pkey, cert = self._genBasePkeyCert(name, pkey=csr)
    cert.add_extensions([
        crypto.X509Extension(b'nsCertType', False, b'client'),
        crypto.X509Extension(b'keyUsage', False, b'digitalSignature'),
        crypto.X509Extension(b'extendedKeyUsage', False, b'clientAuth'),
        crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE'),
    ])
    if signas is None:
        self.selfSignCert(cert, pkey)
    else:
        self.signCertAs(cert, signas)
    crtpath = self._saveCertTo(cert, 'users', '%s.crt' % name)
    if outp is not None:
        outp.printf('cert saved: %s' % (crtpath,))
    # Only persist a private key when we actually hold one (a CSR-based
    # keypair is public-only).
    if not pkey._only_public:
        keypath = self._savePkeyTo(pkey, 'users', '%s.key' % name)
        if outp is not None:
            outp.printf('key saved: %s' % (keypath,))
    return pkey, cert
def simple_returns(prices):
    """Compute simple returns from a timeseries of prices.

    Parameters
    ----------
    prices : pd.Series, pd.DataFrame or np.ndarray
        Prices of assets in wide-format, with assets as columns,
        indexed by datetimes.

    Returns
    -------
    returns : array-like
        Returns in the same wide format, one row shorter than the input.
    """
    if isinstance(prices, (pd.DataFrame, pd.Series)):
        return prices.pct_change().iloc[1:]
    # ndarray path: consecutive differences divided by the leading prices.
    diffs = np.diff(prices, axis=0)
    np.divide(diffs, prices[:-1], out=diffs)
    return diffs
def get_alpha_value(self):
    """Getter for the learning rate.

    :raises TypeError: when the stored value is not a float
    """
    if not isinstance(self.__alpha_value, float):
        raise TypeError("The type of __alpha_value must be float.")
    return self.__alpha_value
def _process_wave_param(self, pval):
    """Process an individual model parameter representing wavelength."""
    return self._process_generic_param(
        pval, self._internal_wave_unit, equivalencies=u.spectral())
def validate_user_data(self, expected, actual, api_version=None):
    """Validate a list of actual user data against expected user data.

    Returns None when everything matches, otherwise a string describing
    the first mismatch (unexpected data or a missing user).
    """
    self.log.debug('Validating user data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # Bug fix: 'ret' was unbound when 'expected' was empty, raising
    # UnboundLocalError at the final return.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            if e['name'] != act.name:
                continue
            a = {'enabled': act.enabled, 'name': act.name,
                 'email': act.email, 'id': act.id}
            if api_version == 3:
                a['default_project_id'] = getattr(act,
                                                  'default_project_id',
                                                  'none')
            else:
                a['tenantId'] = act.tenantId
            found = True
            ret = self._validate_dict_data(e, a)
            if ret:
                return "unexpected user data - {}".format(ret)
        if not found:
            return "user {} does not exist".format(e['name'])
    return ret
def loadcsv(filename):
    """Load data from a CSV file.

    Returns a single dict mapping column names to arrays of values.
    """
    dataframe = _pd.read_csv(filename)
    return {name: column.values for name, column in dataframe.items()}
def invitations(self):
    """Return an accessor for the current user's invitations."""
    return Invitations(url="%s/invitations" % self.root,
                       securityHandler=self._securityHandler,
                       proxy_url=self._proxy_url,
                       proxy_port=self._proxy_port)
def _force_read(
self,
element,
value,
text_prefix_before,
text_suffix_before,
text_prefix_after,
text_suffix_after,
data_of
):
if (text_prefix_before) or (text_suffix_before):
text_before = text_prefix_before + value + text_suffix_before
else:
text_before = ''
if (text_prefix_after) or (text_suffix_after):
text_after = text_prefix_after + value + text_suffix_after
else:
text_after = ''
self._force_read_simple(element, text_before, text_after, data_of) | Force the screen reader display an information of element with prefixes
or suffixes.
:param element: The reference element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param value: The value to be show.
:type value: str
:param text_prefix_before: The prefix of value to show before the
element.
:type text_prefix_before: str
:param text_suffix_before: The suffix of value to show before the
element.
:type text_suffix_before: str
:param text_prefix_after: The prefix of value to show after the
element.
:type text_prefix_after: str
:param text_suffix_after: The suffix of value to show after the
element.
:type text_suffix_after: str
:param data_of: The name of attribute that links the content with
element.
:type data_of: str |
def temperature(self, what):
    """Set the temperature, validated as a Kelvin quantity."""
    self._temperature = units.validate_quantity(what, u.K)
def _populate_input_for_name_id(self, config, record, context, data):
    """Use a record found in LDAP to populate input for NameID generation."""
    user_id = ""
    for attr in config['user_id_from_attrs']:
        if attr not in record["attributes"]:
            continue
        value = record["attributes"][attr]
        if isinstance(value, list):
            # Sort so the concatenation is deterministic across queries.
            value.sort()
            user_id += "".join(value)
            message = "Added attribute {} with values {} to input for NameID"
        else:
            user_id += value
            message = "Added attribute {} with value {} to input for NameID"
        satosa_logging(
            logger,
            logging.DEBUG,
            message.format(attr, value),
            context.state
        )
    if not user_id:
        satosa_logging(
            logger,
            logging.WARNING,
            "Input for NameID is empty so not overriding default",
            context.state
        )
    else:
        data.subject_id = user_id
        satosa_logging(
            logger,
            logging.DEBUG,
            "Input for NameID is {}".format(data.subject_id),
            context.state
        )
def main():
    """Print the collected bug information in JSON, Markdown and SQL
    formats so the user can paste the appropriate one."""
    reporter = BugReporter()
    print("JSON report:")
    print(reporter.as_json())
    print()
    print("Markdown report:")
    print(reporter.as_markdown())
    print("SQL report:")
    print(reporter.as_sql())
    print("Choose the appropriate format (if you're submitting a Github Issue "
          "please chose the Markdown report) and paste it!")
def has_value(obj, name):
    """Flexibly look up *name* on *obj*, returning a (found, value) pair.

    returns:
        - obj is None: (False, None)
        - obj is dict: (name in obj, obj.get(name))
        - obj hasattr(name): (True, getattr(obj, name))
        - obj supports item access/containment and holds name:
          (True, obj[name])
        - else: (False, None)
    """
    if obj is None:
        return (False, None)
    if isinstance(obj, dict):
        return (name in obj, obj.get(name))
    if hasattr(obj, name):
        return (True, getattr(obj, name))
    supports_items = hasattr(obj, "__getitem__") and hasattr(obj, "__contains__")
    if supports_items and name in obj:
        return (True, obj[name])
    return (False, None)
def get_col_rgba(color, transparency=None, opacity=None):
    """Convert a Gdk.Color into (r, g, b, alpha), each in 0.0-1.0.

    If both transparency and opacity are None, alpha is 1 (opaque).

    :param Gdk.Color color: Color to extract r, g and b from
    :param transparency: 0 (opaque) .. 1 (transparent), or None
    :param opacity: 0 (transparent) .. 1 (opaque), or None
    :return: Red, Green, Blue and Alpha values (all 0.0 - 1.0)
    """
    r = color.red / 65535.
    g = color.green / 65535.
    b = color.blue / 65535.
    # Opacity is only used when transparency is absent AND opacity given.
    if transparency is None and opacity is not None:
        if not 0 <= opacity <= 1:
            raise ValueError("Opacity must be between 0 and 1")
        alpha = opacity
    else:
        if transparency is None:
            transparency = 0
        if not 0 <= transparency <= 1:
            raise ValueError("Transparency must be between 0 and 1")
        alpha = 1 - transparency
    return r, g, b, alpha
def _group_cluster(self, clusters, adj_list, counts):
groups = []
for cluster in clusters:
groups.append(sorted(cluster, key=lambda x: counts[x],
reverse=True))
return groups | return groups for cluster or directional methods |
def set_stage_for_epoch(self, epoch_start, name, attr='stage', save=True):
    """Change the stage (or quality) for one specific epoch.

    Parameters
    ----------
    epoch_start : int
        start time of the epoch, in seconds
    name : str
        description of the stage or qualifier
    attr : str, optional
        either 'stage' or 'quality'
    save : bool
        whether to save every time one epoch is scored (safer in a GUI,
        slower when converting a whole dataset -- then save at the end)

    Raises
    ------
    KeyError
        when epoch_start is not in the list of epochs
    IndexError
        when there is no rater / epochs at all
    """
    if self.rater is None:
        raise IndexError('You need to have at least one rater')
    for epoch in self.rater.iterfind('stages/epoch'):
        if int(epoch.find('epoch_start').text) != epoch_start:
            continue
        epoch.find(attr).text = name
        if save:
            self.save()
        return
    raise KeyError('epoch starting at ' + str(epoch_start) + ' not found')
def w(msg, *args, **kwargs):
    """Log a message at warn level."""
    return logging.log(WARN, msg, *args, **kwargs)
def xpointerNewRange(self, startindex, end, endindex):
    """Create a new xmlXPathObjectPtr of type range."""
    end__o = None if end is None else end._o
    ret = libxml2mod.xmlXPtrNewRange(self._o, startindex, end__o, endindex)
    if ret is None:
        raise treeError('xmlXPtrNewRange() failed')
    return xpathObjectRet(ret)
def get_crime_categories(self, date=None):
    """Get a list of crime categories valid for a particular date,
    sorted by name. Uses the crime-categories_ API call.

    .. _crime-categories:
        https://data.police.uk/docs/method/crime-categories/

    :rtype: list
    :param date: The date of the crime categories to get (latest when
        None).
    :type date: str or None
    """
    categories = self._get_crime_categories(date=date).values()
    return sorted(categories, key=lambda category: category.name)
def compare_filesystems(fs0, fs1, concurrent=False):
    """Compare the two given filesystems.

    fs0 and fs1 are two mounted GuestFS instances containing the disks
    to be compared. With concurrent=True the two hashing passes run in
    parallel processes, speeding up comparison on multiple CPUs.

    Returns a dict describing files created, removed and modified:
        {'created_files': [...], 'deleted_files': [...],
         'modified_files': [...]}
    """
    if concurrent:
        futures = (concurrent_hash_filesystem(fs0),
                   concurrent_hash_filesystem(fs1))
        files0, files1 = (future.result() for future in futures)
    else:
        files0, files1 = hash_filesystem(fs0), hash_filesystem(fs1)
    return file_comparison(files0, files1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.