code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
slug_separator='-'):
"""
Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager.
"""
slug_field = instance._meta.get_field(slug_field_name)
slug = getattr(instance, slug_field.attname)
slug_len = slug_field.max_length
# Sort out the initial slug, limiting its length if necessary.
slug = slugify(value)
if slug_len:
slug = slug[:slug_len]
slug = _slug_strip(slug, slug_separator)
original_slug = slug
# Create the queryset if one wasn't explicitly provided and exclude the
# current instance from the queryset.
if queryset is None:
queryset = instance.__class__._default_manager.all()
if instance.pk:
queryset = queryset.exclude(pk=instance.pk)
# Find a unique slug. If one matches, at '-2' to the end and try again
# (then '-3', etc).
next = 2
while not slug or queryset.filter(**{slug_field_name: slug}):
slug = original_slug
end = '%s%s' % (slug_separator, next)
if slug_len and len(slug) + len(end) > slug_len:
slug = slug[:slug_len-len(end)]
slug = _slug_strip(slug, slug_separator)
slug = '%s%s' % (slug, end)
next += 1
setattr(instance, slug_field.attname, slug) | Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager. | Below is the the instruction that describes the task:
### Input:
Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager.
### Response:
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
                   slug_separator='-'):
    """
    Calculates and stores a unique slug of ``value`` for an instance.

    ``slug_field_name`` should be a string matching the name of the field to
    store the slug in (and the field to check against for uniqueness).

    ``queryset`` usually doesn't need to be explicitly provided - it'll default
    to using the ``.all()`` queryset from the model's default manager.
    """
    slug_field = instance._meta.get_field(slug_field_name)
    slug = getattr(instance, slug_field.attname)
    slug_len = slug_field.max_length

    # Sort out the initial slug, limiting its length if necessary.
    slug = slugify(value)
    if slug_len:
        slug = slug[:slug_len]
    slug = _slug_strip(slug, slug_separator)
    original_slug = slug

    # Create the queryset if one wasn't explicitly provided and exclude the
    # current instance from the queryset.
    if queryset is None:
        queryset = instance.__class__._default_manager.all()
    if instance.pk:
        queryset = queryset.exclude(pk=instance.pk)

    # Find a unique slug. If one matches, add '-2' to the end and try again
    # (then '-3', etc).  ``index`` avoids shadowing the builtin ``next``.
    index = 2
    while not slug or queryset.filter(**{slug_field_name: slug}):
        slug = original_slug
        end = '%s%s' % (slug_separator, index)
        if slug_len and len(slug) + len(end) > slug_len:
            # Truncate the base slug so that the numeric suffix still fits
            # within the field's max_length.
            slug = slug[:slug_len - len(end)]
            slug = _slug_strip(slug, slug_separator)
        slug = '%s%s' % (slug, end)
        index += 1

    setattr(instance, slug_field.attname, slug)
def run(self):
"""Runs the command.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
``None``
"""
for build_dir in self.build_dirs:
if os.path.isdir(build_dir):
sys.stdout.write('Removing %s%s' % (build_dir, os.linesep))
shutil.rmtree(build_dir)
for (root, dirs, files) in os.walk(self.cwd):
for name in files:
fullpath = os.path.join(root, name)
if any(fullpath.endswith(ext) for ext in self.build_artifacts):
sys.stdout.write('Removing %s%s' % (fullpath, os.linesep))
os.remove(fullpath) | Runs the command.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
        ``None`` | Below is the instruction that describes the task:
### Input:
Runs the command.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
``None``
### Response:
def run(self):
    """Runs the command.

    Args:
        self (CleanCommand): the ``CleanCommand`` instance

    Returns:
        ``None``
    """
    # First remove whole build directories, if present.
    for directory in self.build_dirs:
        if os.path.isdir(directory):
            sys.stdout.write('Removing %s%s' % (directory, os.linesep))
            shutil.rmtree(directory)
    # Then sweep the working tree for stray build artifacts by extension.
    for root, _dirs, filenames in os.walk(self.cwd):
        for filename in filenames:
            path = os.path.join(root, filename)
            if path.endswith(tuple(self.build_artifacts)):
                sys.stdout.write('Removing %s%s' % (path, os.linesep))
                os.remove(path)
def complete(text, state):
""" Auto complete scss constructions in interactive mode. """
for cmd in COMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1 | Auto complete scss constructions in interactive mode. | Below is the the instruction that describes the task:
### Input:
Auto complete scss constructions in interactive mode.
### Response:
def complete(text, state):
    """ Auto complete scss constructions in interactive mode. """
    # readline calls this repeatedly with state = 0, 1, 2, ...; return the
    # state-th command whose prefix matches, or None when exhausted.
    matching = (cmd for cmd in COMMANDS if cmd.startswith(text))
    for index, cmd in enumerate(matching):
        if index == state:
            return cmd
def clearConnections( self, cls ):
"""
Clears all the connections for this node.
:param cls | <subclass of XNodeConnection> || None
:return <int> | number of connections removed
"""
count = 0
for connection in self.connections(cls):
connection.remove()
count += 1
return count | Clears all the connections for this node.
:param cls | <subclass of XNodeConnection> || None
:return <int> | number of connections removed | Below is the the instruction that describes the task:
### Input:
Clears all the connections for this node.
:param cls | <subclass of XNodeConnection> || None
:return <int> | number of connections removed
### Response:
def clearConnections(self, cls):
    """
    Clears all the connections for this node.

    :param cls | <subclass of XNodeConnection> || None

    :return <int> | number of connections removed
    """
    removed = 0
    for conn in self.connections(cls):
        conn.remove()
        removed += 1
    return removed
def get_objective_bank_ids_by_activity(self, activity_id):
"""Gets the list of ``ObjectiveBank Ids`` mapped to a ``Activity``.
arg: activity_id (osid.id.Id): ``Id`` of a ``Activity``
return: (osid.id.IdList) - list of objective bank ``Ids``
raise: NotFound - ``activity_id`` is not found
raise: NullArgument - ``activity_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_activity_lookup_session(proxy=self._proxy)
lookup_session.use_federated_objective_bank_view()
activity = lookup_session.get_activity(activity_id)
id_list = []
for idstr in activity._my_map['assignedObjectiveBankIds']:
id_list.append(Id(idstr))
return IdList(id_list) | Gets the list of ``ObjectiveBank Ids`` mapped to a ``Activity``.
arg: activity_id (osid.id.Id): ``Id`` of a ``Activity``
return: (osid.id.IdList) - list of objective bank ``Ids``
raise: NotFound - ``activity_id`` is not found
raise: NullArgument - ``activity_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the list of ``ObjectiveBank Ids`` mapped to a ``Activity``.
arg: activity_id (osid.id.Id): ``Id`` of a ``Activity``
return: (osid.id.IdList) - list of objective bank ``Ids``
raise: NotFound - ``activity_id`` is not found
raise: NullArgument - ``activity_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_objective_bank_ids_by_activity(self, activity_id):
    """Gets the list of ``ObjectiveBank Ids`` mapped to a ``Activity``.
    arg: activity_id (osid.id.Id): ``Id`` of a ``Activity``
    return: (osid.id.IdList) - list of objective bank ``Ids``
    raise: NotFound - ``activity_id`` is not found
    raise: NullArgument - ``activity_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Mirrors the template for
    # osid.resource.ResourceBinSession.get_bin_ids_by_resource
    mgr = self._get_provider_manager('LEARNING', local=True)
    lookup_session = mgr.get_activity_lookup_session(proxy=self._proxy)
    lookup_session.use_federated_objective_bank_view()
    activity = lookup_session.get_activity(activity_id)
    bank_ids = [Id(idstr)
                for idstr in activity._my_map['assignedObjectiveBankIds']]
    return IdList(bank_ids)
def segment_shakespeare_works(input_file=PATH_SHAKESPEARE, verbose=False):
"""Find start and end of each volume within _Complete Works of William Shakespeare_
"""
works = [{}]
meta = {}
j = 0
for i, line in enumerate(generate_lines(input_file=input_file)):
if 'title' not in meta:
match = RE_GUTEN_LINE.match(line)
if match:
meta['title'] = match.groups()[0]
meta['body_start'] = i
continue
if j >= len(works):
works += [{}]
if not len(works[j]):
match = RE_YEAR_LINE.match(line)
if match:
if verbose:
print(" year {:02d}, {}: {}".format(j, i, match.group()))
works[j]['year'] = int(match.group())
works[j]['start'] = i
elif len(works[j]) == 2:
match = RE_TITLE_LINE.match(line)
if match:
if verbose:
print("title {:02d}, {}: {}".format(j, i, match.groups()[0]))
works[j]['title'] = match.groups()[0]
works[j]['title_lineno'] = i
elif len(works[j]) == 4:
match = RE_BY_LINE.match(line)
if match:
if verbose:
print(" by {:02d}, {}: {}".format(j, i, match.group()))
works[j]['by'] = match.groups()[2]
works[j]['by_lineno'] = i
elif len(works[j]) > 4:
match = RE_ACT_SCENE_LINE.match(line)
if match:
section_meta = {
'start': i,
'title': match.groups()[0],
'act_roman': match.groups()[1].split()[-1],
'act': int(DICT_ROMAN2INT[match.groups()[1].split()[-1]]),
'scene': int(match.groups()[2].split()[-1]),
}
works[j]['sections'] = works[j].get('sections', []) + [section_meta]
else:
match = RE_THE_END.match(line)
if match and 'GUTENBERG' not in match.group().upper():
if verbose:
print(" stop {:02d}, {}: {}".format(j, i, match.group()))
works[j]['stop'] = i
j += 1
if not len(works[-1]):
works = works[:-1]
meta['volumes'] = works
return meta | Find start and end of each volume within _Complete Works of William Shakespeare_ | Below is the the instruction that describes the task:
### Input:
Find start and end of each volume within _Complete Works of William Shakespeare_
### Response:
def segment_shakespeare_works(input_file=PATH_SHAKESPEARE, verbose=False):
    """Find start and end of each volume within _Complete Works of William Shakespeare_

    Scans the text line by line, building one metadata dict per work and an
    overall ``meta`` dict with the Gutenberg ``title``, ``body_start`` line
    number, and a ``volumes`` list of the per-work dicts.
    """
    works = [{}]
    meta = {}
    j = 0
    for i, line in enumerate(generate_lines(input_file=input_file)):
        # Skip everything until the Gutenberg header line is found; it
        # supplies the overall title and marks where the body begins.
        if 'title' not in meta:
            match = RE_GUTEN_LINE.match(line)
            if match:
                meta['title'] = match.groups()[0]
                meta['body_start'] = i
            continue
        if j >= len(works):
            works += [{}]
        # State machine keyed on how many fields of the current work dict are
        # filled: 0 -> expect year line, 2 -> expect title, 4 -> expect
        # by-line, >4 -> collect act/scene sections until a THE END line.
        if not len(works[j]):
            match = RE_YEAR_LINE.match(line)
            if match:
                if verbose:
                    print(" year {:02d}, {}: {}".format(j, i, match.group()))
                works[j]['year'] = int(match.group())
                works[j]['start'] = i
        elif len(works[j]) == 2:
            match = RE_TITLE_LINE.match(line)
            if match:
                if verbose:
                    print("title {:02d}, {}: {}".format(j, i, match.groups()[0]))
                works[j]['title'] = match.groups()[0]
                works[j]['title_lineno'] = i
        elif len(works[j]) == 4:
            match = RE_BY_LINE.match(line)
            if match:
                if verbose:
                    print("   by {:02d}, {}: {}".format(j, i, match.group()))
                works[j]['by'] = match.groups()[2]
                works[j]['by_lineno'] = i
        elif len(works[j]) > 4:
            match = RE_ACT_SCENE_LINE.match(line)
            if match:
                section_meta = {
                    'start': i,
                    'title': match.groups()[0],
                    'act_roman': match.groups()[1].split()[-1],
                    'act': int(DICT_ROMAN2INT[match.groups()[1].split()[-1]]),
                    'scene': int(match.groups()[2].split()[-1]),
                }
                works[j]['sections'] = works[j].get('sections', []) + [section_meta]
            else:
                # "THE END" closes a work; the Gutenberg license footer also
                # matches, so it is explicitly excluded.
                match = RE_THE_END.match(line)
                if match and 'GUTENBERG' not in match.group().upper():
                    if verbose:
                        print(" stop {:02d}, {}: {}".format(j, i, match.group()))
                    works[j]['stop'] = i
                    j += 1
    # Drop the trailing empty placeholder dict, if any.
    if not len(works[-1]):
        works = works[:-1]
    meta['volumes'] = works
    return meta
def compute_partition_size(result, processes):
"""
Attempts to compute the partition size to evenly distribute work across processes. Defaults to
1 if the length of result cannot be determined.
:param result: Result to compute on
:param processes: Number of processes to use
:return: Best partition size
"""
try:
return max(math.ceil(len(result) / processes), 1)
except TypeError:
return 1 | Attempts to compute the partition size to evenly distribute work across processes. Defaults to
1 if the length of result cannot be determined.
:param result: Result to compute on
:param processes: Number of processes to use
:return: Best partition size | Below is the the instruction that describes the task:
### Input:
Attempts to compute the partition size to evenly distribute work across processes. Defaults to
1 if the length of result cannot be determined.
:param result: Result to compute on
:param processes: Number of processes to use
:return: Best partition size
### Response:
def compute_partition_size(result, processes):
    """
    Attempts to compute the partition size to evenly distribute work across processes. Defaults to
    1 if the length of result cannot be determined.

    :param result: Result to compute on
    :param processes: Number of processes to use
    :return: Best partition size
    """
    try:
        # Only len() is expected to fail (e.g. for generators).  Keeping the
        # try body minimal avoids silently swallowing unrelated TypeErrors,
        # such as a non-numeric ``processes`` argument.
        length = len(result)
    except TypeError:
        return 1
    return max(math.ceil(length / processes), 1)
def match_msequence(self, tokens, item):
"""Matches a middle sequence."""
series_type, head_matches, middle, _, last_matches = tokens
self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Sequence)")
self.add_check("_coconut.len(" + item + ") >= " + str(len(head_matches) + len(last_matches)))
if middle != wildcard:
if len(head_matches) and len(last_matches):
splice = "[" + str(len(head_matches)) + ":" + str(-len(last_matches)) + "]"
elif len(head_matches):
splice = "[" + str(len(head_matches)) + ":]"
elif len(last_matches):
splice = "[:" + str(-len(last_matches)) + "]"
else:
splice = ""
self.assign_to_series(middle, series_type, item + splice)
self.match_all_in(head_matches, item)
for i, match in enumerate(last_matches):
self.match(match, item + "[" + str(i - len(last_matches)) + "]") | Matches a middle sequence. | Below is the the instruction that describes the task:
### Input:
Matches a middle sequence.
### Response:
def match_msequence(self, tokens, item):
    """Matches a middle sequence."""
    series_type, head_matches, middle, _, last_matches = tokens
    min_len = len(head_matches) + len(last_matches)
    self.add_check("_coconut.isinstance({}, _coconut.abc.Sequence)".format(item))
    self.add_check("_coconut.len({}) >= {}".format(item, min_len))
    if middle != wildcard:
        # Build the slice expression that strips the positionally matched
        # head/tail elements before binding the middle.
        if head_matches and last_matches:
            splice = "[{}:{}]".format(len(head_matches), -len(last_matches))
        elif head_matches:
            splice = "[{}:]".format(len(head_matches))
        elif last_matches:
            splice = "[:{}]".format(-len(last_matches))
        else:
            splice = ""
        self.assign_to_series(middle, series_type, item + splice)
    self.match_all_in(head_matches, item)
    # Tail elements are matched by negative index from the end.
    for offset, match in enumerate(last_matches):
        self.match(match, "{}[{}]".format(item, offset - len(last_matches)))
def close(self):
"""
Close all endpoint file descriptors.
"""
ep_list = self._ep_list
while ep_list:
ep_list.pop().close()
        self._closed = True | Close all endpoint file descriptors. | Below is the instruction that describes the task:
### Input:
Close all endpoint file descriptors.
### Response:
def close(self):
    """
    Close all endpoint file descriptors.
    """
    # Pop as we close so the list shrinks even if a close() raises midway.
    endpoints = self._ep_list
    while endpoints:
        endpoints.pop().close()
    self._closed = True
def amount_converter(obj):
"""Converts amount value from several types into Decimal."""
if isinstance(obj, Decimal):
return obj
elif isinstance(obj, (str, int, float)):
return Decimal(str(obj))
else:
raise ValueError('do not know how to convert: {}'.format(type(obj))) | Converts amount value from several types into Decimal. | Below is the the instruction that describes the task:
### Input:
Converts amount value from several types into Decimal.
### Response:
def amount_converter(obj):
    """Converts amount value from several types into Decimal."""
    if isinstance(obj, Decimal):
        return obj
    if isinstance(obj, (str, int, float)):
        # Round-trip through str so floats keep their printed value.
        return Decimal(str(obj))
    raise ValueError('do not know how to convert: {}'.format(type(obj)))
def install_host(trg_queue, *hosts, **kwargs):
''' Atomically install host queues '''
user = kwargs.pop('user', None)
group = kwargs.pop('group', None)
mode = kwargs.pop('mode', None)
item_user = kwargs.pop('item_user', None)
item_group = kwargs.pop('item_group', None)
item_mode = kwargs.pop('item_mode', None)
is_down = kwargs.pop('is_down', False)
#set modes
mode, user, group, item_user, item_group, item_mode =\
_def_mode(mode, user, group, item_user, item_group, item_mode)
uid, gid = uid_gid(user, group)
host_path = fsq_path.hosts(trg_queue)
try:
_instdir(host_path, mode, uid, gid)
except (OSError, IOError, ), e:
if e.errno not in ( errno.EEXIST, errno.ENOTEMPTY, ):
raise FSQInstallError(e.errno, wrap_io_os_err(e))
if hosts:
for host in hosts:
host = fsq_path.valid_name(host)
# uid_gid makes calls to the pw db and|or gr db, in addition to
# potentially stat'ing, as such, we want to avoid calling it
# unless we absoultely have to
uid, gid = uid_gid(user, group)
tmp_full, tmp_queue = _tmp_trg(host, host_path)
try:
# open once to cut down on stat/open for chown/chmod combo
fd = os.open(tmp_full, os.O_RDONLY)
try:
# always fchmod here as mkdtemp is different than normal
# mkdir
os.fchmod(fd, mode)
if -1 != uid or -1 != gid:
os.fchown(fd, uid, gid)
finally:
os.close(fd)
# bless our queue with its children
_instdir(fsq_path.tmp(trg_queue, tmp_queue), mode, uid, gid)
_instdir(fsq_path.queue(trg_queue, tmp_queue), mode, uid, gid)
_instdir(fsq_path.done(trg_queue, tmp_queue), mode, uid, gid)
_instdir(fsq_path.fail(trg_queue, tmp_queue), mode, uid, gid)
# down via configure.down if necessary
if is_down:
down_host(tmp_queue, host, user=item_user,
group=item_group, mode=item_mode)
# atomic commit -- by rename
os.rename(tmp_full, fsq_path.base(trg_queue, host))
except (OSError, IOError, ), e:
shutil.rmtree(tmp_full)
if e.errno == errno.ENOTEMPTY:
raise FSQInstallError(e.errno, u'queue exists: {0}'.format(
trg_queue))
if isinstance(e, FSQError):
raise e | Atomically install host queues | Below is the the instruction that describes the task:
### Input:
Atomically install host queues
### Response:
def install_host(trg_queue, *hosts, **kwargs):
    ''' Atomically install host queues

        For each name in ``hosts``: validates the name, builds a complete
        queue skeleton (tmp/queue/done/fail) in a temporary directory, then
        commits it with a single os.rename so the install is atomic.

        Keyword options: user/group/mode for queue directories,
        item_user/item_group/item_mode for queue items, and is_down to
        install the queue in a downed state.
    '''
    user = kwargs.pop('user', None)
    group = kwargs.pop('group', None)
    mode = kwargs.pop('mode', None)
    item_user = kwargs.pop('item_user', None)
    item_group = kwargs.pop('item_group', None)
    item_mode = kwargs.pop('item_mode', None)
    is_down = kwargs.pop('is_down', False)
    #set modes
    mode, user, group, item_user, item_group, item_mode =\
        _def_mode(mode, user, group, item_user, item_group, item_mode)
    uid, gid = uid_gid(user, group)
    host_path = fsq_path.hosts(trg_queue)
    # Ensure the hosts directory exists; an already-existing directory is
    # not an error.
    try:
        _instdir(host_path, mode, uid, gid)
    except (OSError, IOError, ), e:
        if e.errno not in ( errno.EEXIST, errno.ENOTEMPTY, ):
            raise FSQInstallError(e.errno, wrap_io_os_err(e))
    if hosts:
        for host in hosts:
            host = fsq_path.valid_name(host)
            # uid_gid makes calls to the pw db and|or gr db, in addition to
            # potentially stat'ing, as such, we want to avoid calling it
            # unless we absoultely have to
            uid, gid = uid_gid(user, group)
            tmp_full, tmp_queue = _tmp_trg(host, host_path)
            try:
                # open once to cut down on stat/open for chown/chmod combo
                fd = os.open(tmp_full, os.O_RDONLY)
                try:
                    # always fchmod here as mkdtemp is different than normal
                    # mkdir
                    os.fchmod(fd, mode)
                    if -1 != uid or -1 != gid:
                        os.fchown(fd, uid, gid)
                finally:
                    os.close(fd)
                # bless our queue with its children
                _instdir(fsq_path.tmp(trg_queue, tmp_queue), mode, uid, gid)
                _instdir(fsq_path.queue(trg_queue, tmp_queue), mode, uid, gid)
                _instdir(fsq_path.done(trg_queue, tmp_queue), mode, uid, gid)
                _instdir(fsq_path.fail(trg_queue, tmp_queue), mode, uid, gid)
                # down via configure.down if necessary
                if is_down:
                    down_host(tmp_queue, host, user=item_user,
                              group=item_group, mode=item_mode)
                # atomic commit -- by rename
                os.rename(tmp_full, fsq_path.base(trg_queue, host))
            except (OSError, IOError, ), e:
                shutil.rmtree(tmp_full)
                if e.errno == errno.ENOTEMPTY:
                    raise FSQInstallError(e.errno, u'queue exists: {0}'.format(
                        trg_queue))
                if isinstance(e, FSQError):
                    raise e
                # NOTE(review): any other OSError/IOError is silently
                # swallowed here after the temp dir is cleaned up -- confirm
                # this best-effort behavior is intended.
def get_week_start_end_day():
"""
Get the week start date and end date
"""
t = date.today()
wd = t.weekday()
    return (t - timedelta(wd), t + timedelta(6 - wd)) | Get the week start date and end date | Below is the instruction that describes the task:
### Input:
Get the week start date and end date
### Response:
def get_week_start_end_day():
    """
    Get the week start date and end date
    """
    today = date.today()
    # weekday() is 0 for Monday, so the week runs Monday..Sunday.
    offset = today.weekday()
    return (today - timedelta(offset), today + timedelta(6 - offset))
def sp_search_query(query):
"""Translate a Mopidy search query to a Spotify search query"""
result = []
for (field, values) in query.items():
field = SEARCH_FIELD_MAP.get(field, field)
if field is None:
continue
for value in values:
if field == 'year':
value = _transform_year(value)
if value is not None:
result.append('%s:%d' % (field, value))
elif field == 'any':
result.append('"%s"' % value)
else:
result.append('%s:"%s"' % (field, value))
return ' '.join(result) | Translate a Mopidy search query to a Spotify search query | Below is the the instruction that describes the task:
### Input:
Translate a Mopidy search query to a Spotify search query
### Response:
def sp_search_query(query):
    """Translate a Mopidy search query to a Spotify search query"""
    terms = []
    for field, values in query.items():
        # Map Mopidy field names to Spotify's; None means "not searchable".
        field = SEARCH_FIELD_MAP.get(field, field)
        if field is None:
            continue
        for value in values:
            if field == 'year':
                year = _transform_year(value)
                if year is not None:
                    terms.append('%s:%d' % (field, year))
            elif field == 'any':
                terms.append('"%s"' % value)
            else:
                terms.append('%s:"%s"' % (field, value))
    return ' '.join(terms)
def parseGopkgImportPath(self, path):
"""
Definition: gopkg.in/<v>/<repo> || gopkg.in/<repo>.<v> || gopkg.in/<project>/<repo>
"""
parts = path.split('/')
if re.match('v[0-9]+', parts[1]):
if len(parts) < 3:
raise ValueError("Import path %s is not in gopkg.in/<v>/<repo> form" % path)
project = ""
repository = parts[2]
version = parts[1]
prefix = "/".join(parts[:3])
provider_prefix = "gopkg.in/%s/%s" % (parts[1], parts[2])
else:
if len(parts) < 2:
raise ValueError("Import path %s is not in gopkg.in/[<repo>.<v>|<project>/<repo>] form" % path)
dotparts = parts[1].split(".")
if len(dotparts) == 1:
# gopkg.in/<project>/<repo>
if len(parts) != 3:
raise ValueError("Import path %s is not in gopkg.in/<project>/<repo> form" % path)
prefix = "/".join(parts[:3])
project = parts[1]
dotparts = parts[2].split(".")
repository = dotparts[0]
if len(dotparts) == 0:
version = ""
else:
version = dotparts[1]
provider_prefix = "gopkg.in/%s/%s" % (parts[1], parts[2])
else:
if len(dotparts) != 2:
raise ValueError("Import path %s is not in gopkg.in/<repo>.<v> form" % path)
prefix = "/".join(parts[:2])
project = ""
repository = dotparts[0]
version = dotparts[1]
provider_prefix = "gopkg.in/%s" % parts[1]
repo = {}
repo["prefix"] = prefix
repo["signature"] = {"provider": "gopkg", "username": project, "project": repository, "version": version}
return repo | Definition: gopkg.in/<v>/<repo> || gopkg.in/<repo>.<v> || gopkg.in/<project>/<repo> | Below is the the instruction that describes the task:
### Input:
Definition: gopkg.in/<v>/<repo> || gopkg.in/<repo>.<v> || gopkg.in/<project>/<repo>
### Response:
def parseGopkgImportPath(self, path):
    """
    Definition: gopkg.in/<v>/<repo> || gopkg.in/<repo>.<v> || gopkg.in/<project>/<repo>

    Returns a dict with the repository ``prefix`` and a ``signature`` dict
    carrying provider, username (project), project (repository) and version.

    Raises ValueError when ``path`` does not match any supported form.
    """
    parts = path.split('/')
    if re.match('v[0-9]+', parts[1]):
        # gopkg.in/<v>/<repo>
        if len(parts) < 3:
            raise ValueError("Import path %s is not in gopkg.in/<v>/<repo> form" % path)
        project = ""
        repository = parts[2]
        version = parts[1]
        prefix = "/".join(parts[:3])
    else:
        if len(parts) < 2:
            raise ValueError("Import path %s is not in gopkg.in/[<repo>.<v>|<project>/<repo>] form" % path)
        dotparts = parts[1].split(".")
        if len(dotparts) == 1:
            # gopkg.in/<project>/<repo>
            if len(parts) != 3:
                raise ValueError("Import path %s is not in gopkg.in/<project>/<repo> form" % path)
            prefix = "/".join(parts[:3])
            project = parts[1]
            dotparts = parts[2].split(".")
            repository = dotparts[0]
            # str.split always returns at least one element, so the original
            # ``len(dotparts) == 0`` test was dead code and a plain repo name
            # (no ".<v>" suffix) raised IndexError on dotparts[1].
            if len(dotparts) < 2:
                version = ""
            else:
                version = dotparts[1]
        else:
            # gopkg.in/<repo>.<v>
            if len(dotparts) != 2:
                raise ValueError("Import path %s is not in gopkg.in/<repo>.<v> form" % path)
            prefix = "/".join(parts[:2])
            project = ""
            repository = dotparts[0]
            version = dotparts[1]
    repo = {}
    repo["prefix"] = prefix
    repo["signature"] = {"provider": "gopkg", "username": project, "project": repository, "version": version}
    return repo
def request(self, method, url, erc, **kwargs):
"""Abstract base method for making requests to the Webex Teams APIs.
This base method:
* Expands the API endpoint URL to an absolute URL
* Makes the actual HTTP request to the API endpoint
* Provides support for Webex Teams rate-limiting
* Inspects response codes and raises exceptions as appropriate
Args:
method(basestring): The request-method type ('GET', 'POST', etc.).
url(basestring): The URL of the API endpoint to be called.
erc(int): The expected response code that should be returned by the
Webex Teams API endpoint to indicate success.
**kwargs: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint.
"""
# Ensure the url is an absolute URL
abs_url = self.abs_url(url)
# Update request kwargs with session defaults
kwargs.setdefault('timeout', self.single_request_timeout)
while True:
# Make the HTTP request to the API endpoint
response = self._req_session.request(method, abs_url, **kwargs)
try:
# Check the response code for error conditions
check_response_code(response, erc)
except RateLimitError as e:
# Catch rate-limit errors
# Wait and retry if automatic rate-limit handling is enabled
if self.wait_on_rate_limit:
warnings.warn(RateLimitWarning(response))
time.sleep(e.retry_after)
continue
else:
# Re-raise the RateLimitError
raise
else:
return response | Abstract base method for making requests to the Webex Teams APIs.
This base method:
* Expands the API endpoint URL to an absolute URL
* Makes the actual HTTP request to the API endpoint
* Provides support for Webex Teams rate-limiting
* Inspects response codes and raises exceptions as appropriate
Args:
method(basestring): The request-method type ('GET', 'POST', etc.).
url(basestring): The URL of the API endpoint to be called.
erc(int): The expected response code that should be returned by the
Webex Teams API endpoint to indicate success.
**kwargs: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint. | Below is the the instruction that describes the task:
### Input:
Abstract base method for making requests to the Webex Teams APIs.
This base method:
* Expands the API endpoint URL to an absolute URL
* Makes the actual HTTP request to the API endpoint
* Provides support for Webex Teams rate-limiting
* Inspects response codes and raises exceptions as appropriate
Args:
method(basestring): The request-method type ('GET', 'POST', etc.).
url(basestring): The URL of the API endpoint to be called.
erc(int): The expected response code that should be returned by the
Webex Teams API endpoint to indicate success.
**kwargs: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint.
### Response:
def request(self, method, url, erc, **kwargs):
    """Abstract base method for making requests to the Webex Teams APIs.
    This base method:
    * Expands the API endpoint URL to an absolute URL
    * Makes the actual HTTP request to the API endpoint
    * Provides support for Webex Teams rate-limiting
    * Inspects response codes and raises exceptions as appropriate
    Args:
        method(basestring): The request-method type ('GET', 'POST', etc.).
        url(basestring): The URL of the API endpoint to be called.
        erc(int): The expected response code that should be returned by the
            Webex Teams API endpoint to indicate success.
        **kwargs: Passed on to the requests package.
    Raises:
        ApiError: If anything other than the expected response code is
            returned by the Webex Teams API endpoint.
    """
    # Resolve the (possibly relative) endpoint URL once, up front.
    abs_url = self.abs_url(url)
    # Fall back to the session-wide timeout unless the caller supplied one.
    kwargs.setdefault('timeout', self.single_request_timeout)
    while True:
        response = self._req_session.request(method, abs_url, **kwargs)
        try:
            check_response_code(response, erc)
        except RateLimitError as e:
            # Surface the error when automatic handling is disabled.
            if not self.wait_on_rate_limit:
                raise
            # Otherwise warn, honor Retry-After, and loop to retry.
            warnings.warn(RateLimitWarning(response))
            time.sleep(e.retry_after)
        else:
            return response
def notify(self, correlation_id, event, value):
"""
Fires event specified by its name and notifies all registered
IEventListener listeners
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param event: the name of the event that is to be fired.
:param value: the event arguments (parameters).
"""
e = self.find_event(event)
if e != None:
e.notify(correlation_id, value) | Fires event specified by its name and notifies all registered
IEventListener listeners
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param event: the name of the event that is to be fired.
    :param value: the event arguments (parameters). | Below is the instruction that describes the task:
### Input:
Fires event specified by its name and notifies all registered
IEventListener listeners
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param event: the name of the event that is to be fired.
:param value: the event arguments (parameters).
### Response:
def notify(self, correlation_id, event, value):
    """
    Fires event specified by its name and notifies all registered
    IEventListener listeners

    :param correlation_id: (optional) transaction id to trace execution through call chain.

    :param event: the name of the event that is to be fired.

    :param value: the event arguments (parameters).
    """
    e = self.find_event(event)
    # Identity check against None (PEP 8): ``!= None`` could misbehave for
    # event objects that define a custom __ne__.
    if e is not None:
        e.notify(correlation_id, value)
def runGetRequest(self, obj):
"""
Runs a get request by converting the specified datamodel
object into its protocol representation.
"""
protocolElement = obj.toProtocolElement()
jsonString = protocol.toJson(protocolElement)
return jsonString | Runs a get request by converting the specified datamodel
    object into its protocol representation. | Below is the instruction that describes the task:
### Input:
Runs a get request by converting the specified datamodel
object into its protocol representation.
### Response:
def runGetRequest(self, obj):
"""
Runs a get request by converting the specified datamodel
object into its protocol representation.
"""
protocolElement = obj.toProtocolElement()
jsonString = protocol.toJson(protocolElement)
return jsonString |
def main(host='localhost', port=8086):
"""Instantiate the connection to the InfluxDB client."""
user = 'root'
password = 'root'
dbname = 'demo'
protocol = 'json'
client = DataFrameClient(host, port, user, password, dbname)
print("Create pandas DataFrame")
df = pd.DataFrame(data=list(range(30)),
index=pd.date_range(start='2014-11-16',
periods=30, freq='H'), columns=['0'])
print("Create database: " + dbname)
client.create_database(dbname)
print("Write DataFrame")
client.write_points(df, 'demo', protocol=protocol)
print("Write DataFrame with Tags")
client.write_points(df, 'demo',
{'k1': 'v1', 'k2': 'v2'}, protocol=protocol)
print("Read DataFrame")
client.query("select * from demo")
print("Delete database: " + dbname)
client.drop_database(dbname) | Instantiate the connection to the InfluxDB client. | Below is the the instruction that describes the task:
### Input:
Instantiate the connection to the InfluxDB client.
### Response:
def main(host='localhost', port=8086):
"""Instantiate the connection to the InfluxDB client."""
user = 'root'
password = 'root'
dbname = 'demo'
protocol = 'json'
client = DataFrameClient(host, port, user, password, dbname)
print("Create pandas DataFrame")
df = pd.DataFrame(data=list(range(30)),
index=pd.date_range(start='2014-11-16',
periods=30, freq='H'), columns=['0'])
print("Create database: " + dbname)
client.create_database(dbname)
print("Write DataFrame")
client.write_points(df, 'demo', protocol=protocol)
print("Write DataFrame with Tags")
client.write_points(df, 'demo',
{'k1': 'v1', 'k2': 'v2'}, protocol=protocol)
print("Read DataFrame")
client.query("select * from demo")
print("Delete database: " + dbname)
client.drop_database(dbname) |
def parse_classi_or_classii_allele_name(name, infer_pair=True):
"""
Handle different forms of both single and alpha-beta allele names.
Alpha-beta alleles may look like:
DPA10105-DPB110001
HLA-DPA1*01:05-DPB1*100:01
hla-dpa1*0105-dpb1*10001
dpa1*0105-dpb1*10001
HLA-DPA1*01:05/DPB1*100:01
Other class II alleles may look like:
DRB1_0102
DRB101:02
HLA-DRB1_0102
"""
species, name = split_species_prefix(name)
# Handle the case where alpha/beta pairs are separated with a /.
name = name.replace("/", "-")
# Ignored underscores, such as with DRB1_0102
name = name.replace("_", "*")
parts = name.split("-")
if len(parts) == 2:
alpha_string, beta_string = parts
alpha = parse_allele_name(alpha_string)
beta = parse_allele_name(beta_string)
return (alpha, beta)
elif len(parts) == 1:
parsed = parse_allele_name(name, species)
if parsed.species == "HLA" and infer_pair:
alpha = infer_alpha_chain(parsed)
if alpha is not None:
return (alpha, parsed)
return (parsed,)
else:
raise AlleleParseError(
"Allele has too many parts: %s" % name) | Handle different forms of both single and alpha-beta allele names.
Alpha-beta alleles may look like:
DPA10105-DPB110001
HLA-DPA1*01:05-DPB1*100:01
hla-dpa1*0105-dpb1*10001
dpa1*0105-dpb1*10001
HLA-DPA1*01:05/DPB1*100:01
Other class II alleles may look like:
DRB1_0102
DRB101:02
HLA-DRB1_0102 | Below is the the instruction that describes the task:
### Input:
Handle different forms of both single and alpha-beta allele names.
Alpha-beta alleles may look like:
DPA10105-DPB110001
HLA-DPA1*01:05-DPB1*100:01
hla-dpa1*0105-dpb1*10001
dpa1*0105-dpb1*10001
HLA-DPA1*01:05/DPB1*100:01
Other class II alleles may look like:
DRB1_0102
DRB101:02
HLA-DRB1_0102
### Response:
def parse_classi_or_classii_allele_name(name, infer_pair=True):
"""
Handle different forms of both single and alpha-beta allele names.
Alpha-beta alleles may look like:
DPA10105-DPB110001
HLA-DPA1*01:05-DPB1*100:01
hla-dpa1*0105-dpb1*10001
dpa1*0105-dpb1*10001
HLA-DPA1*01:05/DPB1*100:01
Other class II alleles may look like:
DRB1_0102
DRB101:02
HLA-DRB1_0102
"""
species, name = split_species_prefix(name)
# Handle the case where alpha/beta pairs are separated with a /.
name = name.replace("/", "-")
# Ignored underscores, such as with DRB1_0102
name = name.replace("_", "*")
parts = name.split("-")
if len(parts) == 2:
alpha_string, beta_string = parts
alpha = parse_allele_name(alpha_string)
beta = parse_allele_name(beta_string)
return (alpha, beta)
elif len(parts) == 1:
parsed = parse_allele_name(name, species)
if parsed.species == "HLA" and infer_pair:
alpha = infer_alpha_chain(parsed)
if alpha is not None:
return (alpha, parsed)
return (parsed,)
else:
raise AlleleParseError(
"Allele has too many parts: %s" % name) |
def calculate_feature_vectorizer_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N, C_1 + ... + C_n]
Feature vectorizer concatenates all input tensors along the C-axis, so the output dimension along C-axis is simply
a sum of all input features.
'''
check_input_and_output_numbers(operator, input_count_range=[1, None], output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, FloatType, Int64Type])
if any(len(variable.type.shape) != 2 for variable in operator.inputs):
raise RuntimeError('Input(s) must be 2-D tensor(s)')
# Find the first batch size which is not unknown
N = 'None'
for variable in operator.inputs:
if variable.type.shape[0] != 'None':
N = variable.type.shape[0]
break
for variable in operator.inputs:
if variable.type.shape[0] not in ['None', N]:
raise RuntimeError('The batch dimensions should be the same to all input tensors.')
C = sum(info.inputDimensions for info in operator.raw_operator.featureVectorizer.inputList)
# Currently, we only expect numerical inputs. If both of integers and floats exist, we may convert integers into
# floats before concatenating them. Thus, the output type is integer-like only if all inputs are integer-like.
doc_string = operator.outputs[0].type.doc_string
if all(isinstance(variable.type, (Int64TensorType, Int64Type)) for variable in operator.inputs):
operator.outputs[0].type = Int64TensorType([N, C], doc_string=doc_string)
elif isinstance(operator.inputs[0].type, (FloatTensorType, FloatType)):
operator.outputs[0].type = FloatTensorType([N, C], doc_string=doc_string)
else:
raise ValueError('Unsupported input type: %s' % type(operator.inputs[0].type)) | Allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N, C_1 + ... + C_n]
Feature vectorizer concatenates all input tensors along the C-axis, so the output dimension along C-axis is simply
a sum of all input features. | Below is the the instruction that describes the task:
### Input:
Allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N, C_1 + ... + C_n]
Feature vectorizer concatenates all input tensors along the C-axis, so the output dimension along C-axis is simply
a sum of all input features.
### Response:
def calculate_feature_vectorizer_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N, C_1 + ... + C_n]
Feature vectorizer concatenates all input tensors along the C-axis, so the output dimension along C-axis is simply
a sum of all input features.
'''
check_input_and_output_numbers(operator, input_count_range=[1, None], output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, FloatType, Int64Type])
if any(len(variable.type.shape) != 2 for variable in operator.inputs):
raise RuntimeError('Input(s) must be 2-D tensor(s)')
# Find the first batch size which is not unknown
N = 'None'
for variable in operator.inputs:
if variable.type.shape[0] != 'None':
N = variable.type.shape[0]
break
for variable in operator.inputs:
if variable.type.shape[0] not in ['None', N]:
raise RuntimeError('The batch dimensions should be the same to all input tensors.')
C = sum(info.inputDimensions for info in operator.raw_operator.featureVectorizer.inputList)
# Currently, we only expect numerical inputs. If both of integers and floats exist, we may convert integers into
# floats before concatenating them. Thus, the output type is integer-like only if all inputs are integer-like.
doc_string = operator.outputs[0].type.doc_string
if all(isinstance(variable.type, (Int64TensorType, Int64Type)) for variable in operator.inputs):
operator.outputs[0].type = Int64TensorType([N, C], doc_string=doc_string)
elif isinstance(operator.inputs[0].type, (FloatTensorType, FloatType)):
operator.outputs[0].type = FloatTensorType([N, C], doc_string=doc_string)
else:
raise ValueError('Unsupported input type: %s' % type(operator.inputs[0].type)) |
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs) | Create a new connection | Below is the the instruction that describes the task:
### Input:
Create a new connection
### Response:
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs) |
def genre(self):
""" Cette routine convertit les indications morphologiques, données dans le fichier lemmes.la, pour exprimer le genre du mot dans la langue courante.
:return: Genre
:rtype: str
"""
_genre = ""
if " m." in self._indMorph:
_genre += "m"
if " f." in self._indMorph:
_genre += "f"
if " n." in self._indMorph:
_genre += "n"
_genre = _genre.strip()
if self._renvoi and not _genre:
lr = self._lemmatiseur.lemme(self._renvoi)
if lr:
return lr.genre()
return _genre | Cette routine convertit les indications morphologiques, données dans le fichier lemmes.la, pour exprimer le genre du mot dans la langue courante.
:return: Genre
:rtype: str | Below is the the instruction that describes the task:
### Input:
Cette routine convertit les indications morphologiques, données dans le fichier lemmes.la, pour exprimer le genre du mot dans la langue courante.
:return: Genre
:rtype: str
### Response:
def genre(self):
""" Cette routine convertit les indications morphologiques, données dans le fichier lemmes.la, pour exprimer le genre du mot dans la langue courante.
:return: Genre
:rtype: str
"""
_genre = ""
if " m." in self._indMorph:
_genre += "m"
if " f." in self._indMorph:
_genre += "f"
if " n." in self._indMorph:
_genre += "n"
_genre = _genre.strip()
if self._renvoi and not _genre:
lr = self._lemmatiseur.lemme(self._renvoi)
if lr:
return lr.genre()
return _genre |
def write_vasp_input(self, vasp_input_set=MPRelaxSet, output_dir=".",
create_directory=True, **kwargs):
"""
Writes VASP input to an output_dir.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
output_dir: Directory to output files
create_directory: Create the directory if not present. Defaults to
True.
\\*\\*kwargs: All keyword args supported by the VASP input set.
"""
vasp_input_set(self.final_structure, **kwargs).write_input(
output_dir, make_dir_if_not_present=create_directory)
with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
json.dump(self.as_dict(), fp) | Writes VASP input to an output_dir.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
output_dir: Directory to output files
create_directory: Create the directory if not present. Defaults to
True.
\\*\\*kwargs: All keyword args supported by the VASP input set. | Below is the the instruction that describes the task:
### Input:
Writes VASP input to an output_dir.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
output_dir: Directory to output files
create_directory: Create the directory if not present. Defaults to
True.
\\*\\*kwargs: All keyword args supported by the VASP input set.
### Response:
def write_vasp_input(self, vasp_input_set=MPRelaxSet, output_dir=".",
create_directory=True, **kwargs):
"""
Writes VASP input to an output_dir.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
output_dir: Directory to output files
create_directory: Create the directory if not present. Defaults to
True.
\\*\\*kwargs: All keyword args supported by the VASP input set.
"""
vasp_input_set(self.final_structure, **kwargs).write_input(
output_dir, make_dir_if_not_present=create_directory)
with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
json.dump(self.as_dict(), fp) |
def set_led(self, red=0, green=0, blue=0):
"""Sets the LED color. Values are RGB between 0-255."""
self._led = (red, green, blue)
self._control() | Sets the LED color. Values are RGB between 0-255. | Below is the the instruction that describes the task:
### Input:
Sets the LED color. Values are RGB between 0-255.
### Response:
def set_led(self, red=0, green=0, blue=0):
"""Sets the LED color. Values are RGB between 0-255."""
self._led = (red, green, blue)
self._control() |
def variables(self, value):
"""
Setter for **self.__variables** attribute.
:param value: Attribute value.
:type value: dict
"""
if value is not None:
assert type(value) is dict, "'{0}' attribute: '{1}' type is not 'dict'!".format("variables", value)
for key, element in value.iteritems():
assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"variables", key)
assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"variables", element)
self.__variables = value | Setter for **self.__variables** attribute.
:param value: Attribute value.
:type value: dict | Below is the the instruction that describes the task:
### Input:
Setter for **self.__variables** attribute.
:param value: Attribute value.
:type value: dict
### Response:
def variables(self, value):
"""
Setter for **self.__variables** attribute.
:param value: Attribute value.
:type value: dict
"""
if value is not None:
assert type(value) is dict, "'{0}' attribute: '{1}' type is not 'dict'!".format("variables", value)
for key, element in value.iteritems():
assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"variables", key)
assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"variables", element)
self.__variables = value |
def probe_wdl(self, board: chess.Board) -> int:
"""
Probes for win/draw/loss-information.
Returns ``1`` if the side to move is winning, ``0`` if it is a draw,
and ``-1`` if the side to move is losing.
>>> import chess
>>> import chess.gaviota
>>>
>>> with chess.gaviota.open_tablebase("data/gaviota") as tablebase:
... board = chess.Board("8/4k3/8/B7/8/8/8/4K3 w - - 0 1")
... print(tablebase.probe_wdl(board))
...
0
:raises: :exc:`KeyError` (or specifically
:exc:`chess.gaviota.MissingTableError`) if the probe fails. Use
:func:`~chess.gaviota.PythonTablebase.get_wdl()` if you prefer
to get ``None`` instead of an exception.
Note that probing a corrupted table file is undefined behavior.
"""
dtm = self.probe_dtm(board)
if dtm == 0:
if board.is_checkmate():
return -1
else:
return 0
elif dtm > 0:
return 1
else:
return -1 | Probes for win/draw/loss-information.
Returns ``1`` if the side to move is winning, ``0`` if it is a draw,
and ``-1`` if the side to move is losing.
>>> import chess
>>> import chess.gaviota
>>>
>>> with chess.gaviota.open_tablebase("data/gaviota") as tablebase:
... board = chess.Board("8/4k3/8/B7/8/8/8/4K3 w - - 0 1")
... print(tablebase.probe_wdl(board))
...
0
:raises: :exc:`KeyError` (or specifically
:exc:`chess.gaviota.MissingTableError`) if the probe fails. Use
:func:`~chess.gaviota.PythonTablebase.get_wdl()` if you prefer
to get ``None`` instead of an exception.
Note that probing a corrupted table file is undefined behavior. | Below is the the instruction that describes the task:
### Input:
Probes for win/draw/loss-information.
Returns ``1`` if the side to move is winning, ``0`` if it is a draw,
and ``-1`` if the side to move is losing.
>>> import chess
>>> import chess.gaviota
>>>
>>> with chess.gaviota.open_tablebase("data/gaviota") as tablebase:
... board = chess.Board("8/4k3/8/B7/8/8/8/4K3 w - - 0 1")
... print(tablebase.probe_wdl(board))
...
0
:raises: :exc:`KeyError` (or specifically
:exc:`chess.gaviota.MissingTableError`) if the probe fails. Use
:func:`~chess.gaviota.PythonTablebase.get_wdl()` if you prefer
to get ``None`` instead of an exception.
Note that probing a corrupted table file is undefined behavior.
### Response:
def probe_wdl(self, board: chess.Board) -> int:
"""
Probes for win/draw/loss-information.
Returns ``1`` if the side to move is winning, ``0`` if it is a draw,
and ``-1`` if the side to move is losing.
>>> import chess
>>> import chess.gaviota
>>>
>>> with chess.gaviota.open_tablebase("data/gaviota") as tablebase:
... board = chess.Board("8/4k3/8/B7/8/8/8/4K3 w - - 0 1")
... print(tablebase.probe_wdl(board))
...
0
:raises: :exc:`KeyError` (or specifically
:exc:`chess.gaviota.MissingTableError`) if the probe fails. Use
:func:`~chess.gaviota.PythonTablebase.get_wdl()` if you prefer
to get ``None`` instead of an exception.
Note that probing a corrupted table file is undefined behavior.
"""
dtm = self.probe_dtm(board)
if dtm == 0:
if board.is_checkmate():
return -1
else:
return 0
elif dtm > 0:
return 1
else:
return -1 |
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b''.join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv | The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9 | Below is the the instruction that describes the task:
### Input:
The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
### Response:
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b''.join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv |
async def eval(self, text, opts=None, user=None):
'''
Evaluate a storm query and yield Nodes only.
'''
if user is None:
user = self.auth.getUserByName('root')
await self.boss.promote('storm', user=user, info={'query': text})
async with await self.snap(user=user) as snap:
async for node in snap.eval(text, opts=opts, user=user):
yield node | Evaluate a storm query and yield Nodes only. | Below is the the instruction that describes the task:
### Input:
Evaluate a storm query and yield Nodes only.
### Response:
async def eval(self, text, opts=None, user=None):
'''
Evaluate a storm query and yield Nodes only.
'''
if user is None:
user = self.auth.getUserByName('root')
await self.boss.promote('storm', user=user, info={'query': text})
async with await self.snap(user=user) as snap:
async for node in snap.eval(text, opts=opts, user=user):
yield node |
def load(self):
"""
Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file.
"""
import xlrd
self._validate()
self._logger.logging_load()
try:
workbook = xlrd.open_workbook(self.source)
except xlrd.biffh.XLRDError as e:
raise OpenError(e)
for worksheet in workbook.sheets():
self._worksheet = worksheet
if self._is_empty_sheet():
continue
self.__extract_not_empty_col_idx()
try:
start_row_idx = self._get_start_row_idx()
except DataError:
continue
rows = [
self.__get_row_values(row_idx)
for row_idx in range(start_row_idx + 1, self._row_count)
]
self.inc_table_count()
headers = self.__get_row_values(start_row_idx)
yield TableData(
self._make_table_name(),
headers,
rows,
dp_extractor=self.dp_extractor,
type_hints=self._extract_type_hints(headers),
) | Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file. | Below is the the instruction that describes the task:
### Input:
Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file.
### Response:
def load(self):
"""
Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file.
"""
import xlrd
self._validate()
self._logger.logging_load()
try:
workbook = xlrd.open_workbook(self.source)
except xlrd.biffh.XLRDError as e:
raise OpenError(e)
for worksheet in workbook.sheets():
self._worksheet = worksheet
if self._is_empty_sheet():
continue
self.__extract_not_empty_col_idx()
try:
start_row_idx = self._get_start_row_idx()
except DataError:
continue
rows = [
self.__get_row_values(row_idx)
for row_idx in range(start_row_idx + 1, self._row_count)
]
self.inc_table_count()
headers = self.__get_row_values(start_row_idx)
yield TableData(
self._make_table_name(),
headers,
rows,
dp_extractor=self.dp_extractor,
type_hints=self._extract_type_hints(headers),
) |
def add_function(self, func):
""" Record line profiling information for the given Python function.
"""
try:
# func_code does not exist in Python3
code = func.__code__
except AttributeError:
import warnings
warnings.warn("Could not extract a code object for the object %r"
% (func,))
return
if code not in self.code_map:
self.code_map[code] = {}
self.functions.append(func) | Record line profiling information for the given Python function. | Below is the the instruction that describes the task:
### Input:
Record line profiling information for the given Python function.
### Response:
def add_function(self, func):
""" Record line profiling information for the given Python function.
"""
try:
# func_code does not exist in Python3
code = func.__code__
except AttributeError:
import warnings
warnings.warn("Could not extract a code object for the object %r"
% (func,))
return
if code not in self.code_map:
self.code_map[code] = {}
self.functions.append(func) |
def _flatten_file_with_secondary(input, out_dir):
"""Flatten file representation with secondary indices (CWL-like)
"""
out = []
orig_dir = os.path.dirname(input["base"])
for finfo in [input["base"]] + input.get("secondary", []):
cur_dir = os.path.dirname(finfo)
if cur_dir != orig_dir and cur_dir.startswith(orig_dir):
cur_out_dir = os.path.join(out_dir, cur_dir.replace(orig_dir + "/", ""))
else:
cur_out_dir = out_dir
out.append({"path": finfo, "dir": cur_out_dir})
return out | Flatten file representation with secondary indices (CWL-like) | Below is the the instruction that describes the task:
### Input:
Flatten file representation with secondary indices (CWL-like)
### Response:
def _flatten_file_with_secondary(input, out_dir):
"""Flatten file representation with secondary indices (CWL-like)
"""
out = []
orig_dir = os.path.dirname(input["base"])
for finfo in [input["base"]] + input.get("secondary", []):
cur_dir = os.path.dirname(finfo)
if cur_dir != orig_dir and cur_dir.startswith(orig_dir):
cur_out_dir = os.path.join(out_dir, cur_dir.replace(orig_dir + "/", ""))
else:
cur_out_dir = out_dir
out.append({"path": finfo, "dir": cur_out_dir})
return out |
def get_machines(self, origin, hostnames):
"""Return a set of machines based on `hostnames`.
Any hostname that is not found will result in an error.
"""
hostnames = {
hostname: True
for hostname in hostnames
}
machines = origin.Machines.read(hostnames=hostnames)
machines = [
machine
for machine in machines
if hostnames.pop(machine.hostname, False)
]
if len(hostnames) > 0:
raise CommandError(
"Unable to find %s %s." % (
"machines" if len(hostnames) > 1 else "machine",
','.join(hostnames)))
return machines | Return a set of machines based on `hostnames`.
Any hostname that is not found will result in an error. | Below is the the instruction that describes the task:
### Input:
Return a set of machines based on `hostnames`.
Any hostname that is not found will result in an error.
### Response:
def get_machines(self, origin, hostnames):
"""Return a set of machines based on `hostnames`.
Any hostname that is not found will result in an error.
"""
hostnames = {
hostname: True
for hostname in hostnames
}
machines = origin.Machines.read(hostnames=hostnames)
machines = [
machine
for machine in machines
if hostnames.pop(machine.hostname, False)
]
if len(hostnames) > 0:
raise CommandError(
"Unable to find %s %s." % (
"machines" if len(hostnames) > 1 else "machine",
','.join(hostnames)))
return machines |
def __dtw_calc_accu_cost(C, D, D_steps, step_sizes_sigma,
weights_mul, weights_add, max_0, max_1): # pragma: no cover
'''Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
'''
for cur_n in range(max_0, D.shape[0]):
for cur_m in range(max_1, D.shape[1]):
# accumulate costs
for cur_step_idx, cur_w_add, cur_w_mul in zip(range(step_sizes_sigma.shape[0]),
weights_add, weights_mul):
cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
cur_m - step_sizes_sigma[cur_step_idx, 1]]
cur_C = cur_w_mul * C[cur_n - max_0, cur_m - max_1]
cur_C += cur_w_add
cur_cost = cur_D + cur_C
# check if cur_cost is smaller than the one stored in D
if cur_cost < D[cur_n, cur_m]:
D[cur_n, cur_m] = cur_cost
# save step-index
D_steps[cur_n, cur_m] = cur_step_idx
return D, D_steps | Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw | Below is the the instruction that describes the task:
### Input:
Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
### Response:
def __dtw_calc_accu_cost(C, D, D_steps, step_sizes_sigma,
                         weights_mul, weights_add, max_0, max_1):  # pragma: no cover
    '''Calculate the accumulated cost matrix D.
    Use dynamic programming to calculate the accumulated costs.
    Parameters
    ----------
    C : np.ndarray [shape=(N, M)]
        pre-computed cost matrix
    D : np.ndarray [shape=(N, M)]
        accumulated cost matrix
    D_steps : np.ndarray [shape=(N, M)]
        steps which were used for calculating D
    step_sizes_sigma : np.ndarray [shape=[n, 2]]
        Specifies allowed step sizes as used by the dtw.
    weights_add : np.ndarray [shape=[n, ]]
        Additive weights to penalize certain step sizes.
    weights_mul : np.ndarray [shape=[n, ]]
        Multiplicative weights to penalize certain step sizes.
    max_0 : int
        maximum number of steps in step_sizes_sigma in dim 0.
    max_1 : int
        maximum number of steps in step_sizes_sigma in dim 1.
    Returns
    -------
    D : np.ndarray [shape=(N,M)]
        accumulated cost matrix.
        D[N,M] is the total alignment cost.
        When doing subsequence DTW, D[N,:] indicates a matching function.
    D_steps : np.ndarray [shape=(N,M)]
        steps which were used for calculating D.
    See Also
    --------
    dtw
    '''
    for cur_n in range(max_0, D.shape[0]):
        for cur_m in range(max_1, D.shape[1]):
            # The local cost of the current cell is identical for every
            # candidate step, so look it up once per (cur_n, cur_m)
            # instead of once per step in the inner loop.
            cur_C = C[cur_n - max_0, cur_m - max_1]
            # accumulate costs: try each allowed step and keep the
            # cheapest predecessor
            for cur_step_idx, cur_w_add, cur_w_mul in zip(range(step_sizes_sigma.shape[0]),
                                                          weights_add, weights_mul):
                cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
                          cur_m - step_sizes_sigma[cur_step_idx, 1]]
                # candidate cost = predecessor cost + weighted local cost
                cur_cost = cur_D + cur_w_mul * cur_C + cur_w_add
                # check if cur_cost is smaller than the one stored in D
                if cur_cost < D[cur_n, cur_m]:
                    D[cur_n, cur_m] = cur_cost
                    # save step-index
                    D_steps[cur_n, cur_m] = cur_step_idx
    return D, D_steps
def ConfigureRequest(self, upload_config, http_request, url_builder):
"""Configure the request and url for this upload."""
# Validate total_size vs. max_size
if (self.total_size and upload_config.max_size and
self.total_size > upload_config.max_size):
raise exceptions.InvalidUserInputError(
'Upload too big: %s larger than max size %s' % (
self.total_size, upload_config.max_size))
# Validate mime type
if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
raise exceptions.InvalidUserInputError(
'MIME type %s does not match any accepted MIME ranges %s' % (
self.mime_type, upload_config.accept))
self.__SetDefaultUploadStrategy(upload_config, http_request)
if self.strategy == SIMPLE_UPLOAD:
url_builder.relative_path = upload_config.simple_path
if http_request.body:
url_builder.query_params['uploadType'] = 'multipart'
self.__ConfigureMultipartRequest(http_request)
else:
url_builder.query_params['uploadType'] = 'media'
self.__ConfigureMediaRequest(http_request)
# Once the entire body is written, compress the body if configured
# to. Both multipart and media request uploads will read the
# entire stream into memory, which means full compression is also
# safe to perform. Because the strategy is set to SIMPLE_UPLOAD,
# StreamInChunks throws an exception, meaning double compression
# cannot happen.
if self.__gzip_encoded:
http_request.headers['Content-Encoding'] = 'gzip'
# Turn the body into a stream so that we can compress it, then
# read the compressed bytes. In the event of a retry (e.g. if
# our access token has expired), we need to be able to re-read
# the body, which we can't do with a stream. So, we consume the
# bytes from the stream now and store them in a re-readable
# bytes container.
http_request.body = (
compression.CompressStream(
six.BytesIO(http_request.body))[0].read())
else:
url_builder.relative_path = upload_config.resumable_path
url_builder.query_params['uploadType'] = 'resumable'
self.__ConfigureResumableRequest(http_request) | Configure the request and url for this upload. | Below is the the instruction that describes the task:
### Input:
Configure the request and url for this upload.
### Response:
def ConfigureRequest(self, upload_config, http_request, url_builder):
        """Configure the request and url for this upload.
        Validates the upload against the service's size and MIME-type
        limits, chooses an upload strategy, and rewrites the request body,
        headers, and URL accordingly.
        Args:
            upload_config: Upload configuration (accepted MIME ranges,
                max_size, simple/resumable endpoint paths).
            http_request: The request to configure; mutated in place.
            url_builder: Builder whose relative_path and query_params are
                set for the chosen upload strategy.
        Raises:
            exceptions.InvalidUserInputError: If the upload exceeds
                upload_config.max_size or its MIME type is not accepted.
        """
        # Validate total_size vs. max_size
        if (self.total_size and upload_config.max_size and
                self.total_size > upload_config.max_size):
            raise exceptions.InvalidUserInputError(
                'Upload too big: %s larger than max size %s' % (
                    self.total_size, upload_config.max_size))
        # Validate mime type
        if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
            raise exceptions.InvalidUserInputError(
                'MIME type %s does not match any accepted MIME ranges %s' % (
                    self.mime_type, upload_config.accept))
        self.__SetDefaultUploadStrategy(upload_config, http_request)
        if self.strategy == SIMPLE_UPLOAD:
            url_builder.relative_path = upload_config.simple_path
            # multipart when the request already carries a body (metadata +
            # media); plain media upload otherwise.
            if http_request.body:
                url_builder.query_params['uploadType'] = 'multipart'
                self.__ConfigureMultipartRequest(http_request)
            else:
                url_builder.query_params['uploadType'] = 'media'
                self.__ConfigureMediaRequest(http_request)
            # Once the entire body is written, compress the body if configured
            # to. Both multipart and media request uploads will read the
            # entire stream into memory, which means full compression is also
            # safe to perform. Because the strategy is set to SIMPLE_UPLOAD,
            # StreamInChunks throws an exception, meaning double compression
            # cannot happen.
            if self.__gzip_encoded:
                http_request.headers['Content-Encoding'] = 'gzip'
                # Turn the body into a stream so that we can compress it, then
                # read the compressed bytes. In the event of a retry (e.g. if
                # our access token has expired), we need to be able to re-read
                # the body, which we can't do with a stream. So, we consume the
                # bytes from the stream now and store them in a re-readable
                # bytes container.
                http_request.body = (
                    compression.CompressStream(
                        six.BytesIO(http_request.body))[0].read())
        else:
            # resumable upload: only the session-initiation URL is set here.
            url_builder.relative_path = upload_config.resumable_path
            url_builder.query_params['uploadType'] = 'resumable'
            self.__ConfigureResumableRequest(http_request)
def _blocks(self, name):
"""Inner wrapper to search for blocks by name.
"""
i = len(self)
while i >= 0:
i -= 1
if name in self[i]['__names__']:
for b in self[i]['__blocks__']:
r = b.raw()
if r and r == name:
return b
else:
for b in self[i]['__blocks__']:
r = b.raw()
if r and name.startswith(r):
b = utility.blocksearch(b, name)
if b:
return b
return False | Inner wrapper to search for blocks by name. | Below is the the instruction that describes the task:
### Input:
Inner wrapper to search for blocks by name.
### Response:
def _blocks(self, name):
        """Inner wrapper to search for blocks by name.
        Scopes are searched innermost-first.  Within a scope, a block is
        returned on an exact raw-name match; otherwise blocks whose raw
        name is a prefix of ``name`` are searched recursively via
        ``utility.blocksearch``.
        args:
            name (str): block name to look up
        returns:
            Block if found, False otherwise
        """
        # Iterate scopes from innermost to outermost.  The previous
        # index-based loop (``i = len(self); while i >= 0: i -= 1``) ran one
        # step too far: index -1 re-examined the last scope and raised
        # IndexError when the scope list was empty.
        for scope in reversed(self):
            if name in scope['__names__']:
                for b in scope['__blocks__']:
                    r = b.raw()
                    if r and r == name:
                        return b
            else:
                for b in scope['__blocks__']:
                    r = b.raw()
                    if r and name.startswith(r):
                        b = utility.blocksearch(b, name)
                        if b:
                            return b
        return False
def _validate_samples_factors(mwtabfile, validate_samples=True, validate_factors=True):
"""Validate ``Samples`` and ``Factors`` identifiers across the file.
:param mwtabfile: Instance of :class:`~mwtab.mwtab.MWTabFile`.
:type mwtabfile: :class:`~mwtab.mwtab.MWTabFile`
:return: None
:rtype: :py:obj:`None`
"""
from_subject_samples = {i["local_sample_id"] for i in mwtabfile["SUBJECT_SAMPLE_FACTORS"]["SUBJECT_SAMPLE_FACTORS"]}
from_subject_factors = {i["factors"] for i in mwtabfile["SUBJECT_SAMPLE_FACTORS"]["SUBJECT_SAMPLE_FACTORS"]}
if validate_samples:
if "MS_METABOLITE_DATA" in mwtabfile:
from_metabolite_data_samples = set(mwtabfile["MS_METABOLITE_DATA"]["MS_METABOLITE_DATA_START"]["Samples"])
assert from_subject_samples == from_metabolite_data_samples
if "NMR_BINNED_DATA" in mwtabfile:
from_nmr_binned_data_samples = set(mwtabfile["NMR_BINNED_DATA"]["NMR_BINNED_DATA_START"]["Fields"][1:])
assert from_subject_samples == from_nmr_binned_data_samples
if validate_factors:
if "MS_METABOLITE_DATA" in mwtabfile:
from_metabolite_data_factors = set(mwtabfile["MS_METABOLITE_DATA"]["MS_METABOLITE_DATA_START"]["Factors"])
assert from_subject_factors == from_metabolite_data_factors | Validate ``Samples`` and ``Factors`` identifiers across the file.
:param mwtabfile: Instance of :class:`~mwtab.mwtab.MWTabFile`.
:type mwtabfile: :class:`~mwtab.mwtab.MWTabFile`
:return: None
:rtype: :py:obj:`None` | Below is the instruction that describes the task:
### Input:
Validate ``Samples`` and ``Factors`` identifiers across the file.
:param mwtabfile: Instance of :class:`~mwtab.mwtab.MWTabFile`.
:type mwtabfile: :class:`~mwtab.mwtab.MWTabFile`
:return: None
:rtype: :py:obj:`None`
### Response:
def _validate_samples_factors(mwtabfile, validate_samples=True, validate_factors=True):
    """Validate ``Samples`` and ``Factors`` identifiers across the file.
    :param mwtabfile: Instance of :class:`~mwtab.mwtab.MWTabFile`.
    :type mwtabfile: :class:`~mwtab.mwtab.MWTabFile`
    :param validate_samples: Check sample identifiers for consistency when True.
    :type validate_samples: :py:obj:`bool`
    :param validate_factors: Check factor identifiers for consistency when True.
    :type validate_factors: :py:obj:`bool`
    :raises AssertionError: When identifiers are inconsistent between sections.
        Note that ``assert`` statements are stripped under ``python -O``.
    :return: None
    :rtype: :py:obj:`None`
    """
    from_subject_samples = {i["local_sample_id"] for i in mwtabfile["SUBJECT_SAMPLE_FACTORS"]["SUBJECT_SAMPLE_FACTORS"]}
    from_subject_factors = {i["factors"] for i in mwtabfile["SUBJECT_SAMPLE_FACTORS"]["SUBJECT_SAMPLE_FACTORS"]}
    if validate_samples:
        if "MS_METABOLITE_DATA" in mwtabfile:
            from_metabolite_data_samples = set(mwtabfile["MS_METABOLITE_DATA"]["MS_METABOLITE_DATA_START"]["Samples"])
            assert from_subject_samples == from_metabolite_data_samples, \
                "Sample ids in MS_METABOLITE_DATA do not match SUBJECT_SAMPLE_FACTORS"
        if "NMR_BINNED_DATA" in mwtabfile:
            # first field is the bin label column; sample ids follow it.
            from_nmr_binned_data_samples = set(mwtabfile["NMR_BINNED_DATA"]["NMR_BINNED_DATA_START"]["Fields"][1:])
            assert from_subject_samples == from_nmr_binned_data_samples, \
                "Sample ids in NMR_BINNED_DATA do not match SUBJECT_SAMPLE_FACTORS"
    if validate_factors:
        if "MS_METABOLITE_DATA" in mwtabfile:
            from_metabolite_data_factors = set(mwtabfile["MS_METABOLITE_DATA"]["MS_METABOLITE_DATA_START"]["Factors"])
            assert from_subject_factors == from_metabolite_data_factors, \
                "Factors in MS_METABOLITE_DATA do not match SUBJECT_SAMPLE_FACTORS"
def registration_request_verify(registration_request):
"""
Verifies that all required parameters and correct values are included in the client registration request.
:param registration_request: the authentication request to verify
:raise InvalidClientRegistrationRequest: if the registration is incorrect
"""
try:
registration_request.verify()
except MessageException as e:
raise InvalidClientRegistrationRequest(str(e), registration_request, oauth_error='invalid_request') from e | Verifies that all required parameters and correct values are included in the client registration request.
:param registration_request: the authentication request to verify
:raise InvalidClientRegistrationRequest: if the registration is incorrect | Below is the instruction that describes the task:
### Input:
Verifies that all required parameters and correct values are included in the client registration request.
:param registration_request: the authentication request to verify
:raise InvalidClientRegistrationRequest: if the registration is incorrect
### Response:
def registration_request_verify(registration_request):
    """
    Checks that the client registration request carries every required
    parameter with an acceptable value.
    :param registration_request: the registration request to check
    :raise InvalidClientRegistrationRequest: if the request fails verification
    """
    try:
        registration_request.verify()
    except MessageException as exc:
        # Surface the underlying message error as an OAuth 'invalid_request'.
        raise InvalidClientRegistrationRequest(
            str(exc), registration_request,
            oauth_error='invalid_request') from exc
def _build_int_array_el(el_name, parent, list_):
"""build a soapenc:Array made of ints called `el_name` as a child
of `parent`"""
el = parent.add_child(el_name)
el.add_attribute('xmlns:soapenc',
'http://schemas.xmlsoap.org/soap/encoding/')
el.add_attribute('xsi:type', 'soapenc:Array')
el.add_attribute('soapenc:arrayType', 'xsd:int[{:d}]'.format(len(list_)))
for item in list_:
item_el = el.add_child('item', str(item))
item_el.add_attribute('xsi:type', 'xsd:int')
return el | build a soapenc:Array made of ints called `el_name` as a child
of `parent` | Below is the instruction that describes the task:
### Input:
build a soapenc:Array made of ints called `el_name` as a child
of `parent`
### Response:
def _build_int_array_el(el_name, parent, list_):
    """Create a soapenc:Array element named `el_name` as a child of
    `parent`, holding one xsd:int item per entry of `list_`.
    Returns the newly created array element."""
    array_el = parent.add_child(el_name)
    array_el.add_attribute('xmlns:soapenc',
                           'http://schemas.xmlsoap.org/soap/encoding/')
    array_el.add_attribute('xsi:type', 'soapenc:Array')
    array_el.add_attribute('soapenc:arrayType',
                           'xsd:int[{:d}]'.format(len(list_)))
    for value in list_:
        child_el = array_el.add_child('item', str(value))
        child_el.add_attribute('xsi:type', 'xsd:int')
    return array_el
def make_absolute_paths(content):
"""Convert all MEDIA files into a file://URL paths in order to
correctly get it displayed in PDFs."""
overrides = [
{
'root': settings.MEDIA_ROOT,
'url': settings.MEDIA_URL,
},
{
'root': settings.STATIC_ROOT,
'url': settings.STATIC_URL,
}
]
has_scheme = re.compile(r'^[^:/]+://')
for x in overrides:
if not x['url'] or has_scheme.match(x['url']):
continue
if not x['root'].endswith('/'):
x['root'] += '/'
occur_pattern = '''["|']({0}.*?)["|']'''
occurences = re.findall(occur_pattern.format(x['url']), content)
occurences = list(set(occurences)) # Remove dups
for occur in occurences:
content = content.replace(occur,
pathname2fileurl(x['root']) +
occur[len(x['url']):])
return content | Convert all MEDIA files into a file://URL paths in order to
correctly get it displayed in PDFs. | Below is the instruction that describes the task:
### Input:
Convert all MEDIA files into a file://URL paths in order to
correctly get it displayed in PDFs.
### Response:
def make_absolute_paths(content):
    """Convert every MEDIA/STATIC URL found in ``content`` into a
    file:// URL so referenced assets display correctly in PDFs."""
    # Each override maps a URL prefix used in the markup to the local
    # filesystem root the prefix should be rewritten against.
    overrides = [
        {
            'root': settings.MEDIA_ROOT,
            'url': settings.MEDIA_URL,
        },
        {
            'root': settings.STATIC_ROOT,
            'url': settings.STATIC_URL,
        }
    ]
    has_scheme = re.compile(r'^[^:/]+://')
    for x in overrides:
        # Skip unset URLs and fully-qualified ones (http://, https://, ...),
        # which already resolve without rewriting.
        if not x['url'] or has_scheme.match(x['url']):
            continue
        if not x['root'].endswith('/'):
            x['root'] += '/'
        # Capture quoted occurrences of the URL prefix.  NOTE(review): the
        # character class ["|'] also matches a literal '|'; presumably only
        # double and single quotes were intended -- confirm before changing.
        occur_pattern = '''["|']({0}.*?)["|']'''
        occurences = re.findall(occur_pattern.format(x['url']), content)
        occurences = list(set(occurences))  # Remove dups
        for occur in occurences:
            # Swap the URL prefix for the file:// form of the local root,
            # keeping the path remainder after the prefix.
            content = content.replace(occur,
                                      pathname2fileurl(x['root']) +
                                      occur[len(x['url']):])
    return content
def stop(self, **kwargs):
"""Stop the environment.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabStopError: If the operation failed
"""
path = '%s/%s/stop' % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path, **kwargs) | Stop the environment.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabStopError: If the operation failed | Below is the instruction that describes the task:
### Input:
Stop the environment.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabStopError: If the operation failed
### Response:
def stop(self, **kwargs):
        """Stop the environment.
        Args:
            **kwargs: Extra options to send to the server (e.g. sudo)
        Raises:
            GitlabAuthenticationError: If authentication is not correct
            GitlabStopError: If the operation failed
        """
        # POST to <manager path>/<environment id>/stop on the server.
        obj_id = self.get_id()
        stop_path = '%s/%s/stop' % (self.manager.path, obj_id)
        self.manager.gitlab.http_post(stop_path, **kwargs)
def ca_bundle(self, ca_bundle):
"""
Sets the ca_bundle of this V1alpha1WebhookClientConfig.
`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.
:param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig.
:type: str
"""
if ca_bundle is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle):
raise ValueError("Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")
self._ca_bundle = ca_bundle | Sets the ca_bundle of this V1alpha1WebhookClientConfig.
`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.
:param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig.
:type: str | Below is the instruction that describes the task:
### Input:
Sets the ca_bundle of this V1alpha1WebhookClientConfig.
`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.
:param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig.
:type: str
### Response:
def ca_bundle(self, ca_bundle):
        """
        Sets the ca_bundle of this V1alpha1WebhookClientConfig.
        `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.
        :param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig.
        :type: str
        :raises ValueError: if `ca_bundle` is set but is not valid base64.
        """
        # Raw strings fix the invalid '\/' escape sequences that trigger a
        # DeprecationWarning on modern Python; the literal bytes (and hence
        # the regex and the error message) are unchanged.  None is always
        # accepted; otherwise the value must be (possibly empty) base64.
        if ca_bundle is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle):
            raise ValueError(r"Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")
        self._ca_bundle = ca_bundle
def _parse_references(xml):
"""Parse the references to ``Reference`` instances."""
references = []
ref_finder = HTMLReferenceFinder(xml)
for elm, uri_attr in ref_finder:
type_ = _discover_uri_type(elm.get(uri_attr))
references.append(Reference(elm, type_, uri_attr))
return references | Parse the references to ``Reference`` instances. | Below is the the instruction that describes the task:
### Input:
Parse the references to ``Reference`` instances.
### Response:
def _parse_references(xml):
    """Return a list of ``Reference`` instances, one per reference
    located in the given HTML tree."""
    return [
        Reference(elm, _discover_uri_type(elm.get(uri_attr)), uri_attr)
        for elm, uri_attr in HTMLReferenceFinder(xml)
    ]
def get_nts_sections(self, sections, sortby=None):
"""Given a list of sections containing GO IDs, get a list of sections w/GO nts."""
goids = self.get_goids_sections(sections)
gosubdag = GoSubDag(goids, self.go2obj)
return [(sec, gosubdag.get_nts(gos, sortby)) for sec, gos in sections] | Given a list of sections containing GO IDs, get a list of sections w/GO nts. | Below is the the instruction that describes the task:
### Input:
Given a list of sections containing GO IDs, get a list of sections w/GO nts.
### Response:
def get_nts_sections(self, sections, sortby=None):
        """Given a list of sections containing GO IDs, get a list of sections w/GO nts."""
        # Build one sub-DAG covering every GO ID mentioned in any section,
        # then translate each section's IDs through it.
        subdag = GoSubDag(self.get_goids_sections(sections), self.go2obj)
        nts_sections = []
        for section_name, section_goids in sections:
            nts_sections.append((section_name, subdag.get_nts(section_goids, sortby)))
        return nts_sections
def _import_epublication(self, epub):
"""
Fill internal property ._POST dictionary with data from EPublication.
"""
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori))) | Fill internal property ._POST dictionary with data from EPublication. | Below is the the instruction that describes the task:
### Input:
Fill internal property ._POST dictionary with data from EPublication.
### Response:
def _import_epublication(self, epub):
        """
        Fill internal property ._POST dictionary with data from EPublication.
        Maps each EPublication attribute onto the corresponding Aleph
        form-field identifier.  NOTE(review): when there are more than three
        authors, ``epub.autori`` is modified in place (authors from the third
        onward are merged into a single string) -- the caller's object is
        mutated.
        """
        # mrs. Svobodová requires that annotation exported by us have this
        # prefix
        prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
        self._POST["P0501010__a"] = epub.ISBN
        self._POST["P07012001_a"] = epub.nazev
        self._POST["P07032001_e"] = epub.podnazev
        self._POST["P0502010__b"] = epub.vazba
        self._POST["P0504010__d"] = epub.cena
        self._POST["P07042001_h"] = epub.castDil
        self._POST["P07052001_i"] = epub.nazevCasti
        self._POST["P0902210__c"] = epub.nakladatelVydavatel
        self._POST["P0903210__d"] = epub.datumVydani
        self._POST["P0801205__a"] = epub.poradiVydani
        self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
        self._POST["P0503010__x"] = epub.format
        self._POST["P110185640u"] = epub.url or ""
        self._POST["P0901210__a"] = epub.mistoVydani
        self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
        self._POST["P1801URL__u"] = epub.internal_url
        self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
        # At most three author fields exist; collapse authors 3..n into the
        # third slot.  This mutates the input list (see docstring note).
        if len(epub.autori) > 3:
            epub.autori[2] = ", ".join(epub.autori[2:])
            epub.autori = epub.autori[:3]
        # check whether the autors have required type (string)
        # (``basestring`` => this module targets Python 2; stripped under -O)
        for author in epub.autori:
            error_msg = "Bad type of author (%s) (str is required)."
            assert isinstance(author, basestring), (error_msg % type(author))
        authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
        self._POST.update(dict(zip(authors_fields, epub.autori)))
def dir_name_changed(self, widget, data=None):
"""
Function is used for controlling
label Full Directory project name
and storing current project directory
in configuration manager
"""
config_manager.set_config_value("da.project_dir", self.dir_name.get_text())
self.update_full_label() | Function is used for controlling
label Full Directory project name
and storing current project directory
in configuration manager | Below is the the instruction that describes the task:
### Input:
Function is used for controlling
label Full Directory project name
and storing current project directory
in configuration manager
### Response:
def dir_name_changed(self, widget, data=None):
        """
        Handler invoked when the project-directory entry changes.
        Persists the current project directory via the configuration
        manager and refreshes the "Full Directory project name" label.
        """
        config_manager.set_config_value("da.project_dir", self.dir_name.get_text())
        self.update_full_label()
def send_request(self, worker_class_or_function, args, on_receive=None):
"""
Requests some work to be done by the backend. You can get notified of
the work results by passing a callback (on_receive).
:param worker_class_or_function: Worker class or function
:param args: worker args, any Json serializable objects
:param on_receive: an optional callback executed when we receive the
worker's results. The callback will be called with one arguments:
the results of the worker (object)
:raise: backend.NotRunning if the backend process is not running.
"""
if not self.running:
try:
# try to restart the backend if it crashed.
self.start(self.server_script, interpreter=self.interpreter,
args=self.args)
except AttributeError:
pass # not started yet
finally:
# caller should try again, later
raise NotRunning()
else:
comm('sending request, worker=%r' % worker_class_or_function)
# create a socket, the request will be send as soon as the socket
# has connected
socket = JsonTcpClient(
self.editor, self._port, worker_class_or_function, args,
on_receive=on_receive)
socket.finished.connect(self._rm_socket)
self._sockets.append(socket)
# restart heartbeat timer
self._heartbeat_timer.start() | Requests some work to be done by the backend. You can get notified of
the work results by passing a callback (on_receive).
:param worker_class_or_function: Worker class or function
:param args: worker args, any Json serializable objects
:param on_receive: an optional callback executed when we receive the
worker's results. The callback will be called with one arguments:
the results of the worker (object)
:raise: backend.NotRunning if the backend process is not running. | Below is the the instruction that describes the task:
### Input:
Requests some work to be done by the backend. You can get notified of
the work results by passing a callback (on_receive).
:param worker_class_or_function: Worker class or function
:param args: worker args, any Json serializable objects
:param on_receive: an optional callback executed when we receive the
worker's results. The callback will be called with one arguments:
the results of the worker (object)
:raise: backend.NotRunning if the backend process is not running.
### Response:
def send_request(self, worker_class_or_function, args, on_receive=None):
        """
        Requests some work to be done by the backend. You can get notified of
        the work results by passing a callback (on_receive).
        :param worker_class_or_function: Worker class or function
        :param args: worker args, any Json serializable objects
        :param on_receive: an optional callback executed when we receive the
            worker's results. The callback will be called with one arguments:
            the results of the worker (object)
        :raise: backend.NotRunning if the backend process is not running.
        """
        if not self.running:
            try:
                # try to restart the backend if it crashed.
                self.start(self.server_script, interpreter=self.interpreter,
                           args=self.args)
            except AttributeError:
                pass  # not started yet
            finally:
                # caller should try again, later
                # NOTE: the finally clause runs even when start() succeeds,
                # so this branch *always* raises NotRunning; the restart only
                # benefits the caller's subsequent retry.
                raise NotRunning()
        else:
            comm('sending request, worker=%r' % worker_class_or_function)
            # create a socket, the request will be send as soon as the socket
            # has connected
            socket = JsonTcpClient(
                self.editor, self._port, worker_class_or_function, args,
                on_receive=on_receive)
            socket.finished.connect(self._rm_socket)
            self._sockets.append(socket)
            # restart heartbeat timer
            self._heartbeat_timer.start()
def attach(cls, ip, vm, background=False, force=False):
""" Attach """
vm_ = Iaas.info(vm)
ip_ = cls.info(ip)
if not cls._check_and_detach(ip_, vm_):
return
# then we should attach the ip to the vm
attach = Iface._attach(ip_['iface_id'], vm_['id'])
if not background:
cls.display_progress(attach)
return attach | Attach | Below is the the instruction that describes the task:
### Input:
Attach
### Response:
def attach(cls, ip, vm, background=False, force=False):
        """Attach an ip to a vm, detaching it first if necessary.
        :param ip: ip address or id to attach
        :param vm: name or id of the vm to attach the ip to
        :param background: when True, return the attach operation
            immediately instead of displaying its progress
        :param force: accepted for interface compatibility; not used in
            this method body
        :returns: the attach operation, or None when detaching was refused
        """
        vm_ = Iaas.info(vm)
        ip_ = cls.info(ip)
        # bail out if the ip could not be (or was not allowed to be) detached
        if not cls._check_and_detach(ip_, vm_):
            return
        # then we should attach the ip to the vm
        attach = Iface._attach(ip_['iface_id'], vm_['id'])
        if not background:
            cls.display_progress(attach)
        return attach
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
"""Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
"""
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields != 1:
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
try:
# TODO: fix that we're losing precision here use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmt')
event_data.cookie_name = self.COOKIE_NAME
event_data.url = url
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set. | Below is the the instruction that describes the task:
### Input:
Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
### Response:
def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **kwargs):
    """Extracts event objects from the cookie.
    Args:
      parser_mediator (ParserMediator): parser mediator.
      cookie_data (bytes): cookie data.
      url (str): URL or path where the cookie got set.
    """
    # The utmt cookie is expected to contain exactly one dot-separated
    # field: the last-visit timestamp.
    fields = cookie_data.split('.')
    number_of_fields = len(fields)
    if number_of_fields != 1:
      parser_mediator.ProduceExtractionWarning(
          'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
              number_of_fields, self.COOKIE_NAME))
      return
    try:
      # TODO: fix that we're losing precision here use dfdatetime.
      # NOTE(review): the divisor suggests the stored value is in 100ns
      # intervals (10,000,000 per second) -- confirm against the cookie
      # format specification.
      last_visit_posix_time = int(fields[0], 10) / 10000000
    except ValueError:
      last_visit_posix_time = None
    if last_visit_posix_time is not None:
      date_time = dfdatetime_posix_time.PosixTime(
          timestamp=last_visit_posix_time)
      timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
    else:
      # Non-numeric field: emit a placeholder timestamp instead of failing.
      date_time = dfdatetime_semantic_time.SemanticTime('Not set')
      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
    event_data = GoogleAnalyticsEventData('utmt')
    event_data.cookie_name = self.COOKIE_NAME
    event_data.url = url
    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
    parser_mediator.ProduceEventWithEventData(event, event_data)
def build_etag(self, response, include_etag=True, **kwargs):
"""
Add an etag to the response body.
Uses spooky where possible because it is empirically fast and well-regarded.
See: http://blog.reverberate.org/2012/01/state-of-hash-functions-2012.html
"""
if not include_etag:
return
if not spooky:
# use built-in md5
response.add_etag()
return
# use spooky
response.headers["ETag"] = quote_etag(
hexlify(
spooky.hash128(
response.get_data(),
).to_bytes(16, "little"),
).decode("utf-8"),
) | Add an etag to the response body.
Uses spooky where possible because it is empirically fast and well-regarded.
See: http://blog.reverberate.org/2012/01/state-of-hash-functions-2012.html | Below is the the instruction that describes the task:
### Input:
Add an etag to the response body.
Uses spooky where possible because it is empirically fast and well-regarded.
See: http://blog.reverberate.org/2012/01/state-of-hash-functions-2012.html
### Response:
def build_etag(self, response, include_etag=True, **kwargs):
        """
        Add an etag to the response body.
        Uses spooky where possible because it is empirically fast and well-regarded.
        See: http://blog.reverberate.org/2012/01/state-of-hash-functions-2012.html
        :param response: response object (provides ``add_etag``, ``headers``
            and ``get_data``); tagged in place
        :param include_etag: when False, no etag is generated at all
        """
        if not include_etag:
            return
        if not spooky:
            # use built-in md5
            response.add_etag()
            return
        # use spooky
        # hash128() yields a 128-bit integer; serialize it to 16
        # little-endian bytes, hex-encode those, and quote the result as
        # required for an ETag header value.
        response.headers["ETag"] = quote_etag(
            hexlify(
                spooky.hash128(
                    response.get_data(),
                ).to_bytes(16, "little"),
            ).decode("utf-8"),
        )
def restore_yaml_comments(data, default_data):
"""Scan default_data for comments (we include empty lines in our
definition of comments) and place them before the same keys in data.
Only works with comments that are on one or more own lines, i.e.
not next to a yaml mapping.
"""
comment_map = dict()
default_lines = iter(default_data.splitlines())
for line in default_lines:
if not line:
comment = "\n"
elif line.startswith("#"):
comment = "{0}\n".format(line)
else:
continue
while True:
line = next(default_lines)
if line and not line.startswith("#"):
break
comment += "{0}\n".format(line)
key = line.split(':')[0].strip()
comment_map[key] = comment
out_lines = iter(data.splitlines())
out_data = ""
for line in out_lines:
key = line.split(':')[0].strip()
if key in comment_map:
out_data += comment_map[key]
out_data += "{0}\n".format(line)
return out_data | Scan default_data for comments (we include empty lines in our
definition of comments) and place them before the same keys in data.
Only works with comments that are on one or more own lines, i.e.
not next to a yaml mapping. | Below is the the instruction that describes the task:
### Input:
Scan default_data for comments (we include empty lines in our
definition of comments) and place them before the same keys in data.
Only works with comments that are on one or more own lines, i.e.
not next to a yaml mapping.
### Response:
def restore_yaml_comments(data, default_data):
"""Scan default_data for comments (we include empty lines in our
definition of comments) and place them before the same keys in data.
Only works with comments that are on one or more own lines, i.e.
not next to a yaml mapping.
"""
comment_map = dict()
default_lines = iter(default_data.splitlines())
for line in default_lines:
if not line:
comment = "\n"
elif line.startswith("#"):
comment = "{0}\n".format(line)
else:
continue
while True:
line = next(default_lines)
if line and not line.startswith("#"):
break
comment += "{0}\n".format(line)
key = line.split(':')[0].strip()
comment_map[key] = comment
out_lines = iter(data.splitlines())
out_data = ""
for line in out_lines:
key = line.split(':')[0].strip()
if key in comment_map:
out_data += comment_map[key]
out_data += "{0}\n".format(line)
return out_data |
def with_units(self, val, ua, ub):
"""Return value with unit.
args:
val (mixed): result
ua (str): 1st unit
ub (str): 2nd unit
raises:
SyntaxError
returns:
str
"""
if not val:
return str(val)
if ua or ub:
if ua and ub:
if ua == ub:
return str(val) + ua
else:
# Nodejs version does not seem to mind mismatched
# units within expressions. So we choose the first
# as they do
# raise SyntaxError("Error in expression %s != %s" % (ua, ub))
return str(val) + ua
elif ua:
return str(val) + ua
elif ub:
return str(val) + ub
return repr(val) | Return value with unit.
args:
val (mixed): result
ua (str): 1st unit
ub (str): 2nd unit
raises:
SyntaxError
returns:
str | Below is the the instruction that describes the task:
### Input:
Return value with unit.
args:
val (mixed): result
ua (str): 1st unit
ub (str): 2nd unit
raises:
SyntaxError
returns:
str
### Response:
def with_units(self, val, ua, ub):
"""Return value with unit.
args:
val (mixed): result
ua (str): 1st unit
ub (str): 2nd unit
raises:
SyntaxError
returns:
str
"""
if not val:
return str(val)
if ua or ub:
if ua and ub:
if ua == ub:
return str(val) + ua
else:
# Nodejs version does not seem to mind mismatched
# units within expressions. So we choose the first
# as they do
# raise SyntaxError("Error in expression %s != %s" % (ua, ub))
return str(val) + ua
elif ua:
return str(val) + ua
elif ub:
return str(val) + ub
return repr(val) |
def _footer_start_thread(self, text, time):
"""Display given text in the footer. Clears after <time> seconds
"""
footerwid = urwid.AttrMap(urwid.Text(text), 'footer')
self.top.footer = footerwid
load_thread = Thread(target=self._loading_thread, args=(time,))
load_thread.daemon = True
load_thread.start() | Display given text in the footer. Clears after <time> seconds | Below is the the instruction that describes the task:
### Input:
Display given text in the footer. Clears after <time> seconds
### Response:
def _footer_start_thread(self, text, time):
"""Display given text in the footer. Clears after <time> seconds
"""
footerwid = urwid.AttrMap(urwid.Text(text), 'footer')
self.top.footer = footerwid
load_thread = Thread(target=self._loading_thread, args=(time,))
load_thread.daemon = True
load_thread.start() |
def _prepare_request(reddit_session, url, params, data, auth, files,
method=None):
"""Return a requests Request object that can be "prepared"."""
# Requests using OAuth for authorization must switch to using the oauth
# domain.
if getattr(reddit_session, '_use_oauth', False):
bearer = 'bearer {0}'.format(reddit_session.access_token)
headers = {'Authorization': bearer}
config = reddit_session.config
for prefix in (config.api_url, config.permalink_url):
if url.startswith(prefix):
if config.log_requests >= 1:
msg = 'substituting {0} for {1} in url\n'.format(
config.oauth_url, prefix)
sys.stderr.write(msg)
url = config.oauth_url + url[len(prefix):]
break
else:
headers = {}
headers.update(reddit_session.http.headers)
if method:
pass
elif data or files:
method = 'POST'
else:
method = 'GET'
# Log the request if logging is enabled
if reddit_session.config.log_requests >= 1:
sys.stderr.write('{0}: {1}\n'.format(method, url))
if reddit_session.config.log_requests >= 2:
if params:
sys.stderr.write('params: {0}\n'.format(params))
if data:
sys.stderr.write('data: {0}\n'.format(data))
if auth:
sys.stderr.write('auth: {0}\n'.format(auth))
# Prepare request
request = Request(method=method, url=url, headers=headers, params=params,
auth=auth, cookies=reddit_session.http.cookies)
if method == 'GET':
return request
# Most POST requests require adding `api_type` and `uh` to the data.
if data is True:
data = {}
if isinstance(data, dict):
if not auth:
data.setdefault('api_type', 'json')
if reddit_session.modhash:
data.setdefault('uh', reddit_session.modhash)
else:
request.headers.setdefault('Content-Type', 'application/json')
request.data = data
request.files = files
return request | Return a requests Request object that can be "prepared". | Below is the the instruction that describes the task:
### Input:
Return a requests Request object that can be "prepared".
### Response:
def _prepare_request(reddit_session, url, params, data, auth, files,
method=None):
"""Return a requests Request object that can be "prepared"."""
# Requests using OAuth for authorization must switch to using the oauth
# domain.
if getattr(reddit_session, '_use_oauth', False):
bearer = 'bearer {0}'.format(reddit_session.access_token)
headers = {'Authorization': bearer}
config = reddit_session.config
for prefix in (config.api_url, config.permalink_url):
if url.startswith(prefix):
if config.log_requests >= 1:
msg = 'substituting {0} for {1} in url\n'.format(
config.oauth_url, prefix)
sys.stderr.write(msg)
url = config.oauth_url + url[len(prefix):]
break
else:
headers = {}
headers.update(reddit_session.http.headers)
if method:
pass
elif data or files:
method = 'POST'
else:
method = 'GET'
# Log the request if logging is enabled
if reddit_session.config.log_requests >= 1:
sys.stderr.write('{0}: {1}\n'.format(method, url))
if reddit_session.config.log_requests >= 2:
if params:
sys.stderr.write('params: {0}\n'.format(params))
if data:
sys.stderr.write('data: {0}\n'.format(data))
if auth:
sys.stderr.write('auth: {0}\n'.format(auth))
# Prepare request
request = Request(method=method, url=url, headers=headers, params=params,
auth=auth, cookies=reddit_session.http.cookies)
if method == 'GET':
return request
# Most POST requests require adding `api_type` and `uh` to the data.
if data is True:
data = {}
if isinstance(data, dict):
if not auth:
data.setdefault('api_type', 'json')
if reddit_session.modhash:
data.setdefault('uh', reddit_session.modhash)
else:
request.headers.setdefault('Content-Type', 'application/json')
request.data = data
request.files = files
return request |
def djfrontend_fontawesome(version=None):
"""
Returns Font Awesome CSS file.
TEMPLATE_DEBUG returns full file, otherwise returns minified file.
"""
if version is None:
version = getattr(settings, 'DJFRONTEND_FONTAWESOME', DJFRONTEND_FONTAWESOME_DEFAULT)
return format_html(
'<link rel="stylesheet" href="{0}djfrontend/css/fontawesome/{1}/font-awesome{2}.css">',
_static_url, version, _min) | Returns Font Awesome CSS file.
TEMPLATE_DEBUG returns full file, otherwise returns minified file. | Below is the the instruction that describes the task:
### Input:
Returns Font Awesome CSS file.
TEMPLATE_DEBUG returns full file, otherwise returns minified file.
### Response:
def djfrontend_fontawesome(version=None):
"""
Returns Font Awesome CSS file.
TEMPLATE_DEBUG returns full file, otherwise returns minified file.
"""
if version is None:
version = getattr(settings, 'DJFRONTEND_FONTAWESOME', DJFRONTEND_FONTAWESOME_DEFAULT)
return format_html(
'<link rel="stylesheet" href="{0}djfrontend/css/fontawesome/{1}/font-awesome{2}.css">',
_static_url, version, _min) |
def filter(self, value=None, model=None, context=None):
"""
Sequentially applies all the filters to provided value
:param value: a value to filter
:param model: parent entity
:param context: filtering context, usually parent entity
:return: filtered value
"""
if value is None:
return value
for filter_obj in self.filters:
value = filter_obj.filter(
value=value,
model=model,
context=context if self.use_context else None
)
return value | Sequentially applies all the filters to provided value
:param value: a value to filter
:param model: parent entity
:param context: filtering context, usually parent entity
:return: filtered value | Below is the the instruction that describes the task:
### Input:
Sequentially applies all the filters to provided value
:param value: a value to filter
:param model: parent entity
:param context: filtering context, usually parent entity
:return: filtered value
### Response:
def filter(self, value=None, model=None, context=None):
"""
Sequentially applies all the filters to provided value
:param value: a value to filter
:param model: parent entity
:param context: filtering context, usually parent entity
:return: filtered value
"""
if value is None:
return value
for filter_obj in self.filters:
value = filter_obj.filter(
value=value,
model=model,
context=context if self.use_context else None
)
return value |
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.HashStart = reader.ReadSerializableArray('neocore.UInt256.UInt256')
self.HashStop = reader.ReadUInt256() | Deserialize full object.
Args:
reader (neo.IO.BinaryReader): | Below is the the instruction that describes the task:
### Input:
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
### Response:
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.HashStart = reader.ReadSerializableArray('neocore.UInt256.UInt256')
self.HashStop = reader.ReadUInt256() |
def rm(package, force=False):
"""
Remove a package (all instances) from the local store.
"""
team, owner, pkg = parse_package(package)
if not force:
confirmed = input("Remove {0}? (y/n) ".format(package))
if confirmed.lower() != 'y':
return
store = PackageStore()
deleted = store.remove_package(team, owner, pkg)
for obj in deleted:
print("Removed: {0}".format(obj)) | Remove a package (all instances) from the local store. | Below is the the instruction that describes the task:
### Input:
Remove a package (all instances) from the local store.
### Response:
def rm(package, force=False):
"""
Remove a package (all instances) from the local store.
"""
team, owner, pkg = parse_package(package)
if not force:
confirmed = input("Remove {0}? (y/n) ".format(package))
if confirmed.lower() != 'y':
return
store = PackageStore()
deleted = store.remove_package(team, owner, pkg)
for obj in deleted:
print("Removed: {0}".format(obj)) |
def get_all_params(self, session=None):
"""Return the parameters in a list of array."""
_params = []
for p in self.all_params:
if session is None:
_params.append(p.eval())
else:
_params.append(session.run(p))
return _params | Return the parameters in a list of array. | Below is the the instruction that describes the task:
### Input:
Return the parameters in a list of array.
### Response:
def get_all_params(self, session=None):
"""Return the parameters in a list of array."""
_params = []
for p in self.all_params:
if session is None:
_params.append(p.eval())
else:
_params.append(session.run(p))
return _params |
def next(self):
"""
Implementation of next method from Iterator.
:return: Result
:raises: StopIteration if IndexError occurs.
"""
try:
result = self.data[self.index]
except IndexError:
self.index = 0
raise StopIteration
self.index += 1
return result | Implementation of next method from Iterator.
:return: Result
:raises: StopIteration if IndexError occurs. | Below is the the instruction that describes the task:
### Input:
Implementation of next method from Iterator.
:return: Result
:raises: StopIteration if IndexError occurs.
### Response:
def next(self):
"""
Implementation of next method from Iterator.
:return: Result
:raises: StopIteration if IndexError occurs.
"""
try:
result = self.data[self.index]
except IndexError:
self.index = 0
raise StopIteration
self.index += 1
return result |
def add_chart(self, chart_type, x, y, cx, cy, chart_data):
"""Add a new chart of *chart_type* to the slide.
The chart is positioned at (*x*, *y*), has size (*cx*, *cy*), and
depicts *chart_data*. *chart_type* is one of the :ref:`XlChartType`
enumeration values. *chart_data* is a |ChartData| object populated
with the categories and series values for the chart.
Note that a |GraphicFrame| shape object is returned, not the |Chart|
object contained in that graphic frame shape. The chart object may be
accessed using the :attr:`chart` property of the returned
|GraphicFrame| object.
"""
rId = self.part.add_chart_part(chart_type, chart_data)
graphicFrame = self._add_chart_graphicFrame(rId, x, y, cx, cy)
self._recalculate_extents()
return self._shape_factory(graphicFrame) | Add a new chart of *chart_type* to the slide.
The chart is positioned at (*x*, *y*), has size (*cx*, *cy*), and
depicts *chart_data*. *chart_type* is one of the :ref:`XlChartType`
enumeration values. *chart_data* is a |ChartData| object populated
with the categories and series values for the chart.
Note that a |GraphicFrame| shape object is returned, not the |Chart|
object contained in that graphic frame shape. The chart object may be
accessed using the :attr:`chart` property of the returned
|GraphicFrame| object. | Below is the the instruction that describes the task:
### Input:
Add a new chart of *chart_type* to the slide.
The chart is positioned at (*x*, *y*), has size (*cx*, *cy*), and
depicts *chart_data*. *chart_type* is one of the :ref:`XlChartType`
enumeration values. *chart_data* is a |ChartData| object populated
with the categories and series values for the chart.
Note that a |GraphicFrame| shape object is returned, not the |Chart|
object contained in that graphic frame shape. The chart object may be
accessed using the :attr:`chart` property of the returned
|GraphicFrame| object.
### Response:
def add_chart(self, chart_type, x, y, cx, cy, chart_data):
"""Add a new chart of *chart_type* to the slide.
The chart is positioned at (*x*, *y*), has size (*cx*, *cy*), and
depicts *chart_data*. *chart_type* is one of the :ref:`XlChartType`
enumeration values. *chart_data* is a |ChartData| object populated
with the categories and series values for the chart.
Note that a |GraphicFrame| shape object is returned, not the |Chart|
object contained in that graphic frame shape. The chart object may be
accessed using the :attr:`chart` property of the returned
|GraphicFrame| object.
"""
rId = self.part.add_chart_part(chart_type, chart_data)
graphicFrame = self._add_chart_graphicFrame(rId, x, y, cx, cy)
self._recalculate_extents()
return self._shape_factory(graphicFrame) |
def check_validity(self, checks=None, report=True):
""" Runs a Symbol's validity checks.
Parameters
----------
checks : str, [str,], optional
Only run certain checks.
report : bool, optional
If set to False, the method will return only the result of the
check checks (True/False). Set to True, to have a
SymbolReport returned as well.
Returns
-------
Bool, or a Tuple of the form (Bool, SymbolReport)
"""
if report:
reportpoints = []
allchecks = []
checks_specified=False
if isinstance(checks, (str, unicode)):
checks = [checks]
checks_specified = True
elif isinstance(checks, (list, tuple)):
checks_specified = True
else:
checks = []
for val in self.validity:
if (val.validator in checks) or (not checks_specified):
ValCheck = validitychecks[val.validator]
anum = ValCheck.__init__.func_code.co_argcount - 2
args = []
for arg in SymbolValidity.argnames:
args.append(getattr(val, arg))
valid = ValCheck(self.datatable_df, *args[:anum])
res = valid.result
allchecks.append(res)
rp = ReportPoint('validation', val.validator, res, str(args[:anum]))
reportpoints.append(rp)
if report:
return all(allchecks), reportpoints
else:
return all(allchecks) | Runs a Symbol's validity checks.
Parameters
----------
checks : str, [str,], optional
Only run certain checks.
report : bool, optional
If set to False, the method will return only the result of the
check checks (True/False). Set to True, to have a
SymbolReport returned as well.
Returns
-------
Bool, or a Tuple of the form (Bool, SymbolReport) | Below is the the instruction that describes the task:
### Input:
Runs a Symbol's validity checks.
Parameters
----------
checks : str, [str,], optional
Only run certain checks.
report : bool, optional
If set to False, the method will return only the result of the
check checks (True/False). Set to True, to have a
SymbolReport returned as well.
Returns
-------
Bool, or a Tuple of the form (Bool, SymbolReport)
### Response:
def check_validity(self, checks=None, report=True):
""" Runs a Symbol's validity checks.
Parameters
----------
checks : str, [str,], optional
Only run certain checks.
report : bool, optional
If set to False, the method will return only the result of the
check checks (True/False). Set to True, to have a
SymbolReport returned as well.
Returns
-------
Bool, or a Tuple of the form (Bool, SymbolReport)
"""
if report:
reportpoints = []
allchecks = []
checks_specified=False
if isinstance(checks, (str, unicode)):
checks = [checks]
checks_specified = True
elif isinstance(checks, (list, tuple)):
checks_specified = True
else:
checks = []
for val in self.validity:
if (val.validator in checks) or (not checks_specified):
ValCheck = validitychecks[val.validator]
anum = ValCheck.__init__.func_code.co_argcount - 2
args = []
for arg in SymbolValidity.argnames:
args.append(getattr(val, arg))
valid = ValCheck(self.datatable_df, *args[:anum])
res = valid.result
allchecks.append(res)
rp = ReportPoint('validation', val.validator, res, str(args[:anum]))
reportpoints.append(rp)
if report:
return all(allchecks), reportpoints
else:
return all(allchecks) |
def get_context_arguments(self):
"""Return a dictionary containing the current context arguments."""
cargs = {}
for context in self.__context_stack:
cargs.update(context.context_arguments)
return cargs | Return a dictionary containing the current context arguments. | Below is the the instruction that describes the task:
### Input:
Return a dictionary containing the current context arguments.
### Response:
def get_context_arguments(self):
"""Return a dictionary containing the current context arguments."""
cargs = {}
for context in self.__context_stack:
cargs.update(context.context_arguments)
return cargs |
def _paint_icon(self, iconic, painter, rect, mode, state, options):
"""Paint a single icon."""
painter.save()
color = options['color']
char = options['char']
color_options = {
QIcon.On: {
QIcon.Normal: (options['color_on'], options['on']),
QIcon.Disabled: (options['color_on_disabled'],
options['on_disabled']),
QIcon.Active: (options['color_on_active'],
options['on_active']),
QIcon.Selected: (options['color_on_selected'],
options['on_selected'])
},
QIcon.Off: {
QIcon.Normal: (options['color_off'], options['off']),
QIcon.Disabled: (options['color_off_disabled'],
options['off_disabled']),
QIcon.Active: (options['color_off_active'],
options['off_active']),
QIcon.Selected: (options['color_off_selected'],
options['off_selected'])
}
}
color, char = color_options[state][mode]
painter.setPen(QColor(color))
# A 16 pixel-high icon yields a font size of 14, which is pixel perfect
# for font-awesome. 16 * 0.875 = 14
# The reason why the glyph size is smaller than the icon size is to
# account for font bearing.
draw_size = 0.875 * round(rect.height() * options['scale_factor'])
prefix = options['prefix']
# Animation setup hook
animation = options.get('animation')
if animation is not None:
animation.setup(self, painter, rect)
painter.setFont(iconic.font(prefix, draw_size))
if 'offset' in options:
rect = QRect(rect)
rect.translate(options['offset'][0] * rect.width(),
options['offset'][1] * rect.height())
painter.setOpacity(options.get('opacity', 1.0))
painter.drawText(rect, Qt.AlignCenter | Qt.AlignVCenter, char)
painter.restore() | Paint a single icon. | Below is the the instruction that describes the task:
### Input:
Paint a single icon.
### Response:
def _paint_icon(self, iconic, painter, rect, mode, state, options):
"""Paint a single icon."""
painter.save()
color = options['color']
char = options['char']
color_options = {
QIcon.On: {
QIcon.Normal: (options['color_on'], options['on']),
QIcon.Disabled: (options['color_on_disabled'],
options['on_disabled']),
QIcon.Active: (options['color_on_active'],
options['on_active']),
QIcon.Selected: (options['color_on_selected'],
options['on_selected'])
},
QIcon.Off: {
QIcon.Normal: (options['color_off'], options['off']),
QIcon.Disabled: (options['color_off_disabled'],
options['off_disabled']),
QIcon.Active: (options['color_off_active'],
options['off_active']),
QIcon.Selected: (options['color_off_selected'],
options['off_selected'])
}
}
color, char = color_options[state][mode]
painter.setPen(QColor(color))
# A 16 pixel-high icon yields a font size of 14, which is pixel perfect
# for font-awesome. 16 * 0.875 = 14
# The reason why the glyph size is smaller than the icon size is to
# account for font bearing.
draw_size = 0.875 * round(rect.height() * options['scale_factor'])
prefix = options['prefix']
# Animation setup hook
animation = options.get('animation')
if animation is not None:
animation.setup(self, painter, rect)
painter.setFont(iconic.font(prefix, draw_size))
if 'offset' in options:
rect = QRect(rect)
rect.translate(options['offset'][0] * rect.width(),
options['offset'][1] * rect.height())
painter.setOpacity(options.get('opacity', 1.0))
painter.drawText(rect, Qt.AlignCenter | Qt.AlignVCenter, char)
painter.restore() |
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output) | Strips accents from a piece of text. | Below is the the instruction that describes the task:
### Input:
Strips accents from a piece of text.
### Response:
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output) |
def date_totals(entries, by):
"""Yield a user's name and a dictionary of their hours"""
date_dict = {}
for date, date_entries in groupby(entries, lambda x: x['date']):
if isinstance(date, datetime.datetime):
date = date.date()
d_entries = list(date_entries)
if by == 'user':
name = ' '.join((d_entries[0]['user__first_name'],
d_entries[0]['user__last_name']))
elif by == 'project':
name = d_entries[0]['project__name']
else:
name = d_entries[0][by]
pk = d_entries[0][by]
hours = get_hours_summary(d_entries)
date_dict[date] = hours
return name, pk, date_dict | Yield a user's name and a dictionary of their hours | Below is the the instruction that describes the task:
### Input:
Yield a user's name and a dictionary of their hours
### Response:
def date_totals(entries, by):
"""Yield a user's name and a dictionary of their hours"""
date_dict = {}
for date, date_entries in groupby(entries, lambda x: x['date']):
if isinstance(date, datetime.datetime):
date = date.date()
d_entries = list(date_entries)
if by == 'user':
name = ' '.join((d_entries[0]['user__first_name'],
d_entries[0]['user__last_name']))
elif by == 'project':
name = d_entries[0]['project__name']
else:
name = d_entries[0][by]
pk = d_entries[0][by]
hours = get_hours_summary(d_entries)
date_dict[date] = hours
return name, pk, date_dict |
def log_future_exceptions(logger, f, ignore=()):
"""Log any exceptions set to a future
Parameters
----------
logger : logging.Logger instance
logger.exception(...) is called if the future resolves with an exception
f : Future object
Future to be monitored for exceptions
ignore : Exception or tuple of Exception
Exptected exception(s) to ignore, i.e. they will not be logged.
Notes
-----
This is useful when an async task is started for its side effects without waiting for
the result. The problem is that if the future's resolution is not checked for
exceptions, unhandled exceptions in the async task will be silently ignored.
"""
def log_cb(f):
try:
f.result()
except ignore:
pass
except Exception:
logger.exception('Unhandled exception returned by future')
f.add_done_callback(log_cb) | Log any exceptions set to a future
Parameters
----------
logger : logging.Logger instance
logger.exception(...) is called if the future resolves with an exception
f : Future object
Future to be monitored for exceptions
ignore : Exception or tuple of Exception
Exptected exception(s) to ignore, i.e. they will not be logged.
Notes
-----
This is useful when an async task is started for its side effects without waiting for
the result. The problem is that if the future's resolution is not checked for
exceptions, unhandled exceptions in the async task will be silently ignored. | Below is the the instruction that describes the task:
### Input:
Log any exceptions set to a future
Parameters
----------
logger : logging.Logger instance
logger.exception(...) is called if the future resolves with an exception
f : Future object
Future to be monitored for exceptions
ignore : Exception or tuple of Exception
Exptected exception(s) to ignore, i.e. they will not be logged.
Notes
-----
This is useful when an async task is started for its side effects without waiting for
the result. The problem is that if the future's resolution is not checked for
exceptions, unhandled exceptions in the async task will be silently ignored.
### Response:
def log_future_exceptions(logger, f, ignore=()):
"""Log any exceptions set to a future
Parameters
----------
logger : logging.Logger instance
logger.exception(...) is called if the future resolves with an exception
f : Future object
Future to be monitored for exceptions
ignore : Exception or tuple of Exception
Exptected exception(s) to ignore, i.e. they will not be logged.
Notes
-----
This is useful when an async task is started for its side effects without waiting for
the result. The problem is that if the future's resolution is not checked for
exceptions, unhandled exceptions in the async task will be silently ignored.
"""
def log_cb(f):
try:
f.result()
except ignore:
pass
except Exception:
logger.exception('Unhandled exception returned by future')
f.add_done_callback(log_cb) |
def space_row(left, right, filler=' ', total_width=-1):
"""space the data in a row with optional filling
Arguments
---------
left : str, to be aligned left
right : str, to be aligned right
filler : str, default ' '.
must be of length 1
total_width : int, width of line.
if negative number is specified,
then that number of spaces is used between the left and right text
Returns
-------
str
"""
left = str(left)
right = str(right)
filler = str(filler)[:1]
if total_width < 0:
spacing = - total_width
else:
spacing = total_width - len(left) - len(right)
return left + filler * spacing + right | space the data in a row with optional filling
Arguments
---------
left : str, to be aligned left
right : str, to be aligned right
filler : str, default ' '.
must be of length 1
total_width : int, width of line.
if negative number is specified,
then that number of spaces is used between the left and right text
Returns
-------
str | Below is the the instruction that describes the task:
### Input:
space the data in a row with optional filling
Arguments
---------
left : str, to be aligned left
right : str, to be aligned right
filler : str, default ' '.
must be of length 1
total_width : int, width of line.
if negative number is specified,
then that number of spaces is used between the left and right text
Returns
-------
str
### Response:
def space_row(left, right, filler=' ', total_width=-1):
"""space the data in a row with optional filling
Arguments
---------
left : str, to be aligned left
right : str, to be aligned right
filler : str, default ' '.
must be of length 1
total_width : int, width of line.
if negative number is specified,
then that number of spaces is used between the left and right text
Returns
-------
str
"""
left = str(left)
right = str(right)
filler = str(filler)[:1]
if total_width < 0:
spacing = - total_width
else:
spacing = total_width - len(left) - len(right)
return left + filler * spacing + right |
def coerce(self, value):
"""
Coerces value to location hash.
"""
return {
'lat': float(value.get('lat', value.get('latitude'))),
'lon': float(value.get('lon', value.get('longitude')))
} | Coerces value to location hash. | Below is the the instruction that describes the task:
### Input:
Coerces value to location hash.
### Response:
def coerce(self, value):
"""
Coerces value to location hash.
"""
return {
'lat': float(value.get('lat', value.get('latitude'))),
'lon': float(value.get('lon', value.get('longitude')))
} |
def run_through(script, ensemble, roles=1, strict=False):
"""
:py:class:`turberfield.dialogue.model.SceneScript`.
"""
with script as dialogue:
selection = dialogue.select(ensemble, roles=roles)
if not any(selection.values()) or strict and not all(selection.values()):
return
try:
model = dialogue.cast(selection).run()
except (AttributeError, ValueError) as e:
log = logging.getLogger("turberfield.dialogue.player.run_through")
log.warning(". ".join(getattr(e, "args", e) or e))
return
else:
yield from model | :py:class:`turberfield.dialogue.model.SceneScript`. | Below is the the instruction that describes the task:
### Input:
:py:class:`turberfield.dialogue.model.SceneScript`.
### Response:
def run_through(script, ensemble, roles=1, strict=False):
"""
:py:class:`turberfield.dialogue.model.SceneScript`.
"""
with script as dialogue:
selection = dialogue.select(ensemble, roles=roles)
if not any(selection.values()) or strict and not all(selection.values()):
return
try:
model = dialogue.cast(selection).run()
except (AttributeError, ValueError) as e:
log = logging.getLogger("turberfield.dialogue.player.run_through")
log.warning(". ".join(getattr(e, "args", e) or e))
return
else:
yield from model |
def _GetModuleCodeObjects(module):
"""Gets all code objects defined in the specified module.
There are two BFS traversals involved. One in this function and the other in
_FindCodeObjectsReferents. Only the BFS in _FindCodeObjectsReferents has
a depth limit. This function does not. The motivation is that this function
explores code object of the module and they can have any arbitrary nesting
level. _FindCodeObjectsReferents, on the other hand, traverses through class
definitions and random references. It's much more expensive and will likely
go into unrelated objects.
There is also a limit on how many total objects are going to be traversed in
all. This limit makes sure that if something goes wrong, the lookup doesn't
hang.
Args:
module: module to explore.
Returns:
Set of code objects defined in module.
"""
visit_recorder = _VisitRecorder()
current = [module]
code_objects = set()
while current:
current = _FindCodeObjectsReferents(module, current, visit_recorder)
code_objects |= current
# Unfortunately Python code objects don't implement tp_traverse, so this
# type can't be used with gc.get_referents. The workaround is to get the
# relevant objects explicitly here.
current = [code_object.co_consts for code_object in current]
return code_objects | Gets all code objects defined in the specified module.
There are two BFS traversals involved. One in this function and the other in
_FindCodeObjectsReferents. Only the BFS in _FindCodeObjectsReferents has
a depth limit. This function does not. The motivation is that this function
explores code object of the module and they can have any arbitrary nesting
level. _FindCodeObjectsReferents, on the other hand, traverses through class
definitions and random references. It's much more expensive and will likely
go into unrelated objects.
There is also a limit on how many total objects are going to be traversed in
all. This limit makes sure that if something goes wrong, the lookup doesn't
hang.
Args:
module: module to explore.
Returns:
Set of code objects defined in module. | Below is the the instruction that describes the task:
### Input:
Gets all code objects defined in the specified module.
There are two BFS traversals involved. One in this function and the other in
_FindCodeObjectsReferents. Only the BFS in _FindCodeObjectsReferents has
a depth limit. This function does not. The motivation is that this function
explores code object of the module and they can have any arbitrary nesting
level. _FindCodeObjectsReferents, on the other hand, traverses through class
definitions and random references. It's much more expensive and will likely
go into unrelated objects.
There is also a limit on how many total objects are going to be traversed in
all. This limit makes sure that if something goes wrong, the lookup doesn't
hang.
Args:
module: module to explore.
Returns:
Set of code objects defined in module.
### Response:
def _GetModuleCodeObjects(module):
"""Gets all code objects defined in the specified module.
There are two BFS traversals involved. One in this function and the other in
_FindCodeObjectsReferents. Only the BFS in _FindCodeObjectsReferents has
a depth limit. This function does not. The motivation is that this function
explores code object of the module and they can have any arbitrary nesting
level. _FindCodeObjectsReferents, on the other hand, traverses through class
definitions and random references. It's much more expensive and will likely
go into unrelated objects.
There is also a limit on how many total objects are going to be traversed in
all. This limit makes sure that if something goes wrong, the lookup doesn't
hang.
Args:
module: module to explore.
Returns:
Set of code objects defined in module.
"""
visit_recorder = _VisitRecorder()
current = [module]
code_objects = set()
while current:
current = _FindCodeObjectsReferents(module, current, visit_recorder)
code_objects |= current
# Unfortunately Python code objects don't implement tp_traverse, so this
# type can't be used with gc.get_referents. The workaround is to get the
# relevant objects explicitly here.
current = [code_object.co_consts for code_object in current]
return code_objects |
def get_recently_played_games(self, steamID, count=0, format=None):
"""Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf) | Below is the instruction that describes the task:
### Input:
Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf)
### Response:
def get_recently_played_games(self, steamID, count=0, format=None):
"""Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) |
def getreferingobjs(self, iddgroups=None, fields=None):
"""Get a list of objects that refer to this object"""
return getreferingobjs(self, iddgroups=iddgroups, fields=fields) | Get a list of objects that refer to this object | Below is the instruction that describes the task:
### Input:
Get a list of objects that refer to this object
### Response:
def getreferingobjs(self, iddgroups=None, fields=None):
"""Get a list of objects that refer to this object"""
return getreferingobjs(self, iddgroups=iddgroups, fields=fields) |
def daily_at(cls, at, target):
"""
Schedule a command to run at a specific time each day.
"""
daily = datetime.timedelta(days=1)
# convert when to the next datetime matching this time
when = datetime.datetime.combine(datetime.date.today(), at)
if when < now():
when += daily
return cls.at_time(cls._localize(when), daily, target) | Schedule a command to run at a specific time each day. | Below is the instruction that describes the task:
### Input:
Schedule a command to run at a specific time each day.
### Response:
def daily_at(cls, at, target):
"""
Schedule a command to run at a specific time each day.
"""
daily = datetime.timedelta(days=1)
# convert when to the next datetime matching this time
when = datetime.datetime.combine(datetime.date.today(), at)
if when < now():
when += daily
return cls.at_time(cls._localize(when), daily, target) |
def _create_clock(self):
"""
If the clock property is not set, then create one based on frequency.
"""
trading_o_and_c = self.trading_calendar.schedule.ix[
self.sim_params.sessions]
market_closes = trading_o_and_c['market_close']
minutely_emission = False
if self.sim_params.data_frequency == 'minute':
market_opens = trading_o_and_c['market_open']
minutely_emission = self.sim_params.emission_rate == "minute"
# The calendar's execution times are the minutes over which we
# actually want to run the clock. Typically the execution times
# simply adhere to the market open and close times. In the case of
# the futures calendar, for example, we only want to simulate over
# a subset of the full 24 hour calendar, so the execution times
# dictate a market open time of 6:31am US/Eastern and a close of
# 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
else:
# in daily mode, we want to have one bar per session, timestamped
# as the last minute of the session.
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
execution_opens = execution_closes
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions,
time(8, 45),
"US/Eastern"
)
return MinuteSimulationClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
) | If the clock property is not set, then create one based on frequency. | Below is the instruction that describes the task:
### Input:
If the clock property is not set, then create one based on frequency.
### Response:
def _create_clock(self):
"""
If the clock property is not set, then create one based on frequency.
"""
trading_o_and_c = self.trading_calendar.schedule.ix[
self.sim_params.sessions]
market_closes = trading_o_and_c['market_close']
minutely_emission = False
if self.sim_params.data_frequency == 'minute':
market_opens = trading_o_and_c['market_open']
minutely_emission = self.sim_params.emission_rate == "minute"
# The calendar's execution times are the minutes over which we
# actually want to run the clock. Typically the execution times
# simply adhere to the market open and close times. In the case of
# the futures calendar, for example, we only want to simulate over
# a subset of the full 24 hour calendar, so the execution times
# dictate a market open time of 6:31am US/Eastern and a close of
# 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
else:
# in daily mode, we want to have one bar per session, timestamped
# as the last minute of the session.
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
execution_opens = execution_closes
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions,
time(8, 45),
"US/Eastern"
)
return MinuteSimulationClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
) |
def write_module_file(name, path, package):
'''Creates an RST file for the module name passed in. It places it in the
path defined
'''
file_path = join(path, '%s.rst' % name)
mod_file = open(file_path, 'w')
mod_file.write('%s\n' % AUTOGEN)
mod_file.write('%s\n' % name.title())
mod_file.write('=' * len(name))
mod_file.write('\n\n')
mod_file.write('.. toctree::\n')
mod_file.write(' :maxdepth: 1\n\n')
mod_file.write('.. automodule:: %s.%s\n' % (package, name))
mod_file.write(' :members:\n')
mod_file.write(' :undoc-members:\n')
mod_file.write(' :show-inheritance:\n') | Creates an RST file for the module name passed in. It places it in the
path defined | Below is the instruction that describes the task:
### Input:
Creates an RST file for the module name passed in. It places it in the
path defined
### Response:
def write_module_file(name, path, package):
'''Creates an RST file for the module name passed in. It places it in the
path defined
'''
file_path = join(path, '%s.rst' % name)
mod_file = open(file_path, 'w')
mod_file.write('%s\n' % AUTOGEN)
mod_file.write('%s\n' % name.title())
mod_file.write('=' * len(name))
mod_file.write('\n\n')
mod_file.write('.. toctree::\n')
mod_file.write(' :maxdepth: 1\n\n')
mod_file.write('.. automodule:: %s.%s\n' % (package, name))
mod_file.write(' :members:\n')
mod_file.write(' :undoc-members:\n')
mod_file.write(' :show-inheritance:\n') |
def _draw_chars(self, data, to_draw):
"""
Draw the specified charachters using the specified format.
"""
i = 0
while not self._cursor.atBlockEnd() and i < len(to_draw) and len(to_draw) > 1:
self._cursor.deleteChar()
i += 1
self._cursor.insertText(to_draw, data.fmt) | Draw the specified charachters using the specified format. | Below is the instruction that describes the task:
### Input:
Draw the specified charachters using the specified format.
### Response:
def _draw_chars(self, data, to_draw):
"""
Draw the specified charachters using the specified format.
"""
i = 0
while not self._cursor.atBlockEnd() and i < len(to_draw) and len(to_draw) > 1:
self._cursor.deleteChar()
i += 1
self._cursor.insertText(to_draw, data.fmt) |
def streams(self):
"""Property providing access to the :class:`.StreamsAPI`"""
if self._streams_api is None:
self._streams_api = self.get_streams_api()
return self._streams_api | Property providing access to the :class:`.StreamsAPI` | Below is the instruction that describes the task:
### Input:
Property providing access to the :class:`.StreamsAPI`
### Response:
def streams(self):
"""Property providing access to the :class:`.StreamsAPI`"""
if self._streams_api is None:
self._streams_api = self.get_streams_api()
return self._streams_api |
def getFasta(opened_file, sequence_name):
"""
Retrieves a sequence from an opened multifasta file
:param opened_file: an opened multifasta file eg. opened_file=open("/path/to/file.fa",'r+')
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
returns: a string with the sequence of interest
"""
lines = opened_file.readlines()
seq=str("")
for i in range(0, len(lines)):
line = lines[i]
if line[0] == ">":
fChr=line.split(" ")[0].split("\n")[0]
fChr=fChr[1:]
if fChr == sequence_name:
s=i
code=['N','A','C','T','G']
firstbase=lines[s+1][0]
while firstbase in code:
s=s + 1
seq=seq+lines[s]
firstbase=lines[s+1][0]
if len(seq)==0:
seq=None
else:
seq=seq.split("\n")
seq="".join(seq)
return seq | Retrieves a sequence from an opened multifasta file
:param opened_file: an opened multifasta file eg. opened_file=open("/path/to/file.fa",'r+')
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
returns: a string with the sequence of interest | Below is the instruction that describes the task:
### Input:
Retrieves a sequence from an opened multifasta file
:param opened_file: an opened multifasta file eg. opened_file=open("/path/to/file.fa",'r+')
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
returns: a string with the sequence of interest
### Response:
def getFasta(opened_file, sequence_name):
"""
Retrieves a sequence from an opened multifasta file
:param opened_file: an opened multifasta file eg. opened_file=open("/path/to/file.fa",'r+')
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
returns: a string with the sequence of interest
"""
lines = opened_file.readlines()
seq=str("")
for i in range(0, len(lines)):
line = lines[i]
if line[0] == ">":
fChr=line.split(" ")[0].split("\n")[0]
fChr=fChr[1:]
if fChr == sequence_name:
s=i
code=['N','A','C','T','G']
firstbase=lines[s+1][0]
while firstbase in code:
s=s + 1
seq=seq+lines[s]
firstbase=lines[s+1][0]
if len(seq)==0:
seq=None
else:
seq=seq.split("\n")
seq="".join(seq)
return seq |
def get_writer_factory_for(self, name, *, format=None):
"""
Returns a callable to build a writer for the provided filename, eventually forcing a format.
:param name: filename
:param format: format
:return: type
"""
return self.get_factory_for(WRITER, name, format=format) | Returns a callable to build a writer for the provided filename, eventually forcing a format.
:param name: filename
:param format: format
:return: type | Below is the instruction that describes the task:
### Input:
Returns a callable to build a writer for the provided filename, eventually forcing a format.
:param name: filename
:param format: format
:return: type
### Response:
def get_writer_factory_for(self, name, *, format=None):
"""
Returns a callable to build a writer for the provided filename, eventually forcing a format.
:param name: filename
:param format: format
:return: type
"""
return self.get_factory_for(WRITER, name, format=format) |
def load(handle):
"""Loads a module from a handle.
Currently this method only works with Tensorflow 2.x and can only load modules
created by calling tensorflow.saved_model.save(). The method works in both
eager and graph modes.
Depending on the type of handle used, the call may involve downloading a
Tensorflow Hub module to a local cache location specified by the
TFHUB_CACHE_DIR environment variable. If a copy of the module is already
present in the TFHUB_CACHE_DIR, the download step is skipped.
Currently, three types of module handles are supported:
1) Smart URL resolvers such as tfhub.dev, e.g.:
https://tfhub.dev/google/nnlm-en-dim128/1.
2) A directory on a file system supported by Tensorflow containing module
files. This may include a local directory (e.g. /usr/local/mymodule) or a
Google Cloud Storage bucket (gs://mymodule).
3) A URL pointing to a TGZ archive of a module, e.g.
https://example.com/mymodule.tar.gz.
Args:
handle: (string) the Module handle to resolve.
Returns:
A trackable object (see tf.saved_model.load() documentation for details).
Raises:
NotImplementedError: If the code is running against incompatible (1.x)
version of TF.
"""
if hasattr(tf_v1.saved_model, "load_v2"):
module_handle = resolve(handle)
if tf_v1.gfile.Exists(native_module.get_module_proto_path(module_handle)):
raise NotImplementedError("TF Hub module '%s' is stored using TF 1.x "
"format. Loading of the module using "
"hub.load() is not supported." % handle)
return tf_v1.saved_model.load_v2(module_handle)
else:
raise NotImplementedError("hub.load() is not implemented for TF < 1.14.x, "
"Current version: %s", tf.__version__) | Loads a module from a handle.
Currently this method only works with Tensorflow 2.x and can only load modules
created by calling tensorflow.saved_model.save(). The method works in both
eager and graph modes.
Depending on the type of handle used, the call may involve downloading a
Tensorflow Hub module to a local cache location specified by the
TFHUB_CACHE_DIR environment variable. If a copy of the module is already
present in the TFHUB_CACHE_DIR, the download step is skipped.
Currently, three types of module handles are supported:
1) Smart URL resolvers such as tfhub.dev, e.g.:
https://tfhub.dev/google/nnlm-en-dim128/1.
2) A directory on a file system supported by Tensorflow containing module
files. This may include a local directory (e.g. /usr/local/mymodule) or a
Google Cloud Storage bucket (gs://mymodule).
3) A URL pointing to a TGZ archive of a module, e.g.
https://example.com/mymodule.tar.gz.
Args:
handle: (string) the Module handle to resolve.
Returns:
A trackable object (see tf.saved_model.load() documentation for details).
Raises:
NotImplementedError: If the code is running against incompatible (1.x)
version of TF. | Below is the instruction that describes the task:
### Input:
Loads a module from a handle.
Currently this method only works with Tensorflow 2.x and can only load modules
created by calling tensorflow.saved_model.save(). The method works in both
eager and graph modes.
Depending on the type of handle used, the call may involve downloading a
Tensorflow Hub module to a local cache location specified by the
TFHUB_CACHE_DIR environment variable. If a copy of the module is already
present in the TFHUB_CACHE_DIR, the download step is skipped.
Currently, three types of module handles are supported:
1) Smart URL resolvers such as tfhub.dev, e.g.:
https://tfhub.dev/google/nnlm-en-dim128/1.
2) A directory on a file system supported by Tensorflow containing module
files. This may include a local directory (e.g. /usr/local/mymodule) or a
Google Cloud Storage bucket (gs://mymodule).
3) A URL pointing to a TGZ archive of a module, e.g.
https://example.com/mymodule.tar.gz.
Args:
handle: (string) the Module handle to resolve.
Returns:
A trackable object (see tf.saved_model.load() documentation for details).
Raises:
NotImplementedError: If the code is running against incompatible (1.x)
version of TF.
### Response:
def load(handle):
"""Loads a module from a handle.
Currently this method only works with Tensorflow 2.x and can only load modules
created by calling tensorflow.saved_model.save(). The method works in both
eager and graph modes.
Depending on the type of handle used, the call may involve downloading a
Tensorflow Hub module to a local cache location specified by the
TFHUB_CACHE_DIR environment variable. If a copy of the module is already
present in the TFHUB_CACHE_DIR, the download step is skipped.
Currently, three types of module handles are supported:
1) Smart URL resolvers such as tfhub.dev, e.g.:
https://tfhub.dev/google/nnlm-en-dim128/1.
2) A directory on a file system supported by Tensorflow containing module
files. This may include a local directory (e.g. /usr/local/mymodule) or a
Google Cloud Storage bucket (gs://mymodule).
3) A URL pointing to a TGZ archive of a module, e.g.
https://example.com/mymodule.tar.gz.
Args:
handle: (string) the Module handle to resolve.
Returns:
A trackable object (see tf.saved_model.load() documentation for details).
Raises:
NotImplementedError: If the code is running against incompatible (1.x)
version of TF.
"""
if hasattr(tf_v1.saved_model, "load_v2"):
module_handle = resolve(handle)
if tf_v1.gfile.Exists(native_module.get_module_proto_path(module_handle)):
raise NotImplementedError("TF Hub module '%s' is stored using TF 1.x "
"format. Loading of the module using "
"hub.load() is not supported." % handle)
return tf_v1.saved_model.load_v2(module_handle)
else:
raise NotImplementedError("hub.load() is not implemented for TF < 1.14.x, "
"Current version: %s", tf.__version__) |
def remove_message(self, message):
"""
Removes a message.
:param message: Message to remove
"""
import time
_logger(self.__class__).log(5, 'removing message %s' % message)
t = time.time()
usd = message.block.userData()
if usd:
try:
usd.messages.remove(message)
except (AttributeError, ValueError):
pass
if message.decoration:
self.editor.decorations.remove(message.decoration)
self._messages.remove(message) | Removes a message.
:param message: Message to remove | Below is the instruction that describes the task:
### Input:
Removes a message.
:param message: Message to remove
### Response:
def remove_message(self, message):
"""
Removes a message.
:param message: Message to remove
"""
import time
_logger(self.__class__).log(5, 'removing message %s' % message)
t = time.time()
usd = message.block.userData()
if usd:
try:
usd.messages.remove(message)
except (AttributeError, ValueError):
pass
if message.decoration:
self.editor.decorations.remove(message.decoration)
self._messages.remove(message) |
def freeze(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names, graph_name='Graph',
conversion_out_dir_path=None, checkpoint_out_path=None, use_padding_same=False):
"""Freeze and shrink the graph based on a Caffe model, the input tensors and the output node names."""
with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name=graph_name,
conversion_out_dir_path=conversion_out_dir_path,
use_padding_same=use_padding_same) as sess:
saver = tf.train.Saver()
with (dummy_context_mgr(checkpoint_out_path) or util.TemporaryDirectory()) as temp_dir_path:
checkpoint_path = checkpoint_out_path or os.path.join(temp_dir_path, 'pose.ckpt')
saver.save(sess, checkpoint_path)
output_node_names = util.output_node_names_string_as_list(output_node_names)
tf_freeze.freeze_from_checkpoint(checkpoint_path, output_file_path, output_node_names) | Freeze and shrink the graph based on a Caffe model, the input tensors and the output node names. | Below is the instruction that describes the task:
### Input:
Freeze and shrink the graph based on a Caffe model, the input tensors and the output node names.
### Response:
def freeze(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names, graph_name='Graph',
conversion_out_dir_path=None, checkpoint_out_path=None, use_padding_same=False):
"""Freeze and shrink the graph based on a Caffe model, the input tensors and the output node names."""
with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name=graph_name,
conversion_out_dir_path=conversion_out_dir_path,
use_padding_same=use_padding_same) as sess:
saver = tf.train.Saver()
with (dummy_context_mgr(checkpoint_out_path) or util.TemporaryDirectory()) as temp_dir_path:
checkpoint_path = checkpoint_out_path or os.path.join(temp_dir_path, 'pose.ckpt')
saver.save(sess, checkpoint_path)
output_node_names = util.output_node_names_string_as_list(output_node_names)
tf_freeze.freeze_from_checkpoint(checkpoint_path, output_file_path, output_node_names) |
def keylist(self):
"""Return a list of names in order by value."""
items = self.enumerations.items()
items.sort(lambda a, b: self.cmp(a[1], b[1]))
# last item has highest value
rslt = [None] * (items[-1][1] + 1)
# map the values
for key, value in items:
rslt[value] = key
# return the result
return rslt | Return a list of names in order by value. | Below is the instruction that describes the task:
### Input:
Return a list of names in order by value.
### Response:
def keylist(self):
"""Return a list of names in order by value."""
items = self.enumerations.items()
items.sort(lambda a, b: self.cmp(a[1], b[1]))
# last item has highest value
rslt = [None] * (items[-1][1] + 1)
# map the values
for key, value in items:
rslt[value] = key
# return the result
return rslt |
def pre_build_check():
"""
Try to verify build tools
"""
if os.environ.get('CASS_DRIVER_NO_PRE_BUILD_CHECK'):
return True
try:
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.dist import Distribution
# base build_ext just to emulate compiler option setup
be = build_ext(Distribution())
be.initialize_options()
be.finalize_options()
# First, make sure we have a Python include directory
have_python_include = any(os.path.isfile(os.path.join(p, 'Python.h')) for p in be.include_dirs)
if not have_python_include:
sys.stderr.write("Did not find 'Python.h' in %s.\n" % (be.include_dirs,))
return False
compiler = new_compiler(compiler=be.compiler)
customize_compiler(compiler)
try:
# We must be able to initialize the compiler if it has that method
if hasattr(compiler, "initialize"):
compiler.initialize()
except:
return False
executables = []
if compiler.compiler_type in ('unix', 'cygwin'):
executables = [compiler.executables[exe][0] for exe in ('compiler_so', 'linker_so')]
elif compiler.compiler_type == 'nt':
executables = [getattr(compiler, exe) for exe in ('cc', 'linker')]
if executables:
from distutils.spawn import find_executable
for exe in executables:
if not find_executable(exe):
sys.stderr.write("Failed to find %s for compiler type %s.\n" % (exe, compiler.compiler_type))
return False
except Exception as exc:
sys.stderr.write('%s\n' % str(exc))
sys.stderr.write("Failed pre-build check. Attempting anyway.\n")
# if we are unable to positively id the compiler type, or one of these assumptions fails,
# just proceed as we would have without the check
return True | Try to verify build tools | Below is the instruction that describes the task:
### Input:
Try to verify build tools
### Response:
def pre_build_check():
"""
Try to verify build tools
"""
if os.environ.get('CASS_DRIVER_NO_PRE_BUILD_CHECK'):
return True
try:
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.dist import Distribution
# base build_ext just to emulate compiler option setup
be = build_ext(Distribution())
be.initialize_options()
be.finalize_options()
# First, make sure we have a Python include directory
have_python_include = any(os.path.isfile(os.path.join(p, 'Python.h')) for p in be.include_dirs)
if not have_python_include:
sys.stderr.write("Did not find 'Python.h' in %s.\n" % (be.include_dirs,))
return False
compiler = new_compiler(compiler=be.compiler)
customize_compiler(compiler)
try:
# We must be able to initialize the compiler if it has that method
if hasattr(compiler, "initialize"):
compiler.initialize()
except:
return False
executables = []
if compiler.compiler_type in ('unix', 'cygwin'):
executables = [compiler.executables[exe][0] for exe in ('compiler_so', 'linker_so')]
elif compiler.compiler_type == 'nt':
executables = [getattr(compiler, exe) for exe in ('cc', 'linker')]
if executables:
from distutils.spawn import find_executable
for exe in executables:
if not find_executable(exe):
sys.stderr.write("Failed to find %s for compiler type %s.\n" % (exe, compiler.compiler_type))
return False
except Exception as exc:
sys.stderr.write('%s\n' % str(exc))
sys.stderr.write("Failed pre-build check. Attempting anyway.\n")
# if we are unable to positively id the compiler type, or one of these assumptions fails,
# just proceed as we would have without the check
return True |
def basis_function_all(degree, knot_vector, span, knot):
""" Computes all non-zero basis functions of all degrees from 0 up to the input degree for a single parameter.
A slightly modified version of Algorithm A2.2 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param span: knot span, :math:`i`
:type span: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:return: basis functions
:rtype: list
"""
N = [[None for _ in range(degree + 1)] for _ in range(degree + 1)]
for i in range(0, degree + 1):
bfuns = basis_function(i, knot_vector, span, knot)
for j in range(0, i + 1):
N[j][i] = bfuns[j]
return N | Computes all non-zero basis functions of all degrees from 0 up to the input degree for a single parameter.
A slightly modified version of Algorithm A2.2 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param span: knot span, :math:`i`
:type span: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:return: basis functions
:rtype: list | Below is the instruction that describes the task:
### Input:
Computes all non-zero basis functions of all degrees from 0 up to the input degree for a single parameter.
A slightly modified version of Algorithm A2.2 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param span: knot span, :math:`i`
:type span: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:return: basis functions
:rtype: list
### Response:
def basis_function_all(degree, knot_vector, span, knot):
""" Computes all non-zero basis functions of all degrees from 0 up to the input degree for a single parameter.
A slightly modified version of Algorithm A2.2 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param span: knot span, :math:`i`
:type span: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:return: basis functions
:rtype: list
"""
N = [[None for _ in range(degree + 1)] for _ in range(degree + 1)]
for i in range(0, degree + 1):
bfuns = basis_function(i, knot_vector, span, knot)
for j in range(0, i + 1):
N[j][i] = bfuns[j]
return N |
async def BlockUntilLeadershipReleased(self, name):
'''
name : str
Returns -> Error
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='LeadershipService',
request='BlockUntilLeadershipReleased',
version=2,
params=_params)
_params['Name'] = name
reply = await self.rpc(msg)
return reply | name : str
Returns -> Error | Below is the instruction that describes the task:
### Input:
name : str
Returns -> Error
### Response:
async def BlockUntilLeadershipReleased(self, name):
'''
name : str
Returns -> Error
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='LeadershipService',
request='BlockUntilLeadershipReleased',
version=2,
params=_params)
_params['Name'] = name
reply = await self.rpc(msg)
return reply |
def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):
'''
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
'''
#Progress bar to display download progress
def _callback(downloaded, total):
'''
Call function for upload.
`downloaded`: File size already downloaded (int)
`total`: Total file size to be downloaded (int)
'''
if (total is 0) or (downloaded == total):
return
progress = downloaded*100/total
sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress))
sys.stderr.flush()
m = _URI_RE.match(s3_path)
bucket_name = m.group(1)
bucket = boto_conn.get_bucket(bucket_name)
retries = 6
if s3_path.endswith('/') is False:
#It is a file
key_name = m.group(2)
key_instance = bucket.get_key(key_name)
while key_instance is None and retries > 0:
retries = retries - 1
log.info("Results file is not available on s3. Retry: " + str(6-retries))
time.sleep(10)
key_instance = bucket.get_key(key_name)
if key_instance is None:
raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")
log.info("Downloading file from %s" % s3_path)
if delim is None:
try:
key_instance.get_contents_to_file(fp) # cb=_callback
except boto.exception.S3ResponseError as e:
if (e.status == 403):
# SDK-191, boto gives an error while fetching the objects using versions which happens by default
# in the get_contents_to_file() api. So attempt one without specifying version.
log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....")
key_instance.open()
fp.write(key_instance.read())
key_instance.close()
else:
raise
else:
# Get contents as string. Replace parameters and write to file.
_read_iteratively(key_instance, fp, delim=delim)
else:
#It is a folder
key_prefix = m.group(2)
bucket_paths = bucket.list(key_prefix)
for one_path in bucket_paths:
name = one_path.name
# Eliminate _tmp_ files which ends with $folder$
if name.endswith('$folder$'):
continue
log.info("Downloading file from %s" % name)
if delim is None:
one_path.get_contents_to_file(fp) # cb=_callback
else:
_read_iteratively(one_path, fp, delim=delim) | Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded | Below is the instruction that describes the task:
### Input:
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
### Response:
def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):
'''
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
'''
#Progress bar to display download progress
def _callback(downloaded, total):
'''
Call function for upload.
`downloaded`: File size already downloaded (int)
`total`: Total file size to be downloaded (int)
'''
if (total is 0) or (downloaded == total):
return
progress = downloaded*100/total
sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress))
sys.stderr.flush()
m = _URI_RE.match(s3_path)
bucket_name = m.group(1)
bucket = boto_conn.get_bucket(bucket_name)
retries = 6
if s3_path.endswith('/') is False:
#It is a file
key_name = m.group(2)
key_instance = bucket.get_key(key_name)
while key_instance is None and retries > 0:
retries = retries - 1
log.info("Results file is not available on s3. Retry: " + str(6-retries))
time.sleep(10)
key_instance = bucket.get_key(key_name)
if key_instance is None:
raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")
log.info("Downloading file from %s" % s3_path)
if delim is None:
try:
key_instance.get_contents_to_file(fp) # cb=_callback
except boto.exception.S3ResponseError as e:
if (e.status == 403):
# SDK-191, boto gives an error while fetching the objects using versions which happens by default
# in the get_contents_to_file() api. So attempt one without specifying version.
log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....")
key_instance.open()
fp.write(key_instance.read())
key_instance.close()
else:
raise
else:
# Get contents as string. Replace parameters and write to file.
_read_iteratively(key_instance, fp, delim=delim)
else:
#It is a folder
key_prefix = m.group(2)
bucket_paths = bucket.list(key_prefix)
for one_path in bucket_paths:
name = one_path.name
# Eliminate _tmp_ files which ends with $folder$
if name.endswith('$folder$'):
continue
log.info("Downloading file from %s" % name)
if delim is None:
one_path.get_contents_to_file(fp) # cb=_callback
else:
_read_iteratively(one_path, fp, delim=delim) |
def Email(
    self,
    From,
    To,
    Cc=None,
    Bcc=None,
    Subject=None,
    Tag=None,
    HtmlBody=None,
    TextBody=None,
    Metadata=None,
    ReplyTo=None,
    Headers=None,
    TrackOpens=None,
    TrackLinks="None",
    Attachments=None,
):
    """
    Constructs :py:class:`Email` instance.

    All arguments are forwarded unchanged to the :py:class:`Email` model;
    ``manager`` is set to this object so the email can send itself through
    the same API client. Note that ``TrackLinks`` defaults to the string
    ``"None"`` — presumably an API-level enum value, not the ``None``
    singleton; verify against the service documentation.

    :return: :py:class:`Email`
    """
    return Email(
        manager=self,
        From=From,
        To=To,
        Cc=Cc,
        Bcc=Bcc,
        Subject=Subject,
        Tag=Tag,
        HtmlBody=HtmlBody,
        TextBody=TextBody,
        Metadata=Metadata,
        ReplyTo=ReplyTo,
        Headers=Headers,
        TrackOpens=TrackOpens,
        TrackLinks=TrackLinks,
        Attachments=Attachments,
    ) | Constructs :py:class:`Email` instance.
:return: :py:class:`Email` | Below is the instruction that describes the task:
### Input:
Constructs :py:class:`Email` instance.
:return: :py:class:`Email`
### Response:
def Email(
    self,
    From,
    To,
    Cc=None,
    Bcc=None,
    Subject=None,
    Tag=None,
    HtmlBody=None,
    TextBody=None,
    Metadata=None,
    ReplyTo=None,
    Headers=None,
    TrackOpens=None,
    TrackLinks="None",
    Attachments=None,
):
    """
    Constructs :py:class:`Email` instance.

    Every keyword argument is passed straight through to the
    :py:class:`Email` model, with ``manager`` bound to this instance.

    :return: :py:class:`Email`
    """
    return Email(
        manager=self,
        From=From,
        To=To,
        Cc=Cc,
        Bcc=Bcc,
        Subject=Subject,
        Tag=Tag,
        HtmlBody=HtmlBody,
        TextBody=TextBody,
        Metadata=Metadata,
        ReplyTo=ReplyTo,
        Headers=Headers,
        TrackOpens=TrackOpens,
        TrackLinks=TrackLinks,
        Attachments=Attachments,
    ) |
def sqs_delete_item(queue_url,
receipt_handle,
client=None,
raiseonfail=False):
"""This deletes a message from the queue, effectively acknowledging its
receipt.
Call this only when all messages retrieved from the queue have been
processed, since this will prevent redelivery of these messages to other
queue workers pulling fromn the same queue channel.
Parameters
----------
queue_url : str
The SQS URL of the queue where we got the messages from. This should be
the same queue used to retrieve the messages in `sqs_get_item`.
receipt_handle : str
The receipt handle of the queue message that we're responding to, and
will acknowledge receipt of. This will be present in each message
retrieved using `sqs_get_item`.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
Nothing.
"""
if not client:
client = boto3.client('sqs')
try:
client.delete_message(
QueueUrl=queue_url,
ReceiptHandle=receipt_handle
)
except Exception as e:
LOGEXCEPTION(
'could not delete message with receipt handle: '
'%s from queue: %s' % (receipt_handle, queue_url)
)
if raiseonfail:
raise | This deletes a message from the queue, effectively acknowledging its
receipt.
Call this only when all messages retrieved from the queue have been
processed, since this will prevent redelivery of these messages to other
queue workers pulling from the same queue channel.
Parameters
----------
queue_url : str
The SQS URL of the queue where we got the messages from. This should be
the same queue used to retrieve the messages in `sqs_get_item`.
receipt_handle : str
The receipt handle of the queue message that we're responding to, and
will acknowledge receipt of. This will be present in each message
retrieved using `sqs_get_item`.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
Nothing. | Below is the instruction that describes the task:
### Input:
This deletes a message from the queue, effectively acknowledging its
receipt.
Call this only when all messages retrieved from the queue have been
processed, since this will prevent redelivery of these messages to other
queue workers pulling from the same queue channel.
Parameters
----------
queue_url : str
The SQS URL of the queue where we got the messages from. This should be
the same queue used to retrieve the messages in `sqs_get_item`.
receipt_handle : str
The receipt handle of the queue message that we're responding to, and
will acknowledge receipt of. This will be present in each message
retrieved using `sqs_get_item`.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
Nothing.
### Response:
def sqs_delete_item(queue_url,
                    receipt_handle,
                    client=None,
                    raiseonfail=False):
    """This deletes a message from the queue, effectively acknowledging its
    receipt.

    Call this only when all messages retrieved from the queue have been
    processed, since this will prevent redelivery of these messages to other
    queue workers pulling from the same queue channel.

    Parameters
    ----------

    queue_url : str
        The SQS URL of the queue where we got the messages from. This should be
        the same queue used to retrieve the messages in `sqs_get_item`.

    receipt_handle : str
        The receipt handle of the queue message that we're responding to, and
        will acknowledge receipt of. This will be present in each message
        retrieved using `sqs_get_item`.

    client : boto3.Client or None
        If None, this function will instantiate a new `boto3.Client` object to
        use in its operations. Alternatively, pass in an existing `boto3.Client`
        instance to re-use it here.

    raiseonfail : bool
        If True, will re-raise whatever Exception caused the operation to fail
        and break out immediately.

    Returns
    -------

    Nothing.

    """

    if not client:
        client = boto3.client('sqs')

    try:

        client.delete_message(
            QueueUrl=queue_url,
            ReceiptHandle=receipt_handle
        )

    # NOTE(review): the bound name 'e' is unused — LOGEXCEPTION logs the
    # active exception from sys.exc_info() on its own.
    except Exception as e:

        LOGEXCEPTION(
            'could not delete message with receipt handle: '
            '%s from queue: %s' % (receipt_handle, queue_url)
        )
        if raiseonfail:
            raise |
def __is_json_error(self, status, headers):
    """Determine if response is an error.

    Args:
      status: HTTP status code.
      headers: Dictionary of (lowercase) header name to value.

    Returns:
      True if the response was an error, else False.
    """
    # Strip any parameters (e.g. "; charset=utf-8") from the Content-Type.
    # NOTE(review): cgi.parse_header is deprecated and removed in recent
    # Python versions — confirm the supported runtime before upgrading.
    content_header = headers.get('content-type', '')
    content_type, unused_params = cgi.parse_header(content_header)
    # An error is an HTTP 400-status response carrying a JSON media type.
    return (status.startswith('400') and
            content_type.lower() in _ALL_JSON_CONTENT_TYPES) | Determine if response is an error.
Args:
status: HTTP status code.
headers: Dictionary of (lowercase) header name to value.
Returns:
True if the response was an error, else False. | Below is the instruction that describes the task:
### Input:
Determine if response is an error.
Args:
status: HTTP status code.
headers: Dictionary of (lowercase) header name to value.
Returns:
True if the response was an error, else False.
### Response:
def __is_json_error(self, status, headers):
    """Determine if response is an error.

    Args:
      status: HTTP status code.
      headers: Dictionary of (lowercase) header name to value.

    Returns:
      True if the response was an error, else False.
    """
    # Parse the media type out of the Content-Type header, dropping params.
    content_header = headers.get('content-type', '')
    content_type, unused_params = cgi.parse_header(content_header)
    # Errors are 400-status responses whose payload is a JSON content type.
    return (status.startswith('400') and
            content_type.lower() in _ALL_JSON_CONTENT_TYPES) |
def _dcm_to_q(self, dcm):
"""
Create q from dcm
Reference:
- Shoemake, Quaternions,
http://www.cs.ucr.edu/~vbz/resources/quatut.pdf
:param dcm: 3x3 dcm array
returns: quaternion array
"""
assert(dcm.shape == (3, 3))
q = np.zeros(4)
tr = np.trace(dcm)
if tr > 0:
s = np.sqrt(tr + 1.0)
q[0] = s * 0.5
s = 0.5 / s
q[1] = (dcm[2][1] - dcm[1][2]) * s
q[2] = (dcm[0][2] - dcm[2][0]) * s
q[3] = (dcm[1][0] - dcm[0][1]) * s
else:
dcm_i = np.argmax(np.diag(dcm))
dcm_j = (dcm_i + 1) % 3
dcm_k = (dcm_i + 2) % 3
s = np.sqrt((dcm[dcm_i][dcm_i] - dcm[dcm_j][dcm_j] -
dcm[dcm_k][dcm_k]) + 1.0)
q[dcm_i + 1] = s * 0.5
s = 0.5 / s
q[dcm_j + 1] = (dcm[dcm_i][dcm_j] + dcm[dcm_j][dcm_i]) * s
q[dcm_k + 1] = (dcm[dcm_k][dcm_i] + dcm[dcm_i][dcm_k]) * s
q[0] = (dcm[dcm_k][dcm_j] - dcm[dcm_j][dcm_k]) * s
return q | Create q from dcm
Reference:
- Shoemake, Quaternions,
http://www.cs.ucr.edu/~vbz/resources/quatut.pdf
:param dcm: 3x3 dcm array
returns: quaternion array | Below is the instruction that describes the task:
### Input:
Create q from dcm
Reference:
- Shoemake, Quaternions,
http://www.cs.ucr.edu/~vbz/resources/quatut.pdf
:param dcm: 3x3 dcm array
returns: quaternion array
### Response:
def _dcm_to_q(self, dcm):
    """
    Create q from dcm

    Reference:
        - Shoemake, Quaternions,
        http://www.cs.ucr.edu/~vbz/resources/quatut.pdf

    :param dcm: 3x3 dcm array
    returns: quaternion array (scalar component stored in q[0])
    """
    assert(dcm.shape == (3, 3))
    q = np.zeros(4)
    tr = np.trace(dcm)
    if tr > 0:
        # Trace-positive branch: scalar part is numerically well conditioned.
        s = np.sqrt(tr + 1.0)
        q[0] = s * 0.5
        s = 0.5 / s
        q[1] = (dcm[2][1] - dcm[1][2]) * s
        q[2] = (dcm[0][2] - dcm[2][0]) * s
        q[3] = (dcm[1][0] - dcm[0][1]) * s
    else:
        # Use the largest diagonal element so the sqrt argument is positive.
        dcm_i = np.argmax(np.diag(dcm))
        dcm_j = (dcm_i + 1) % 3
        dcm_k = (dcm_i + 2) % 3
        s = np.sqrt((dcm[dcm_i][dcm_i] - dcm[dcm_j][dcm_j] -
                     dcm[dcm_k][dcm_k]) + 1.0)
        q[dcm_i + 1] = s * 0.5
        s = 0.5 / s
        q[dcm_j + 1] = (dcm[dcm_i][dcm_j] + dcm[dcm_j][dcm_i]) * s
        q[dcm_k + 1] = (dcm[dcm_k][dcm_i] + dcm[dcm_i][dcm_k]) * s
        q[0] = (dcm[dcm_k][dcm_j] - dcm[dcm_j][dcm_k]) * s
    return q |
def _count_fields_recursive(dataset, fields):
"""Cuenta la información de campos optativos/recomendados/requeridos
desde 'fields', y cuenta la ocurrencia de los mismos en 'dataset'.
Args:
dataset (dict): diccionario con claves a ser verificadas.
fields (dict): diccionario con los campos a verificar en dataset
como claves, y 'optativo', 'recomendado', o 'requerido' como
valores. Puede tener objetios anidados pero no arrays.
Returns:
dict: diccionario con las claves 'recomendado', 'optativo',
'requerido', 'recomendado_total', 'optativo_total',
'requerido_total', con la cantidad como valores.
"""
key_count = {
'recomendado': 0,
'optativo': 0,
'requerido': 0,
'total_optativo': 0,
'total_recomendado': 0,
'total_requerido': 0
}
for k, v in fields.items():
# Si la clave es un diccionario se implementa recursivamente el
# mismo algoritmo
if isinstance(v, dict):
# dataset[k] puede ser o un dict o una lista, ej 'dataset' es
# list, 'publisher' no. Si no es lista, lo metemos en una.
# Si no es ninguno de los dos, dataset[k] es inválido
# y se pasa un diccionario vacío para poder comparar
elements = dataset.get(k)
if not isinstance(elements, (list, dict)):
elements = [{}]
if isinstance(elements, dict):
elements = [dataset[k].copy()]
for element in elements:
# Llamada recursiva y suma del resultado al nuestro
result = _count_fields_recursive(element, v)
for key in result:
key_count[key] += result[key]
# Es un elemento normal (no iterable), se verifica si está en
# dataset o no. Se suma 1 siempre al total de su tipo
else:
# total_requerido, total_recomendado, o total_optativo
key_count['total_' + v] += 1
if k in dataset:
key_count[v] += 1
return key_count | Cuenta la información de campos optativos/recomendados/requeridos
desde 'fields', y cuenta la ocurrencia de los mismos en 'dataset'.
Args:
dataset (dict): diccionario con claves a ser verificadas.
fields (dict): diccionario con los campos a verificar en dataset
como claves, y 'optativo', 'recomendado', o 'requerido' como
valores. Puede tener objetios anidados pero no arrays.
Returns:
dict: diccionario con las claves 'recomendado', 'optativo',
'requerido', 'total_recomendado', 'total_optativo',
'total_requerido', con la cantidad como valores. | Below is the instruction that describes the task:
### Input:
Cuenta la información de campos optativos/recomendados/requeridos
desde 'fields', y cuenta la ocurrencia de los mismos en 'dataset'.
Args:
dataset (dict): diccionario con claves a ser verificadas.
fields (dict): diccionario con los campos a verificar en dataset
como claves, y 'optativo', 'recomendado', o 'requerido' como
valores. Puede tener objetios anidados pero no arrays.
Returns:
dict: diccionario con las claves 'recomendado', 'optativo',
'requerido', 'recomendado_total', 'optativo_total',
'requerido_total', con la cantidad como valores.
### Response:
def _count_fields_recursive(dataset, fields):
    """Count optional/recommended/required fields from 'fields' and count
    how many of them actually occur in 'dataset'.

    Args:
        dataset (dict): dictionary whose keys are checked.
        fields (dict): fields to check in dataset as keys, with
            'optativo', 'recomendado' or 'requerido' as values. May
            contain nested objects but no arrays.

    Returns:
        dict: dictionary with the keys 'recomendado', 'optativo',
        'requerido', 'total_recomendado', 'total_optativo' and
        'total_requerido', with the counts as values.
    """
    key_count = {
        'recomendado': 0,
        'optativo': 0,
        'requerido': 0,
        'total_optativo': 0,
        'total_recomendado': 0,
        'total_requerido': 0
    }

    for k, v in fields.items():
        # If the value is a dict, apply the same algorithm recursively.
        if isinstance(v, dict):
            # dataset[k] may be either a dict or a list, e.g. 'dataset' is
            # a list while 'publisher' is not. If it is not a list, wrap it
            # in one. If it is neither, dataset[k] is invalid and an empty
            # dict is used so totals can still be compared.
            elements = dataset.get(k)
            if not isinstance(elements, (list, dict)):
                elements = [{}]

            if isinstance(elements, dict):
                elements = [dataset[k].copy()]
            for element in elements:
                # Recursive call; add its result to our own tallies.
                result = _count_fields_recursive(element, v)
                for key in result:
                    key_count[key] += result[key]
        # It is a plain (non-iterable) element: check whether it is in the
        # dataset. Always add 1 to the total for its requirement level.
        else:
            # total_requerido, total_recomendado, or total_optativo
            key_count['total_' + v] += 1
            if k in dataset:
                key_count[v] += 1

    return key_count |
def assoc2(self, assets_by_site, assoc_dist, mode, asset_refs):
    """
    Associated a list of assets by site to the site collection used
    to instantiate GeographicObjects.

    :param assets_by_sites: a list of lists of assets
    :param assoc_dist: the maximum distance for association
    :param mode: 'strict', 'warn' or 'filter'
    :param asset_ref: ID of the assets are a list of strings
    :returns: filtered site collection, filtered assets by site, discarded
    """
    # NOTE(review): this substring test accepts 'strict' and 'filter' but
    # would reject 'warn' even though the docstring mentions it — confirm
    # the intended set of modes.
    assert mode in 'strict filter', mode
    self.objects.filtered  # self.objects must be a SiteCollection
    asset_dt = numpy.dtype(
        [('asset_ref', vstr), ('lon', F32), ('lat', F32)])
    assets_by_sid = collections.defaultdict(list)
    discarded = []
    for assets in assets_by_site:
        # Each group is assumed co-located: use the first asset's location.
        lon, lat = assets[0].location
        obj, distance = self.get_closest(lon, lat)
        if distance <= assoc_dist:
            # keep the assets, otherwise discard them
            assets_by_sid[obj['sids']].extend(assets)
        elif mode == 'strict':
            raise SiteAssociationError(
                'There is nothing closer than %s km '
                'to site (%s %s)' % (assoc_dist, lon, lat))
        else:
            discarded.extend(assets)
    sids = sorted(assets_by_sid)
    if not sids:
        raise SiteAssociationError(
            'Could not associate any site to any assets within the '
            'asset_hazard_distance of %s km' % assoc_dist)
    # Deterministic ordering: sites by id, assets by ordinal within a site.
    assets_by_site = [
        sorted(assets_by_sid[sid], key=operator.attrgetter('ordinal'))
        for sid in sids]
    data = [(asset_refs[asset.ordinal],) + asset.location
            for asset in discarded]
    discarded = numpy.array(data, asset_dt)
    return self.objects.filtered(sids), assets_by_site, discarded | Associated a list of assets by site to the site collection used
to instantiate GeographicObjects.
:param assets_by_sites: a list of lists of assets
:param assoc_dist: the maximum distance for association
:param mode: 'strict', 'warn' or 'filter'
:param asset_ref: ID of the assets are a list of strings
:returns: filtered site collection, filtered assets by site, discarded | Below is the the instruction that describes the task:
### Input:
Associated a list of assets by site to the site collection used
to instantiate GeographicObjects.
:param assets_by_sites: a list of lists of assets
:param assoc_dist: the maximum distance for association
:param mode: 'strict', 'warn' or 'filter'
:param asset_ref: ID of the assets are a list of strings
:returns: filtered site collection, filtered assets by site, discarded
### Response:
def assoc2(self, assets_by_site, assoc_dist, mode, asset_refs):
    """
    Associated a list of assets by site to the site collection used
    to instantiate GeographicObjects.

    :param assets_by_sites: a list of lists of assets
    :param assoc_dist: the maximum distance for association
    :param mode: 'strict', 'warn' or 'filter'
    :param asset_ref: ID of the assets are a list of strings
    :returns: filtered site collection, filtered assets by site, discarded
    """
    assert mode in 'strict filter', mode
    self.objects.filtered  # self.objects must be a SiteCollection
    asset_dt = numpy.dtype(
        [('asset_ref', vstr), ('lon', F32), ('lat', F32)])
    assets_by_sid = collections.defaultdict(list)
    discarded = []
    for assets in assets_by_site:
        # Use the first asset's location for the whole co-located group.
        lon, lat = assets[0].location
        obj, distance = self.get_closest(lon, lat)
        if distance <= assoc_dist:
            # keep the assets, otherwise discard them
            assets_by_sid[obj['sids']].extend(assets)
        elif mode == 'strict':
            raise SiteAssociationError(
                'There is nothing closer than %s km '
                'to site (%s %s)' % (assoc_dist, lon, lat))
        else:
            discarded.extend(assets)
    sids = sorted(assets_by_sid)
    if not sids:
        raise SiteAssociationError(
            'Could not associate any site to any assets within the '
            'asset_hazard_distance of %s km' % assoc_dist)
    assets_by_site = [
        sorted(assets_by_sid[sid], key=operator.attrgetter('ordinal'))
        for sid in sids]
    # Discarded assets are reported as a structured array of (ref, lon, lat).
    data = [(asset_refs[asset.ordinal],) + asset.location
            for asset in discarded]
    discarded = numpy.array(data, asset_dt)
    return self.objects.filtered(sids), assets_by_site, discarded |
def putcol(self, value, startrow=0, nrow=-1, rowincr=1):
    """Put an entire column or part of it.

    Delegates to :func:`table.putcol` on the owning table for this
    column's name. ``startrow``/``nrow``/``rowincr`` select the rows;
    presumably ``nrow=-1`` means "through the end" — see the table docs.
    (see :func:`table.putcol`)"""
    return self._table.putcol(self._column, value, startrow, nrow, rowincr) | Put an entire column or part of it.
(see :func:`table.putcol`) | Below is the the instruction that describes the task:
### Input:
Put an entire column or part of it.
(see :func:`table.putcol`)
### Response:
def putcol(self, value, startrow=0, nrow=-1, rowincr=1):
    """Put an entire column or part of it.

    Forwards to :func:`table.putcol` on the owning table, supplying this
    column's name; the row-selection arguments pass through unchanged.
    (see :func:`table.putcol`)"""
    return self._table.putcol(self._column, value, startrow, nrow, rowincr) |
def get(self, app_id, view_specifier):
    """
    Retrieve the definition of a given view for an app.

    :param app_id: the app id
    :param view_specifier:
        Can be one of the following:
        1. The view ID
        2. The view's name
        3. "last" to look up the last view used
    """
    # Both identifiers are interpolated directly into the endpoint path.
    endpoint = '/view/app/{}/{}'.format(app_id, view_specifier)
    return self.transport.GET(url=endpoint)
:param app_id: the app id
:param view_specifier:
Can be one of the following:
1. The view ID
2. The view's name
3. "last" to look up the last view used | Below is the the instruction that describes the task:
### Input:
Retrieve the definition of a given view, provided the app_id and the view_id
:param app_id: the app id
:param view_specifier:
Can be one of the following:
1. The view ID
2. The view's name
3. "last" to look up the last view used
### Response:
def get(self, app_id, view_specifier):
    """
    Retrieve the definition of a given view, provided the app_id and the view_id

    :param app_id: the app id
    :param view_specifier:
        Can be one of the following:
        1. The view ID
        2. The view's name
        3. "last" to look up the last view used
    """
    # Both identifiers are interpolated directly into the endpoint path.
    return self.transport.GET(url='/view/app/{}/{}'.format(app_id, view_specifier)) |
def get_skydir_distance_mask(src_skydir, skydir, dist, min_dist=None,
                             square=False, coordsys='CEL'):
    """Retrieve sources within a certain angular distance of an
    (ra,dec) coordinate.  This function supports two types of
    geometric selections: circular (square=False) and square
    (square=True).  The circular selection finds all sources with a given
    angular distance of the target position.  The square selection
    finds sources within an ROI-like region of size R x R where R
    = 2 x dist.

    Parameters
    ----------

    src_skydir : `~astropy.coordinates.SkyCoord`
      Array of sky directions.

    skydir : `~astropy.coordinates.SkyCoord`
      Sky direction with respect to which the selection will be applied.

    dist : float
      Maximum distance in degrees from the sky coordinate.

    min_dist : float, optional
      If given, also exclude sources closer than this distance in degrees.

    square : bool
      Choose whether to apply a circular or square selection.

    coordsys : str
      Coordinate system to use when applying a selection with square=True.

    Returns
    -------
    numpy boolean array, True for sources passing the selection.
    """
    # No distance cut given: 180 deg covers the whole sky.
    if dist is None:
        dist = 180.

    # dtheta ends up in radians in every branch below.
    if not square:
        dtheta = src_skydir.separation(skydir).rad
    elif coordsys == 'CEL':
        dtheta = get_linear_dist(skydir,
                                 src_skydir.ra.rad,
                                 src_skydir.dec.rad,
                                 coordsys=coordsys)
    elif coordsys == 'GAL':
        dtheta = get_linear_dist(skydir,
                                 src_skydir.galactic.l.rad,
                                 src_skydir.galactic.b.rad,
                                 coordsys=coordsys)
    else:
        raise Exception('Unrecognized coordinate system: %s' % coordsys)

    msk = (dtheta < np.radians(dist))
    # Optionally carve out an inner exclusion radius.
    if min_dist is not None:
        msk &= (dtheta > np.radians(min_dist))
    return msk | Retrieve sources within a certain angular distance of an
(ra,dec) coordinate. This function supports two types of
geometric selections: circular (square=False) and square
(square=True). The circular selection finds all sources with a given
angular distance of the target position. The square selection
finds sources within an ROI-like region of size R x R where R
= 2 x dist.
Parameters
----------
src_skydir : `~astropy.coordinates.SkyCoord`
Array of sky directions.
skydir : `~astropy.coordinates.SkyCoord`
Sky direction with respect to which the selection will be applied.
dist : float
Maximum distance in degrees from the sky coordinate.
square : bool
Choose whether to apply a circular or square selection.
coordsys : str
Coordinate system to use when applying a selection with square=True. | Below is the the instruction that describes the task:
### Input:
Retrieve sources within a certain angular distance of an
(ra,dec) coordinate. This function supports two types of
geometric selections: circular (square=False) and square
(square=True). The circular selection finds all sources with a given
angular distance of the target position. The square selection
finds sources within an ROI-like region of size R x R where R
= 2 x dist.
Parameters
----------
src_skydir : `~astropy.coordinates.SkyCoord`
Array of sky directions.
skydir : `~astropy.coordinates.SkyCoord`
Sky direction with respect to which the selection will be applied.
dist : float
Maximum distance in degrees from the sky coordinate.
square : bool
Choose whether to apply a circular or square selection.
coordsys : str
Coordinate system to use when applying a selection with square=True.
### Response:
def get_skydir_distance_mask(src_skydir, skydir, dist, min_dist=None,
                             square=False, coordsys='CEL'):
    """Retrieve sources within a certain angular distance of an
    (ra,dec) coordinate.  This function supports two types of
    geometric selections: circular (square=False) and square
    (square=True).  The circular selection finds all sources with a given
    angular distance of the target position.  The square selection
    finds sources within an ROI-like region of size R x R where R
    = 2 x dist.

    Parameters
    ----------

    src_skydir : `~astropy.coordinates.SkyCoord`
      Array of sky directions.

    skydir : `~astropy.coordinates.SkyCoord`
      Sky direction with respect to which the selection will be applied.

    dist : float
      Maximum distance in degrees from the sky coordinate.

    square : bool
      Choose whether to apply a circular or square selection.

    coordsys : str
      Coordinate system to use when applying a selection with square=True.
    """
    # A missing cut means "whole sky" (180 degrees).
    if dist is None:
        dist = 180.

    if not square:
        dtheta = src_skydir.separation(skydir).rad
    elif coordsys == 'CEL':
        dtheta = get_linear_dist(skydir,
                                 src_skydir.ra.rad,
                                 src_skydir.dec.rad,
                                 coordsys=coordsys)
    elif coordsys == 'GAL':
        dtheta = get_linear_dist(skydir,
                                 src_skydir.galactic.l.rad,
                                 src_skydir.galactic.b.rad,
                                 coordsys=coordsys)
    else:
        raise Exception('Unrecognized coordinate system: %s' % coordsys)

    # Compare in radians; optionally exclude an inner radius as well.
    msk = (dtheta < np.radians(dist))
    if min_dist is not None:
        msk &= (dtheta > np.radians(min_dist))
    return msk |
def get_instance(self, cls):
    """Return an instance for a class.

    Fast path: call an existing binding without locking. Otherwise,
    under the global binding lock, re-check the binding and — if runtime
    binding is enabled and ``cls`` is callable — instantiate ``cls`` once
    and cache a provider that returns that single instance.
    """
    binding = self._bindings.get(cls)
    if binding:
        return binding()

    # Try to create a runtime binding.
    with _BINDING_LOCK:
        # Double-check inside the lock: another thread may have bound it.
        binding = self._bindings.get(cls)
        if binding:
            return binding()

        if not self._bind_in_runtime:
            raise InjectorException('No binding was found for key=%s' % cls)

        if not callable(cls):
            raise InjectorException(
                'Cannot create a runtime binding, the key is not callable, key=%s' % cls)

        instance = cls()
        # Cache a provider closing over the created singleton instance.
        self._bindings[cls] = lambda: instance

        logger.debug('Created a runtime binding for key=%s, instance=%s', cls, instance)
        return instance | Return an instance for a class. | Below is the the instruction that describes the task:
### Input:
Return an instance for a class.
### Response:
def get_instance(self, cls):
    """Return an instance for a class.

    An existing binding is used when present; otherwise a runtime binding
    (a cached singleton of ``cls()``) is created under the binding lock.
    """
    binding = self._bindings.get(cls)
    if binding:
        return binding()

    # Try to create a runtime binding.
    with _BINDING_LOCK:
        # Re-check under the lock before creating anything.
        binding = self._bindings.get(cls)
        if binding:
            return binding()

        if not self._bind_in_runtime:
            raise InjectorException('No binding was found for key=%s' % cls)

        if not callable(cls):
            raise InjectorException(
                'Cannot create a runtime binding, the key is not callable, key=%s' % cls)

        instance = cls()
        self._bindings[cls] = lambda: instance

        logger.debug('Created a runtime binding for key=%s, instance=%s', cls, instance)
        return instance |
def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations):
    """Remove this objects as an impact for other schedulingitem.

    :param hosts: hosts objects, used to get impacts
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to get impacts
    :type services: alignak.objects.service.Services
    :param timeperiods: Timeperiods objects, used for update_business_impact_value
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param bi_modulations: business impact modulation are used when setting myself as problem
    :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
    :return: None
    TODO: SchedulingItem object should not handle other schedulingitem obj.
          We should call obj.register* on both obj.
          This is 'Java' style
    """
    was_pb = self.is_problem
    if self.is_problem:
        self.is_problem = False

        # we warn impacts that we are no more a problem
        for impact_id in self.impacts:
            # Impact ids may refer to either hosts or services.
            if impact_id in hosts:
                impact = hosts[impact_id]
            else:
                impact = services[impact_id]
            impact.unregister_a_problem(self)

        # we can just drop our impacts list
        self.impacts = []

    # We update our business_impact value, it's not a huge thing :)
    self.update_business_impact_value(hosts, services, timeperiods, bi_modulations)

    # If we were a problem, we say to everyone
    # our new status, with good business_impact value
    if was_pb:
        # And we register a new broks for update status
        self.broks.append(self.get_update_status_brok()) | Remove this objects as an impact for other schedulingitem.
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for update_business_impact_value
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style | Below is the the instruction that describes the task:
### Input:
Remove this objects as an impact for other schedulingitem.
:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for update_business_impact_value
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
We should call obj.register* on both obj.
This is 'Java' style
### Response:
def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations):
    """Remove this objects as an impact for other schedulingitem.

    :param hosts: hosts objects, used to get impacts
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to get impacts
    :type services: alignak.objects.service.Services
    :param timeperiods: Timeperiods objects, used for update_business_impact_value
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param bi_modulations: business impact modulation are used when setting myself as problem
    :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
    :return: None
    TODO: SchedulingItem object should not handle other schedulingitem obj.
          We should call obj.register* on both obj.
          This is 'Java' style
    """
    was_pb = self.is_problem
    if self.is_problem:
        self.is_problem = False

        # we warn impacts that we are no more a problem
        for impact_id in self.impacts:
            # An impact id can belong to the hosts or the services table.
            if impact_id in hosts:
                impact = hosts[impact_id]
            else:
                impact = services[impact_id]
            impact.unregister_a_problem(self)

        # we can just drop our impacts list
        self.impacts = []

    # We update our business_impact value, it's not a huge thing :)
    self.update_business_impact_value(hosts, services, timeperiods, bi_modulations)

    # If we were a problem, we say to everyone
    # our new status, with good business_impact value
    if was_pb:
        # And we register a new broks for update status
        self.broks.append(self.get_update_status_brok()) |
def put(self, path, data, **options):
    """
    Parses PUT request options and dispatches a request

    :param path: endpoint path for the request
    :param data: request payload, normalized by ``_update_request``
    :param options: extra request options, also normalized
    """
    # _update_request returns the possibly-rewritten (data, options) pair.
    data, options = self._update_request(data, options)
    return self.request('put', path, data=data, **options) | Parses PUT request options and dispatches a request | Below is the the instruction that describes the task:
### Input:
Parses PUT request options and dispatches a request
### Response:
def put(self, path, data, **options):
    """
    Parses PUT request options and dispatches a request

    :param path: endpoint path for the request
    :param data: request payload, normalized by ``_update_request``
    :param options: extra request options, also normalized
    """
    # Normalize payload/options, then delegate to the generic dispatcher.
    data, options = self._update_request(data, options)
    return self.request('put', path, data=data, **options) |
def _iter_all_paths(start, end, rand=False, path=tuple()):
"""Iterate through all paths from start to end."""
path = path + (start, )
if start is end:
yield path
else:
nodes = [start.lo, start.hi]
if rand: # pragma: no cover
random.shuffle(nodes)
for node in nodes:
if node is not None:
yield from _iter_all_paths(node, end, rand, path) | Iterate through all paths from start to end. | Below is the the instruction that describes the task:
### Input:
Iterate through all paths from start to end.
### Response:
def _iter_all_paths(start, end, rand=False, path=tuple()):
    """Iterate through all paths from start to end.

    Yields tuples of nodes; children are reached through the ``lo`` and
    ``hi`` attributes, and ``end`` is matched by identity.
    """
    # Extend the accumulated path with the current node.
    path = path + (start, )
    if start is end:
        yield path
    else:
        # Visit both children, optionally in random order.
        nodes = [start.lo, start.hi]
        if rand:  # pragma: no cover
            random.shuffle(nodes)
        for node in nodes:
            if node is not None:
                yield from _iter_all_paths(node, end, rand, path) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.