code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def xep_role(typ, rawtext, text, lineno, inliner,
options={}, content=[]):
"""Role for PEP/RFC references that generate an index entry."""
env = inliner.document.settings.env
if not typ:
typ = env.config.default_role
else:
typ = typ.lower()
has_explicit_title, title, target = split_explicit_title(text)
title = utils.unescape(title)
target = utils.unescape(target)
targetid = 'index-%s' % env.new_serialno('index')
anchor = ''
anchorindex = target.find('#')
if anchorindex > 0:
target, anchor = target[:anchorindex], target[anchorindex:]
try:
xepnum = int(target)
except ValueError:
msg = inliner.reporter.error('invalid XEP number %s' % target,
line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
target = "{:04d}".format(xepnum)
if not has_explicit_title:
title = "XEP-" + target
indexnode = addnodes.index()
targetnode = nodes.target('', '', ids=[targetid])
inliner.document.note_explicit_target(targetnode)
indexnode['entries'] = [
('single', _('XMPP Extension Protocols (XEPs); XEP %s') % target,
targetid, '')]
ref = inliner.document.settings.xep_base_url + 'xep-%04d.html' % xepnum
rn = nodes.reference(title, title, internal=False, refuri=ref+anchor,
classes=[typ])
return [indexnode, targetnode, rn], [] | Role for PEP/RFC references that generate an index entry. | Below is the the instruction that describes the task:
### Input:
Role for PEP/RFC references that generate an index entry.
### Response:
def xep_role(typ, rawtext, text, lineno, inliner,
             options={}, content=[]):
    """Docutils role for XEP references that also generates an index entry.

    Modeled on Sphinx's PEP/RFC roles: the role target is parsed as an
    XEP number (optionally followed by a ``#anchor``), an index entry is
    recorded, and a reference node pointing at the rendered XEP page is
    returned.

    NOTE(review): the mutable defaults ``options={}`` / ``content=[]``
    follow the standard docutils role signature; they are never mutated
    here.
    """
    env = inliner.document.settings.env
    if not typ:
        typ = env.config.default_role
    else:
        typ = typ.lower()
    # Split ":role:`title <target>`" into explicit-title parts.
    has_explicit_title, title, target = split_explicit_title(text)
    title = utils.unescape(title)
    target = utils.unescape(target)
    targetid = 'index-%s' % env.new_serialno('index')
    anchor = ''
    anchorindex = target.find('#')
    if anchorindex > 0:
        # Keep any "#fragment" so it can be re-appended to the final URL.
        target, anchor = target[:anchorindex], target[anchorindex:]
    try:
        xepnum = int(target)
    except ValueError:
        msg = inliner.reporter.error('invalid XEP number %s' % target,
                                     line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    # XEP numbers render zero-padded to four digits (e.g. XEP-0045).
    target = "{:04d}".format(xepnum)
    if not has_explicit_title:
        title = "XEP-" + target
    indexnode = addnodes.index()
    targetnode = nodes.target('', '', ids=[targetid])
    inliner.document.note_explicit_target(targetnode)
    indexnode['entries'] = [
        ('single', _('XMPP Extension Protocols (XEPs); XEP %s') % target,
         targetid, '')]
    # xep_base_url is expected to be configured in the build settings.
    ref = inliner.document.settings.xep_base_url + 'xep-%04d.html' % xepnum
    rn = nodes.reference(title, title, internal=False, refuri=ref+anchor,
                         classes=[typ])
    return [indexnode, targetnode, rn], []
def order_duplicate_volume(self, origin_volume_id, origin_snapshot_id=None,
duplicate_size=None, duplicate_iops=None,
duplicate_tier_level=None,
duplicate_snapshot_size=None,
hourly_billing_flag=False):
"""Places an order for a duplicate file volume.
:param origin_volume_id: The ID of the origin volume to be duplicated
:param origin_snapshot_id: Origin snapshot ID to use for duplication
:param duplicate_size: Size/capacity for the duplicate volume
:param duplicate_iops: The IOPS per GB for the duplicate volume
:param duplicate_tier_level: Tier level for the duplicate volume
:param duplicate_snapshot_size: Snapshot space size for the duplicate
:param hourly_billing_flag: Billing type, monthly (False)
or hourly (True), default to monthly.
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
file_mask = 'id,billingItem[location,hourlyFlag],snapshotCapacityGb,'\
'storageType[keyName],capacityGb,originalVolumeSize,'\
'provisionedIops,storageTierLevel,'\
'staasVersion,hasEncryptionAtRest'
origin_volume = self.get_file_volume_details(origin_volume_id,
mask=file_mask)
order = storage_utils.prepare_duplicate_order_object(
self, origin_volume, duplicate_iops, duplicate_tier_level,
duplicate_size, duplicate_snapshot_size, 'file',
hourly_billing_flag
)
if origin_snapshot_id is not None:
order['duplicateOriginSnapshotId'] = origin_snapshot_id
return self.client.call('Product_Order', 'placeOrder', order) | Places an order for a duplicate file volume.
:param origin_volume_id: The ID of the origin volume to be duplicated
:param origin_snapshot_id: Origin snapshot ID to use for duplication
:param duplicate_size: Size/capacity for the duplicate volume
:param duplicate_iops: The IOPS per GB for the duplicate volume
:param duplicate_tier_level: Tier level for the duplicate volume
:param duplicate_snapshot_size: Snapshot space size for the duplicate
:param hourly_billing_flag: Billing type, monthly (False)
or hourly (True), default to monthly.
:return: Returns a SoftLayer_Container_Product_Order_Receipt | Below is the instruction that describes the task:
### Input:
Places an order for a duplicate file volume.
:param origin_volume_id: The ID of the origin volume to be duplicated
:param origin_snapshot_id: Origin snapshot ID to use for duplication
:param duplicate_size: Size/capacity for the duplicate volume
:param duplicate_iops: The IOPS per GB for the duplicate volume
:param duplicate_tier_level: Tier level for the duplicate volume
:param duplicate_snapshot_size: Snapshot space size for the duplicate
:param hourly_billing_flag: Billing type, monthly (False)
or hourly (True), default to monthly.
:return: Returns a SoftLayer_Container_Product_Order_Receipt
### Response:
def order_duplicate_volume(self, origin_volume_id, origin_snapshot_id=None,
                           duplicate_size=None, duplicate_iops=None,
                           duplicate_tier_level=None,
                           duplicate_snapshot_size=None,
                           hourly_billing_flag=False):
    """Place an order for a duplicate file volume.

    :param origin_volume_id: The ID of the origin volume to be duplicated
    :param origin_snapshot_id: Origin snapshot ID to use for duplication
    :param duplicate_size: Size/capacity for the duplicate volume
    :param duplicate_iops: The IOPS per GB for the duplicate volume
    :param duplicate_tier_level: Tier level for the duplicate volume
    :param duplicate_snapshot_size: Snapshot space size for the duplicate
    :param hourly_billing_flag: Billing type, monthly (False) or
        hourly (True); defaults to monthly.
    :return: Returns a SoftLayer_Container_Product_Order_Receipt
    """
    # Properties that prepare_duplicate_order_object() needs from the
    # origin volume in order to build the duplicate order.
    origin_mask = ','.join([
        'id',
        'billingItem[location,hourlyFlag]',
        'snapshotCapacityGb',
        'storageType[keyName]',
        'capacityGb',
        'originalVolumeSize',
        'provisionedIops',
        'storageTierLevel',
        'staasVersion',
        'hasEncryptionAtRest',
    ])
    origin_volume = self.get_file_volume_details(origin_volume_id,
                                                 mask=origin_mask)
    order = storage_utils.prepare_duplicate_order_object(
        self, origin_volume, duplicate_iops, duplicate_tier_level,
        duplicate_size, duplicate_snapshot_size, 'file',
        hourly_billing_flag
    )
    if origin_snapshot_id is not None:
        order['duplicateOriginSnapshotId'] = origin_snapshot_id
    return self.client.call('Product_Order', 'placeOrder', order)
def convert_invalid_url(url):
"""Convert invalid url with adding extra 'http://' schema into it
:param url:
:return:
"""
regex_valid_url = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return url if regex_valid_url.match(url) else 'http://{}'.format(url) | Convert invalid url with adding extra 'http://' schema into it
:param url:
:return: | Below is the instruction that describes the task:
### Input:
Convert invalid url with adding extra 'http://' schema into it
:param url:
:return:
### Response:
def convert_invalid_url(url):
    """Return *url* unchanged if it already looks valid; otherwise
    prefix it with an ``http://`` scheme.

    :param url: URL string to normalize.
    :return: a URL string that carries a scheme.
    """
    # Django-style URL validator: scheme, host (domain / localhost /
    # dotted-quad IPv4), optional port, optional path or query.
    pattern = (
        r'^(?:http|ftp)s?://'                                  # scheme
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'                 # domain
        r'localhost|'                                          # localhost
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'                 # IPv4
        r'(?::\d+)?'                                           # port
        r'(?:/?|[/?]\S+)$'
    )
    if re.match(pattern, url, re.IGNORECASE):
        return url
    return 'http://{}'.format(url)
def ipaddress_field_data(field, **kwargs):
"""
Return random value for IPAddressField
>>> result = any_form_field(forms.IPAddressField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import ipv4_re
>>> import re
>>> re.match(ipv4_re, result) is not None
True
"""
choices = kwargs.get('choices')
if choices:
return random.choice(choices)
else:
nums = [str(xunit.any_int(min_value=0, max_value=255)) for _ in xrange(0, 4)]
return ".".join(nums) | Return random value for IPAddressField
>>> result = any_form_field(forms.IPAddressField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import ipv4_re
>>> import re
>>> re.match(ipv4_re, result) is not None
True | Below is the the instruction that describes the task:
### Input:
Return random value for IPAddressField
>>> result = any_form_field(forms.IPAddressField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import ipv4_re
>>> import re
>>> re.match(ipv4_re, result) is not None
True
### Response:
def ipaddress_field_data(field, **kwargs):
    """
    Return random value for IPAddressField
    >>> result = any_form_field(forms.IPAddressField())
    >>> type(result)
    <type 'str'>
    >>> from django.core.validators import ipv4_re
    >>> import re
    >>> re.match(ipv4_re, result) is not None
    True
    """
    choices = kwargs.get('choices')
    if choices:
        return random.choice(choices)
    # Build a dotted-quad IPv4 string from four random octets (0-255).
    # Bug fix: use ``range`` instead of the Python 2-only ``xrange`` so
    # the helper also runs on Python 3; for a fixed 4-element sequence
    # the laziness of xrange bought nothing anyway.
    nums = [str(xunit.any_int(min_value=0, max_value=255)) for _ in range(4)]
    return ".".join(nums)
def activate(self, prefix=None, backend=None):
"""
A decorator used to activate the mocker.
:param prefix:
:param backend: An instance of a storage backend.
"""
if isinstance(prefix, compat.string_types):
self.prefix = prefix
if isinstance(backend, RmoqStorageBackend):
self.backend = backend
def activate(func):
if isinstance(func, type):
return self._decorate_class(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
return activate | A decorator used to activate the mocker.
:param prefix:
:param backend: An instance of a storage backend. | Below is the the instruction that describes the task:
### Input:
A decorator used to activate the mocker.
:param prefix:
:param backend: An instance of a storage backend.
### Response:
def activate(self, prefix=None, backend=None):
    """
    A decorator factory used to activate the mocker.

    :param prefix: Cache-key prefix; applied only when given as a string.
    :param backend: An instance of a storage backend; applied only when
        it is an RmoqStorageBackend.
    :return: A decorator. Applied to a class it delegates to
        ``_decorate_class``; applied to a function it wraps the call in
        this mocker's context manager.
    """
    import functools

    if isinstance(prefix, compat.string_types):
        self.prefix = prefix
    if isinstance(backend, RmoqStorageBackend):
        self.backend = backend

    def activate(func):
        if isinstance(func, type):
            return self._decorate_class(func)

        # Bug fix: preserve the wrapped function's __name__/__doc__ etc.
        # The original wrapper renamed every decorated function to
        # "wrapper", breaking test discovery and introspection.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapper
    return activate
def get_additional_charge_by_identifier(self, recurring_billing_id):
"""
Query extra charge information of an invoice from its identifier.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
"""
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._get(self.url + fmt, headers=self.get_headers()) | Query extra charge information of an invoice from its identifier.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns: | Below is the the instruction that describes the task:
### Input:
Query extra charge information of an invoice from its identifier.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
### Response:
def get_additional_charge_by_identifier(self, recurring_billing_id):
    """
    Query extra charge information of an invoice from its identifier.

    Args:
        recurring_billing_id: Identifier of the additional charge.

    Returns:
        The response of the underlying ``client._get`` call.
    """
    endpoint = 'recurringBillItems/{}'.format(recurring_billing_id)
    return self.client._get(self.url + endpoint,
                            headers=self.get_headers())
def clean(cls, cnpj):
u"""
Retorna apenas os dígitos do CNPJ.
>>> CNPJ.clean('58.414.462/0001-35')
'58414462000135'
"""
if isinstance(cnpj, six.string_types):
cnpj = int(re.sub('[^0-9]', '', cnpj))
return '{0:014d}'.format(cnpj) | u"""
Retorna apenas os dígitos do CNPJ.
>>> CNPJ.clean('58.414.462/0001-35')
'58414462000135' | Below is the the instruction that describes the task:
### Input:
u"""
Retorna apenas os dígitos do CNPJ.
>>> CNPJ.clean('58.414.462/0001-35')
'58414462000135'
### Response:
def clean(cls, cnpj):
    u"""
    Return only the digits of a CNPJ, zero-padded to 14 characters.

    >>> CNPJ.clean('58.414.462/0001-35')
    '58414462000135'
    """
    if isinstance(cnpj, six.string_types):
        # Strip punctuation/formatting; keep digits only.
        digits = re.sub('[^0-9]', '', cnpj)
        cnpj = int(digits)
    return '{0:014d}'.format(cnpj)
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, str)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w | Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict). | Below is the the instruction that describes the task:
### Input:
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
### Response:
def _validate_where(w):
    """
    Validate that the where statement is of the right type.

    The type may either be String, Expr, or list-like of Exprs.

    Parameters
    ----------
    w : String term expression, Expr, or list-like of Exprs.

    Returns
    -------
    where : The original where clause if the check was successful.

    Raises
    ------
    TypeError : An invalid data type was passed in for w (e.g. dict).
    """
    is_acceptable = isinstance(w, (Expr, str)) or is_list_like(w)
    if is_acceptable:
        return w
    raise TypeError("where must be passed as a string, Expr, "
                    "or list-like of Exprs")
def pull_requests(self):
'''
Looks for any of the following pull request formats in the description field:
pr12345, pr 2345, PR2345, PR 2345
'''
pr_numbers = re.findall(r"[pP][rR]\s?[0-9]+", self.description)
pr_numbers += re.findall(re.compile("pull\s?request\s?[0-9]+", re.IGNORECASE), self.description)
# Remove Duplicates
pr_numbers = [re.sub('[^0-9]','', p) for p in pr_numbers]
return pr_numbers | Looks for any of the following pull request formats in the description field:
pr12345, pr 2345, PR2345, PR 2345 | Below is the the instruction that describes the task:
### Input:
Looks for any of the following pull request formats in the description field:
pr12345, pr 2345, PR2345, PR 2345
### Response:
def pull_requests(self):
    '''
    Extract pull request numbers referenced in the description field.

    Recognizes any of the following formats:
    pr12345, pr 2345, PR2345, PR 2345, "pull request 2345"
    (the long form is matched case-insensitively).

    :return: list of PR numbers as digit strings, in order of
        appearance. Duplicates are NOT removed (the original "Remove
        Duplicates" comment was inaccurate; the step below only strips
        the non-digit characters from each match).
    '''
    # Bug fix: use raw strings for the regex patterns; "\s" inside a
    # plain string literal is an invalid escape sequence and raises a
    # DeprecationWarning/SyntaxWarning on modern Pythons.
    pr_numbers = re.findall(r"[pP][rR]\s?[0-9]+", self.description)
    pr_numbers += re.findall(
        re.compile(r"pull\s?request\s?[0-9]+", re.IGNORECASE),
        self.description)
    # Keep only the digits of each match.
    return [re.sub('[^0-9]', '', p) for p in pr_numbers]
def tarfile_extract(fileobj, dest_path):
"""Extract a tarfile described by a file object to a specified path.
Args:
fileobj (file): File object wrapping the target tarfile.
dest_path (str): Path to extract the contents of the tarfile to.
"""
# Though this method doesn't fit cleanly into the TarPartition object,
# tarballs are only ever extracted for partitions so the logic jives
# for the most part.
tar = tarfile.open(mode='r|', fileobj=fileobj,
bufsize=pipebuf.PIPE_BUF_BYTES)
# canonicalize dest_path so the prefix check below works
dest_path = os.path.realpath(dest_path)
# list of files that need fsyncing
extracted_files = []
# Iterate through each member of the tarfile individually. We must
# approach it this way because we are dealing with a pipe and the
# getmembers() method will consume it before we extract any data.
for member in tar:
assert not member.name.startswith('/')
relpath = os.path.join(dest_path, member.name)
# Workaround issue with tar handling of symlink, see:
# https://bugs.python.org/issue12800
if member.issym():
target_path = os.path.join(dest_path, member.name)
try:
os.symlink(member.linkname, target_path)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(target_path)
os.symlink(member.linkname, target_path)
else:
raise
continue
if member.isreg() and member.size >= pipebuf.PIPE_BUF_BYTES:
cat_extract(tar, member, relpath)
else:
tar.extract(member, path=dest_path)
filename = os.path.realpath(relpath)
extracted_files.append(filename)
# avoid accumulating an unbounded list of strings which
# could be quite large for a large database
if len(extracted_files) > 1000:
_fsync_files(extracted_files)
del extracted_files[:]
tar.close()
_fsync_files(extracted_files) | Extract a tarfile described by a file object to a specified path.
Args:
fileobj (file): File object wrapping the target tarfile.
dest_path (str): Path to extract the contents of the tarfile to. | Below is the the instruction that describes the task:
### Input:
Extract a tarfile described by a file object to a specified path.
Args:
fileobj (file): File object wrapping the target tarfile.
dest_path (str): Path to extract the contents of the tarfile to.
### Response:
def tarfile_extract(fileobj, dest_path):
    """Extract a tarfile described by a file object to a specified path.

    The archive is opened as a non-seekable stream (``mode='r|'``), so
    members are processed strictly in order. Regular files and symlinks
    are written under *dest_path* and every extracted file is fsynced
    (in batches) so the data is durable when this returns.

    Args:
        fileobj (file): File object wrapping the target tarfile.
        dest_path (str): Path to extract the contents of the tarfile to.
    """
    # Though this method doesn't fit cleanly into the TarPartition object,
    # tarballs are only ever extracted for partitions so the logic jives
    # for the most part.
    tar = tarfile.open(mode='r|', fileobj=fileobj,
                       bufsize=pipebuf.PIPE_BUF_BYTES)
    # canonicalize dest_path so the prefix check below works
    dest_path = os.path.realpath(dest_path)
    # list of files that need fsyncing
    extracted_files = []
    # Iterate through each member of the tarfile individually. We must
    # approach it this way because we are dealing with a pipe and the
    # getmembers() method will consume it before we extract any data.
    for member in tar:
        # Reject absolute member names. NOTE(review): relative "../"
        # traversal is not checked here — presumably archives are
        # trusted; confirm before exposing to untrusted input.
        assert not member.name.startswith('/')
        relpath = os.path.join(dest_path, member.name)
        # Workaround issue with tar handling of symlink, see:
        # https://bugs.python.org/issue12800
        if member.issym():
            target_path = os.path.join(dest_path, member.name)
            try:
                os.symlink(member.linkname, target_path)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # A link/file already exists at the destination:
                    # replace it and retry once.
                    os.remove(target_path)
                    os.symlink(member.linkname, target_path)
                else:
                    raise
            continue
        if member.isreg() and member.size >= pipebuf.PIPE_BUF_BYTES:
            # Large regular files take the optimized copy path.
            cat_extract(tar, member, relpath)
        else:
            tar.extract(member, path=dest_path)
        filename = os.path.realpath(relpath)
        extracted_files.append(filename)
        # avoid accumulating an unbounded list of strings which
        # could be quite large for a large database
        if len(extracted_files) > 1000:
            _fsync_files(extracted_files)
            del extracted_files[:]
    tar.close()
    # Flush the final (partial) batch of files.
    _fsync_files(extracted_files)
def extract_db_info(self, obj, keys):
"""Extract metadata from serialized file"""
objl = self.convert_in(obj)
# FIXME: this is too complex
if isinstance(objl, self.__class__):
return objl.update_meta_info()
try:
with builtins.open(objl, mode='r') as fd:
state = json.load(fd)
except IOError as e:
raise e
result = super(BaseStructuredCalibration, self).extract_db_info(state, keys)
try:
minfo = state['meta_info']
result['mode'] = minfo['mode_name']
origin = minfo['origin']
date_obs = origin['date_obs']
except KeyError:
origin = {}
date_obs = "1970-01-01T00:00:00.00"
result['instrument'] = state['instrument']
result['uuid'] = state['uuid']
result['tags'] = state['tags']
result['type'] = state['type']
result['observation_date'] = conv.convert_date(date_obs)
result['origin'] = origin
return result | Extract metadata from serialized file | Below is the the instruction that describes the task:
### Input:
Extract metadata from serialized file
### Response:
def extract_db_info(self, obj, keys):
    """Extract metadata from a serialized calibration file.

    :param obj: something convertible (via ``convert_in``) to either an
        instance of this class — whose metadata is then refreshed and
        returned — or a path to a JSON-serialized calibration.
    :param keys: metadata keys forwarded to the superclass extractor.
    :return: dict of metadata fields describing the calibration.
    """
    objl = self.convert_in(obj)
    # FIXME: this is too complex
    if isinstance(objl, self.__class__):
        return objl.update_meta_info()
    # Cleanup: the original wrapped this in
    # ``except IOError as e: raise e``, a no-op re-raise that only
    # obscured the traceback; let IOError propagate naturally.
    with builtins.open(objl, mode='r') as fd:
        state = json.load(fd)
    result = super(BaseStructuredCalibration, self).extract_db_info(state, keys)
    try:
        minfo = state['meta_info']
        result['mode'] = minfo['mode_name']
        origin = minfo['origin']
        date_obs = origin['date_obs']
    except KeyError:
        # Files without meta_info/origin fall back to the epoch date.
        origin = {}
        date_obs = "1970-01-01T00:00:00.00"
    result['instrument'] = state['instrument']
    result['uuid'] = state['uuid']
    result['tags'] = state['tags']
    result['type'] = state['type']
    result['observation_date'] = conv.convert_date(date_obs)
    result['origin'] = origin
    return result
def simulate(self):
""" Runs the particle simulation. Simulates one time step, dt, of the particle motion.
Calculates the force between each pair of particles and updates particles' motion accordingly
"""
# Main simulation loop
for i in range(self.iterations):
for a in self.particles:
if a.fixed:
continue
ftot = vector(0, 0, 0) # total force acting on particle a
for b in self.particles:
if a.negligible and b.negligible or a == b:
continue
ab = a.pos - b.pos
ftot += ((K_COULOMB * a.charge * b.charge) / mag2(ab)) * versor(ab)
a.vel += ftot / a.mass * self.dt # update velocity and position of a
a.pos += a.vel * self.dt
a.vtk_actor.pos(a.pos)
if vp:
vp.show(zoom=1.2)
vp.camera.Azimuth(0.1) | Runs the particle simulation. Simulates one time step, dt, of the particle motion.
Calculates the force between each pair of particles and updates particles' motion accordingly | Below is the the instruction that describes the task:
### Input:
Runs the particle simulation. Simulates one time step, dt, of the particle motion.
Calculates the force between each pair of particles and updates particles' motion accordingly
### Response:
def simulate(self):
    """Run the particle simulation.

    Advances the system ``self.iterations`` steps of size ``self.dt``.
    Each step computes the pairwise Coulomb force on every non-fixed
    particle and integrates velocity and position with an explicit
    Euler update, then refreshes the particle's VTK actor.

    NOTE(review): force computation is O(n^2) per step, and positions
    are updated in place while iterating — presumably acceptable for
    this visualization demo; confirm if accuracy matters.
    """
    # Main simulation loop
    for i in range(self.iterations):
        for a in self.particles:
            if a.fixed:
                # Fixed particles exert force on others but never move.
                continue
            ftot = vector(0, 0, 0)  # total force acting on particle a
            for b in self.particles:
                # Skip self-interaction and negligible-negligible pairs.
                if a.negligible and b.negligible or a == b:
                    continue
                ab = a.pos - b.pos
                # Coulomb's law: F = k*q1*q2/|ab|^2 along the b->a axis.
                ftot += ((K_COULOMB * a.charge * b.charge) / mag2(ab)) * versor(ab)
            a.vel += ftot / a.mass * self.dt  # update velocity and position of a
            a.pos += a.vel * self.dt
            a.vtk_actor.pos(a.pos)
        if vp:
            # Render the frame and slowly orbit the camera.
            vp.show(zoom=1.2)
            vp.camera.Azimuth(0.1)
def getAnalysisRequestTemplates(self):
"""
This functions builds a list of tuples with the object AnalysisRequestTemplates' uids and names.
:returns: A list of tuples where the first value of the tuple is the AnalysisRequestTemplate name and the
second one is the AnalysisRequestTemplate UID. --> [(ART.title),(ART.UID),...]
"""
l = []
art_uids = self.ar_templates
# I have to get the catalog in this way because I can't do it with 'self'...
pc = getToolByName(api.portal.get(), 'uid_catalog')
for art_uid in art_uids:
art_obj = pc(UID=art_uid)
if len(art_obj) != 0:
l.append((art_obj[0].Title, art_uid))
return l | This functions builds a list of tuples with the object AnalysisRequestTemplates' uids and names.
:returns: A list of tuples where the first value of the tuple is the AnalysisRequestTemplate name and the
second one is the AnalysisRequestTemplate UID. --> [(ART.title),(ART.UID),...] | Below is the the instruction that describes the task:
### Input:
This functions builds a list of tuples with the object AnalysisRequestTemplates' uids and names.
:returns: A list of tuples where the first value of the tuple is the AnalysisRequestTemplate name and the
second one is the AnalysisRequestTemplate UID. --> [(ART.title),(ART.UID),...]
### Response:
def getAnalysisRequestTemplates(self):
    """Build (title, UID) tuples for this object's AnalysisRequestTemplates.

    :returns: A list of tuples where the first value is the
        AnalysisRequestTemplate title and the second one its UID, i.e.
        [(ART.title, ART.UID), ...]
    """
    templates = []
    # I have to get the catalog in this way because I can't do it with 'self'...
    uid_catalog = getToolByName(api.portal.get(), 'uid_catalog')
    for template_uid in self.ar_templates:
        brains = uid_catalog(UID=template_uid)
        if brains:
            templates.append((brains[0].Title, template_uid))
    return templates
def create(self, name, *args, **kwargs):
"""
Subclasses need to implement the _create_body() method to return a dict
that will be used for the API request body.
For cases where no response is returned from the API on creation, pass
`return_none=True` so that the _create method doesn't expect one.
For cases where you do not want the _create method to attempt to parse
the response, but instead have it returned directly, pass
`return_raw=True`.
For cases where the API returns information in the response and not the
response_body, pass `return_response=True`.
"""
return_none = kwargs.pop("return_none", False)
return_raw = kwargs.pop("return_raw", False)
return_response = kwargs.pop("return_response", False)
body = self._create_body(name, *args, **kwargs)
return self._create("/%s" % self.uri_base, body,
return_none=return_none, return_raw=return_raw,
return_response=return_response) | Subclasses need to implement the _create_body() method to return a dict
that will be used for the API request body.
For cases where no response is returned from the API on creation, pass
`return_none=True` so that the _create method doesn't expect one.
For cases where you do not want the _create method to attempt to parse
the response, but instead have it returned directly, pass
`return_raw=True`.
For cases where the API returns information in the response and not the
response_body, pass `return_response=True`. | Below is the the instruction that describes the task:
### Input:
Subclasses need to implement the _create_body() method to return a dict
that will be used for the API request body.
For cases where no response is returned from the API on creation, pass
`return_none=True` so that the _create method doesn't expect one.
For cases where you do not want the _create method to attempt to parse
the response, but instead have it returned directly, pass
`return_raw=True`.
For cases where the API returns information in the response and not the
response_body, pass `return_response=True`.
### Response:
def create(self, name, *args, **kwargs):
    """
    Create a new resource and return it according to the requested
    response handling.

    Subclasses need to implement the _create_body() method to return a
    dict that will be used for the API request body.

    For cases where no response is returned from the API on creation,
    pass `return_none=True` so that the _create method doesn't expect
    one. For cases where you do not want the _create method to attempt
    to parse the response, but instead have it returned directly, pass
    `return_raw=True`. For cases where the API returns information in
    the response and not the response_body, pass `return_response=True`.
    """
    # Pop the response-handling flags first so they are not forwarded to
    # _create_body() as resource attributes.
    flags = {
        key: kwargs.pop(key, False)
        for key in ("return_none", "return_raw", "return_response")
    }
    body = self._create_body(name, *args, **kwargs)
    return self._create("/%s" % self.uri_base, body, **flags)
def fixup(p, data_bits, ptr_bits, bits_set):
"""
Flip back the pointer qubits that were previously flipped indicated by
the flags `bits_set`.
"""
for i in range(ptr_bits):
if 0 != bits_set & (1 << i):
p.inst(X(data_bits + i)) | Flip back the pointer qubits that were previously flipped indicated by
the flags `bits_set`. | Below is the the instruction that describes the task:
### Input:
Flip back the pointer qubits that were previously flipped indicated by
the flags `bits_set`.
### Response:
def fixup(p, data_bits, ptr_bits, bits_set):
    """
    Flip back the pointer qubits that were previously flipped, as
    indicated by the flags in `bits_set`.
    """
    for bit in range(ptr_bits):
        mask = 1 << bit
        if bits_set & mask:
            p.inst(X(data_bits + bit))
def stream_gzip_decompress_lines(stream):
"""
Uncompress a gzip stream into lines of text.
Parameters
----------
Generator of chunks of gzip compressed text.
Returns
-------
Generator of uncompressed lines.
"""
dec = zlib.decompressobj(zlib.MAX_WBITS | 16)
previous = ""
for compressed_chunk in stream:
chunk = dec.decompress(compressed_chunk).decode()
if chunk:
lines = (previous + chunk).split("\n")
previous = lines.pop()
for line in lines:
yield line
yield previous | Uncompress a gzip stream into lines of text.
Parameters
----------
Generator of chunks of gzip compressed text.
Returns
-------
Generator of uncompressed lines. | Below is the the instruction that describes the task:
### Input:
Uncompress a gzip stream into lines of text.
Parameters
----------
Generator of chunks of gzip compressed text.
Returns
-------
Generator of uncompressed lines.
### Response:
def stream_gzip_decompress_lines(stream):
    """
    Uncompress a gzip stream into lines of text.

    Parameters
    ----------
    stream : iterable of bytes
        Generator of chunks of gzip compressed text.

    Yields
    ------
    str
        Uncompressed lines (without trailing newlines). The final
        segment after the last newline — possibly empty — is also
        yielded, matching the original behavior.
    """
    dec = zlib.decompressobj(zlib.MAX_WBITS | 16)
    previous = ""
    for compressed_chunk in stream:
        chunk = dec.decompress(compressed_chunk).decode()
        if chunk:
            lines = (previous + chunk).split("\n")
            # Hold back the trailing partial line until more data arrives.
            previous = lines.pop()
            for line in lines:
                yield line
    # Bug fix: flush the decompressor at end-of-stream. Without this,
    # any data still buffered inside zlib would be silently dropped.
    tail = dec.flush().decode()
    if tail:
        lines = (previous + tail).split("\n")
        previous = lines.pop()
        for line in lines:
            yield line
    yield previous
def safe_encode(s):
"""Safely decodes a binary string to unicode"""
if isinstance(s, unicode):
return s.encode(defenc)
elif isinstance(s, bytes):
return s
elif s is not None:
raise TypeError('Expected bytes or text, but got %r' % (s,)) | Safely decodes a binary string to unicode | Below is the the instruction that describes the task:
### Input:
Safely decodes a binary string to unicode
### Response:
def safe_encode(s):
    """Safely encode a text string to bytes using the default encoding.

    Bytes input is returned unchanged; ``None`` falls through every
    branch and is implicitly returned as ``None``. Any other type
    raises TypeError. (The original docstring said "decodes ... to
    unicode", which was backwards: this function produces bytes.)
    """
    if isinstance(s, unicode):
        return s.encode(defenc)
    elif isinstance(s, bytes):
        return s
    elif s is not None:
        raise TypeError('Expected bytes or text, but got %r' % (s,))
def print_version(wrapper):
""" Prints the server version and license information."""
scanner_name = wrapper.get_scanner_name()
server_version = wrapper.get_server_version()
print("OSP Server for {0} version {1}".format(scanner_name, server_version))
protocol_version = wrapper.get_protocol_version()
print("OSP Version: {0}".format(protocol_version))
daemon_name = wrapper.get_daemon_name()
daemon_version = wrapper.get_daemon_version()
print("Using: {0} {1}".format(daemon_name, daemon_version))
print("Copyright (C) 2014, 2015 Greenbone Networks GmbH\n"
"License GPLv2+: GNU GPL version 2 or later\n"
"This is free software: you are free to change"
" and redistribute it.\n"
"There is NO WARRANTY, to the extent permitted by law.") | Prints the server version and license information. | Below is the the instruction that describes the task:
### Input:
Prints the server version and license information.
### Response:
def print_version(wrapper):
    """Print the server version and license information to stdout."""
    print("OSP Server for {0} version {1}".format(
        wrapper.get_scanner_name(), wrapper.get_server_version()))
    print("OSP Version: {0}".format(wrapper.get_protocol_version()))
    print("Using: {0} {1}".format(
        wrapper.get_daemon_name(), wrapper.get_daemon_version()))
    print("Copyright (C) 2014, 2015 Greenbone Networks GmbH\n"
          "License GPLv2+: GNU GPL version 2 or later\n"
          "This is free software: you are free to change"
          " and redistribute it.\n"
          "There is NO WARRANTY, to the extent permitted by law.")
def add_configurations(self, experiments):
"""Chains generator given experiment specifications.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
"""
experiment_list = convert_to_experiment_list(experiments)
for experiment in experiment_list:
self._trial_generator = itertools.chain(
self._trial_generator,
self._generate_trials(experiment.spec, experiment.name)) | Chains generator given experiment specifications.
Arguments:
        experiments (Experiment | list | dict): Experiments to run. | Below is the instruction that describes the task:
### Input:
Chains generator given experiment specifications.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
### Response:
def add_configurations(self, experiments):
"""Chains generator given experiment specifications.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
"""
experiment_list = convert_to_experiment_list(experiments)
for experiment in experiment_list:
self._trial_generator = itertools.chain(
self._trial_generator,
self._generate_trials(experiment.spec, experiment.name)) |
def update_edges(self, elev_fn, dem_proc):
"""
After finishing a calculation, this will update the neighbors and the
todo for that tile
"""
interp = self.build_interpolator(dem_proc)
self.update_edge_todo(elev_fn, dem_proc)
self.set_neighbor_data(elev_fn, dem_proc, interp) | After finishing a calculation, this will update the neighbors and the
    todo for that tile | Below is the instruction that describes the task:
### Input:
After finishing a calculation, this will update the neighbors and the
todo for that tile
### Response:
def update_edges(self, elev_fn, dem_proc):
"""
After finishing a calculation, this will update the neighbors and the
todo for that tile
"""
interp = self.build_interpolator(dem_proc)
self.update_edge_todo(elev_fn, dem_proc)
self.set_neighbor_data(elev_fn, dem_proc, interp) |
def stats_compute(self, *args, **kwargs):
"""
Simply loop over the internal dictionary and
echo the list size at each key (i.e. the number
of files).
"""
totalElements = 0
totalKeys = 0
totalSize = 0
l_stats = []
d_report = {}
for k, v in sorted(self.d_inputTreeCallback.items(),
key = lambda kv: (kv[1]['diskUsage_raw']),
reverse = self.b_statsReverse):
str_report = "files: %5d; raw size: %12d; human size: %8s; %s" % (\
len(self.d_inputTree[k]),
self.d_inputTreeCallback[k]['diskUsage_raw'],
self.d_inputTreeCallback[k]['diskUsage_human'],
k)
d_report = {
'files': len(self.d_inputTree[k]),
'diskUsage_raw': self.d_inputTreeCallback[k]['diskUsage_raw'],
'diskUsage_human': self.d_inputTreeCallback[k]['diskUsage_human'],
'path': k
}
self.dp.qprint(str_report, level = 1)
l_stats.append(d_report)
totalElements += len(v)
totalKeys += 1
totalSize += self.d_inputTreeCallback[k]['diskUsage_raw']
str_totalSize_human = self.sizeof_fmt(totalSize)
return {
'status': True,
'dirs': totalKeys,
'files': totalElements,
'totalSize': totalSize,
'totalSize_human': str_totalSize_human,
'l_stats': l_stats,
'runTime': other.toc()
} | Simply loop over the internal dictionary and
echo the list size at each key (i.e. the number
    of files). | Below is the instruction that describes the task:
### Input:
Simply loop over the internal dictionary and
echo the list size at each key (i.e. the number
of files).
### Response:
def stats_compute(self, *args, **kwargs):
"""
Simply loop over the internal dictionary and
echo the list size at each key (i.e. the number
of files).
"""
totalElements = 0
totalKeys = 0
totalSize = 0
l_stats = []
d_report = {}
for k, v in sorted(self.d_inputTreeCallback.items(),
key = lambda kv: (kv[1]['diskUsage_raw']),
reverse = self.b_statsReverse):
str_report = "files: %5d; raw size: %12d; human size: %8s; %s" % (\
len(self.d_inputTree[k]),
self.d_inputTreeCallback[k]['diskUsage_raw'],
self.d_inputTreeCallback[k]['diskUsage_human'],
k)
d_report = {
'files': len(self.d_inputTree[k]),
'diskUsage_raw': self.d_inputTreeCallback[k]['diskUsage_raw'],
'diskUsage_human': self.d_inputTreeCallback[k]['diskUsage_human'],
'path': k
}
self.dp.qprint(str_report, level = 1)
l_stats.append(d_report)
totalElements += len(v)
totalKeys += 1
totalSize += self.d_inputTreeCallback[k]['diskUsage_raw']
str_totalSize_human = self.sizeof_fmt(totalSize)
return {
'status': True,
'dirs': totalKeys,
'files': totalElements,
'totalSize': totalSize,
'totalSize_human': str_totalSize_human,
'l_stats': l_stats,
'runTime': other.toc()
} |
def drop():
"""
Drop the current table if it exists
"""
# Ensure the connection is up
_State.connection()
_State.table.drop(checkfirst=True)
_State.metadata.remove(_State.table)
_State.table = None
    _State.new_transaction() | Drop the current table if it exists | Below is the instruction that describes the task:
### Input:
Drop the current table if it exists
### Response:
def drop():
"""
Drop the current table if it exists
"""
# Ensure the connection is up
_State.connection()
_State.table.drop(checkfirst=True)
_State.metadata.remove(_State.table)
_State.table = None
_State.new_transaction() |
def _extract_dependencies_by_root(cls, result):
"""
Only extracts the transitive dependencies for the given coursier resolve.
Note the "dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
"""
flat_result = defaultdict(list)
for artifact in result['dependencies']:
flat_result[artifact['coord']].extend(artifact['dependencies'])
return flat_result | Only extracts the transitive dependencies for the given coursier resolve.
Note the "dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
    :return: a simplified view with the top artifact as the roots. | Below is the instruction that describes the task:
### Input:
Only extracts the transitive dependencies for the given coursier resolve.
Note the "dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
### Response:
def _extract_dependencies_by_root(cls, result):
"""
Only extracts the transitive dependencies for the given coursier resolve.
Note the "dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
"""
flat_result = defaultdict(list)
for artifact in result['dependencies']:
flat_result[artifact['coord']].extend(artifact['dependencies'])
return flat_result |
def draw_lines_svg_layer(df_endpoints, layer_name, layer_number=1):
'''
Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
contains one line per row in the input :data:`df_endpoints` table.
'''
# Note that `svgwrite.Drawing` requires a filepath to be specified during
# construction, *but* nothing is actually written to the path unless one of
# the `save*` methods is called.
#
# In this function, we do *not* call any of the `save*` methods. Instead,
# we use the `write` method to write to an in-memory file-like object.
dwg = svgwrite.Drawing('should_not_exist.svg', profile='tiny', debug=False)
dwg.attribs['width'] = df_endpoints[['x_source', 'x_target']].values.max()
dwg.attribs['height'] = df_endpoints[['y_source', 'y_target']].values.max()
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
coord_columns = ['x_source', 'y_source', 'x_target', 'y_target']
line_layer = dwg.g(id='layer%d' % layer_number,
**{'inkscape:label': layer_name,
'inkscape:groupmode': 'layer'})
for i, (x1, y1, x2, y2) in df_endpoints[coord_columns].iterrows():
line_i = dwg.line((x1, y1), (x2, y2), id='line%d' % i,
style='stroke:#000000; stroke-width:0.1;')
line_layer.add(line_i)
dwg.add(line_layer)
output = StringIO.StringIO()
dwg.write(output)
# Rewind file.
output.seek(0)
return output | Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
        contains one line per row in the input :data:`df_endpoints` table. | Below is the instruction that describes the task:
### Input:
Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
contains one line per row in the input :data:`df_endpoints` table.
### Response:
def draw_lines_svg_layer(df_endpoints, layer_name, layer_number=1):
'''
Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
contains one line per row in the input :data:`df_endpoints` table.
'''
# Note that `svgwrite.Drawing` requires a filepath to be specified during
# construction, *but* nothing is actually written to the path unless one of
# the `save*` methods is called.
#
# In this function, we do *not* call any of the `save*` methods. Instead,
# we use the `write` method to write to an in-memory file-like object.
dwg = svgwrite.Drawing('should_not_exist.svg', profile='tiny', debug=False)
dwg.attribs['width'] = df_endpoints[['x_source', 'x_target']].values.max()
dwg.attribs['height'] = df_endpoints[['y_source', 'y_target']].values.max()
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
coord_columns = ['x_source', 'y_source', 'x_target', 'y_target']
line_layer = dwg.g(id='layer%d' % layer_number,
**{'inkscape:label': layer_name,
'inkscape:groupmode': 'layer'})
for i, (x1, y1, x2, y2) in df_endpoints[coord_columns].iterrows():
line_i = dwg.line((x1, y1), (x2, y2), id='line%d' % i,
style='stroke:#000000; stroke-width:0.1;')
line_layer.add(line_i)
dwg.add(line_layer)
output = StringIO.StringIO()
dwg.write(output)
# Rewind file.
output.seek(0)
return output |
def result_cached(task_id, wait=0, broker=None):
"""
Return the result from the cache backend
"""
if not broker:
broker = get_broker()
start = time()
while True:
r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
if r:
return SignedPackage.loads(r)['result']
if (time() - start) * 1000 >= wait >= 0:
break
        sleep(0.01) | Return the result from the cache backend | Below is the instruction that describes the task:
### Input:
Return the result from the cache backend
### Response:
def result_cached(task_id, wait=0, broker=None):
"""
Return the result from the cache backend
"""
if not broker:
broker = get_broker()
start = time()
while True:
r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
if r:
return SignedPackage.loads(r)['result']
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01) |
def finder(package):
"""
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
"""
if package in _finder_cache:
result = _finder_cache[package]
else:
if package not in sys.modules:
__import__(package)
module = sys.modules[package]
path = getattr(module, '__path__', None)
if path is None:
raise DistlibException('You cannot get a finder for a module, '
'only for a package')
loader = getattr(module, '__loader__', None)
finder_maker = _finder_registry.get(type(loader))
if finder_maker is None:
raise DistlibException('Unable to locate finder for %r' % package)
result = finder_maker(module)
_finder_cache[package] = result
return result | Return a resource finder for a package.
:param package: The name of the package.
    :return: A :class:`ResourceFinder` instance for the package. | Below is the instruction that describes the task:
### Input:
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
### Response:
def finder(package):
"""
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
"""
if package in _finder_cache:
result = _finder_cache[package]
else:
if package not in sys.modules:
__import__(package)
module = sys.modules[package]
path = getattr(module, '__path__', None)
if path is None:
raise DistlibException('You cannot get a finder for a module, '
'only for a package')
loader = getattr(module, '__loader__', None)
finder_maker = _finder_registry.get(type(loader))
if finder_maker is None:
raise DistlibException('Unable to locate finder for %r' % package)
result = finder_maker(module)
_finder_cache[package] = result
return result |
def build_arch(self, arch):
'''Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
'''
Recipe.build_arch(self, arch)
self.build_cython_components(arch)
self.install_python_package(arch) | Build any cython components, then install the Python module by
    calling setup.py install with the target Python dir. | Below is the instruction that describes the task:
### Input:
Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
### Response:
def build_arch(self, arch):
'''Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
'''
Recipe.build_arch(self, arch)
self.build_cython_components(arch)
self.install_python_package(arch) |
def run(args):
"""
%prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
%prog run < commands.list # run a list of commands
"""
p = OptionParser(run.__doc__)
p.set_grid_opts()
p.set_params(prog="grid")
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
sep = ":::"
if sep in args:
sepidx = args.index(sep)
filenames = args[sepidx + 1:]
args = args[:sepidx]
if not filenames:
filenames = [""]
else:
filenames = sys.stdin if not sys.stdin.isatty() else [""]
cmd = " ".join(args)
cmds = [] if filenames else [(cmd, None)]
for i, filename in enumerate(filenames):
filename = filename.strip()
noextname = filename.rsplit(".", 1)[0]
prefix, basename = op.split(filename)
basenoextname = basename.rsplit(".", 1)[0]
basefirstname = basename.split(".")[0]
firstname = op.join(prefix, basefirstname)
ncmd = cmd
if "{" in ncmd:
ncmd = ncmd.replace("{}", filename)
else:
ncmd += " " + filename
ncmd = ncmd.replace("{.}", noextname)
ncmd = ncmd.replace("{_}", firstname)
ncmd = ncmd.replace("{/}", basename)
ncmd = ncmd.replace("{/.}", basenoextname)
ncmd = ncmd.replace("{/_}", basefirstname)
ncmd = ncmd.replace("{#}", str(i))
outfile = None
if ">" in ncmd:
ncmd, outfile = ncmd.split(">", 1)
ncmd, outfile = ncmd.strip(), outfile.strip()
ncmd = ncmd.strip()
cmds.append((ncmd, outfile))
for ncmd, outfile in cmds:
p = GridProcess(ncmd, outfile=outfile, extra_opts=opts.extra, grid_opts=opts)
p.start() | %prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
    %prog run < commands.list # run a list of commands | Below is the instruction that describes the task:
### Input:
%prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
%prog run < commands.list # run a list of commands
### Response:
def run(args):
"""
%prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
%prog run < commands.list # run a list of commands
"""
p = OptionParser(run.__doc__)
p.set_grid_opts()
p.set_params(prog="grid")
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
sep = ":::"
if sep in args:
sepidx = args.index(sep)
filenames = args[sepidx + 1:]
args = args[:sepidx]
if not filenames:
filenames = [""]
else:
filenames = sys.stdin if not sys.stdin.isatty() else [""]
cmd = " ".join(args)
cmds = [] if filenames else [(cmd, None)]
for i, filename in enumerate(filenames):
filename = filename.strip()
noextname = filename.rsplit(".", 1)[0]
prefix, basename = op.split(filename)
basenoextname = basename.rsplit(".", 1)[0]
basefirstname = basename.split(".")[0]
firstname = op.join(prefix, basefirstname)
ncmd = cmd
if "{" in ncmd:
ncmd = ncmd.replace("{}", filename)
else:
ncmd += " " + filename
ncmd = ncmd.replace("{.}", noextname)
ncmd = ncmd.replace("{_}", firstname)
ncmd = ncmd.replace("{/}", basename)
ncmd = ncmd.replace("{/.}", basenoextname)
ncmd = ncmd.replace("{/_}", basefirstname)
ncmd = ncmd.replace("{#}", str(i))
outfile = None
if ">" in ncmd:
ncmd, outfile = ncmd.split(">", 1)
ncmd, outfile = ncmd.strip(), outfile.strip()
ncmd = ncmd.strip()
cmds.append((ncmd, outfile))
for ncmd, outfile in cmds:
p = GridProcess(ncmd, outfile=outfile, extra_opts=opts.extra, grid_opts=opts)
p.start() |
def health(self, indices=None, level="cluster", wait_for_status=None,
wait_for_relocating_shards=None, timeout=30):
"""
Check the current :ref:`cluster health <es-guide-reference-api-admin-cluster-health>`.
Request Parameters
The cluster health API accepts the following request parameters:
:param level: Can be one of cluster, indices or shards. Controls the
details level of the health information returned.
Defaults to *cluster*.
:param wait_for_status: One of green, yellow or red. Will wait (until
the timeout provided) until the status of the
cluster changes to the one provided.
By default, will not wait for any status.
:param wait_for_relocating_shards: A number controlling to how many
relocating shards to wait for.
Usually will be 0 to indicate to
wait till all relocation have
happened. Defaults to not to wait.
:param timeout: A time based parameter controlling how long to wait
if one of the wait_for_XXX are provided.
Defaults to 30s.
"""
if indices:
path = make_path("_cluster", "health", ",".join(indices))
else:
path = make_path("_cluster", "health")
mapping = {}
if level != "cluster":
if level not in ["cluster", "indices", "shards"]:
raise ValueError("Invalid level: %s" % level)
mapping['level'] = level
if wait_for_status:
if wait_for_status not in ["green", "yellow", "red"]:
raise ValueError("Invalid wait_for_status: %s" % wait_for_status)
mapping['wait_for_status'] = wait_for_status
mapping['timeout'] = "%ds" % timeout
return self.conn._send_request('GET', path, params=mapping) | Check the current :ref:`cluster health <es-guide-reference-api-admin-cluster-health>`.
Request Parameters
The cluster health API accepts the following request parameters:
:param level: Can be one of cluster, indices or shards. Controls the
details level of the health information returned.
Defaults to *cluster*.
:param wait_for_status: One of green, yellow or red. Will wait (until
the timeout provided) until the status of the
cluster changes to the one provided.
By default, will not wait for any status.
:param wait_for_relocating_shards: A number controlling to how many
relocating shards to wait for.
Usually will be 0 to indicate to
wait till all relocation have
happened. Defaults to not to wait.
:param timeout: A time based parameter controlling how long to wait
if one of the wait_for_XXX are provided.
                    Defaults to 30s. | Below is the instruction that describes the task:
### Input:
Check the current :ref:`cluster health <es-guide-reference-api-admin-cluster-health>`.
Request Parameters
The cluster health API accepts the following request parameters:
:param level: Can be one of cluster, indices or shards. Controls the
details level of the health information returned.
Defaults to *cluster*.
:param wait_for_status: One of green, yellow or red. Will wait (until
the timeout provided) until the status of the
cluster changes to the one provided.
By default, will not wait for any status.
:param wait_for_relocating_shards: A number controlling to how many
relocating shards to wait for.
Usually will be 0 to indicate to
wait till all relocation have
happened. Defaults to not to wait.
:param timeout: A time based parameter controlling how long to wait
if one of the wait_for_XXX are provided.
Defaults to 30s.
### Response:
def health(self, indices=None, level="cluster", wait_for_status=None,
wait_for_relocating_shards=None, timeout=30):
"""
Check the current :ref:`cluster health <es-guide-reference-api-admin-cluster-health>`.
Request Parameters
The cluster health API accepts the following request parameters:
:param level: Can be one of cluster, indices or shards. Controls the
details level of the health information returned.
Defaults to *cluster*.
:param wait_for_status: One of green, yellow or red. Will wait (until
the timeout provided) until the status of the
cluster changes to the one provided.
By default, will not wait for any status.
:param wait_for_relocating_shards: A number controlling to how many
relocating shards to wait for.
Usually will be 0 to indicate to
wait till all relocation have
happened. Defaults to not to wait.
:param timeout: A time based parameter controlling how long to wait
if one of the wait_for_XXX are provided.
Defaults to 30s.
"""
if indices:
path = make_path("_cluster", "health", ",".join(indices))
else:
path = make_path("_cluster", "health")
mapping = {}
if level != "cluster":
if level not in ["cluster", "indices", "shards"]:
raise ValueError("Invalid level: %s" % level)
mapping['level'] = level
if wait_for_status:
if wait_for_status not in ["green", "yellow", "red"]:
raise ValueError("Invalid wait_for_status: %s" % wait_for_status)
mapping['wait_for_status'] = wait_for_status
mapping['timeout'] = "%ds" % timeout
return self.conn._send_request('GET', path, params=mapping) |
def main():
"""Entry point for the script"""
desc = 'Converts between geodetic, modified apex, quasi-dipole and MLT'
parser = argparse.ArgumentParser(description=desc, prog='apexpy')
parser.add_argument('source', metavar='SOURCE',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert from {geo, apex, qd, mlt}')
parser.add_argument('dest', metavar='DEST',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert to {geo, apex, qd, mlt}')
desc = 'YYYY[MM[DD[HHMMSS]]] date/time for IGRF coefficients, time part '
desc += 'required for MLT calculations'
parser.add_argument('date', metavar='DATE', help=desc)
parser.add_argument('--height', dest='height', default=0, metavar='HEIGHT',
type=float, help='height for conversion')
parser.add_argument('--refh', dest='refh', metavar='REFH', type=float,
default=0,
help='reference height for modified apex coordinates')
parser.add_argument('-i', '--input', dest='file_in', metavar='FILE_IN',
type=argparse.FileType('r'), default=STDIN,
help='input file (stdin if none specified)')
parser.add_argument('-o', '--output', dest='file_out', metavar='FILE_OUT',
type=argparse.FileType('wb'), default=STDOUT,
help='output file (stdout if none specified)')
args = parser.parse_args()
array = np.loadtxt(args.file_in, ndmin=2)
if 'mlt' in [args.source, args.dest] and len(args.date) < 14:
desc = 'full date/time YYYYMMDDHHMMSS required for MLT calculations'
raise ValueError(desc)
if 9 <= len(args.date) <= 13:
desc = 'full date/time must be given as YYYYMMDDHHMMSS, not ' + \
'YYYYMMDDHHMMSS'[:len(args.date)]
raise ValueError(desc)
datetime = dt.datetime.strptime(args.date,
'%Y%m%d%H%M%S'[:len(args.date)-2])
A = apexpy.Apex(date=datetime, refh=args.refh)
lats, lons = A.convert(array[:, 0], array[:, 1], args.source, args.dest,
args.height, datetime=datetime)
    np.savetxt(args.file_out, np.column_stack((lats, lons)), fmt='%.8f') | Entry point for the script | Below is the instruction that describes the task:
### Input:
Entry point for the script
### Response:
def main():
"""Entry point for the script"""
desc = 'Converts between geodetic, modified apex, quasi-dipole and MLT'
parser = argparse.ArgumentParser(description=desc, prog='apexpy')
parser.add_argument('source', metavar='SOURCE',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert from {geo, apex, qd, mlt}')
parser.add_argument('dest', metavar='DEST',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert to {geo, apex, qd, mlt}')
desc = 'YYYY[MM[DD[HHMMSS]]] date/time for IGRF coefficients, time part '
desc += 'required for MLT calculations'
parser.add_argument('date', metavar='DATE', help=desc)
parser.add_argument('--height', dest='height', default=0, metavar='HEIGHT',
type=float, help='height for conversion')
parser.add_argument('--refh', dest='refh', metavar='REFH', type=float,
default=0,
help='reference height for modified apex coordinates')
parser.add_argument('-i', '--input', dest='file_in', metavar='FILE_IN',
type=argparse.FileType('r'), default=STDIN,
help='input file (stdin if none specified)')
parser.add_argument('-o', '--output', dest='file_out', metavar='FILE_OUT',
type=argparse.FileType('wb'), default=STDOUT,
help='output file (stdout if none specified)')
args = parser.parse_args()
array = np.loadtxt(args.file_in, ndmin=2)
if 'mlt' in [args.source, args.dest] and len(args.date) < 14:
desc = 'full date/time YYYYMMDDHHMMSS required for MLT calculations'
raise ValueError(desc)
if 9 <= len(args.date) <= 13:
desc = 'full date/time must be given as YYYYMMDDHHMMSS, not ' + \
'YYYYMMDDHHMMSS'[:len(args.date)]
raise ValueError(desc)
datetime = dt.datetime.strptime(args.date,
'%Y%m%d%H%M%S'[:len(args.date)-2])
A = apexpy.Apex(date=datetime, refh=args.refh)
lats, lons = A.convert(array[:, 0], array[:, 1], args.source, args.dest,
args.height, datetime=datetime)
np.savetxt(args.file_out, np.column_stack((lats, lons)), fmt='%.8f') |
def elevations(self):
"""Retrieves elevations/offsets from the output response
Returns:
elevations/offsets (namedtuple): A named tuple of list of
elevations/offsets
"""
resources = self.get_resource()
elevations = namedtuple('elevations_data', 'elevations')
try:
return [elevations(resource['elevations'])
for resource in resources]
except KeyError:
return [elevations(resource['offsets'])
for resource in resources]
except TypeError:
try:
if isinstance(resources['ElevationData']['Elevations'], dict):
return elevations(resources['ElevationData']['Elevations'])
except KeyError:
offsets = namedtuple('offsets_data', 'offsets')
try:
if isinstance(resources['SeaLevelData']['Offsets'], dict):
return offsets(resources['SeaLevelData']['Offsets'])
except KeyError:
print(KeyError) | Retrieves elevations/offsets from the output response
Returns:
elevations/offsets (namedtuple): A named tuple of list of
        elevations/offsets | Below is the instruction that describes the task:
### Input:
Retrieves elevations/offsets from the output response
Returns:
elevations/offsets (namedtuple): A named tuple of list of
elevations/offsets
### Response:
def elevations(self):
"""Retrieves elevations/offsets from the output response
Returns:
elevations/offsets (namedtuple): A named tuple of list of
elevations/offsets
"""
resources = self.get_resource()
elevations = namedtuple('elevations_data', 'elevations')
try:
return [elevations(resource['elevations'])
for resource in resources]
except KeyError:
return [elevations(resource['offsets'])
for resource in resources]
except TypeError:
try:
if isinstance(resources['ElevationData']['Elevations'], dict):
return elevations(resources['ElevationData']['Elevations'])
except KeyError:
offsets = namedtuple('offsets_data', 'offsets')
try:
if isinstance(resources['SeaLevelData']['Offsets'], dict):
return offsets(resources['SeaLevelData']['Offsets'])
except KeyError:
print(KeyError) |
def read_ipx(self, length):
"""Read Internetwork Packet Exchange.
Structure of IPX header [RFC 1132]:
Octets Bits Name Description
0 0 ipx.cksum Checksum
2 16 ipx.len Packet Length (header includes)
4 32 ipx.count Transport Control (hop count)
5 40 ipx.type Packet Type
6 48 ipx.dst Destination Address
18 144 ipx.src Source Address
"""
if length is None:
length = len(self)
_csum = self._read_fileng(2)
_tlen = self._read_unpack(2)
_ctrl = self._read_unpack(1)
_type = self._read_unpack(1)
_dsta = self._read_ipx_address()
_srca = self._read_ipx_address()
ipx = dict(
chksum=_csum,
len=_tlen,
count=_ctrl,
type=TYPE.get(_type),
dst=_dsta,
src=_srca,
)
proto = ipx['type']
length = ipx['len'] - 30
ipx['packet'] = self._read_packet(header=30, payload=length)
return self._decode_next_layer(ipx, proto, length) | Read Internetwork Packet Exchange.
Structure of IPX header [RFC 1132]:
Octets Bits Name Description
0 0 ipx.cksum Checksum
2 16 ipx.len Packet Length (header includes)
4 32 ipx.count Transport Control (hop count)
5 40 ipx.type Packet Type
6 48 ipx.dst Destination Address
18 144 ipx.src Source Address | Below is the the instruction that describes the task:
### Input:
Read Internetwork Packet Exchange.
Structure of IPX header [RFC 1132]:
Octets Bits Name Description
0 0 ipx.cksum Checksum
2 16 ipx.len Packet Length (header includes)
4 32 ipx.count Transport Control (hop count)
5 40 ipx.type Packet Type
6 48 ipx.dst Destination Address
18 144 ipx.src Source Address
### Response:
def read_ipx(self, length):
"""Read Internetwork Packet Exchange.
Structure of IPX header [RFC 1132]:
Octets Bits Name Description
0 0 ipx.cksum Checksum
2 16 ipx.len Packet Length (header includes)
4 32 ipx.count Transport Control (hop count)
5 40 ipx.type Packet Type
6 48 ipx.dst Destination Address
18 144 ipx.src Source Address
"""
if length is None:
length = len(self)
_csum = self._read_fileng(2)
_tlen = self._read_unpack(2)
_ctrl = self._read_unpack(1)
_type = self._read_unpack(1)
_dsta = self._read_ipx_address()
_srca = self._read_ipx_address()
ipx = dict(
chksum=_csum,
len=_tlen,
count=_ctrl,
type=TYPE.get(_type),
dst=_dsta,
src=_srca,
)
proto = ipx['type']
length = ipx['len'] - 30
ipx['packet'] = self._read_packet(header=30, payload=length)
return self._decode_next_layer(ipx, proto, length) |
def get(self, remote, local=None, preserve_mode=True):
"""
Download a file from the current connection to the local filesystem.
:param str remote:
Remote file to download.
May be absolute, or relative to the remote working directory.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``get("~/tmp/archive.tgz")``,
say ``get("tmp/archive.tgz")``.
:param local:
Local path to store downloaded file in, or a file-like object.
**If None or another 'falsey'/empty value is given** (the default),
the remote file is downloaded to the current working directory (as
seen by `os.getcwd`) using its remote filename.
**If a string is given**, it should be a path to a local directory
or file and is subject to similar behavior as that seen by common
Unix utilities or OpenSSH's ``sftp`` or ``scp`` tools.
For example, if the local path is a directory, the remote path's
base filename will be added onto it (so ``get('foo/bar/file.txt',
'/tmp/')`` would result in creation or overwriting of
``/tmp/file.txt``).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, a ``local``
path containing non-leaf directories which do not exist, will
typically result in an `OSError`.
**If a file-like object is given**, the contents of the remote file
are simply written into it.
:param bool preserve_mode:
Whether to `os.chmod` the local file so it matches the remote
file's mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0
"""
# TODO: how does this API change if we want to implement
# remote-to-remote file transfer? (Is that even realistic?)
# TODO: handle v1's string interpolation bits, especially the default
# one, or at least think about how that would work re: split between
# single and multiple server targets.
# TODO: callback support
# TODO: how best to allow changing the behavior/semantics of
# remote/local (e.g. users might want 'safer' behavior that complains
# instead of overwriting existing files) - this likely ties into the
# "how to handle recursive/rsync" and "how to handle scp" questions
# Massage remote path
if not remote:
raise ValueError("Remote path must not be empty!")
orig_remote = remote
remote = posixpath.join(
self.sftp.getcwd() or self.sftp.normalize("."), remote
)
# Massage local path:
# - handle file-ness
# - if path, fill with remote name if empty, & make absolute
orig_local = local
is_file_like = hasattr(local, "write") and callable(local.write)
if not local:
local = posixpath.basename(remote)
if not is_file_like:
local = os.path.abspath(local)
# Run Paramiko-level .get() (side-effects only. womp.)
# TODO: push some of the path handling into Paramiko; it should be
# responsible for dealing with path cleaning etc.
# TODO: probably preserve warning message from v1 when overwriting
# existing files. Use logging for that obviously.
#
# If local appears to be a file-like object, use sftp.getfo, not get
if is_file_like:
self.sftp.getfo(remotepath=remote, fl=local)
else:
self.sftp.get(remotepath=remote, localpath=local)
# Set mode to same as remote end
# TODO: Push this down into SFTPClient sometime (requires backwards
# incompat release.)
if preserve_mode:
remote_mode = self.sftp.stat(remote).st_mode
mode = stat.S_IMODE(remote_mode)
os.chmod(local, mode)
# Return something useful
return Result(
orig_remote=orig_remote,
remote=remote,
orig_local=orig_local,
local=local,
connection=self.connection,
) | Download a file from the current connection to the local filesystem.
:param str remote:
Remote file to download.
May be absolute, or relative to the remote working directory.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``get("~/tmp/archive.tgz")``,
say ``get("tmp/archive.tgz")``.
:param local:
Local path to store downloaded file in, or a file-like object.
**If None or another 'falsey'/empty value is given** (the default),
the remote file is downloaded to the current working directory (as
seen by `os.getcwd`) using its remote filename.
**If a string is given**, it should be a path to a local directory
or file and is subject to similar behavior as that seen by common
Unix utilities or OpenSSH's ``sftp`` or ``scp`` tools.
For example, if the local path is a directory, the remote path's
base filename will be added onto it (so ``get('foo/bar/file.txt',
'/tmp/')`` would result in creation or overwriting of
``/tmp/file.txt``).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, a ``local``
path containing non-leaf directories which do not exist, will
typically result in an `OSError`.
**If a file-like object is given**, the contents of the remote file
are simply written into it.
:param bool preserve_mode:
Whether to `os.chmod` the local file so it matches the remote
file's mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0 | Below is the the instruction that describes the task:
### Input:
Download a file from the current connection to the local filesystem.
:param str remote:
Remote file to download.
May be absolute, or relative to the remote working directory.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``get("~/tmp/archive.tgz")``,
say ``get("tmp/archive.tgz")``.
:param local:
Local path to store downloaded file in, or a file-like object.
**If None or another 'falsey'/empty value is given** (the default),
the remote file is downloaded to the current working directory (as
seen by `os.getcwd`) using its remote filename.
**If a string is given**, it should be a path to a local directory
or file and is subject to similar behavior as that seen by common
Unix utilities or OpenSSH's ``sftp`` or ``scp`` tools.
For example, if the local path is a directory, the remote path's
base filename will be added onto it (so ``get('foo/bar/file.txt',
'/tmp/')`` would result in creation or overwriting of
``/tmp/file.txt``).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, a ``local``
path containing non-leaf directories which do not exist, will
typically result in an `OSError`.
**If a file-like object is given**, the contents of the remote file
are simply written into it.
:param bool preserve_mode:
Whether to `os.chmod` the local file so it matches the remote
file's mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0
### Response:
def get(self, remote, local=None, preserve_mode=True):
"""
Download a file from the current connection to the local filesystem.
:param str remote:
Remote file to download.
May be absolute, or relative to the remote working directory.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``get("~/tmp/archive.tgz")``,
say ``get("tmp/archive.tgz")``.
:param local:
Local path to store downloaded file in, or a file-like object.
**If None or another 'falsey'/empty value is given** (the default),
the remote file is downloaded to the current working directory (as
seen by `os.getcwd`) using its remote filename.
**If a string is given**, it should be a path to a local directory
or file and is subject to similar behavior as that seen by common
Unix utilities or OpenSSH's ``sftp`` or ``scp`` tools.
For example, if the local path is a directory, the remote path's
base filename will be added onto it (so ``get('foo/bar/file.txt',
'/tmp/')`` would result in creation or overwriting of
``/tmp/file.txt``).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, a ``local``
path containing non-leaf directories which do not exist, will
typically result in an `OSError`.
**If a file-like object is given**, the contents of the remote file
are simply written into it.
:param bool preserve_mode:
Whether to `os.chmod` the local file so it matches the remote
file's mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0
"""
# TODO: how does this API change if we want to implement
# remote-to-remote file transfer? (Is that even realistic?)
# TODO: handle v1's string interpolation bits, especially the default
# one, or at least think about how that would work re: split between
# single and multiple server targets.
# TODO: callback support
# TODO: how best to allow changing the behavior/semantics of
# remote/local (e.g. users might want 'safer' behavior that complains
# instead of overwriting existing files) - this likely ties into the
# "how to handle recursive/rsync" and "how to handle scp" questions
# Massage remote path
if not remote:
raise ValueError("Remote path must not be empty!")
orig_remote = remote
remote = posixpath.join(
self.sftp.getcwd() or self.sftp.normalize("."), remote
)
# Massage local path:
# - handle file-ness
# - if path, fill with remote name if empty, & make absolute
orig_local = local
is_file_like = hasattr(local, "write") and callable(local.write)
if not local:
local = posixpath.basename(remote)
if not is_file_like:
local = os.path.abspath(local)
# Run Paramiko-level .get() (side-effects only. womp.)
# TODO: push some of the path handling into Paramiko; it should be
# responsible for dealing with path cleaning etc.
# TODO: probably preserve warning message from v1 when overwriting
# existing files. Use logging for that obviously.
#
# If local appears to be a file-like object, use sftp.getfo, not get
if is_file_like:
self.sftp.getfo(remotepath=remote, fl=local)
else:
self.sftp.get(remotepath=remote, localpath=local)
# Set mode to same as remote end
# TODO: Push this down into SFTPClient sometime (requires backwards
# incompat release.)
if preserve_mode:
remote_mode = self.sftp.stat(remote).st_mode
mode = stat.S_IMODE(remote_mode)
os.chmod(local, mode)
# Return something useful
return Result(
orig_remote=orig_remote,
remote=remote,
orig_local=orig_local,
local=local,
connection=self.connection,
) |
def get_object_list_json(request):
"""gmn.listObjects(session[, fromDate][, toDate][, formatId]
[, identifier][, replicaStatus][, start=0][, count=1000]
[, f=sysmetaField ...]) → ObjectListJson
GMN specific API for fast retrieval of object sysmeta elements.
"""
# TODO: Add to documentation
if "f" in request.GET:
field_list = request.GET.getlist("f")
else:
field_list = None
result_dict = d1_gmn.app.views.util.query_object_list(request, "object_list_json")
result_dict["fields"] = field_list
result_dict["objects"] = d1_gmn.app.sysmeta_extract.extract_values_query(
result_dict["query"], field_list
)
del result_dict["query"]
return django.http.HttpResponse(
d1_common.util.serialize_to_normalized_pretty_json(result_dict),
d1_common.const.CONTENT_TYPE_JSON,
) | gmn.listObjects(session[, fromDate][, toDate][, formatId]
[, identifier][, replicaStatus][, start=0][, count=1000]
[, f=sysmetaField ...]) → ObjectListJson
GMN specific API for fast retrieval of object sysmeta elements. | Below is the the instruction that describes the task:
### Input:
gmn.listObjects(session[, fromDate][, toDate][, formatId]
[, identifier][, replicaStatus][, start=0][, count=1000]
[, f=sysmetaField ...]) → ObjectListJson
GMN specific API for fast retrieval of object sysmeta elements.
### Response:
def get_object_list_json(request):
"""gmn.listObjects(session[, fromDate][, toDate][, formatId]
[, identifier][, replicaStatus][, start=0][, count=1000]
[, f=sysmetaField ...]) → ObjectListJson
GMN specific API for fast retrieval of object sysmeta elements.
"""
# TODO: Add to documentation
if "f" in request.GET:
field_list = request.GET.getlist("f")
else:
field_list = None
result_dict = d1_gmn.app.views.util.query_object_list(request, "object_list_json")
result_dict["fields"] = field_list
result_dict["objects"] = d1_gmn.app.sysmeta_extract.extract_values_query(
result_dict["query"], field_list
)
del result_dict["query"]
return django.http.HttpResponse(
d1_common.util.serialize_to_normalized_pretty_json(result_dict),
d1_common.const.CONTENT_TYPE_JSON,
) |
def keyring_update(cid, **kwargs):
"""
Downloads and saves public key(s).
Downloads public key(s) from the TVM service for a given client id and saves in the storage
for further usage.
Note, that the cocaine-runtime automatically refreshes its keyring after this call.
"""
ctx = Context(**kwargs)
ctx.execute_action('keyring:update', **{
'tvm': ctx.repo.create_secure_service('tvm'),
'cid': cid,
}) | Downloads and saves public key(s).
Downloads public key(s) from the TVM service for a given client id and saves in the storage
for further usage.
Note, that the cocaine-runtime automatically refreshes its keyring after this call. | Below is the the instruction that describes the task:
### Input:
Downloads and saves public key(s).
Downloads public key(s) from the TVM service for a given client id and saves in the storage
for further usage.
Note, that the cocaine-runtime automatically refreshes its keyring after this call.
### Response:
def keyring_update(cid, **kwargs):
"""
Downloads and saves public key(s).
Downloads public key(s) from the TVM service for a given client id and saves in the storage
for further usage.
Note, that the cocaine-runtime automatically refreshes its keyring after this call.
"""
ctx = Context(**kwargs)
ctx.execute_action('keyring:update', **{
'tvm': ctx.repo.create_secure_service('tvm'),
'cid': cid,
}) |
def slice_hidden(x, hidden_size, num_blocks):
"""Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
"""
batch_size, latent_dim, _ = common_layers.shape_list(x)
block_dim = hidden_size // num_blocks
x_sliced = tf.reshape(x,
shape=[batch_size, latent_dim, num_blocks, block_dim])
return x_sliced | Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim]. | Below is the the instruction that describes the task:
### Input:
Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
### Response:
def slice_hidden(x, hidden_size, num_blocks):
"""Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
"""
batch_size, latent_dim, _ = common_layers.shape_list(x)
block_dim = hidden_size // num_blocks
x_sliced = tf.reshape(x,
shape=[batch_size, latent_dim, num_blocks, block_dim])
return x_sliced |
def find_reachable(rdf, res):
"""Return the set of reachable resources starting from the given resource,
excluding the seen set of resources.
Note that the seen set is modified
in-place to reflect the ongoing traversal.
"""
starttime = time.time()
# This is almost a non-recursive breadth-first search algorithm, but a set
# is used as the "open" set instead of a FIFO, and an arbitrary element of
# the set is searched. This is slightly faster than DFS (using a stack)
# and much faster than BFS (using a FIFO).
seen = set() # used as the "closed" set
to_search = set([res]) # used as the "open" set
while len(to_search) > 0:
res = to_search.pop()
if res in seen:
continue
seen.add(res)
# res as subject
for p, o in rdf.predicate_objects(res):
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as predicate
for s, o in rdf.subject_objects(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as object
for s, p in rdf.subject_predicates(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
endtime = time.time()
logging.debug("find_reachable took %f seconds", (endtime - starttime))
return seen | Return the set of reachable resources starting from the given resource,
excluding the seen set of resources.
Note that the seen set is modified
in-place to reflect the ongoing traversal. | Below is the the instruction that describes the task:
### Input:
Return the set of reachable resources starting from the given resource,
excluding the seen set of resources.
Note that the seen set is modified
in-place to reflect the ongoing traversal.
### Response:
def find_reachable(rdf, res):
"""Return the set of reachable resources starting from the given resource,
excluding the seen set of resources.
Note that the seen set is modified
in-place to reflect the ongoing traversal.
"""
starttime = time.time()
# This is almost a non-recursive breadth-first search algorithm, but a set
# is used as the "open" set instead of a FIFO, and an arbitrary element of
# the set is searched. This is slightly faster than DFS (using a stack)
# and much faster than BFS (using a FIFO).
seen = set() # used as the "closed" set
to_search = set([res]) # used as the "open" set
while len(to_search) > 0:
res = to_search.pop()
if res in seen:
continue
seen.add(res)
# res as subject
for p, o in rdf.predicate_objects(res):
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as predicate
for s, o in rdf.subject_objects(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as object
for s, p in rdf.subject_predicates(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
endtime = time.time()
logging.debug("find_reachable took %f seconds", (endtime - starttime))
return seen |
def clear_symbols(self, index):
"""Clears all symbols begining with the index to the end of table"""
try:
del self.table[index:]
except Exception:
self.error()
self.table_len = len(self.table) | Clears all symbols begining with the index to the end of table | Below is the the instruction that describes the task:
### Input:
Clears all symbols begining with the index to the end of table
### Response:
def clear_symbols(self, index):
"""Clears all symbols begining with the index to the end of table"""
try:
del self.table[index:]
except Exception:
self.error()
self.table_len = len(self.table) |
def close(self):
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, '')
self._next = None
self.closed = True | Close the stream. | Below is the the instruction that describes the task:
### Input:
Close the stream.
### Response:
def close(self):
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, '')
self._next = None
self.closed = True |
def parallel_plot(df, color=None, palette=None):
"""From a dataframe create a parallel coordinate plot
"""
npts = df.shape[0]
ndims = len(df.columns)
if color is None:
color = np.ones(npts)
if palette is None:
palette = ['#ff0000']
cmap = LinearColorMapper(high=color.min(),
low=color.max(),
palette=palette)
data_source = ColumnDataSource(dict(
xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(),
ys=np.array((df-df.min())/(df.max()-df.min())).tolist(),
color=color))
p = figure(x_range=(-1, ndims),
y_range=(0, 1),
width=1000,
tools="pan, box_zoom")
# Create x axis ticks from columns contained in dataframe
fixed_x_ticks = FixedTicker(
ticks=np.arange(ndims), minor_ticks=[])
formatter_x_ticks = FuncTickFormatter(
code="return columns[index]", args={"columns": df.columns})
p.xaxis.ticker = fixed_x_ticks
p.xaxis.formatter = formatter_x_ticks
p.yaxis.visible = False
p.y_range.start = 0
p.y_range.end = 1
p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis
p.xgrid.visible = False
p.ygrid.visible = False
# Create extra y axis for each dataframe column
tickformatter = BasicTickFormatter(precision=1)
for index, col in enumerate(df.columns):
start = df[col].min()
end = df[col].max()
bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start)
bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end)
p.extra_y_ranges.update(
{col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))})
fixedticks = FixedTicker(
ticks=np.linspace(start, end, 8), minor_ticks=[])
p.add_layout(LinearAxis(fixed_location=index, y_range_name=col,
ticker=fixedticks, formatter=tickformatter), 'right')
# create the data renderer ( MultiLine )
# specify selected and non selected style
non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5)
selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1)
parallel_renderer = p.multi_line(
xs="xs", ys="ys", source=data_source, **non_selected_line_style)
# Specify selection style
selected_lines = MultiLine(**selected_line_style)
# Specify non selection style
nonselected_lines = MultiLine(**non_selected_line_style)
parallel_renderer.selection_glyph = selected_lines
parallel_renderer.nonselection_glyph = nonselected_lines
p.y_range.start = p.y_range.bounds[0]
p.y_range.end = p.y_range.bounds[1]
rect_source = ColumnDataSource({
'x': [], 'y': [], 'width': [], 'height': []
})
# add rectangle selections
selection_renderer = p.rect(x='x', y='y', width='width', height='height',
source=rect_source,
fill_alpha=0.7, fill_color='#009933')
selection_tool = ParallelSelectionTool(
renderer_select=selection_renderer, renderer_data=parallel_renderer,
box_width=10)
# custom resets (reset only axes not selections)
reset_axes = ParallelResetTool()
# add tools and activate selection ones
p.add_tools(selection_tool, reset_axes)
p.toolbar.active_drag = selection_tool
return p | From a dataframe create a parallel coordinate plot | Below is the the instruction that describes the task:
### Input:
From a dataframe create a parallel coordinate plot
### Response:
def parallel_plot(df, color=None, palette=None):
"""From a dataframe create a parallel coordinate plot
"""
npts = df.shape[0]
ndims = len(df.columns)
if color is None:
color = np.ones(npts)
if palette is None:
palette = ['#ff0000']
cmap = LinearColorMapper(high=color.min(),
low=color.max(),
palette=palette)
data_source = ColumnDataSource(dict(
xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(),
ys=np.array((df-df.min())/(df.max()-df.min())).tolist(),
color=color))
p = figure(x_range=(-1, ndims),
y_range=(0, 1),
width=1000,
tools="pan, box_zoom")
# Create x axis ticks from columns contained in dataframe
fixed_x_ticks = FixedTicker(
ticks=np.arange(ndims), minor_ticks=[])
formatter_x_ticks = FuncTickFormatter(
code="return columns[index]", args={"columns": df.columns})
p.xaxis.ticker = fixed_x_ticks
p.xaxis.formatter = formatter_x_ticks
p.yaxis.visible = False
p.y_range.start = 0
p.y_range.end = 1
p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis
p.xgrid.visible = False
p.ygrid.visible = False
# Create extra y axis for each dataframe column
tickformatter = BasicTickFormatter(precision=1)
for index, col in enumerate(df.columns):
start = df[col].min()
end = df[col].max()
bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start)
bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end)
p.extra_y_ranges.update(
{col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))})
fixedticks = FixedTicker(
ticks=np.linspace(start, end, 8), minor_ticks=[])
p.add_layout(LinearAxis(fixed_location=index, y_range_name=col,
ticker=fixedticks, formatter=tickformatter), 'right')
# create the data renderer ( MultiLine )
# specify selected and non selected style
non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5)
selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1)
parallel_renderer = p.multi_line(
xs="xs", ys="ys", source=data_source, **non_selected_line_style)
# Specify selection style
selected_lines = MultiLine(**selected_line_style)
# Specify non selection style
nonselected_lines = MultiLine(**non_selected_line_style)
parallel_renderer.selection_glyph = selected_lines
parallel_renderer.nonselection_glyph = nonselected_lines
p.y_range.start = p.y_range.bounds[0]
p.y_range.end = p.y_range.bounds[1]
rect_source = ColumnDataSource({
'x': [], 'y': [], 'width': [], 'height': []
})
# add rectangle selections
selection_renderer = p.rect(x='x', y='y', width='width', height='height',
source=rect_source,
fill_alpha=0.7, fill_color='#009933')
selection_tool = ParallelSelectionTool(
renderer_select=selection_renderer, renderer_data=parallel_renderer,
box_width=10)
# custom resets (reset only axes not selections)
reset_axes = ParallelResetTool()
# add tools and activate selection ones
p.add_tools(selection_tool, reset_axes)
p.toolbar.active_drag = selection_tool
return p |
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
"""Selectable global pooling function with dynamic input kernel size
"""
if pool_type == 'avgmaxc':
x = torch.cat([
F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
], dim=1)
elif pool_type == 'avgmax':
x_avg = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
x = 0.5 * (x_avg + x_max)
elif pool_type == 'max':
x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
x = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
return x | Selectable global pooling function with dynamic input kernel size | Below is the the instruction that describes the task:
### Input:
Selectable global pooling function with dynamic input kernel size
### Response:
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
"""Selectable global pooling function with dynamic input kernel size
"""
if pool_type == 'avgmaxc':
x = torch.cat([
F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
], dim=1)
elif pool_type == 'avgmax':
x_avg = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
x = 0.5 * (x_avg + x_max)
elif pool_type == 'max':
x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
x = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
return x |
def base26int(s, _start=1 - ord('A')):
"""Return string ``s`` as ``int`` in bijective base26 notation.
>>> base26int('SPAM')
344799
"""
return sum((_start + ord(c)) * 26**i for i, c in enumerate(reversed(s))) | Return string ``s`` as ``int`` in bijective base26 notation.
>>> base26int('SPAM')
344799 | Below is the the instruction that describes the task:
### Input:
Return string ``s`` as ``int`` in bijective base26 notation.
>>> base26int('SPAM')
344799
### Response:
def base26int(s, _start=1 - ord('A')):
"""Return string ``s`` as ``int`` in bijective base26 notation.
>>> base26int('SPAM')
344799
"""
return sum((_start + ord(c)) * 26**i for i, c in enumerate(reversed(s))) |
def find_secondary_sources(
assignment_nodes,
sources,
lattice
):
"""
Sets the secondary_nodes attribute of each source in the sources list.
Args:
assignment_nodes([AssignmentNode])
sources([tuple])
lattice(Lattice): the lattice we're analysing.
"""
for source in sources:
source.secondary_nodes = find_assignments(assignment_nodes, source, lattice) | Sets the secondary_nodes attribute of each source in the sources list.
Args:
assignment_nodes([AssignmentNode])
sources([tuple])
lattice(Lattice): the lattice we're analysing. | Below is the the instruction that describes the task:
### Input:
Sets the secondary_nodes attribute of each source in the sources list.
Args:
assignment_nodes([AssignmentNode])
sources([tuple])
lattice(Lattice): the lattice we're analysing.
### Response:
def find_secondary_sources(
assignment_nodes,
sources,
lattice
):
"""
Sets the secondary_nodes attribute of each source in the sources list.
Args:
assignment_nodes([AssignmentNode])
sources([tuple])
lattice(Lattice): the lattice we're analysing.
"""
for source in sources:
source.secondary_nodes = find_assignments(assignment_nodes, source, lattice) |
def to_python(self, value):
"""
Strips any dodgy HTML tags from the input
"""
if value in self.empty_values:
try:
return self.empty_value
except AttributeError:
# CharField.empty_value was introduced in Django 1.11; in prior
# versions a unicode string was returned for empty values in
# all cases.
return u''
return bleach.clean(value, **self.bleach_options) | Strips any dodgy HTML tags from the input | Below is the the instruction that describes the task:
### Input:
Strips any dodgy HTML tags from the input
### Response:
def to_python(self, value):
"""
Strips any dodgy HTML tags from the input
"""
if value in self.empty_values:
try:
return self.empty_value
except AttributeError:
# CharField.empty_value was introduced in Django 1.11; in prior
# versions a unicode string was returned for empty values in
# all cases.
return u''
return bleach.clean(value, **self.bleach_options) |
def update_measure(self):
"""updated noise level measure using two fitness lists ``self.fit`` and
``self.fitre``, return ``self.noiseS, all_individual_measures``.
Assumes that `self.idx` contains the indices where the fitness
lists differ
"""
lam = len(self.fit)
idx = np.argsort(self.fit + self.fitre)
ranks = np.argsort(idx).reshape((2, lam))
rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
# compute rank change limits using both ranks[0] and ranks[1]
r = np.arange(1, 2 * lam) # 2 * lam - 2 elements
limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
self.theta * 50) +
Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
self.theta * 50))
for i in self.idx]
# compute measurement
# max: 1 rankchange in 2*lambda is always fine
s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda
self.noiseS += self.cum * (np.mean(s) - self.noiseS)
return self.noiseS, s | updated noise level measure using two fitness lists ``self.fit`` and
``self.fitre``, return ``self.noiseS, all_individual_measures``.
Assumes that `self.idx` contains the indices where the fitness
lists differ | Below is the the instruction that describes the task:
### Input:
updated noise level measure using two fitness lists ``self.fit`` and
``self.fitre``, return ``self.noiseS, all_individual_measures``.
Assumes that `self.idx` contains the indices where the fitness
lists differ
### Response:
def update_measure(self):
"""updated noise level measure using two fitness lists ``self.fit`` and
``self.fitre``, return ``self.noiseS, all_individual_measures``.
Assumes that `self.idx` contains the indices where the fitness
lists differ
"""
lam = len(self.fit)
idx = np.argsort(self.fit + self.fitre)
ranks = np.argsort(idx).reshape((2, lam))
rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
# compute rank change limits using both ranks[0] and ranks[1]
r = np.arange(1, 2 * lam) # 2 * lam - 2 elements
limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
self.theta * 50) +
Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
self.theta * 50))
for i in self.idx]
# compute measurement
# max: 1 rankchange in 2*lambda is always fine
s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda
self.noiseS += self.cum * (np.mean(s) - self.noiseS)
return self.noiseS, s |
def _nested_convert_to_tensor(struct, dtype=None, name=None):
"""Eagerly converts struct to Tensor, recursing upon failure."""
if dtype is not None or not tf.nest.is_nested(struct):
return tf.convert_to_tensor(struct, dtype=dtype)
if _maybe_convertible_to_tensor(struct):
try:
# Try converting the structure wholesale.
return tf.convert_to_tensor(value=struct, name=name)
except (ValueError, TypeError):
# Unfortunately Eager/Graph mode don't agree on the error type.
pass
# Try converting all of its children.
shallow_struct = _get_shallow_structure(struct)
return nest.map_structure_up_to(
shallow_struct, lambda s: _nested_convert_to_tensor(s, name=name), struct) | Eagerly converts struct to Tensor, recursing upon failure. | Below is the the instruction that describes the task:
### Input:
Eagerly converts struct to Tensor, recursing upon failure.
### Response:
def _nested_convert_to_tensor(struct, dtype=None, name=None):
"""Eagerly converts struct to Tensor, recursing upon failure."""
if dtype is not None or not tf.nest.is_nested(struct):
return tf.convert_to_tensor(struct, dtype=dtype)
if _maybe_convertible_to_tensor(struct):
try:
# Try converting the structure wholesale.
return tf.convert_to_tensor(value=struct, name=name)
except (ValueError, TypeError):
# Unfortunately Eager/Graph mode don't agree on the error type.
pass
# Try converting all of its children.
shallow_struct = _get_shallow_structure(struct)
return nest.map_structure_up_to(
shallow_struct, lambda s: _nested_convert_to_tensor(s, name=name), struct) |
def get_image(self, float_key="floats", to_chw=True):
"""
get image list from ImageFrame
"""
tensors = callBigDlFunc(self.bigdl_type,
"localImageFrameToImageTensor", self.value, float_key, to_chw)
return map(lambda tensor: tensor.to_ndarray(), tensors) | get image list from ImageFrame | Below is the the instruction that describes the task:
### Input:
get image list from ImageFrame
### Response:
def get_image(self, float_key="floats", to_chw=True):
"""
get image list from ImageFrame
"""
tensors = callBigDlFunc(self.bigdl_type,
"localImageFrameToImageTensor", self.value, float_key, to_chw)
return map(lambda tensor: tensor.to_ndarray(), tensors) |
def on_KeyPress(self,event):
'''To adjust the distance between pitch markers.'''
if event.GetKeyCode() == wx.WXK_UP:
self.dist10deg += 0.1
print('Dist per 10 deg: %.1f' % self.dist10deg)
elif event.GetKeyCode() == wx.WXK_DOWN:
self.dist10deg -= 0.1
if self.dist10deg <= 0:
self.dist10deg = 0.1
print('Dist per 10 deg: %.1f' % self.dist10deg)
# Toggle Widgets
elif event.GetKeyCode() == 49: # 1
widgets = [self.modeText,self.wpText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 50: # 2
widgets = [self.batOutRec,self.batInRec,self.voltsText,self.ampsText,self.batPerText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 51: # 3
widgets = [self.rollText,self.pitchText,self.yawText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 52: # 4
widgets = [self.airspeedText,self.altitudeText,self.climbRateText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 53: # 5
widgets = [self.altHistRect,self.altPlot,self.altMarker,self.altText2]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 54: # 6
widgets = [self.headingTri,self.headingText,self.headingNorthTri,self.headingNorthText,self.headingWPTri,self.headingWPText]
self.toggleWidgets(widgets)
# Update Matplotlib Plot
self.canvas.draw()
self.canvas.Refresh()
self.Refresh()
self.Update() | To adjust the distance between pitch markers. | Below is the the instruction that describes the task:
### Input:
To adjust the distance between pitch markers.
### Response:
def on_KeyPress(self,event):
'''To adjust the distance between pitch markers.'''
if event.GetKeyCode() == wx.WXK_UP:
self.dist10deg += 0.1
print('Dist per 10 deg: %.1f' % self.dist10deg)
elif event.GetKeyCode() == wx.WXK_DOWN:
self.dist10deg -= 0.1
if self.dist10deg <= 0:
self.dist10deg = 0.1
print('Dist per 10 deg: %.1f' % self.dist10deg)
# Toggle Widgets
elif event.GetKeyCode() == 49: # 1
widgets = [self.modeText,self.wpText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 50: # 2
widgets = [self.batOutRec,self.batInRec,self.voltsText,self.ampsText,self.batPerText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 51: # 3
widgets = [self.rollText,self.pitchText,self.yawText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 52: # 4
widgets = [self.airspeedText,self.altitudeText,self.climbRateText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 53: # 5
widgets = [self.altHistRect,self.altPlot,self.altMarker,self.altText2]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 54: # 6
widgets = [self.headingTri,self.headingText,self.headingNorthTri,self.headingNorthText,self.headingWPTri,self.headingWPText]
self.toggleWidgets(widgets)
# Update Matplotlib Plot
self.canvas.draw()
self.canvas.Refresh()
self.Refresh()
self.Update() |
def grow(self):
"Add another worker to the pool."
t = self.worker_factory(self)
t.start()
self._size += 1 | Add another worker to the pool. | Below is the the instruction that describes the task:
### Input:
Add another worker to the pool.
### Response:
def grow(self):
"Add another worker to the pool."
t = self.worker_factory(self)
t.start()
self._size += 1 |
def check_no_self_dependency(cls, dap):
'''Check that the package does not depend on itself.
Return a list of problems.'''
problems = list()
if 'package_name' in dap.meta and 'dependencies' in dap.meta:
dependencies = set()
for dependency in dap.meta['dependencies']:
if 'dependencies' in dap._badmeta and dependency in dap._badmeta['dependencies']:
continue
# No version specified
if not re.search(r'[<=>]', dependency):
dependencies.add(dependency)
# Version specified
for mark in ['==', '>=', '<=', '<', '>']:
dep = dependency.split(mark)
if len(dep) == 2:
dependencies.add(dep[0].strip())
break
if dap.meta['package_name'] in dependencies:
msg = 'Depends on dap with the same name as itself'
problems.append(DapProblem(msg))
return problems | Check that the package does not depend on itself.
Return a list of problems. | Below is the the instruction that describes the task:
### Input:
Check that the package does not depend on itself.
Return a list of problems.
### Response:
def check_no_self_dependency(cls, dap):
'''Check that the package does not depend on itself.
Return a list of problems.'''
problems = list()
if 'package_name' in dap.meta and 'dependencies' in dap.meta:
dependencies = set()
for dependency in dap.meta['dependencies']:
if 'dependencies' in dap._badmeta and dependency in dap._badmeta['dependencies']:
continue
# No version specified
if not re.search(r'[<=>]', dependency):
dependencies.add(dependency)
# Version specified
for mark in ['==', '>=', '<=', '<', '>']:
dep = dependency.split(mark)
if len(dep) == 2:
dependencies.add(dep[0].strip())
break
if dap.meta['package_name'] in dependencies:
msg = 'Depends on dap with the same name as itself'
problems.append(DapProblem(msg))
return problems |
def _get_rtl_context(self):
"""
get RtlNetlist context from signals
"""
for sig in chain(self._inputs, self._outputs):
if sig.ctx:
return sig.ctx
else:
# Param instances does not have context
continue
raise HwtSyntaxError(
"Statement does not have any signal in any context", self) | get RtlNetlist context from signals | Below is the the instruction that describes the task:
### Input:
get RtlNetlist context from signals
### Response:
def _get_rtl_context(self):
"""
get RtlNetlist context from signals
"""
for sig in chain(self._inputs, self._outputs):
if sig.ctx:
return sig.ctx
else:
# Param instances does not have context
continue
raise HwtSyntaxError(
"Statement does not have any signal in any context", self) |
def create_genome_size_dict(genome):
# type: (str) -> Dict[str,int]
"""Creates genome size dict from string containing data."""
size_file = get_genome_size_file(genome)
size_lines = open(size_file).readlines()
size_dict = {}
for line in size_lines:
genome, length = line.split()
size_dict[genome] = int(length)
return size_dict | Creates genome size dict from string containing data. | Below is the the instruction that describes the task:
### Input:
Creates genome size dict from string containing data.
### Response:
def create_genome_size_dict(genome):
# type: (str) -> Dict[str,int]
"""Creates genome size dict from string containing data."""
size_file = get_genome_size_file(genome)
size_lines = open(size_file).readlines()
size_dict = {}
for line in size_lines:
genome, length = line.split()
size_dict[genome] = int(length)
return size_dict |
def count_events (env, evtpath, filter):
"""TODO: this can probably be replaced with simply reading the file
ourselves!
"""
with env.slurp (argv=['dmstat', '%s%s[cols energy]' % (evtpath, filter)], linebreak=True) as s:
for etype, payload in s:
if etype != 'stdout':
continue
if b'good:' not in payload:
continue
return int (payload.split ()[-1])
raise Exception ('parsing of dmlist output failed') | TODO: this can probably be replaced with simply reading the file
ourselves! | Below is the the instruction that describes the task:
### Input:
TODO: this can probably be replaced with simply reading the file
ourselves!
### Response:
def count_events (env, evtpath, filter):
"""TODO: this can probably be replaced with simply reading the file
ourselves!
"""
with env.slurp (argv=['dmstat', '%s%s[cols energy]' % (evtpath, filter)], linebreak=True) as s:
for etype, payload in s:
if etype != 'stdout':
continue
if b'good:' not in payload:
continue
return int (payload.split ()[-1])
raise Exception ('parsing of dmlist output failed') |
def upsert(self):
""" create or update the jenkins job """
if not self.jenkins_host.has_job(self.name):
LOGGER.info("creating {0}...".format(self.name))
self.jenkins_host.create_job(self.name, self.config_xml)
else:
jenkins_job = self.jenkins_host[self.name]
LOGGER.info("updating {0}...".format(self.name))
jenkins_job.update_config(self.config_xml) | create or update the jenkins job | Below is the the instruction that describes the task:
### Input:
create or update the jenkins job
### Response:
def upsert(self):
""" create or update the jenkins job """
if not self.jenkins_host.has_job(self.name):
LOGGER.info("creating {0}...".format(self.name))
self.jenkins_host.create_job(self.name, self.config_xml)
else:
jenkins_job = self.jenkins_host[self.name]
LOGGER.info("updating {0}...".format(self.name))
jenkins_job.update_config(self.config_xml) |
def read_image_file(data_dir, image_ext, n):
"""Return a Tensor containing the patches
"""
def PIL2array(_img):
"""Convert PIL image type to numpy 2D array
"""
return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)
def find_files(_data_dir, _image_ext):
"""Return a list with the file names of the images containing the patches
"""
files = []
# find those files with the specified extension
for file_dir in os.listdir(_data_dir):
if file_dir.endswith(_image_ext):
files.append(os.path.join(_data_dir, file_dir))
return sorted(files) # sort files in ascend order to keep relations
patches = []
list_files = find_files(data_dir, image_ext)
for fpath in list_files:
img = Image.open(fpath)
for y in range(0, 1024, 64):
for x in range(0, 1024, 64):
patch = img.crop((x, y, x + 64, y + 64))
patches.append(PIL2array(patch))
return torch.ByteTensor(np.array(patches[:n])) | Return a Tensor containing the patches | Below is the the instruction that describes the task:
### Input:
Return a Tensor containing the patches
### Response:
def read_image_file(data_dir, image_ext, n):
"""Return a Tensor containing the patches
"""
def PIL2array(_img):
"""Convert PIL image type to numpy 2D array
"""
return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)
def find_files(_data_dir, _image_ext):
"""Return a list with the file names of the images containing the patches
"""
files = []
# find those files with the specified extension
for file_dir in os.listdir(_data_dir):
if file_dir.endswith(_image_ext):
files.append(os.path.join(_data_dir, file_dir))
return sorted(files) # sort files in ascend order to keep relations
patches = []
list_files = find_files(data_dir, image_ext)
for fpath in list_files:
img = Image.open(fpath)
for y in range(0, 1024, 64):
for x in range(0, 1024, 64):
patch = img.crop((x, y, x + 64, y + 64))
patches.append(PIL2array(patch))
return torch.ByteTensor(np.array(patches[:n])) |
def define(
self,
name: str,
default: Any = None,
type: type = None,
help: str = None,
metavar: str = None,
multiple: bool = False,
group: str = None,
callback: Callable[[Any], None] = None,
) -> None:
"""Defines a new command line option.
``type`` can be any of `str`, `int`, `float`, `bool`,
`~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
is given but a ``default`` is, ``type`` is the type of
``default``. Otherwise, ``type`` defaults to `str`.
If ``multiple`` is True, the option value is a list of ``type``
instead of an instance of ``type``.
``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help
message is formatted like::
--name=METAVAR help string
``group`` is used to group the defined options in logical
groups. By default, command line options are grouped by the
file in which they are defined.
Command line option names must be unique globally.
If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line
and file-based options::
define("config", type=str, help="path to config file",
callback=lambda path: parse_config_file(path, final=False))
With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden
by later flags.
"""
normalized = self._normalize_name(name)
if normalized in self._options:
raise Error(
"Option %r already defined in %s"
% (normalized, self._options[normalized].file_name)
)
frame = sys._getframe(0)
options_file = frame.f_code.co_filename
# Can be called directly, or through top level define() fn, in which
# case, step up above that frame to look for real caller.
if (
frame.f_back.f_code.co_filename == options_file
and frame.f_back.f_code.co_name == "define"
):
frame = frame.f_back
file_name = frame.f_back.f_code.co_filename
if file_name == options_file:
file_name = ""
if type is None:
if not multiple and default is not None:
type = default.__class__
else:
type = str
if group:
group_name = group # type: Optional[str]
else:
group_name = file_name
option = _Option(
name,
file_name=file_name,
default=default,
type=type,
help=help,
metavar=metavar,
multiple=multiple,
group_name=group_name,
callback=callback,
)
self._options[normalized] = option | Defines a new command line option.
``type`` can be any of `str`, `int`, `float`, `bool`,
`~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
is given but a ``default`` is, ``type`` is the type of
``default``. Otherwise, ``type`` defaults to `str`.
If ``multiple`` is True, the option value is a list of ``type``
instead of an instance of ``type``.
``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help
message is formatted like::
--name=METAVAR help string
``group`` is used to group the defined options in logical
groups. By default, command line options are grouped by the
file in which they are defined.
Command line option names must be unique globally.
If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line
and file-based options::
define("config", type=str, help="path to config file",
callback=lambda path: parse_config_file(path, final=False))
With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden
by later flags. | Below is the the instruction that describes the task:
### Input:
Defines a new command line option.
``type`` can be any of `str`, `int`, `float`, `bool`,
`~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
is given but a ``default`` is, ``type`` is the type of
``default``. Otherwise, ``type`` defaults to `str`.
If ``multiple`` is True, the option value is a list of ``type``
instead of an instance of ``type``.
``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help
message is formatted like::
--name=METAVAR help string
``group`` is used to group the defined options in logical
groups. By default, command line options are grouped by the
file in which they are defined.
Command line option names must be unique globally.
If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line
and file-based options::
define("config", type=str, help="path to config file",
callback=lambda path: parse_config_file(path, final=False))
With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden
by later flags.
### Response:
def define(
self,
name: str,
default: Any = None,
type: type = None,
help: str = None,
metavar: str = None,
multiple: bool = False,
group: str = None,
callback: Callable[[Any], None] = None,
) -> None:
"""Defines a new command line option.
``type`` can be any of `str`, `int`, `float`, `bool`,
`~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
is given but a ``default`` is, ``type`` is the type of
``default``. Otherwise, ``type`` defaults to `str`.
If ``multiple`` is True, the option value is a list of ``type``
instead of an instance of ``type``.
``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help
message is formatted like::
--name=METAVAR help string
``group`` is used to group the defined options in logical
groups. By default, command line options are grouped by the
file in which they are defined.
Command line option names must be unique globally.
If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line
and file-based options::
define("config", type=str, help="path to config file",
callback=lambda path: parse_config_file(path, final=False))
With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden
by later flags.
"""
normalized = self._normalize_name(name)
if normalized in self._options:
raise Error(
"Option %r already defined in %s"
% (normalized, self._options[normalized].file_name)
)
frame = sys._getframe(0)
options_file = frame.f_code.co_filename
# Can be called directly, or through top level define() fn, in which
# case, step up above that frame to look for real caller.
if (
frame.f_back.f_code.co_filename == options_file
and frame.f_back.f_code.co_name == "define"
):
frame = frame.f_back
file_name = frame.f_back.f_code.co_filename
if file_name == options_file:
file_name = ""
if type is None:
if not multiple and default is not None:
type = default.__class__
else:
type = str
if group:
group_name = group # type: Optional[str]
else:
group_name = file_name
option = _Option(
name,
file_name=file_name,
default=default,
type=type,
help=help,
metavar=metavar,
multiple=multiple,
group_name=group_name,
callback=callback,
)
self._options[normalized] = option |
def _decode_subject(self, subject):
"""Load data from a ASN.1 subject.
"""
self.common_names = []
subject_name = []
for rdnss in subject:
for rdns in rdnss:
rdnss_list = []
for nameval in rdns:
val_type = nameval.getComponentByName('type')
value = nameval.getComponentByName('value')
if val_type not in DN_OIDS:
logger.debug("OID {0} not supported".format(val_type))
continue
val_type = DN_OIDS[val_type]
value = der_decoder.decode(value,
asn1Spec = DirectoryString())[0]
value = value.getComponent()
try:
value = _decode_asn1_string(value)
except UnicodeError:
logger.debug("Cannot decode value: {0!r}".format(value))
continue
if val_type == u"commonName":
self.common_names.append(value)
rdnss_list.append((val_type, value))
subject_name.append(tuple(rdnss_list))
self.subject_name = tuple(subject_name) | Load data from a ASN.1 subject. | Below is the the instruction that describes the task:
### Input:
Load data from a ASN.1 subject.
### Response:
def _decode_subject(self, subject):
"""Load data from a ASN.1 subject.
"""
self.common_names = []
subject_name = []
for rdnss in subject:
for rdns in rdnss:
rdnss_list = []
for nameval in rdns:
val_type = nameval.getComponentByName('type')
value = nameval.getComponentByName('value')
if val_type not in DN_OIDS:
logger.debug("OID {0} not supported".format(val_type))
continue
val_type = DN_OIDS[val_type]
value = der_decoder.decode(value,
asn1Spec = DirectoryString())[0]
value = value.getComponent()
try:
value = _decode_asn1_string(value)
except UnicodeError:
logger.debug("Cannot decode value: {0!r}".format(value))
continue
if val_type == u"commonName":
self.common_names.append(value)
rdnss_list.append((val_type, value))
subject_name.append(tuple(rdnss_list))
self.subject_name = tuple(subject_name) |
def get_profile_model():
"""
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
"""
if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \
(not settings.AUTH_PROFILE_MODULE):
raise SiteProfileNotAvailable
try:
profile_mod = apps.get_model(*settings.AUTH_PROFILE_MODULE.rsplit('.', 1))
except LookupError:
profile_mod = None
if profile_mod is None:
raise SiteProfileNotAvailable
return profile_mod | Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile. | Below is the the instruction that describes the task:
### Input:
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
### Response:
def get_profile_model():
"""
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
"""
if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \
(not settings.AUTH_PROFILE_MODULE):
raise SiteProfileNotAvailable
try:
profile_mod = apps.get_model(*settings.AUTH_PROFILE_MODULE.rsplit('.', 1))
except LookupError:
profile_mod = None
if profile_mod is None:
raise SiteProfileNotAvailable
return profile_mod |
def getblock(self, blockhash, decode=False):
"""returns detail block info."""
if not decode:
decode = "false"
return self.req("getblock", [blockhash, decode])
else:
return self.req("getblock", [blockhash]) | returns detail block info. | Below is the the instruction that describes the task:
### Input:
returns detail block info.
### Response:
def getblock(self, blockhash, decode=False):
"""returns detail block info."""
if not decode:
decode = "false"
return self.req("getblock", [blockhash, decode])
else:
return self.req("getblock", [blockhash]) |
def build_album_art_full_uri(self, url):
"""Ensure an Album Art URI is an absolute URI.
Args:
url (str): the album art URI.
Returns:
str: An absolute URI.
"""
# Add on the full album art link, as the URI version
# does not include the ipaddress
if not url.startswith(('http:', 'https:')):
url = 'http://' + self.soco.ip_address + ':1400' + url
return url | Ensure an Album Art URI is an absolute URI.
Args:
url (str): the album art URI.
Returns:
str: An absolute URI. | Below is the the instruction that describes the task:
### Input:
Ensure an Album Art URI is an absolute URI.
Args:
url (str): the album art URI.
Returns:
str: An absolute URI.
### Response:
def build_album_art_full_uri(self, url):
"""Ensure an Album Art URI is an absolute URI.
Args:
url (str): the album art URI.
Returns:
str: An absolute URI.
"""
# Add on the full album art link, as the URI version
# does not include the ipaddress
if not url.startswith(('http:', 'https:')):
url = 'http://' + self.soco.ip_address + ':1400' + url
return url |
def decrypt_multi_get(decrypt_method, crypto_config_method, read_method, **kwargs):
# type: (Callable, Callable, Callable, **Any) -> Dict
# TODO: narrow this down
"""Transparently decrypt multiple items after getting them from the table with a scan or query method.
:param callable decrypt_method: Method to use to decrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable read_method: Method that reads from the table
:param **kwargs: Keyword arguments to pass to ``read_method``
:return: DynamoDB response
:rtype: dict
"""
validate_get_arguments(kwargs)
crypto_config, ddb_kwargs = crypto_config_method(**kwargs)
response = read_method(**ddb_kwargs)
for pos in range(len(response["Items"])):
response["Items"][pos] = decrypt_method(
item=response["Items"][pos],
crypto_config=crypto_config.with_item(_item_transformer(decrypt_method)(response["Items"][pos])),
)
return response | Transparently decrypt multiple items after getting them from the table with a scan or query method.
:param callable decrypt_method: Method to use to decrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable read_method: Method that reads from the table
:param **kwargs: Keyword arguments to pass to ``read_method``
:return: DynamoDB response
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Transparently decrypt multiple items after getting them from the table with a scan or query method.
:param callable decrypt_method: Method to use to decrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable read_method: Method that reads from the table
:param **kwargs: Keyword arguments to pass to ``read_method``
:return: DynamoDB response
:rtype: dict
### Response:
def decrypt_multi_get(decrypt_method, crypto_config_method, read_method, **kwargs):
# type: (Callable, Callable, Callable, **Any) -> Dict
# TODO: narrow this down
"""Transparently decrypt multiple items after getting them from the table with a scan or query method.
:param callable decrypt_method: Method to use to decrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable read_method: Method that reads from the table
:param **kwargs: Keyword arguments to pass to ``read_method``
:return: DynamoDB response
:rtype: dict
"""
validate_get_arguments(kwargs)
crypto_config, ddb_kwargs = crypto_config_method(**kwargs)
response = read_method(**ddb_kwargs)
for pos in range(len(response["Items"])):
response["Items"][pos] = decrypt_method(
item=response["Items"][pos],
crypto_config=crypto_config.with_item(_item_transformer(decrypt_method)(response["Items"][pos])),
)
return response |
def _check_building_options(string):
"""
Checks the building options to make sure that they are defined last,
after the task name and the dependencies
"""
opener, closer = '{', '}'
_check_enclosing_characters(string, opener, closer)
if opener in string:
if string[-1] != closer:
raise ValueError(OPTIONS_NOT_LAST)
ret = True
else:
ret = False
return ret | Checks the building options to make sure that they are defined last,
after the task name and the dependencies | Below is the the instruction that describes the task:
### Input:
Checks the building options to make sure that they are defined last,
after the task name and the dependencies
### Response:
def _check_building_options(string):
"""
Checks the building options to make sure that they are defined last,
after the task name and the dependencies
"""
opener, closer = '{', '}'
_check_enclosing_characters(string, opener, closer)
if opener in string:
if string[-1] != closer:
raise ValueError(OPTIONS_NOT_LAST)
ret = True
else:
ret = False
return ret |
def from_payload(self, payload):
"""Init frame from binary data."""
number_of_objects = payload[0]
self.remaining_scenes = payload[-1]
predicted_len = number_of_objects * 65 + 2
if len(payload) != predicted_len:
raise PyVLXException('scene_list_notification_wrong_length')
self.scenes = []
for i in range(number_of_objects):
scene = payload[(i*65+1):(i*65+66)]
number = scene[0]
name = bytes_to_string(scene[1:])
self.scenes.append((number, name)) | Init frame from binary data. | Below is the the instruction that describes the task:
### Input:
Init frame from binary data.
### Response:
def from_payload(self, payload):
"""Init frame from binary data."""
number_of_objects = payload[0]
self.remaining_scenes = payload[-1]
predicted_len = number_of_objects * 65 + 2
if len(payload) != predicted_len:
raise PyVLXException('scene_list_notification_wrong_length')
self.scenes = []
for i in range(number_of_objects):
scene = payload[(i*65+1):(i*65+66)]
number = scene[0]
name = bytes_to_string(scene[1:])
self.scenes.append((number, name)) |
def _preprocess_data(self, data):
"""
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
"""
if hasattr(data, 'read') and not self.chunksize:
data = data.read()
if not hasattr(data, 'read') and self.chunksize:
data = StringIO(data)
return data | At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method. | Below is the the instruction that describes the task:
### Input:
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
### Response:
def _preprocess_data(self, data):
"""
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
"""
if hasattr(data, 'read') and not self.chunksize:
data = data.read()
if not hasattr(data, 'read') and self.chunksize:
data = StringIO(data)
return data |
def malware(self, malware, password, file_name):
"""
Uploads to malware vault.
Args:
malware:
password:
file_name:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
self._data['malware'] = malware
self._data['password'] = password
self._data['fileName'] = file_name
request = {'malware': malware, 'password': password, 'fileName': file_name}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request) | Uploads to malware vault.
Args:
malware:
password:
file_name: | Below is the the instruction that describes the task:
### Input:
Uploads to malware vault.
Args:
malware:
password:
file_name:
### Response:
def malware(self, malware, password, file_name):
"""
Uploads to malware vault.
Args:
malware:
password:
file_name:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
self._data['malware'] = malware
self._data['password'] = password
self._data['fileName'] = file_name
request = {'malware': malware, 'password': password, 'fileName': file_name}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request) |
def list_math_subtraction(a, b):
"""!
@brief Calculates subtraction of two lists.
@details Each element from list 'a' is subtracted by element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): List of elements that supports mathematical subtraction.
@return (list) Results of subtraction of two lists.
"""
return [a[i] - b[i] for i in range(len(a))]; | !
@brief Calculates subtraction of two lists.
@details Each element from list 'a' is subtracted by element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): List of elements that supports mathematical subtraction.
@return (list) Results of subtraction of two lists. | Below is the the instruction that describes the task:
### Input:
!
@brief Calculates subtraction of two lists.
@details Each element from list 'a' is subtracted by element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): List of elements that supports mathematical subtraction.
@return (list) Results of subtraction of two lists.
### Response:
def list_math_subtraction(a, b):
"""!
@brief Calculates subtraction of two lists.
@details Each element from list 'a' is subtracted by element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): List of elements that supports mathematical subtraction.
@return (list) Results of subtraction of two lists.
"""
return [a[i] - b[i] for i in range(len(a))]; |
def make_worksheet_data(headers, worksheet):
"""
Make data from worksheet
"""
data = []
row_idx = 1
while row_idx < worksheet.nrows:
cell_idx = 0
row_dict = {}
while cell_idx < worksheet.ncols:
cell_type = worksheet.cell_type(row_idx, cell_idx)
if cell_type in VALID_CELL_TYPES:
cell_value = worksheet.cell_value(row_idx, cell_idx)
try:
if cell_type == 2 and cell_value.is_integer():
cell_value = int(cell_value)
row_dict[headers[cell_idx]] = cell_value
except KeyError:
try:
column = ascii_uppercase[cell_idx]
except IndexError:
column = cell_idx
puts("There is no header for cell with value '{0}' in column '{1}' of '{2}'" .format(
cell_value, column, worksheet.name
))
cell_idx += 1
data.append(row_dict)
row_idx += 1
# Magic key handling
if 'key' in headers.values():
keyed_data = {}
for row in data:
if 'key' in row.keys():
key = slughifi(row['key'])
if keyed_data.get(key):
puts("There is already a key named '{0}' with value "
"'{1}' in '{2}'. It is being overwritten with "
"value '{3}'.".format(key,
keyed_data.get(key),
worksheet.name,
row))
# Magic values worksheet
if worksheet.name == "values":
value = row.get('value')
if value not in ("", None):
keyed_data[key] = value
else:
keyed_data[key] = row
data = keyed_data
return data | Make data from worksheet | Below is the the instruction that describes the task:
### Input:
Make data from worksheet
### Response:
def make_worksheet_data(headers, worksheet):
"""
Make data from worksheet
"""
data = []
row_idx = 1
while row_idx < worksheet.nrows:
cell_idx = 0
row_dict = {}
while cell_idx < worksheet.ncols:
cell_type = worksheet.cell_type(row_idx, cell_idx)
if cell_type in VALID_CELL_TYPES:
cell_value = worksheet.cell_value(row_idx, cell_idx)
try:
if cell_type == 2 and cell_value.is_integer():
cell_value = int(cell_value)
row_dict[headers[cell_idx]] = cell_value
except KeyError:
try:
column = ascii_uppercase[cell_idx]
except IndexError:
column = cell_idx
puts("There is no header for cell with value '{0}' in column '{1}' of '{2}'" .format(
cell_value, column, worksheet.name
))
cell_idx += 1
data.append(row_dict)
row_idx += 1
# Magic key handling
if 'key' in headers.values():
keyed_data = {}
for row in data:
if 'key' in row.keys():
key = slughifi(row['key'])
if keyed_data.get(key):
puts("There is already a key named '{0}' with value "
"'{1}' in '{2}'. It is being overwritten with "
"value '{3}'.".format(key,
keyed_data.get(key),
worksheet.name,
row))
# Magic values worksheet
if worksheet.name == "values":
value = row.get('value')
if value not in ("", None):
keyed_data[key] = value
else:
keyed_data[key] = row
data = keyed_data
return data |
def authorization_code_pkce(self, client_id, code_verifier, code,
redirect_uri, grant_type='authorization_code'):
"""Authorization code pkce grant
This is the OAuth 2.0 grant that mobile apps utilize in order to access an API.
Use this endpoint to exchange an Authorization Code for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code pkce
use authorization_code
client_id (str): your application's client Id
code_verifier (str): Cryptographically random key that was used to generate
the code_challenge passed to /authorize.
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token
"""
return self.post(
'https://{}/oauth/token'.format(self.domain),
data={
'client_id': client_id,
'code_verifier': code_verifier,
'code': code,
'grant_type': grant_type,
'redirect_uri': redirect_uri,
},
headers={'Content-Type': 'application/json'}
) | Authorization code pkce grant
This is the OAuth 2.0 grant that mobile apps utilize in order to access an API.
Use this endpoint to exchange an Authorization Code for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code pkce
use authorization_code
client_id (str): your application's client Id
code_verifier (str): Cryptographically random key that was used to generate
the code_challenge passed to /authorize.
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token | Below is the the instruction that describes the task:
### Input:
Authorization code pkce grant
This is the OAuth 2.0 grant that mobile apps utilize in order to access an API.
Use this endpoint to exchange an Authorization Code for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code pkce
use authorization_code
client_id (str): your application's client Id
code_verifier (str): Cryptographically random key that was used to generate
the code_challenge passed to /authorize.
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token
### Response:
def authorization_code_pkce(self, client_id, code_verifier, code,
redirect_uri, grant_type='authorization_code'):
"""Authorization code pkce grant
This is the OAuth 2.0 grant that mobile apps utilize in order to access an API.
Use this endpoint to exchange an Authorization Code for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code pkce
use authorization_code
client_id (str): your application's client Id
code_verifier (str): Cryptographically random key that was used to generate
the code_challenge passed to /authorize.
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token
"""
return self.post(
'https://{}/oauth/token'.format(self.domain),
data={
'client_id': client_id,
'code_verifier': code_verifier,
'code': code,
'grant_type': grant_type,
'redirect_uri': redirect_uri,
},
headers={'Content-Type': 'application/json'}
) |
def which(cmd, path="PATH"):
"""Find cmd on PATH."""
if os.path.exists(cmd):
return cmd
if cmd[0] == '/':
return None
for segment in os.getenv(path).split(":"):
program = os.path.normpath(os.path.join(segment, cmd))
if os.path.exists(program):
return program
return None | Find cmd on PATH. | Below is the the instruction that describes the task:
### Input:
Find cmd on PATH.
### Response:
def which(cmd, path="PATH"):
"""Find cmd on PATH."""
if os.path.exists(cmd):
return cmd
if cmd[0] == '/':
return None
for segment in os.getenv(path).split(":"):
program = os.path.normpath(os.path.join(segment, cmd))
if os.path.exists(program):
return program
return None |
def _line_load(network, grid, crit_lines):
"""
Checks for over-loading issues of lines.
Parameters
----------
network : :class:`~.grid.network.Network`
grid : :class:`~.grid.grids.LVGrid` or :class:`~.grid.grids.MVGrid`
crit_lines : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`.
"""
if isinstance(grid, LVGrid):
grid_level = 'lv'
else:
grid_level = 'mv'
for line in list(grid.graph.lines()):
i_line_allowed_per_case = {}
i_line_allowed_per_case['feedin_case'] = \
line['line'].type['I_max_th'] * line['line'].quantity * \
network.config['grid_expansion_load_factors'][
'{}_feedin_case_line'.format(grid_level)]
i_line_allowed_per_case['load_case'] = \
line['line'].type['I_max_th'] * line['line'].quantity * \
network.config['grid_expansion_load_factors'][
'{}_load_case_line'.format(grid_level)]
# maximum allowed line load in each time step
i_line_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: i_line_allowed_per_case[_])
try:
# check if maximum current from power flow analysis exceeds
# allowed maximum current
i_line_pfa = network.results.i_res[repr(line['line'])]
if any((i_line_allowed - i_line_pfa) < 0):
# find out largest relative deviation
relative_i_res = i_line_pfa / i_line_allowed
crit_lines = crit_lines.append(pd.DataFrame(
{'max_rel_overload': relative_i_res.max(),
'time_index': relative_i_res.idxmax()},
index=[line['line']]))
except KeyError:
logger.debug('No results for line {} '.format(str(line)) +
'to check overloading.')
return crit_lines | Checks for over-loading issues of lines.
Parameters
----------
network : :class:`~.grid.network.Network`
grid : :class:`~.grid.grids.LVGrid` or :class:`~.grid.grids.MVGrid`
crit_lines : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`. | Below is the the instruction that describes the task:
### Input:
Checks for over-loading issues of lines.
Parameters
----------
network : :class:`~.grid.network.Network`
grid : :class:`~.grid.grids.LVGrid` or :class:`~.grid.grids.MVGrid`
crit_lines : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`.
### Response:
def _line_load(network, grid, crit_lines):
"""
Checks for over-loading issues of lines.
Parameters
----------
network : :class:`~.grid.network.Network`
grid : :class:`~.grid.grids.LVGrid` or :class:`~.grid.grids.MVGrid`
crit_lines : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occured in as
:pandas:`pandas.Timestamp<timestamp>`.
"""
if isinstance(grid, LVGrid):
grid_level = 'lv'
else:
grid_level = 'mv'
for line in list(grid.graph.lines()):
i_line_allowed_per_case = {}
i_line_allowed_per_case['feedin_case'] = \
line['line'].type['I_max_th'] * line['line'].quantity * \
network.config['grid_expansion_load_factors'][
'{}_feedin_case_line'.format(grid_level)]
i_line_allowed_per_case['load_case'] = \
line['line'].type['I_max_th'] * line['line'].quantity * \
network.config['grid_expansion_load_factors'][
'{}_load_case_line'.format(grid_level)]
# maximum allowed line load in each time step
i_line_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: i_line_allowed_per_case[_])
try:
# check if maximum current from power flow analysis exceeds
# allowed maximum current
i_line_pfa = network.results.i_res[repr(line['line'])]
if any((i_line_allowed - i_line_pfa) < 0):
# find out largest relative deviation
relative_i_res = i_line_pfa / i_line_allowed
crit_lines = crit_lines.append(pd.DataFrame(
{'max_rel_overload': relative_i_res.max(),
'time_index': relative_i_res.idxmax()},
index=[line['line']]))
except KeyError:
logger.debug('No results for line {} '.format(str(line)) +
'to check overloading.')
return crit_lines |
def create_room(self, payload):
''' create a stream in a non-inclusive manner '''
response, status_code = self.__pod__.Streams.post_v2_room_create(
# V2RoomAttributes
payload=payload
).result()
self.logger.debug('%s: %s' % (status_code, response))
return status_code, response | create a stream in a non-inclusive manner | Below is the the instruction that describes the task:
### Input:
create a stream in a non-inclusive manner
### Response:
def create_room(self, payload):
''' create a stream in a non-inclusive manner '''
response, status_code = self.__pod__.Streams.post_v2_room_create(
# V2RoomAttributes
payload=payload
).result()
self.logger.debug('%s: %s' % (status_code, response))
return status_code, response |
def action_log_create(sender, instance, created, **kwargs):
"""
Signal receiver that creates a log entry when a model instance is first saved to the database.
Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead.
"""
if created:
changes = model_instance_diff(None, instance)
log_entry = LogAction.objects.create_log_action(
instance=instance,
action=LogAction.CREATE,
changes=json.dumps(changes),
) | Signal receiver that creates a log entry when a model instance is first saved to the database.
Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead. | Below is the the instruction that describes the task:
### Input:
Signal receiver that creates a log entry when a model instance is first saved to the database.
Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead.
### Response:
def action_log_create(sender, instance, created, **kwargs):
"""
Signal receiver that creates a log entry when a model instance is first saved to the database.
Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead.
"""
if created:
changes = model_instance_diff(None, instance)
log_entry = LogAction.objects.create_log_action(
instance=instance,
action=LogAction.CREATE,
changes=json.dumps(changes),
) |
def simple_beam_splitter(ax, p0, size=2.54, width=0.1, alpha=0,
format=None, **kwds):
r"""Draw a simple beam splitter."""
if format is None: format = 'k-'
a = size/2
b = width/2
x0 = [a, -a, -a, a, a]
y0 = [b, b, -b, -b, b]
cur_list = [(x0, y0)]
cur_list = rotate_and_traslate(cur_list, alpha, p0)
for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds) | r"""Draw a simple beam splitter. | Below is the the instruction that describes the task:
### Input:
r"""Draw a simple beam splitter.
### Response:
def simple_beam_splitter(ax, p0, size=2.54, width=0.1, alpha=0,
format=None, **kwds):
r"""Draw a simple beam splitter."""
if format is None: format = 'k-'
a = size/2
b = width/2
x0 = [a, -a, -a, a, a]
y0 = [b, b, -b, -b, b]
cur_list = [(x0, y0)]
cur_list = rotate_and_traslate(cur_list, alpha, p0)
for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds) |
def collect_prs_info(self):
"""Collect all pending merge PRs info.
:returns: mapping of PRs by state
"""
REPO_RE = re.compile(
'^(https://github.com/|git@github.com:)'
'(?P<owner>.*?)/(?P<repo>.*?)(.git)?$')
PULL_RE = re.compile(
'^(refs/)?pull/(?P<pr>[0-9]+)/head$')
remotes = {r['name']: r['url'] for r in self.remotes}
all_prs = {}
for merge in self.merges:
remote = merge['remote']
ref = merge['ref']
repo_url = remotes[remote]
repo_mo = REPO_RE.match(repo_url)
if not repo_mo:
logger.debug('%s is not a github repo', repo_url)
continue
pull_mo = PULL_RE.match(ref)
if not pull_mo:
logger.debug('%s is not a github pull reqeust', ref)
continue
pr_info = {
'owner': repo_mo.group('owner'),
'repo': repo_mo.group('repo'),
'pr': pull_mo.group('pr'),
}
pr_info['path'] = '{owner}/{repo}/pulls/{pr}'.format(**pr_info)
pr_info['url'] = 'https://github.com/{path}'.format(**pr_info)
pr_info['shortcut'] = '{owner}/{repo}#{pr}'.format(**pr_info)
r = self._github_api_get('/repos/{path}'.format(**pr_info))
if r.status_code != 200:
logger.warning(
'Could not get status of {path}. '
'Reason: {r.status_code} {r.reason}'.format(r=r, **pr_info)
)
continue
pr_info['state'] = r.json().get('state')
pr_info['merged'] = (
not r.json().get('merged') and 'not ' or ''
) + 'merged'
all_prs.setdefault(pr_info['state'], []).append(pr_info)
return all_prs | Collect all pending merge PRs info.
:returns: mapping of PRs by state | Below is the the instruction that describes the task:
### Input:
Collect all pending merge PRs info.
:returns: mapping of PRs by state
### Response:
def collect_prs_info(self):
"""Collect all pending merge PRs info.
:returns: mapping of PRs by state
"""
REPO_RE = re.compile(
'^(https://github.com/|git@github.com:)'
'(?P<owner>.*?)/(?P<repo>.*?)(.git)?$')
PULL_RE = re.compile(
'^(refs/)?pull/(?P<pr>[0-9]+)/head$')
remotes = {r['name']: r['url'] for r in self.remotes}
all_prs = {}
for merge in self.merges:
remote = merge['remote']
ref = merge['ref']
repo_url = remotes[remote]
repo_mo = REPO_RE.match(repo_url)
if not repo_mo:
logger.debug('%s is not a github repo', repo_url)
continue
pull_mo = PULL_RE.match(ref)
if not pull_mo:
logger.debug('%s is not a github pull reqeust', ref)
continue
pr_info = {
'owner': repo_mo.group('owner'),
'repo': repo_mo.group('repo'),
'pr': pull_mo.group('pr'),
}
pr_info['path'] = '{owner}/{repo}/pulls/{pr}'.format(**pr_info)
pr_info['url'] = 'https://github.com/{path}'.format(**pr_info)
pr_info['shortcut'] = '{owner}/{repo}#{pr}'.format(**pr_info)
r = self._github_api_get('/repos/{path}'.format(**pr_info))
if r.status_code != 200:
logger.warning(
'Could not get status of {path}. '
'Reason: {r.status_code} {r.reason}'.format(r=r, **pr_info)
)
continue
pr_info['state'] = r.json().get('state')
pr_info['merged'] = (
not r.json().get('merged') and 'not ' or ''
) + 'merged'
all_prs.setdefault(pr_info['state'], []).append(pr_info)
return all_prs |
def validate_extra_context(ctx, param, value):
"""Validate extra context."""
for s in value:
if '=' not in s:
raise click.BadParameter(
'EXTRA_CONTEXT should contain items of the form key=value; '
"'{}' doesn't match that form".format(s)
)
# Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66')
# to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'}
return collections.OrderedDict(s.split('=', 1) for s in value) or None | Validate extra context. | Below is the the instruction that describes the task:
### Input:
Validate extra context.
### Response:
def validate_extra_context(ctx, param, value):
"""Validate extra context."""
for s in value:
if '=' not in s:
raise click.BadParameter(
'EXTRA_CONTEXT should contain items of the form key=value; '
"'{}' doesn't match that form".format(s)
)
# Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66')
# to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'}
return collections.OrderedDict(s.split('=', 1) for s in value) or None |
def datasets_list(self, project_id=None, max_results=0, page_token=None):
"""Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | Below is the the instruction that describes the task:
### Input:
Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
### Response:
def datasets_list(self, project_id=None, max_results=0, page_token=None):
"""Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) |
def read_scanimage_metadata(fh):
"""Read ScanImage BigTIFF v3 static and ROI metadata from open file.
Return non-varying frame data as dict and ROI group data as JSON.
The settings can be used to read image data and metadata without parsing
the TIFF file.
Raise ValueError if file does not contain valid ScanImage v3 metadata.
"""
fh.seek(0)
try:
byteorder, version = struct.unpack('<2sH', fh.read(4))
if byteorder != b'II' or version != 43:
raise Exception
fh.seek(16)
magic, version, size0, size1 = struct.unpack('<IIII', fh.read(16))
if magic != 117637889 or version != 3:
raise Exception
except Exception:
raise ValueError('not a ScanImage BigTIFF v3 file')
frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
roi_data = read_json(fh, '<', None, size1, None) if size1 > 1 else {}
return frame_data, roi_data | Read ScanImage BigTIFF v3 static and ROI metadata from open file.
Return non-varying frame data as dict and ROI group data as JSON.
The settings can be used to read image data and metadata without parsing
the TIFF file.
Raise ValueError if file does not contain valid ScanImage v3 metadata. | Below is the the instruction that describes the task:
### Input:
Read ScanImage BigTIFF v3 static and ROI metadata from open file.
Return non-varying frame data as dict and ROI group data as JSON.
The settings can be used to read image data and metadata without parsing
the TIFF file.
Raise ValueError if file does not contain valid ScanImage v3 metadata.
### Response:
def read_scanimage_metadata(fh):
"""Read ScanImage BigTIFF v3 static and ROI metadata from open file.
Return non-varying frame data as dict and ROI group data as JSON.
The settings can be used to read image data and metadata without parsing
the TIFF file.
Raise ValueError if file does not contain valid ScanImage v3 metadata.
"""
fh.seek(0)
try:
byteorder, version = struct.unpack('<2sH', fh.read(4))
if byteorder != b'II' or version != 43:
raise Exception
fh.seek(16)
magic, version, size0, size1 = struct.unpack('<IIII', fh.read(16))
if magic != 117637889 or version != 3:
raise Exception
except Exception:
raise ValueError('not a ScanImage BigTIFF v3 file')
frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
roi_data = read_json(fh, '<', None, size1, None) if size1 > 1 else {}
return frame_data, roi_data |
def bishop88_mpp(photocurrent, saturation_current, resistance_series,
                 resistance_shunt, nNsVth, method='newton'):
    """
    Find max power point.
    Parameters
    ----------
    photocurrent : numeric
        photogenerated current (Iph or IL) in amperes [A]
    saturation_current : numeric
        diode dark or saturation current (Io or Isat) in amperes [A]
    resistance_series : numeric
        series resistance (Rs) in ohms
    resistance_shunt : numeric
        shunt resistance (Rsh) in ohms
    nNsVth : numeric
        product of diode ideality factor (n), number of series cells (Ns), and
        thermal voltage (Vth = k_b * T / q_e) in volts [V]
    method : str
        one of two optional search methods: either ``'brentq'``, a reliable and
        bounded method or ``'newton'`` which is the default.
    Returns
    -------
    OrderedDict or pandas.DataFrame
        max power current ``i_mp`` [A], max power voltage ``v_mp`` [V], and
        max power ``p_mp`` [W]
    """
    # collect args
    args = (photocurrent, saturation_current, resistance_series,
            resistance_shunt, nNsVth)
    # first bound the search using voc
    voc_est = estimate_voc(photocurrent, saturation_current, nNsVth)

    def fmpp(x, *a):
        # objective whose root marks the MPP; element [6] of the gradients
        # tuple (presumably dP/dVd -- confirm against bishop88)
        return bishop88(x, *a, gradients=True)[6]

    if method.lower() == 'brentq':
        # break out arguments for numpy.vectorize to handle broadcasting
        vec_fun = np.vectorize(
            lambda voc, iph, isat, rs, rsh, gamma:
                brentq(fmpp, 0.0, voc, args=(iph, isat, rs, rsh, gamma))
        )
        vd = vec_fun(voc_est, *args)
    elif method.lower() == 'newton':
        # make sure all args are numpy arrays if max size > 1
        # if voc_est is an array, then make a copy to use for initial guess, v0
        args, v0 = _prepare_newton_inputs((), args, voc_est)
        vd = newton(
            func=fmpp, x0=v0,
            # element [7] is the derivative of the objective for Newton steps
            fprime=lambda x, *a: bishop88(x, *a, gradients=True)[7], args=args
        )
    else:
        raise NotImplementedError("Method '%s' isn't implemented" % method)
    # evaluate the diode model at the max-power diode voltage
    return bishop88(vd, *args) | Find max power point.
Parameters
----------
photocurrent : numeric
photogenerated current (Iph or IL) in amperes [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) in amperes [A]
resistance_series : numeric
series resistance (Rs) in ohms
resistance_shunt : numeric
shunt resistance (Rsh) in ohms
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
method : str
one of two optional search methods: either ``'brentq'``, a reliable and
bounded method or ``'newton'`` which is the default.
Returns
-------
OrderedDict or pandas.DataFrame
max power current ``i_mp`` [A], max power voltage ``v_mp`` [V], and
max power ``p_mp`` [W] | Below is the instruction that describes the task:
### Input:
Find max power point.
Parameters
----------
photocurrent : numeric
photogenerated current (Iph or IL) in amperes [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) in amperes [A]
resistance_series : numeric
series resistance (Rs) in ohms
resistance_shunt : numeric
shunt resistance (Rsh) in ohms
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
method : str
one of two optional search methods: either ``'brentq'``, a reliable and
bounded method or ``'newton'`` which is the default.
Returns
-------
OrderedDict or pandas.DataFrame
max power current ``i_mp`` [A], max power voltage ``v_mp`` [V], and
max power ``p_mp`` [W]
### Response:
def bishop88_mpp(photocurrent, saturation_current, resistance_series,
                 resistance_shunt, nNsVth, method='newton'):
    """
    Find max power point.

    Parameters
    ----------
    photocurrent : numeric
        photogenerated current (Iph or IL) in amperes [A]
    saturation_current : numeric
        diode dark or saturation current (Io or Isat) in amperes [A]
    resistance_series : numeric
        series resistance (Rs) in ohms
    resistance_shunt : numeric
        shunt resistance (Rsh) in ohms
    nNsVth : numeric
        product of diode ideality factor (n), number of series cells (Ns), and
        thermal voltage (Vth = k_b * T / q_e) in volts [V]
    method : str
        one of two optional search methods: either ``'brentq'``, a reliable and
        bounded method or ``'newton'`` which is the default.

    Returns
    -------
    OrderedDict or pandas.DataFrame
        max power current ``i_mp`` [A], max power voltage ``v_mp`` [V], and
        max power ``p_mp`` [W]
    """
    # collect args
    args = (photocurrent, saturation_current, resistance_series,
            resistance_shunt, nNsVth)
    # first bound the search using voc
    voc_est = estimate_voc(photocurrent, saturation_current, nNsVth)

    def fmpp(x, *a):
        # objective whose root marks the MPP; element [6] of the gradients
        # tuple (presumably dP/dVd -- confirm against bishop88)
        return bishop88(x, *a, gradients=True)[6]

    if method.lower() == 'brentq':
        # break out arguments for numpy.vectorize to handle broadcasting
        vec_fun = np.vectorize(
            lambda voc, iph, isat, rs, rsh, gamma:
                brentq(fmpp, 0.0, voc, args=(iph, isat, rs, rsh, gamma))
        )
        vd = vec_fun(voc_est, *args)
    elif method.lower() == 'newton':
        # make sure all args are numpy arrays if max size > 1
        # if voc_est is an array, then make a copy to use for initial guess, v0
        args, v0 = _prepare_newton_inputs((), args, voc_est)
        vd = newton(
            func=fmpp, x0=v0,
            # element [7] is the derivative of the objective for Newton steps
            fprime=lambda x, *a: bishop88(x, *a, gradients=True)[7], args=args
        )
    else:
        raise NotImplementedError("Method '%s' isn't implemented" % method)
    # evaluate the diode model at the max-power diode voltage
    return bishop88(vd, *args)
def _init_map(self):
    """stub"""
    # Delegate to both parent record types so their map entries exist first.
    FilesAnswerFormRecord._init_map(self)
    FeedbackAnswerFormRecord._init_map(self)
    # Seed 'inlineRegions' with the metadata's first default object value.
    self.my_osid_object_form._my_map['inlineRegions'] = \
        self._inline_regions_metadata['default_object_values'][0] | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def _init_map(self):
    """stub"""
    # Initialise the maps of both parent record types before ours.
    FilesAnswerFormRecord._init_map(self)
    FeedbackAnswerFormRecord._init_map(self)
    # Seed 'inlineRegions' with the metadata's first default object value.
    default_regions = self._inline_regions_metadata['default_object_values'][0]
    self.my_osid_object_form._my_map['inlineRegions'] = default_regions
def feasible_ratio(self, solutions):
    """counts for each coordinate the number of feasible values in
    ``solutions`` and returns an array of length ``len(solutions[0])``
    with the ratios.
    `solutions` is a list or array of repaired ``Solution``
    instances,
    """
    # The Solution class this method relied on no longer exists, so the
    # implementation below the raise is unreachable legacy code.
    raise NotImplementedError('Solution class disappeared')
    count = np.zeros(len(solutions[0]))
    for x in solutions:
        count += x.unrepaired == x
    return count / float(len(solutions)) | counts for each coordinate the number of feasible values in
``solutions`` and returns an array of length ``len(solutions[0])``
with the ratios.
`solutions` is a list or array of repaired ``Solution``
instances, | Below is the instruction that describes the task:
### Input:
counts for each coordinate the number of feasible values in
``solutions`` and returns an array of length ``len(solutions[0])``
with the ratios.
`solutions` is a list or array of repaired ``Solution``
instances,
### Response:
def feasible_ratio(self, solutions):
    """counts for each coordinate the number of feasible values in
    ``solutions`` and returns an array of length ``len(solutions[0])``
    with the ratios.

    `solutions` is a list or array of repaired ``Solution``
    instances,
    """
    # The ``Solution`` class this depended on no longer exists; the body
    # below the raise is unreachable legacy code kept for reference.
    raise NotImplementedError('Solution class disappeared')
    tally = np.zeros(len(solutions[0]))
    for sol in solutions:
        tally += sol.unrepaired == sol
    return tally / float(len(solutions))
def rotate_points_to_axis(points, axis):
    """Rotate all points of a list, such that `axis==[0,1,0]`
    This is accomplished by rotating in the x-z-plane by phi into the
    y-z-plane, then rotation in the y-z-plane by theta up to [0,1,0],
    and finally rotating back in the x-z-plane by -phi.
    Parameters
    ----------
    points: list-like with elements of length 3
        The Cartesian points. These should be in the same format as
        produced by `sphere_points_from_angles_and_tilt`.
    axis: list-like, length 3
        The reference axis that will be used to determine the
        rotation angle of the points. The points will be rotated
        about the origin such that `axis` matches [0,1,0].
    Returns
    -------
    rotated_points: np.ndarray of shape (N,3)
        The rotated points.
    """
    axis = norm_vec(axis)
    u, v, w = axis
    points = np.array(points)
    # Determine the rotational angle in the x-z plane
    phi = np.arctan2(u, w)
    # Determine the tilt angle w.r.t. the y-axis
    theta = np.arccos(v)
    # Negative rotation about y-axis
    Rphi = np.array([
        [np.cos(phi), 0, -np.sin(phi)],
        [0, 1, 0],
        [np.sin(phi), 0, np.cos(phi)],
    ])
    # Negative rotation about x-axis
    Rtheta = np.array([
        [1, 0, 0],
        [0, np.cos(theta), np.sin(theta)],
        [0, -np.sin(theta), np.cos(theta)],
    ])
    DR1 = np.dot(Rtheta, Rphi)
    # Rotate back by -phi such that effective rotation was only
    # towards [0,1,0].
    DR = np.dot(Rphi.T, DR1)
    rotpoints = np.zeros((len(points), 3))
    for ii, pnt in enumerate(points):
        # apply the combined rotation point by point
        rotpoints[ii] = np.dot(DR, pnt)
    # (A large commented-out matplotlib visualization of the rotated
    # vectors was removed here; recover it from VCS history if needed.)
    return rotpoints | Rotate all points of a list, such that `axis==[0,1,0]`
This is accomplished by rotating in the x-z-plane by phi into the
y-z-plane, then rotation in the y-z-plane by theta up to [0,1,0],
and finally rotating back in the x-z-plane by -phi.
Parameters
----------
points: list-like with elements of length 3
The Cartesian points. These should be in the same format as
produced by `sphere_points_from_angles_and_tilt`.
axis: list-like, length 3
The reference axis that will be used to determine the
rotation angle of the points. The points will be rotated
about the origin such that `axis` matches [0,1,0].
Returns
-------
rotated_points: np.ndarray of shape (N,3)
The rotated points. | Below is the instruction that describes the task:
### Input:
Rotate all points of a list, such that `axis==[0,1,0]`
This is accomplished by rotating in the x-z-plane by phi into the
y-z-plane, then rotation in the y-z-plane by theta up to [0,1,0],
and finally rotating back in the x-z-plane by -phi.
Parameters
----------
points: list-like with elements of length 3
The Cartesian points. These should be in the same format as
produced by `sphere_points_from_angles_and_tilt`.
axis: list-like, length 3
The reference axis that will be used to determine the
rotation angle of the points. The points will be rotated
about the origin such that `axis` matches [0,1,0].
Returns
-------
rotated_points: np.ndarray of shape (N,3)
The rotated points.
### Response:
def rotate_points_to_axis(points, axis):
    """Rotate all points of a list, such that `axis==[0,1,0]`

    The rotation happens in three stages: rotate by phi in the
    x-z-plane so the axis lies in the y-z-plane, tilt by theta in the
    y-z-plane up to [0,1,0], then undo the initial x-z rotation by -phi.

    Parameters
    ----------
    points: list-like with elements of length 3
        The Cartesian points. These should be in the same format as
        produced by `sphere_points_from_angles_and_tilt`.
    axis: list-like, length 3
        The reference axis that will be used to determine the
        rotation angle of the points. The points will be rotated
        about the origin such that `axis` matches [0,1,0].

    Returns
    -------
    rotated_points: np.ndarray of shape (N,3)
        The rotated points.
    """
    u, v, w = norm_vec(axis)
    pts = np.array(points)
    # In-plane (x-z) angle of the axis and its tilt from the y-axis.
    phi = np.arctan2(u, w)
    theta = np.arccos(v)
    cos_p, sin_p = np.cos(phi), np.sin(phi)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Negative rotation about the y-axis by phi.
    rot_y = np.array([
        [cos_p, 0, -sin_p],
        [0, 1, 0],
        [sin_p, 0, cos_p],
    ])
    # Negative rotation about the x-axis by theta.
    rot_x = np.array([
        [1, 0, 0],
        [0, cos_t, sin_t],
        [0, -sin_t, cos_t],
    ])
    # Combined map: rotate into the y-z-plane, tilt, then rotate back.
    full_rot = np.dot(rot_y.T, np.dot(rot_x, rot_y))
    rotated = np.zeros((len(pts), 3))
    for idx, pnt in enumerate(pts):
        rotated[idx] = np.dot(full_rot, pnt)
    return rotated
def find_bucket(self, bucketing_id, parent_id, traffic_allocations):
    """ Determine entity based on bucket value and traffic allocations.
    Args:
      bucketing_id: ID to be used for bucketing the user.
      parent_id: ID representing group or experiment.
      traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
    Returns:
      Entity ID which may represent experiment or variation.
    """
    bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)
    # Hash the composite key into a deterministic bucket number.
    bucketing_number = self._generate_bucket_value(bucketing_key)
    self.config.logger.debug('Assigned bucket %s to user with bucketing ID "%s".' % (
        bucketing_number,
        bucketing_id
    ))
    # Allocations are ordered ranges; the first whose endOfRange exceeds
    # the bucket number wins.
    for traffic_allocation in traffic_allocations:
        current_end_of_range = traffic_allocation.get('endOfRange')
        if bucketing_number < current_end_of_range:
            return traffic_allocation.get('entityId')
    # No range matched: the user falls outside every allocation.
    return None | Determine entity based on bucket value and traffic allocations.
Args:
bucketing_id: ID to be used for bucketing the user.
parent_id: ID representing group or experiment.
traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
Returns:
Entity ID which may represent experiment or variation. | Below is the instruction that describes the task:
### Input:
Determine entity based on bucket value and traffic allocations.
Args:
bucketing_id: ID to be used for bucketing the user.
parent_id: ID representing group or experiment.
traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
Returns:
Entity ID which may represent experiment or variation.
### Response:
def find_bucket(self, bucketing_id, parent_id, traffic_allocations):
    """ Determine entity based on bucket value and traffic allocations.
    Args:
      bucketing_id: ID to be used for bucketing the user.
      parent_id: ID representing group or experiment.
      traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
    Returns:
      Entity ID which may represent experiment or variation.
    """
    # Derive a deterministic bucket number from the (user, parent) pair.
    bucket_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id,
                                              parent_id=parent_id)
    bucket_number = self._generate_bucket_value(bucket_key)
    self.config.logger.debug(
        'Assigned bucket %s to user with bucketing ID "%s".' % (
            bucket_number, bucketing_id))
    # Walk the ordered allocation ranges; the first range whose end
    # exceeds the bucket number owns this user.
    for allocation in traffic_allocations:
        if bucket_number < allocation.get('endOfRange'):
            return allocation.get('entityId')
    return None
def _convert_2_0_0(topo, topo_path):
    """
    Convert topologies from GNS3 2.0.0 to 2.1
    Changes:
    * Remove startup_script_path from VPCS and base config file for IOU and Dynamips
    """
    # topo_path is accepted for signature parity with the other
    # converters but unused here -- TODO confirm.
    topo["revision"] = 8
    for node in topo.get("topology", {}).get("nodes", []):
        if "properties" in node:
            if node["node_type"] == "vpcs":
                # VPCS: drop legacy script references.
                if "startup_script_path" in node["properties"]:
                    del node["properties"]["startup_script_path"]
                if "startup_script" in node["properties"]:
                    del node["properties"]["startup_script"]
            elif node["node_type"] == "dynamips" or node["node_type"] == "iou":
                # Dynamips/IOU: drop legacy base-config paths and contents.
                if "startup_config" in node["properties"]:
                    del node["properties"]["startup_config"]
                if "private_config" in node["properties"]:
                    del node["properties"]["private_config"]
                if "startup_config_content" in node["properties"]:
                    del node["properties"]["startup_config_content"]
                if "private_config_content" in node["properties"]:
                    del node["properties"]["private_config_content"]
    return topo | Convert topologies from GNS3 2.0.0 to 2.1
Changes:
* Remove startup_script_path from VPCS and base config file for IOU and Dynamips | Below is the instruction that describes the task:
### Input:
Convert topologies from GNS3 2.0.0 to 2.1
Changes:
* Remove startup_script_path from VPCS and base config file for IOU and Dynamips
### Response:
def _convert_2_0_0(topo, topo_path):
"""
Convert topologies from GNS3 2.0.0 to 2.1
Changes:
* Remove startup_script_path from VPCS and base config file for IOU and Dynamips
"""
topo["revision"] = 8
for node in topo.get("topology", {}).get("nodes", []):
if "properties" in node:
if node["node_type"] == "vpcs":
if "startup_script_path" in node["properties"]:
del node["properties"]["startup_script_path"]
if "startup_script" in node["properties"]:
del node["properties"]["startup_script"]
elif node["node_type"] == "dynamips" or node["node_type"] == "iou":
if "startup_config" in node["properties"]:
del node["properties"]["startup_config"]
if "private_config" in node["properties"]:
del node["properties"]["private_config"]
if "startup_config_content" in node["properties"]:
del node["properties"]["startup_config_content"]
if "private_config_content" in node["properties"]:
del node["properties"]["private_config_content"]
return topo |
def can_feed(self, unit_type: UnitTypeId) -> bool:
    """ Checks if you have enough free supply to build the unit """
    # Supply cost of the requested unit, taken from the game data proto.
    # Zero-cost units never need supply.
    required = self._game_data.units[unit_type.value]._proto.food_required
    return required == 0 or self.supply_left >= required | Checks if you have enough free supply to build the unit | Below is the the instruction that describes the task:
### Input:
Checks if you have enough free supply to build the unit
### Response:
def can_feed(self, unit_type: UnitTypeId) -> bool:
    """ Checks if you have enough free supply to build the unit """
    # Supply cost of the requested unit, taken from the game data proto.
    food_cost = self._game_data.units[unit_type.value]._proto.food_required
    # Zero-cost units never need supply; otherwise compare with what is left.
    return food_cost == 0 or self.supply_left >= food_cost
def incoming_manipulators(self):
    """**DEPRECATED**: All incoming SON manipulators.
    .. versionchanged:: 3.5
    Deprecated.
    .. versionadded:: 2.0
    """
    warnings.warn("Database.incoming_manipulators() is deprecated",
                  DeprecationWarning, stacklevel=2)
    # Report manipulators by class name only, not the instances themselves.
    return [manipulator.__class__.__name__
            for manipulator in self.__incoming_manipulators] | **DEPRECATED**: All incoming SON manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0 | Below is the instruction that describes the task:
### Input:
**DEPRECATED**: All incoming SON manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0
### Response:
def incoming_manipulators(self):
    """**DEPRECATED**: All incoming SON manipulators.

    .. versionchanged:: 3.5
       Deprecated.

    .. versionadded:: 2.0
    """
    warnings.warn(
        "Database.incoming_manipulators() is deprecated",
        DeprecationWarning,
        stacklevel=2,
    )
    # Expose only the class names, never the manipulator instances.
    names = []
    for manip in self.__incoming_manipulators:
        names.append(manip.__class__.__name__)
    return names
def drawLine(self, x1, y1, x2, y2, silent=False):
    """
    Draws a line on the current :py:class:`Layer` with the current :py:class:`Brush`.
    Coordinates are relative to the original layer size WITHOUT downsampling applied.
    :param x1: Starting X coordinate.
    :param y1: Starting Y coordinate.
    :param x2: End X coordinate.
    :param y2: End Y coordinate.
    :param silent: When True, suppress the progress print.
    :rtype: Nothing.
    """
    start = time.time()  # NOTE(review): unused timing probe -- confirm and drop
    # Downsample the coordinates
    x1 = int(x1/config.DOWNSAMPLING)
    x2 = int(x2/config.DOWNSAMPLING)
    y1 = int(y1/config.DOWNSAMPLING)
    y2 = int(y2/config.DOWNSAMPLING)
    if not silent :
        print('drawing line from: '+str((x1,y1))+' to: '+str((x2,y2)))
    # Calculate the direction and the length of each step; one dab is
    # placed every `spacing` pixels along the line.
    direction = N.arctan2(y2 - y1, x2 - x1)
    length = self.brush.spacing
    # Prepare the loop
    x, y = x1, y1
    totalSteps = int(N.sqrt((x2 - x)**2 + (y2 - y)**2)/length)
    lay = self.image.getActiveLayer()
    col = self.color
    secCol = self.secondColor
    mirr = self.mirrorMode
    # If the brush uses source caching..
    if self.brush.usesSourceCaching:
        # ..hoist attribute lookups out of the loop for faster drawing
        laydata = lay.data
        # center the cached brush source on the dab position
        x -= self.brush.brushSize*0.5
        y -= self.brush.brushSize*0.5
        colbrsource = self.brush.coloredBrushSource
        canvSize = config.CANVAS_SIZE
        brmask = self.brush.brushMask
        for _ in range(totalSteps):
            # Make the dab on this point
            applyMirroredDab_jit(mirr, laydata, int(x), int(y), colbrsource.copy(), canvSize, brmask)
            # Move the point for the next step
            x += lendir_x(length, direction)
            y += lendir_y(length, direction)
    # ..if source caching is not used..
    else:
        # ..do the normal (slower) dab-by-dab drawing
        for _ in range(totalSteps):
            # Make the dab on this point
            self.brush.makeDab(lay, int(x), int(y), col, secCol, mirror=mirr)
            # Move the point for the next step
            x += lendir_x(length, direction)
            y += lendir_y(length, direction) | Draws a line on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x1: Starting X coordinate.
:param y1: Starting Y coordinate.
:param x2: End X coordinate.
:param y2: End Y coordinate.
:rtype: Nothing. | Below is the instruction that describes the task:
### Input:
Draws a line on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x1: Starting X coordinate.
:param y1: Starting Y coordinate.
:param x2: End X coordinate.
:param y2: End Y coordinate.
:rtype: Nothing.
### Response:
def drawLine(self, x1, y1, x2, y2, silent=False):
    """
    Draws a line on the current :py:class:`Layer` with the current :py:class:`Brush`.
    Coordinates are relative to the original layer size WITHOUT downsampling applied.
    :param x1: Starting X coordinate.
    :param y1: Starting Y coordinate.
    :param x2: End X coordinate.
    :param y2: End Y coordinate.
    :param silent: When True, suppress the progress print.
    :rtype: Nothing.
    """
    start = time.time()  # NOTE(review): unused timing probe -- confirm and drop
    # Downsample the coordinates
    x1 = int(x1/config.DOWNSAMPLING)
    x2 = int(x2/config.DOWNSAMPLING)
    y1 = int(y1/config.DOWNSAMPLING)
    y2 = int(y2/config.DOWNSAMPLING)
    if not silent :
        print('drawing line from: '+str((x1,y1))+' to: '+str((x2,y2)))
    # Calculate the direction and the length of each step; one dab is
    # placed every `spacing` pixels along the line.
    direction = N.arctan2(y2 - y1, x2 - x1)
    length = self.brush.spacing
    # Prepare the loop
    x, y = x1, y1
    totalSteps = int(N.sqrt((x2 - x)**2 + (y2 - y)**2)/length)
    lay = self.image.getActiveLayer()
    col = self.color
    secCol = self.secondColor
    mirr = self.mirrorMode
    # If the brush uses source caching..
    if self.brush.usesSourceCaching:
        # ..hoist attribute lookups out of the loop for faster drawing
        laydata = lay.data
        # center the cached brush source on the dab position
        x -= self.brush.brushSize*0.5
        y -= self.brush.brushSize*0.5
        colbrsource = self.brush.coloredBrushSource
        canvSize = config.CANVAS_SIZE
        brmask = self.brush.brushMask
        for _ in range(totalSteps):
            # Make the dab on this point
            applyMirroredDab_jit(mirr, laydata, int(x), int(y), colbrsource.copy(), canvSize, brmask)
            # Move the point for the next step
            x += lendir_x(length, direction)
            y += lendir_y(length, direction)
    # ..if source caching is not used..
    else:
        # ..do the normal (slower) dab-by-dab drawing
        for _ in range(totalSteps):
            # Make the dab on this point
            self.brush.makeDab(lay, int(x), int(y), col, secCol, mirror=mirr)
            # Move the point for the next step
            x += lendir_x(length, direction)
            y += lendir_y(length, direction)
def _get_p_p_id_and_contract(self):
    """Get id of consumption profile."""
    # Generator-based coroutine (old-style ``yield from`` asyncio).
    contracts = {}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get profile page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    # Search contracts: spans matching "C... 1234 56789"; store the
    # number with spaces removed, href unknown for now.
    for node in soup.find_all('span', {"class": "contrat"}):
        rematch = re.match("C[a-z]* ([0-9]{4} [0-9]{5})", node.text)
        if rematch is not None:
            contracts[rematch.group(1).replace(" ", "")] = None
    # search for links that mention a known contract number
    for node in soup.find_all('a', {"class": "big iconLink"}):
        for contract in contracts:
            if contract in node.attrs.get('href'):
                contracts[contract] = node.attrs.get('href')
    # Looking for p_p_id: the portlet span id with its leading "p_" stripped
    p_p_id = None
    for node in soup.find_all('span'):
        node_id = node.attrs.get('id', "")
        if node_id.startswith("p_portraitConsommation_WAR"):
            p_p_id = node_id[2:]
            break
    if p_p_id is None:
        raise PyHydroQuebecError("Could not get p_p_id")
    return p_p_id, contracts | Get id of consumption profile. | Below is the the instruction that describes the task:
### Input:
Get id of consumption profile.
### Response:
def _get_p_p_id_and_contract(self):
    """Get id of consumption profile.

    Generator-based coroutine (old-style ``yield from`` asyncio).
    Returns a ``(p_p_id, contracts)`` tuple where ``contracts`` maps a
    contract number (digits only) to its link href, or ``None`` when no
    link was found for it.
    """
    contracts = {}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get profile page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    # Search contracts: spans matching "C... 1234 56789"; store the
    # number with spaces removed, href unknown for now.
    for node in soup.find_all('span', {"class": "contrat"}):
        rematch = re.match("C[a-z]* ([0-9]{4} [0-9]{5})", node.text)
        if rematch is not None:
            contracts[rematch.group(1).replace(" ", "")] = None
    # search for links that mention a known contract number
    for node in soup.find_all('a', {"class": "big iconLink"}):
        for contract in contracts:
            if contract in node.attrs.get('href'):
                contracts[contract] = node.attrs.get('href')
    # Looking for p_p_id: the portlet span id with its leading "p_" stripped
    p_p_id = None
    for node in soup.find_all('span'):
        node_id = node.attrs.get('id', "")
        if node_id.startswith("p_portraitConsommation_WAR"):
            p_p_id = node_id[2:]
            break
    if p_p_id is None:
        raise PyHydroQuebecError("Could not get p_p_id")
    return p_p_id, contracts
def tsv_import(self, xsv_source, encoding="UTF-8", transforms=None, row_class=DataObject, **kwargs):
    """Imports the contents of a tab-separated data file into this table.
    @param xsv_source: tab-separated data file - if a string is given, the file with that name will be
        opened, read, and closed; if a file object is given, then that object
        will be read as-is, and left for the caller to be closed.
    @type xsv_source: string or file
    @param transforms: dict of functions by attribute name; if given, each
        attribute will be transformed using the corresponding transform; if there is no
        matching transform, the attribute will be read as a string (default); the
        transform function can also be defined as a (function, default-value) tuple; if
        there is an Exception raised by the transform function, then the attribute will
        be set to the given default value
    @type transforms: dict (optional)
    """
    # Thin wrapper: delegate to the generic x-separated-values importer
    # with a hard-wired tab delimiter.
    return self._xsv_import(xsv_source, encoding, transforms=transforms, delimiter="\t", row_class=row_class, **kwargs) | Imports the contents of a tab-separated data file into this table.
@param xsv_source: tab-separated data file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left for the caller to be closed.
@type xsv_source: string or file
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional) | Below is the instruction that describes the task:
### Input:
Imports the contents of a tab-separated data file into this table.
@param xsv_source: tab-separated data file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left for the caller to be closed.
@type xsv_source: string or file
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional)
### Response:
def tsv_import(self, xsv_source, encoding="UTF-8", transforms=None, row_class=DataObject, **kwargs):
    """Imports the contents of a tab-separated data file into this table.
    @param xsv_source: the tab-separated source - a string is treated as a
        file name to open, read, and close; a file object is read as-is and
        left for the caller to close.
    @type xsv_source: string or file
    @param transforms: dict of functions by attribute name; each matching
        attribute is passed through its transform, all others are read as
        strings (the default); a transform may also be given as a
        (function, default-value) tuple, in which case the default value is
        used whenever the function raises an Exception.
    @type transforms: dict (optional)
    """
    # Tab-separated import is just the generic XSV import with a fixed
    # tab delimiter.
    return self._xsv_import(xsv_source, encoding,
                            transforms=transforms,
                            delimiter="\t",
                            row_class=row_class,
                            **kwargs)
def delete(queue_id):
    '''
    Delete message(s) from the mail queue
    CLI Example:
    .. code-block:: bash
        salt '*' postfix.delete 5C33CA0DEA
        salt '*' postfix.delete ALL
    '''
    ret = {'message': '',
           'result': True
           }
    if not queue_id:
        # NOTE(review): logs but does not return; execution continues with
        # the empty queue_id and the lookup below reports it missing --
        # confirm this fall-through is intentional.
        log.error('Require argument queue_id')
    if not queue_id == 'ALL':
        # Verify the message exists before shelling out to postsuper.
        queue = show_queue()
        _message = None
        for item in queue:
            if item['queue_id'] == queue_id:
                _message = item
        if not _message:
            ret['message'] = 'No message in queue with ID {0}'.format(queue_id)
            ret['result'] = False
            return ret
    cmd = 'postsuper -d {0}'.format(queue_id)
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] == 0:
        if queue_id == 'ALL':
            ret['message'] = 'Successfully removed all messages'
        else:
            ret['message'] = 'Successfully removed message with queue id {0}'.format(queue_id)
    else:
        if queue_id == 'ALL':
            ret['message'] = 'Unable to removed all messages'
        else:
            ret['message'] = 'Unable to remove message with queue id {0}: {1}'.format(queue_id, result['stderr'])
    return ret | Delete message(s) from the mail queue
CLI Example:
.. code-block:: bash
salt '*' postfix.delete 5C33CA0DEA
salt '*' postfix.delete ALL | Below is the instruction that describes the task:
### Input:
Delete message(s) from the mail queue
CLI Example:
.. code-block:: bash
salt '*' postfix.delete 5C33CA0DEA
salt '*' postfix.delete ALL
### Response:
def delete(queue_id):
    '''
    Delete message(s) from the mail queue

    CLI Example:

    .. code-block:: bash

        salt '*' postfix.delete 5C33CA0DEA
        salt '*' postfix.delete ALL

    :param queue_id: postfix queue ID of the message to delete, or the
        literal string ``ALL`` to flush the entire queue.
    :return: dict with ``message`` (human-readable outcome) and ``result``
        (True on success, False otherwise).
    '''
    ret = {'message': '',
           'result': True
           }
    if not queue_id:
        # NOTE(review): historical behavior only logs and falls through;
        # the existence check below then reports "No message in queue
        # with ID ..." and returns result=False.
        log.error('Require argument queue_id')
    if queue_id != 'ALL':
        # Verify the message exists before shelling out to postsuper.
        queue = show_queue()
        _message = None
        for item in queue:
            if item['queue_id'] == queue_id:
                _message = item
        if not _message:
            ret['message'] = 'No message in queue with ID {0}'.format(queue_id)
            ret['result'] = False
            return ret
    cmd = 'postsuper -d {0}'.format(queue_id)
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] == 0:
        if queue_id == 'ALL':
            ret['message'] = 'Successfully removed all messages'
        else:
            ret['message'] = 'Successfully removed message with queue id {0}'.format(queue_id)
    else:
        if queue_id == 'ALL':
            # Fixed grammar: message previously read "Unable to removed".
            ret['message'] = 'Unable to remove all messages'
        else:
            ret['message'] = 'Unable to remove message with queue id {0}: {1}'.format(queue_id, result['stderr'])
    return ret
def run_rnaseq_ann_filter(data):
    """Run RNA-seq annotation and filtering.
    """
    data = to_single_data(data)
    if dd.get_vrn_file(data):
        # Annotate predicted effects; only replace the file on success.
        eff_file = effects.add_to_vcf(dd.get_vrn_file(data), data)[0]
        if eff_file:
            data = dd.set_vrn_file(data, eff_file)
        # Layer on vcfanno annotations, again only on success.
        ann_file = population.run_vcfanno(dd.get_vrn_file(data), data)
        if ann_file:
            data = dd.set_vrn_file(data, ann_file)
    variantcaller = dd.get_variantcaller(data)
    if variantcaller and ("gatk-haplotype" in variantcaller):
        # GATK-specific RNA-seq variant filtering.
        filter_file = variation.gatk_filter_rnaseq(dd.get_vrn_file(data), data)
        data = dd.set_vrn_file(data, filter_file)
        # remove variants close to splice junctions
        vrn_file = dd.get_vrn_file(data)
        vrn_file = variation.filter_junction_variants(vrn_file, data)
        data = dd.set_vrn_file(data, vrn_file)
    # Nested-list return matches the multi-sample pipeline convention.
    return [[data]] | Run RNA-seq annotation and filtering. | Below is the the instruction that describes the task:
### Input:
Run RNA-seq annotation and filtering.
### Response:
def run_rnaseq_ann_filter(data):
"""Run RNA-seq annotation and filtering.
"""
data = to_single_data(data)
if dd.get_vrn_file(data):
eff_file = effects.add_to_vcf(dd.get_vrn_file(data), data)[0]
if eff_file:
data = dd.set_vrn_file(data, eff_file)
ann_file = population.run_vcfanno(dd.get_vrn_file(data), data)
if ann_file:
data = dd.set_vrn_file(data, ann_file)
variantcaller = dd.get_variantcaller(data)
if variantcaller and ("gatk-haplotype" in variantcaller):
filter_file = variation.gatk_filter_rnaseq(dd.get_vrn_file(data), data)
data = dd.set_vrn_file(data, filter_file)
# remove variants close to splice junctions
vrn_file = dd.get_vrn_file(data)
vrn_file = variation.filter_junction_variants(vrn_file, data)
data = dd.set_vrn_file(data, vrn_file)
return [[data]] |
def Reset(self):
'''Reset the mock object state.
Remove all mock objects from the bus and tidy up so the state is as if
python-dbusmock had just been restarted. If the mock object was
originally created with a template (from the command line, the Python
API or by calling AddTemplate over D-Bus), it will be
re-instantiated with that template.
'''
# Clear other existing objects.
for obj_name, obj in objects.items():
if obj_name != self.path:
obj.remove_from_connection()
objects.clear()
# Reinitialise our state. Carefully remove new methods from our dict;
# they don't not actually exist if they are a statically defined
# template function
for method_name in self.methods[self.interface]:
try:
delattr(self.__class__, method_name)
except AttributeError:
pass
self._reset({})
if self._template is not None:
self.AddTemplate(self._template, self._template_parameters)
objects[self.path] = self | Reset the mock object state.
Remove all mock objects from the bus and tidy up so the state is as if
python-dbusmock had just been restarted. If the mock object was
originally created with a template (from the command line, the Python
API or by calling AddTemplate over D-Bus), it will be
re-instantiated with that template. | Below is the the instruction that describes the task:
### Input:
Reset the mock object state.
Remove all mock objects from the bus and tidy up so the state is as if
python-dbusmock had just been restarted. If the mock object was
originally created with a template (from the command line, the Python
API or by calling AddTemplate over D-Bus), it will be
re-instantiated with that template.
### Response:
def Reset(self):
'''Reset the mock object state.
Remove all mock objects from the bus and tidy up so the state is as if
python-dbusmock had just been restarted. If the mock object was
originally created with a template (from the command line, the Python
API or by calling AddTemplate over D-Bus), it will be
re-instantiated with that template.
'''
# Clear other existing objects.
for obj_name, obj in objects.items():
if obj_name != self.path:
obj.remove_from_connection()
objects.clear()
# Reinitialise our state. Carefully remove new methods from our dict;
# they don't not actually exist if they are a statically defined
# template function
for method_name in self.methods[self.interface]:
try:
delattr(self.__class__, method_name)
except AttributeError:
pass
self._reset({})
if self._template is not None:
self.AddTemplate(self._template, self._template_parameters)
objects[self.path] = self |
def makeLogic(self):
# *** When camera list has been closed, re-create the cameralist tree and update filterchains ***
# self.manage_cameras_win.signals.close.connect(self.updateCameraTree) # now put into save_camera_config_slot
# self.manage_cameras_win.signals.close.connect(self.filterchain_group.update) # TODO: use this once fixed
# self.manage_cameras_win.signals.close.connect(self.filterchain_group.read) # TODO: eh.. lets be sure of this .. (are we releasing slots in the LiveThread etc.)
# self.manage_cameras_win.signals.close.connect(self.save_camera_config_slot)
# self.manage_memory_container.signals.save.connect(self.save_memory_conf_slot)
# *** Menu bar connections ***
# the self.filemenu.exit attribute was autogenerated
self.filemenu.exit. triggered.connect(self.exit_slot)
self.filemenu.save_window_layout. triggered.connect(
self.save_window_layout_slot)
self.filemenu.load_window_layout. triggered.connect(
self.load_window_layout_slot)
"""
self.configmenu.manage_cameras. triggered.connect(
self.manage_cameras_slot)
self.configmenu.memory_usage. triggered.connect(
self.memory_usage_slot)
"""
self.configmenu.configuration_dialog.triggered.connect(self.config_dialog_slot)
self.viewmenu.camera_list. triggered.connect(self.camera_list_slot)
self.aboutmenu.about_valkka_live. triggered.connect(self.about_slot)
# *** Connect autogenerated menu calls into autogenerated slot functions ***
for i in range(1, 5):
# gets member function grid_ixi_slot
slot_func = getattr(self, "grid_%ix%i_slot" % (i, i))
# gets member function grid_ixi from self.viewmenu.video_grid
menu_func = getattr(self.viewmenu.video_grid,
"grid_%ix%i" % (i, i))
menu_func.triggered.connect(slot_func)
# i.e., like this : self.viewmenu.video_grid.grid_1x1.triggered.connect(slot_func)
# *** autogenerated machine vision menu and slots ***
for cl in self.mvision_classes:
getattr(self.mvisionmenu,cl.name).triggered.connect(getattr(self,cl.name+"_slot")) | self.configmenu.manage_cameras. triggered.connect(
self.manage_cameras_slot)
self.configmenu.memory_usage. triggered.connect(
self.memory_usage_slot) | Below is the the instruction that describes the task:
### Input:
self.configmenu.manage_cameras. triggered.connect(
self.manage_cameras_slot)
self.configmenu.memory_usage. triggered.connect(
self.memory_usage_slot)
### Response:
def makeLogic(self):
# *** When camera list has been closed, re-create the cameralist tree and update filterchains ***
# self.manage_cameras_win.signals.close.connect(self.updateCameraTree) # now put into save_camera_config_slot
# self.manage_cameras_win.signals.close.connect(self.filterchain_group.update) # TODO: use this once fixed
# self.manage_cameras_win.signals.close.connect(self.filterchain_group.read) # TODO: eh.. lets be sure of this .. (are we releasing slots in the LiveThread etc.)
# self.manage_cameras_win.signals.close.connect(self.save_camera_config_slot)
# self.manage_memory_container.signals.save.connect(self.save_memory_conf_slot)
# *** Menu bar connections ***
# the self.filemenu.exit attribute was autogenerated
self.filemenu.exit. triggered.connect(self.exit_slot)
self.filemenu.save_window_layout. triggered.connect(
self.save_window_layout_slot)
self.filemenu.load_window_layout. triggered.connect(
self.load_window_layout_slot)
"""
self.configmenu.manage_cameras. triggered.connect(
self.manage_cameras_slot)
self.configmenu.memory_usage. triggered.connect(
self.memory_usage_slot)
"""
self.configmenu.configuration_dialog.triggered.connect(self.config_dialog_slot)
self.viewmenu.camera_list. triggered.connect(self.camera_list_slot)
self.aboutmenu.about_valkka_live. triggered.connect(self.about_slot)
# *** Connect autogenerated menu calls into autogenerated slot functions ***
for i in range(1, 5):
# gets member function grid_ixi_slot
slot_func = getattr(self, "grid_%ix%i_slot" % (i, i))
# gets member function grid_ixi from self.viewmenu.video_grid
menu_func = getattr(self.viewmenu.video_grid,
"grid_%ix%i" % (i, i))
menu_func.triggered.connect(slot_func)
# i.e., like this : self.viewmenu.video_grid.grid_1x1.triggered.connect(slot_func)
# *** autogenerated machine vision menu and slots ***
for cl in self.mvision_classes:
getattr(self.mvisionmenu,cl.name).triggered.connect(getattr(self,cl.name+"_slot")) |
def print_error_messages_raylet(task_error_queue, threads_stopped):
"""Prints message received in the given output queue.
This checks periodically if any un-raised errors occured in the background.
Args:
task_error_queue (queue.Queue): A queue used to receive errors from the
thread that listens to Redis.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
try:
error, t = task_error_queue.get(block=False)
except queue.Empty:
threads_stopped.wait(timeout=0.01)
continue
# Delay errors a little bit of time to attempt to suppress redundant
# messages originating from the worker.
while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time():
threads_stopped.wait(timeout=1)
if threads_stopped.is_set():
break
if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD:
logger.debug("Suppressing error from worker: {}".format(error))
else:
logger.error(
"Possible unhandled error from worker: {}".format(error)) | Prints message received in the given output queue.
This checks periodically if any un-raised errors occured in the background.
Args:
task_error_queue (queue.Queue): A queue used to receive errors from the
thread that listens to Redis.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit. | Below is the the instruction that describes the task:
### Input:
Prints message received in the given output queue.
This checks periodically if any un-raised errors occured in the background.
Args:
task_error_queue (queue.Queue): A queue used to receive errors from the
thread that listens to Redis.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
### Response:
def print_error_messages_raylet(task_error_queue, threads_stopped):
"""Prints message received in the given output queue.
This checks periodically if any un-raised errors occured in the background.
Args:
task_error_queue (queue.Queue): A queue used to receive errors from the
thread that listens to Redis.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
try:
error, t = task_error_queue.get(block=False)
except queue.Empty:
threads_stopped.wait(timeout=0.01)
continue
# Delay errors a little bit of time to attempt to suppress redundant
# messages originating from the worker.
while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time():
threads_stopped.wait(timeout=1)
if threads_stopped.is_set():
break
if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD:
logger.debug("Suppressing error from worker: {}".format(error))
else:
logger.error(
"Possible unhandled error from worker: {}".format(error)) |
def input(self):
"""Returns a file-like object representing the request body."""
if self._input is None:
input_file = self.environ['wsgi.input']
content_length = self.content_length or 0
self._input = WsgiInput(input_file, self.content_length)
return self._input | Returns a file-like object representing the request body. | Below is the the instruction that describes the task:
### Input:
Returns a file-like object representing the request body.
### Response:
def input(self):
"""Returns a file-like object representing the request body."""
if self._input is None:
input_file = self.environ['wsgi.input']
content_length = self.content_length or 0
self._input = WsgiInput(input_file, self.content_length)
return self._input |
def hasstate(self, window_name, object_name, state, guiTimeOut=0):
"""
has state
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@type window_name: string
@param state: State of the current object.
@type object_name: string
@param guiTimeOut: Wait timeout in seconds
@type guiTimeOut: integer
@return: 1 on success.
@rtype: integer
"""
try:
object_handle = self._get_object_handle(window_name, object_name)
if state == "enabled":
return int(object_handle.AXEnabled)
elif state == "focused":
return int(object_handle.AXFocused)
elif state == "focusable":
return int(object_handle.AXFocused)
elif state == "checked":
if re.match("AXCheckBox", object_handle.AXRole,
re.M | re.U | re.L) or \
re.match("AXRadioButton", object_handle.AXRole,
re.M | re.U | re.L):
if object_handle.AXValue:
return 1
except:
pass
return 0 | has state
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@type window_name: string
@param state: State of the current object.
@type object_name: string
@param guiTimeOut: Wait timeout in seconds
@type guiTimeOut: integer
@return: 1 on success.
@rtype: integer | Below is the the instruction that describes the task:
### Input:
has state
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@type window_name: string
@param state: State of the current object.
@type object_name: string
@param guiTimeOut: Wait timeout in seconds
@type guiTimeOut: integer
@return: 1 on success.
@rtype: integer
### Response:
def hasstate(self, window_name, object_name, state, guiTimeOut=0):
"""
has state
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@type window_name: string
@param state: State of the current object.
@type object_name: string
@param guiTimeOut: Wait timeout in seconds
@type guiTimeOut: integer
@return: 1 on success.
@rtype: integer
"""
try:
object_handle = self._get_object_handle(window_name, object_name)
if state == "enabled":
return int(object_handle.AXEnabled)
elif state == "focused":
return int(object_handle.AXFocused)
elif state == "focusable":
return int(object_handle.AXFocused)
elif state == "checked":
if re.match("AXCheckBox", object_handle.AXRole,
re.M | re.U | re.L) or \
re.match("AXRadioButton", object_handle.AXRole,
re.M | re.U | re.L):
if object_handle.AXValue:
return 1
except:
pass
return 0 |
def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log.info(
'Simulated {} trading days\n'
'first open: {}\n'
'last close: {}',
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
)
return packet | When the simulation is complete, run the full period risk report
and send it out on the results socket. | Below is the the instruction that describes the task:
### Input:
When the simulation is complete, run the full period risk report
and send it out on the results socket.
### Response:
def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log.info(
'Simulated {} trading days\n'
'first open: {}\n'
'last close: {}',
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
)
return packet |
def _parse_docstrings(self, filepath):
"""Looks for additional docstring specifications in the correctly named
XML files in the same directory as the module."""
xmlpath = self.get_xmldoc_path(filepath)
if self.tramp.exists(xmlpath):
xmlstring = self.tramp.read(xmlpath)
self.modulep.docparser.parsexml(xmlstring, self.modules, xmlpath) | Looks for additional docstring specifications in the correctly named
XML files in the same directory as the module. | Below is the the instruction that describes the task:
### Input:
Looks for additional docstring specifications in the correctly named
XML files in the same directory as the module.
### Response:
def _parse_docstrings(self, filepath):
"""Looks for additional docstring specifications in the correctly named
XML files in the same directory as the module."""
xmlpath = self.get_xmldoc_path(filepath)
if self.tramp.exists(xmlpath):
xmlstring = self.tramp.read(xmlpath)
self.modulep.docparser.parsexml(xmlstring, self.modules, xmlpath) |
def total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels, masked_grid_index_to_pixel):
"""Compute the total number of borders-pixels in a masks."""
border_pixel_total = 0
for i in range(edge_pixels.shape[0]):
if check_if_border_pixel(mask, edge_pixels[i], masked_grid_index_to_pixel):
border_pixel_total += 1
return border_pixel_total | Compute the total number of borders-pixels in a masks. | Below is the the instruction that describes the task:
### Input:
Compute the total number of borders-pixels in a masks.
### Response:
def total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels, masked_grid_index_to_pixel):
"""Compute the total number of borders-pixels in a masks."""
border_pixel_total = 0
for i in range(edge_pixels.shape[0]):
if check_if_border_pixel(mask, edge_pixels[i], masked_grid_index_to_pixel):
border_pixel_total += 1
return border_pixel_total |
def set_commissions(self, fn):
"""
Set commission (transaction fee) function.
Args:
fn (fn(quantity, price)): Function used to determine commission
amount.
"""
self.commission_fn = fn
for c in self._childrenv:
if isinstance(c, StrategyBase):
c.set_commissions(fn) | Set commission (transaction fee) function.
Args:
fn (fn(quantity, price)): Function used to determine commission
amount. | Below is the the instruction that describes the task:
### Input:
Set commission (transaction fee) function.
Args:
fn (fn(quantity, price)): Function used to determine commission
amount.
### Response:
def set_commissions(self, fn):
"""
Set commission (transaction fee) function.
Args:
fn (fn(quantity, price)): Function used to determine commission
amount.
"""
self.commission_fn = fn
for c in self._childrenv:
if isinstance(c, StrategyBase):
c.set_commissions(fn) |
def map(self, ID_s,
FROM=None,
TO=None,
target_as_set=False,
no_match_sub=None):
'''
The main method of this class and the essence of the package.
It allows to "map" stuff.
Args:
ID_s: Nested lists with strings as leafs (plain strings also possible)
FROM (str): Origin key for the mapping (default: main key)
TO (str): Destination key for the mapping (default: main key)
target_as_set (bool): Whether to summarize the output as a set (removes duplicates)
no_match_sub: Object representing the status of an ID not being able to be matched
(default: None)
Returns:
Mapping: a mapping object capturing the result of the mapping request
'''
def io_mode(ID_s):
'''
Handles the input/output modalities of the mapping.
'''
unlist_return = False
list_of_lists = False
if isinstance(ID_s, str):
ID_s = [ID_s]
unlist_return = True
elif isinstance(ID_s, list):
if len(ID_s) > 0 and isinstance(ID_s[0], list):
# assuming ID_s is a list of lists of ID strings
list_of_lists = True
return ID_s, unlist_return, list_of_lists
# interpret input
if FROM == TO:
return ID_s
ID_s, unlist_return, list_of_lists = io_mode(ID_s)
# map consistent with interpretation of input
if list_of_lists:
mapped_ids = [self.map(ID, FROM, TO, target_as_set, no_match_sub) for ID in ID_s]
else:
mapped_ids = self._map(ID_s, FROM, TO, target_as_set, no_match_sub)
# return consistent with interpretation of input
if unlist_return:
return mapped_ids[0]
return Mapping(ID_s, mapped_ids) | The main method of this class and the essence of the package.
It allows to "map" stuff.
Args:
ID_s: Nested lists with strings as leafs (plain strings also possible)
FROM (str): Origin key for the mapping (default: main key)
TO (str): Destination key for the mapping (default: main key)
target_as_set (bool): Whether to summarize the output as a set (removes duplicates)
no_match_sub: Object representing the status of an ID not being able to be matched
(default: None)
Returns:
Mapping: a mapping object capturing the result of the mapping request | Below is the the instruction that describes the task:
### Input:
The main method of this class and the essence of the package.
It allows to "map" stuff.
Args:
ID_s: Nested lists with strings as leafs (plain strings also possible)
FROM (str): Origin key for the mapping (default: main key)
TO (str): Destination key for the mapping (default: main key)
target_as_set (bool): Whether to summarize the output as a set (removes duplicates)
no_match_sub: Object representing the status of an ID not being able to be matched
(default: None)
Returns:
Mapping: a mapping object capturing the result of the mapping request
### Response:
def map(self, ID_s,
FROM=None,
TO=None,
target_as_set=False,
no_match_sub=None):
'''
The main method of this class and the essence of the package.
It allows to "map" stuff.
Args:
ID_s: Nested lists with strings as leafs (plain strings also possible)
FROM (str): Origin key for the mapping (default: main key)
TO (str): Destination key for the mapping (default: main key)
target_as_set (bool): Whether to summarize the output as a set (removes duplicates)
no_match_sub: Object representing the status of an ID not being able to be matched
(default: None)
Returns:
Mapping: a mapping object capturing the result of the mapping request
'''
def io_mode(ID_s):
'''
Handles the input/output modalities of the mapping.
'''
unlist_return = False
list_of_lists = False
if isinstance(ID_s, str):
ID_s = [ID_s]
unlist_return = True
elif isinstance(ID_s, list):
if len(ID_s) > 0 and isinstance(ID_s[0], list):
# assuming ID_s is a list of lists of ID strings
list_of_lists = True
return ID_s, unlist_return, list_of_lists
# interpret input
if FROM == TO:
return ID_s
ID_s, unlist_return, list_of_lists = io_mode(ID_s)
# map consistent with interpretation of input
if list_of_lists:
mapped_ids = [self.map(ID, FROM, TO, target_as_set, no_match_sub) for ID in ID_s]
else:
mapped_ids = self._map(ID_s, FROM, TO, target_as_set, no_match_sub)
# return consistent with interpretation of input
if unlist_return:
return mapped_ids[0]
return Mapping(ID_s, mapped_ids) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.