code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def is_mention_line(cls, word):
""" Detects links and mentions
:param word: Token to be evaluated
"""
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False | Detects links and mentions
:param word: Token to be evaluated | Below is the instruction that describes the task:
### Input:
Detects links and mentions
:param word: Token to be evaluated
### Response:
def is_mention_line(cls, word):
""" Detects links and mentions
:param word: Token to be evaluated
"""
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False |
def check_docstrings(show_diff=False, config=None, mods=None):
"""
Check docstrings in module match the README.md
"""
readme = parse_readme()
modules_readme = core_module_docstrings(config=config)
warned = False
if create_readme(readme) != create_readme(modules_readme):
for module in sorted(readme):
if mods and module not in mods:
continue
err = None
if module not in modules_readme:
err = "Module {} in README but not in /modules".format(module)
elif (
"".join(readme[module]).strip()
!= "".join(modules_readme[module]).strip()
):
err = "Module {} docstring does not match README".format(module)
if err:
if not warned:
print_stderr("Documentation does not match!\n")
warned = True
print_stderr(err)
for module in modules_readme:
if mods and module not in mods:
continue
if module not in readme:
print_stderr("Module {} in /modules but not in README".format(module))
if show_diff:
print_stderr(
"\n".join(
difflib.unified_diff(
create_readme(readme).split("\n"),
create_readme(modules_readme).split("\n"),
)
)
)
else:
if warned:
            print_stderr("\nUse `py3-cmd docstring --diff` to view diff.") | Check docstrings in module match the README.md | Below is the instruction that describes the task:
### Input:
Check docstrings in module match the README.md
### Response:
def check_docstrings(show_diff=False, config=None, mods=None):
"""
Check docstrings in module match the README.md
"""
readme = parse_readme()
modules_readme = core_module_docstrings(config=config)
warned = False
if create_readme(readme) != create_readme(modules_readme):
for module in sorted(readme):
if mods and module not in mods:
continue
err = None
if module not in modules_readme:
err = "Module {} in README but not in /modules".format(module)
elif (
"".join(readme[module]).strip()
!= "".join(modules_readme[module]).strip()
):
err = "Module {} docstring does not match README".format(module)
if err:
if not warned:
print_stderr("Documentation does not match!\n")
warned = True
print_stderr(err)
for module in modules_readme:
if mods and module not in mods:
continue
if module not in readme:
print_stderr("Module {} in /modules but not in README".format(module))
if show_diff:
print_stderr(
"\n".join(
difflib.unified_diff(
create_readme(readme).split("\n"),
create_readme(modules_readme).split("\n"),
)
)
)
else:
if warned:
print_stderr("\nUse `py3-cmd docstring --diff` to view diff.") |
def login(self, password):
"""
Authentify yourself against the box,
:param password: Admin password of the box
:type password: str
:return: True if your auth is successful
:rtype: bool
"""
self.bbox_auth.set_access(BboxConstant.AUTHENTICATION_LEVEL_PUBLIC, BboxConstant.AUTHENTICATION_LEVEL_PUBLIC)
self.bbox_url.set_api_name("login", None)
data = {'password': password}
api = BboxApiCall(self.bbox_url, BboxConstant.HTTP_METHOD_POST, data,
self.bbox_auth)
response = api.execute_api_request()
if response.status_code == 200:
self.bbox_auth.set_cookie_id(response.cookies["BBOX_ID"])
return self.bbox_auth.is_authentified() | Authentify yourself against the box,
:param password: Admin password of the box
:type password: str
:return: True if your auth is successful
:rtype: bool | Below is the instruction that describes the task:
### Input:
Authentify yourself against the box,
:param password: Admin password of the box
:type password: str
:return: True if your auth is successful
:rtype: bool
### Response:
def login(self, password):
"""
Authentify yourself against the box,
:param password: Admin password of the box
:type password: str
:return: True if your auth is successful
:rtype: bool
"""
self.bbox_auth.set_access(BboxConstant.AUTHENTICATION_LEVEL_PUBLIC, BboxConstant.AUTHENTICATION_LEVEL_PUBLIC)
self.bbox_url.set_api_name("login", None)
data = {'password': password}
api = BboxApiCall(self.bbox_url, BboxConstant.HTTP_METHOD_POST, data,
self.bbox_auth)
response = api.execute_api_request()
if response.status_code == 200:
self.bbox_auth.set_cookie_id(response.cookies["BBOX_ID"])
return self.bbox_auth.is_authentified() |
def get_record(self, **kwargs):
# type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry]
'''
Get the directory record for a particular path.
Parameters:
iso_path - The absolute path on the ISO9660 filesystem to get the
record for.
rr_path - The absolute path on the Rock Ridge filesystem to get the
record for.
joliet_path - The absolute path on the Joliet filesystem to get the
record for.
udf_path - The absolute path on the UDF filesystem to get the record
for.
Returns:
An object that represents the path. This may be a dr.DirectoryRecord
object (in the cases of iso_path, rr_path, or joliet_path), or a
udf.UDFFileEntry object (in the case of udf_path).
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
num_paths = 0
for key in kwargs:
if key in ['joliet_path', 'rr_path', 'iso_path', 'udf_path']:
if kwargs[key] is not None:
num_paths += 1
else:
raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
if num_paths != 1:
raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
if 'joliet_path' in kwargs:
return self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path']))
if 'rr_path' in kwargs:
return self._get_entry(None, utils.normpath(kwargs['rr_path']), None)
if 'udf_path' in kwargs:
return self._get_udf_entry(kwargs['udf_path'])
return self._get_entry(utils.normpath(kwargs['iso_path']), None, None) | Get the directory record for a particular path.
Parameters:
iso_path - The absolute path on the ISO9660 filesystem to get the
record for.
rr_path - The absolute path on the Rock Ridge filesystem to get the
record for.
joliet_path - The absolute path on the Joliet filesystem to get the
record for.
udf_path - The absolute path on the UDF filesystem to get the record
for.
Returns:
An object that represents the path. This may be a dr.DirectoryRecord
object (in the cases of iso_path, rr_path, or joliet_path), or a
udf.UDFFileEntry object (in the case of udf_path). | Below is the instruction that describes the task:
### Input:
Get the directory record for a particular path.
Parameters:
iso_path - The absolute path on the ISO9660 filesystem to get the
record for.
rr_path - The absolute path on the Rock Ridge filesystem to get the
record for.
joliet_path - The absolute path on the Joliet filesystem to get the
record for.
udf_path - The absolute path on the UDF filesystem to get the record
for.
Returns:
An object that represents the path. This may be a dr.DirectoryRecord
object (in the cases of iso_path, rr_path, or joliet_path), or a
udf.UDFFileEntry object (in the case of udf_path).
### Response:
def get_record(self, **kwargs):
# type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry]
'''
Get the directory record for a particular path.
Parameters:
iso_path - The absolute path on the ISO9660 filesystem to get the
record for.
rr_path - The absolute path on the Rock Ridge filesystem to get the
record for.
joliet_path - The absolute path on the Joliet filesystem to get the
record for.
udf_path - The absolute path on the UDF filesystem to get the record
for.
Returns:
An object that represents the path. This may be a dr.DirectoryRecord
object (in the cases of iso_path, rr_path, or joliet_path), or a
udf.UDFFileEntry object (in the case of udf_path).
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
num_paths = 0
for key in kwargs:
if key in ['joliet_path', 'rr_path', 'iso_path', 'udf_path']:
if kwargs[key] is not None:
num_paths += 1
else:
raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
if num_paths != 1:
raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
if 'joliet_path' in kwargs:
return self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path']))
if 'rr_path' in kwargs:
return self._get_entry(None, utils.normpath(kwargs['rr_path']), None)
if 'udf_path' in kwargs:
return self._get_udf_entry(kwargs['udf_path'])
return self._get_entry(utils.normpath(kwargs['iso_path']), None, None) |
def username(self):
""" Username for the org connection. """
username = self.config.get("username")
if not username:
username = self.userinfo__preferred_username
    return username | Username for the org connection. | Below is the instruction that describes the task:
### Input:
Username for the org connection.
### Response:
def username(self):
""" Username for the org connection. """
username = self.config.get("username")
if not username:
username = self.userinfo__preferred_username
return username |
def _compute_site_scaling(self, vs30, mean):
"""
Scales the ground motions by increasing 40 % on NEHRP class D/E sites,
and decreasing by 40 % on NEHRP class A/B sites
"""
site_factor = np.ones(len(vs30), dtype=float)
idx = vs30 <= 360.
site_factor[idx] = 1.4
idx = vs30 > 760.0
site_factor[idx] = 0.6
return np.log(np.exp(mean) * site_factor) | Scales the ground motions by increasing 40 % on NEHRP class D/E sites,
and decreasing by 40 % on NEHRP class A/B sites | Below is the instruction that describes the task:
### Input:
Scales the ground motions by increasing 40 % on NEHRP class D/E sites,
and decreasing by 40 % on NEHRP class A/B sites
### Response:
def _compute_site_scaling(self, vs30, mean):
"""
Scales the ground motions by increasing 40 % on NEHRP class D/E sites,
and decreasing by 40 % on NEHRP class A/B sites
"""
site_factor = np.ones(len(vs30), dtype=float)
idx = vs30 <= 360.
site_factor[idx] = 1.4
idx = vs30 > 760.0
site_factor[idx] = 0.6
return np.log(np.exp(mean) * site_factor) |
def match(self, filename):
"""Searches for a pattern that matches the given filename.
:return A matching pattern or None if there is no matching pattern.
"""
try:
for regex, patterns in self._regex_patterns:
match = regex.match(filename)
debug_template = "%s against %s: %%s" % (filename, regex._real_regex.pattern)
if match:
if self.debug:
logger.info(debug_template % "hit")
return patterns[match.lastindex -1]
if self.debug:
logger.info(debug_template % "miss")
except Exception as e:
# We can't show the default e.msg to the user as thats for
# the combined pattern we sent to regex. Instead we indicate to
# the user that an ignore file needs fixing.
logger.error('Invalid pattern found in regex: %s.', e.msg)
e.msg = "File ~/.bazaar/ignore or .bzrignore contains error(s)."
bad_patterns = ''
for _, patterns in self._regex_patterns:
for p in patterns:
if not Globster.is_pattern_valid(p):
bad_patterns += ('\n %s' % p)
e.msg += bad_patterns
raise e
return None | Searches for a pattern that matches the given filename.
:return A matching pattern or None if there is no matching pattern. | Below is the instruction that describes the task:
### Input:
Searches for a pattern that matches the given filename.
:return A matching pattern or None if there is no matching pattern.
### Response:
def match(self, filename):
"""Searches for a pattern that matches the given filename.
:return A matching pattern or None if there is no matching pattern.
"""
try:
for regex, patterns in self._regex_patterns:
match = regex.match(filename)
debug_template = "%s against %s: %%s" % (filename, regex._real_regex.pattern)
if match:
if self.debug:
logger.info(debug_template % "hit")
return patterns[match.lastindex -1]
if self.debug:
logger.info(debug_template % "miss")
except Exception as e:
# We can't show the default e.msg to the user as thats for
# the combined pattern we sent to regex. Instead we indicate to
# the user that an ignore file needs fixing.
logger.error('Invalid pattern found in regex: %s.', e.msg)
e.msg = "File ~/.bazaar/ignore or .bzrignore contains error(s)."
bad_patterns = ''
for _, patterns in self._regex_patterns:
for p in patterns:
if not Globster.is_pattern_valid(p):
bad_patterns += ('\n %s' % p)
e.msg += bad_patterns
raise e
return None |
def unshare_network(network_id, usernames,**kwargs):
"""
Un-Share a network with a list of users, identified by their usernames.
"""
user_id = kwargs.get('user_id')
net_i = _get_network(network_id)
net_i.check_share_permission(user_id)
for username in usernames:
user_i = _get_user(username)
#Set the owner ship on the network itself
net_i.unset_owner(user_i.id, write=write, share=share)
    db.DBSession.flush() | Un-Share a network with a list of users, identified by their usernames. | Below is the instruction that describes the task:
### Input:
Un-Share a network with a list of users, identified by their usernames.
### Response:
def unshare_network(network_id, usernames,**kwargs):
"""
Un-Share a network with a list of users, identified by their usernames.
"""
user_id = kwargs.get('user_id')
net_i = _get_network(network_id)
net_i.check_share_permission(user_id)
for username in usernames:
user_i = _get_user(username)
#Set the owner ship on the network itself
net_i.unset_owner(user_i.id, write=write, share=share)
db.DBSession.flush() |
def ParserUnparserFactory(module_name, *unparser_names):
"""
Produce a new parser/unparser object from the names provided.
"""
parse_callable = import_module(PKGNAME + '.parsers.' + module_name).parse
unparser_module = import_module(PKGNAME + '.unparsers.' + module_name)
return RawParserUnparserFactory(module_name, parse_callable, *[
        getattr(unparser_module, name) for name in unparser_names]) | Produce a new parser/unparser object from the names provided. | Below is the instruction that describes the task:
### Input:
Produce a new parser/unparser object from the names provided.
### Response:
def ParserUnparserFactory(module_name, *unparser_names):
"""
Produce a new parser/unparser object from the names provided.
"""
parse_callable = import_module(PKGNAME + '.parsers.' + module_name).parse
unparser_module = import_module(PKGNAME + '.unparsers.' + module_name)
return RawParserUnparserFactory(module_name, parse_callable, *[
getattr(unparser_module, name) for name in unparser_names]) |
def get_datasets(self, workspace_id):
"""Runs HTTP GET request to retrieve the list of datasets."""
api_path = self.DATASOURCES_URI_FMT.format(workspace_id)
    return self._send_get_req(api_path) | Runs HTTP GET request to retrieve the list of datasets. | Below is the instruction that describes the task:
### Input:
Runs HTTP GET request to retrieve the list of datasets.
### Response:
def get_datasets(self, workspace_id):
"""Runs HTTP GET request to retrieve the list of datasets."""
api_path = self.DATASOURCES_URI_FMT.format(workspace_id)
return self._send_get_req(api_path) |
def disjoint_relations(rdf, fix=False):
"""Check if the graph contains concepts connected by both of the semantically
disjoint semantic skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive.
"""
for conc1, conc2 in sorted(rdf.subject_objects(SKOS.related)):
if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)):
if fix:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"removing skos:related",
conc1, conc2)
rdf.remove((conc1, SKOS.related, conc2))
rdf.remove((conc2, SKOS.related, conc1))
else:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"but keeping it because keep_related is enabled",
conc1, conc2) | Check if the graph contains concepts connected by both of the semantically
disjoint semantic skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive. | Below is the instruction that describes the task:
### Input:
Check if the graph contains concepts connected by both of the semantically
disjoint semantic skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive.
### Response:
def disjoint_relations(rdf, fix=False):
"""Check if the graph contains concepts connected by both of the semantically
disjoint semantic skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive.
"""
for conc1, conc2 in sorted(rdf.subject_objects(SKOS.related)):
if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)):
if fix:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"removing skos:related",
conc1, conc2)
rdf.remove((conc1, SKOS.related, conc2))
rdf.remove((conc2, SKOS.related, conc1))
else:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"but keeping it because keep_related is enabled",
conc1, conc2) |
def main(argv):
"""Entry point for command line script to perform OAuth 2.0."""
p = argparse.ArgumentParser()
p.add_argument('-s', '--scope', nargs='+')
p.add_argument('-o', '--oauth-service', default='google')
p.add_argument('-i', '--client-id')
p.add_argument('-x', '--client-secret')
p.add_argument('-r', '--redirect-uri')
p.add_argument('-f', '--client-secrets')
args = p.parse_args(argv)
client_args = (args.client_id, args.client_secret, args.client_id)
if any(client_args) and not all(client_args):
print('Must provide none of client-id, client-secret and redirect-uri;'
' or all of them.')
p.print_usage()
return 1
print args.scope
if not args.scope:
print('Scope must be provided.')
p.print_usage()
return 1
config = WizardClientConfig()
config.scope = ' '.join(args.scope)
print(run_local(UserOAuth2(config))['access_token'])
    return 0 | Entry point for command line script to perform OAuth 2.0. | Below is the instruction that describes the task:
### Input:
Entry point for command line script to perform OAuth 2.0.
### Response:
def main(argv):
"""Entry point for command line script to perform OAuth 2.0."""
p = argparse.ArgumentParser()
p.add_argument('-s', '--scope', nargs='+')
p.add_argument('-o', '--oauth-service', default='google')
p.add_argument('-i', '--client-id')
p.add_argument('-x', '--client-secret')
p.add_argument('-r', '--redirect-uri')
p.add_argument('-f', '--client-secrets')
args = p.parse_args(argv)
client_args = (args.client_id, args.client_secret, args.client_id)
if any(client_args) and not all(client_args):
print('Must provide none of client-id, client-secret and redirect-uri;'
' or all of them.')
p.print_usage()
return 1
print args.scope
if not args.scope:
print('Scope must be provided.')
p.print_usage()
return 1
config = WizardClientConfig()
config.scope = ' '.join(args.scope)
print(run_local(UserOAuth2(config))['access_token'])
return 0 |
def addJobShape(self, jobShape):
"""
Function adds the job to the first node reservation in which it will fit (this is the
bin-packing aspect).
"""
chosenNodeShape = None
for nodeShape in self.nodeShapes:
if NodeReservation(nodeShape).fits(jobShape):
# This node shape is the first that fits this jobShape
chosenNodeShape = nodeShape
break
if chosenNodeShape is None:
logger.warning("Couldn't fit job with requirements %r into any nodes in the nodeTypes "
"list." % jobShape)
return
# grab current list of job objects appended to this nodeType
nodeReservations = self.nodeReservations[chosenNodeShape]
for nodeReservation in nodeReservations:
if nodeReservation.attemptToAddJob(jobShape, chosenNodeShape, self.targetTime):
# We succeeded adding the job to this node reservation. Now we're done.
return
reservation = NodeReservation(chosenNodeShape)
currentTimeAllocated = chosenNodeShape.wallTime
adjustEndingReservationForJob(reservation, jobShape, 0)
self.nodeReservations[chosenNodeShape].append(reservation)
# Extend the reservation if necessary to cover the job's entire runtime.
while currentTimeAllocated < jobShape.wallTime:
extendThisReservation = NodeReservation(reservation.shape)
currentTimeAllocated += chosenNodeShape.wallTime
reservation.nReservation = extendThisReservation
reservation = extendThisReservation | Function adds the job to the first node reservation in which it will fit (this is the
bin-packing aspect). | Below is the instruction that describes the task:
### Input:
Function adds the job to the first node reservation in which it will fit (this is the
bin-packing aspect).
### Response:
def addJobShape(self, jobShape):
"""
Function adds the job to the first node reservation in which it will fit (this is the
bin-packing aspect).
"""
chosenNodeShape = None
for nodeShape in self.nodeShapes:
if NodeReservation(nodeShape).fits(jobShape):
# This node shape is the first that fits this jobShape
chosenNodeShape = nodeShape
break
if chosenNodeShape is None:
logger.warning("Couldn't fit job with requirements %r into any nodes in the nodeTypes "
"list." % jobShape)
return
# grab current list of job objects appended to this nodeType
nodeReservations = self.nodeReservations[chosenNodeShape]
for nodeReservation in nodeReservations:
if nodeReservation.attemptToAddJob(jobShape, chosenNodeShape, self.targetTime):
# We succeeded adding the job to this node reservation. Now we're done.
return
reservation = NodeReservation(chosenNodeShape)
currentTimeAllocated = chosenNodeShape.wallTime
adjustEndingReservationForJob(reservation, jobShape, 0)
self.nodeReservations[chosenNodeShape].append(reservation)
# Extend the reservation if necessary to cover the job's entire runtime.
while currentTimeAllocated < jobShape.wallTime:
extendThisReservation = NodeReservation(reservation.shape)
currentTimeAllocated += chosenNodeShape.wallTime
reservation.nReservation = extendThisReservation
reservation = extendThisReservation |
def _combine_attr_fast_update(self, attr, typ):
'''Avoids having to call _update for each intermediate base. Only
works for class attr of type UpdateDict.
'''
values = dict(getattr(self, attr, {}))
for base in self._class_data.bases:
vals = dict(getattr(base, attr, {}))
preserve_attr_data(vals, values)
values = combine(vals, values)
setattr(self, attr, typ(values)) | Avoids having to call _update for each intermediate base. Only
works for class attr of type UpdateDict. | Below is the instruction that describes the task:
### Input:
Avoids having to call _update for each intermediate base. Only
works for class attr of type UpdateDict.
### Response:
def _combine_attr_fast_update(self, attr, typ):
'''Avoids having to call _update for each intermediate base. Only
works for class attr of type UpdateDict.
'''
values = dict(getattr(self, attr, {}))
for base in self._class_data.bases:
vals = dict(getattr(base, attr, {}))
preserve_attr_data(vals, values)
values = combine(vals, values)
setattr(self, attr, typ(values)) |
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn | Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used. | Below is the instruction that describes the task:
### Input:
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
### Response:
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn |
def _write_vmx_file(self):
"""
Writes pairs to the VMware VMX file corresponding to this VM.
"""
try:
self.manager.write_vmx_file(self._vmx_path, self._vmx_pairs)
except OSError as e:
            raise VMwareError('Could not write VMware VMX file "{}": {}'.format(self._vmx_path, e)) | Writes pairs to the VMware VMX file corresponding to this VM. | Below is the instruction that describes the task:
### Input:
Writes pairs to the VMware VMX file corresponding to this VM.
### Response:
def _write_vmx_file(self):
"""
Writes pairs to the VMware VMX file corresponding to this VM.
"""
try:
self.manager.write_vmx_file(self._vmx_path, self._vmx_pairs)
except OSError as e:
raise VMwareError('Could not write VMware VMX file "{}": {}'.format(self._vmx_path, e)) |
def insert_def(self, index, def_item):
"""Inserts a def universally."""
self.defs.insert(index, def_item)
for other in self.others:
        other.insert_def(index, def_item) | Inserts a def universally. | Below is the instruction that describes the task:
### Input:
Inserts a def universally.
### Response:
def insert_def(self, index, def_item):
"""Inserts a def universally."""
self.defs.insert(index, def_item)
for other in self.others:
other.insert_def(index, def_item) |
def asymptotes(hyp, n=1000):
"""
Gets a cone of asymptotes for hyperbola
"""
assert N.linalg.norm(hyp.center()) == 0
u = N.linspace(0,2*N.pi,n)
_ = N.ones(len(u))
angles = N.array([N.cos(u),N.sin(u),_]).T
    return dot(angles,hyp[:-1,:-1]) | Gets a cone of asymptotes for hyperbola | Below is the instruction that describes the task:
### Input:
Gets a cone of asymptotes for hyperbola
### Response:
def asymptotes(hyp, n=1000):
"""
Gets a cone of asymptotes for hyperbola
"""
assert N.linalg.norm(hyp.center()) == 0
u = N.linspace(0,2*N.pi,n)
_ = N.ones(len(u))
angles = N.array([N.cos(u),N.sin(u),_]).T
return dot(angles,hyp[:-1,:-1]) |
def CreateUser(username, password=None, is_admin=False):
"""Creates a new GRR user."""
grr_api = maintenance_utils.InitGRRRootAPI()
try:
user_exists = grr_api.GrrUser(username).Get() is not None
except api_errors.ResourceNotFoundError:
user_exists = False
if user_exists:
raise UserAlreadyExistsError("User '%s' already exists." % username)
user_type, password = _GetUserTypeAndPassword(
username, password=password, is_admin=is_admin)
grr_api.CreateGrrUser(
      username=username, user_type=user_type, password=password) | Creates a new GRR user. | Below is the instruction that describes the task:
### Input:
Creates a new GRR user.
### Response:
def CreateUser(username, password=None, is_admin=False):
"""Creates a new GRR user."""
grr_api = maintenance_utils.InitGRRRootAPI()
try:
user_exists = grr_api.GrrUser(username).Get() is not None
except api_errors.ResourceNotFoundError:
user_exists = False
if user_exists:
raise UserAlreadyExistsError("User '%s' already exists." % username)
user_type, password = _GetUserTypeAndPassword(
username, password=password, is_admin=is_admin)
grr_api.CreateGrrUser(
username=username, user_type=user_type, password=password) |
def add_system(self, system):
'''
Add system to the world.
All systems will be processed on World.process()
system is of type System
'''
if system not in self._systems:
system.set_world(self)
self._systems.append(system)
else:
raise DuplicateSystemError(system) | Add system to the world.
All systems will be processed on World.process()
system is of type System | Below is the instruction that describes the task:
### Input:
Add system to the world.
All systems will be processed on World.process()
system is of type System
### Response:
def add_system(self, system):
'''
Add system to the world.
All systems will be processed on World.process()
system is of type System
'''
if system not in self._systems:
system.set_world(self)
self._systems.append(system)
else:
raise DuplicateSystemError(system) |
def bsp_resize(node: tcod.bsp.BSP, x: int, y: int, w: int, h: int) -> None:
"""
.. deprecated:: 2.0
Assign directly to :any:`BSP` attributes instead.
"""
node.x = x
node.y = y
node.width = w
node.height = h | .. deprecated:: 2.0
Assign directly to :any:`BSP` attributes instead. | Below is the instruction that describes the task:
### Input:
.. deprecated:: 2.0
Assign directly to :any:`BSP` attributes instead.
### Response:
def bsp_resize(node: tcod.bsp.BSP, x: int, y: int, w: int, h: int) -> None:
"""
.. deprecated:: 2.0
Assign directly to :any:`BSP` attributes instead.
"""
node.x = x
node.y = y
node.width = w
node.height = h |
def output(func, *output, **kw):
"""
output allows annotating a method on a Parameterized class to
declare that it returns an output of a specific type. The outputs
of a Parameterized class can be queried using the
Parameterized.param.outputs method. By default the output will
inherit the method name but a custom name can be declared by
expressing the Parameter type using a keyword argument. Declaring
multiple return types using keywords is only supported in Python >= 3.6.
The simplest declaration simply declares the method returns an
object without any type guarantees, e.g.:
@output()
If a specific parameter type is specified this is a declaration
that the method will return a value of that type, e.g.:
@output(param.Number())
To override the default name of the output the type may be declared
as a keyword argument, e.g.:
@output(custom_name=param.Number())
Multiple outputs may be declared using keywords mapping from
output name to the type for Python >= 3.6 or using tuples of the
same format, which is supported for earlier versions, i.e. these
two declarations are equivalent:
@output(number=param.Number(), string=param.String())
@output(('number', param.Number()), ('string', param.String()))
output also accepts Python object types which will be upgraded to
a ClassSelector, e.g.:
@output(int)
"""
if output:
outputs = []
for i, out in enumerate(output):
i = i if len(output) > 1 else None
if isinstance(out, tuple) and len(out) == 2 and isinstance(out[0], str):
outputs.append(out+(i,))
elif isinstance(out, str):
outputs.append((out, Parameter(), i))
else:
outputs.append((None, out, i))
elif kw:
py_major = sys.version_info.major
py_minor = sys.version_info.minor
if (py_major < 3 or (py_major == 3 and py_minor < 6)) and len(kw) > 1:
raise ValueError('Multiple output declaration using keywords '
'only supported in Python >= 3.6.')
# (requires keywords to be kept ordered, which was not true in previous versions)
outputs = [(name, otype, i if len(kw) > 1 else None)
for i, (name, otype) in enumerate(kw.items())]
else:
outputs = [(None, Parameter(), None)]
names, processed = [], []
for name, otype, i in outputs:
if isinstance(otype, type):
if issubclass(otype, Parameter):
otype = otype()
else:
from .import ClassSelector
otype = ClassSelector(class_=otype)
elif isinstance(otype, tuple) and all(isinstance(t, type) for t in otype):
from .import ClassSelector
otype = ClassSelector(class_=otype)
if not isinstance(otype, Parameter):
raise ValueError('output type must be declared with a Parameter class, '
'instance or a Python object type.')
processed.append((name, otype, i))
names.append(name)
if len(set(names)) != len(names):
raise ValueError('When declaring multiple outputs each value '
'must be unique.')
_dinfo = getattr(func, '_dinfo', {})
_dinfo.update({'outputs': processed})
@wraps(func)
def _output(*args,**kw):
return func(*args,**kw)
_output._dinfo = _dinfo
return _output | output allows annotating a method on a Parameterized class to
declare that it returns an output of a specific type. The outputs
of a Parameterized class can be queried using the
Parameterized.param.outputs method. By default the output will
inherit the method name but a custom name can be declared by
expressing the Parameter type using a keyword argument. Declaring
multiple return types using keywords is only supported in Python >= 3.6.
The simplest declaration simply declares the method returns an
object without any type guarantees, e.g.:
@output()
If a specific parameter type is specified this is a declaration
that the method will return a value of that type, e.g.:
@output(param.Number())
To override the default name of the output the type may be declared
as a keyword argument, e.g.:
@output(custom_name=param.Number())
Multiple outputs may be declared using keywords mapping from
output name to the type for Python >= 3.6 or using tuples of the
same format, which is supported for earlier versions, i.e. these
two declarations are equivalent:
@output(number=param.Number(), string=param.String())
@output(('number', param.Number()), ('string', param.String()))
output also accepts Python object types which will be upgraded to
a ClassSelector, e.g.:
@output(int) | Below is the the instruction that describes the task:
### Input:
output allows annotating a method on a Parameterized class to
declare that it returns an output of a specific type. The outputs
of a Parameterized class can be queried using the
Parameterized.param.outputs method. By default the output will
inherit the method name but a custom name can be declared by
expressing the Parameter type using a keyword argument. Declaring
multiple return types using keywords is only supported in Python >= 3.6.
The simplest declaration simply declares the method returns an
object without any type guarantees, e.g.:
@output()
If a specific parameter type is specified this is a declaration
that the method will return a value of that type, e.g.:
@output(param.Number())
To override the default name of the output the type may be declared
as a keyword argument, e.g.:
@output(custom_name=param.Number())
Multiple outputs may be declared using keywords mapping from
output name to the type for Python >= 3.6 or using tuples of the
same format, which is supported for earlier versions, i.e. these
two declarations are equivalent:
@output(number=param.Number(), string=param.String())
@output(('number', param.Number()), ('string', param.String()))
output also accepts Python object types which will be upgraded to
a ClassSelector, e.g.:
@output(int)
### Response:
def output(func, *output, **kw):
"""
output allows annotating a method on a Parameterized class to
declare that it returns an output of a specific type. The outputs
of a Parameterized class can be queried using the
Parameterized.param.outputs method. By default the output will
inherit the method name but a custom name can be declared by
expressing the Parameter type using a keyword argument. Declaring
multiple return types using keywords is only supported in Python >= 3.6.
The simplest declaration simply declares the method returns an
object without any type guarantees, e.g.:
@output()
If a specific parameter type is specified this is a declaration
that the method will return a value of that type, e.g.:
@output(param.Number())
To override the default name of the output the type may be declared
as a keyword argument, e.g.:
@output(custom_name=param.Number())
Multiple outputs may be declared using keywords mapping from
output name to the type for Python >= 3.6 or using tuples of the
same format, which is supported for earlier versions, i.e. these
two declarations are equivalent:
@output(number=param.Number(), string=param.String())
@output(('number', param.Number()), ('string', param.String()))
output also accepts Python object types which will be upgraded to
a ClassSelector, e.g.:
@output(int)
"""
if output:
outputs = []
for i, out in enumerate(output):
i = i if len(output) > 1 else None
if isinstance(out, tuple) and len(out) == 2 and isinstance(out[0], str):
outputs.append(out+(i,))
elif isinstance(out, str):
outputs.append((out, Parameter(), i))
else:
outputs.append((None, out, i))
elif kw:
py_major = sys.version_info.major
py_minor = sys.version_info.minor
if (py_major < 3 or (py_major == 3 and py_minor < 6)) and len(kw) > 1:
raise ValueError('Multiple output declaration using keywords '
'only supported in Python >= 3.6.')
# (requires keywords to be kept ordered, which was not true in previous versions)
outputs = [(name, otype, i if len(kw) > 1 else None)
for i, (name, otype) in enumerate(kw.items())]
else:
outputs = [(None, Parameter(), None)]
names, processed = [], []
for name, otype, i in outputs:
if isinstance(otype, type):
if issubclass(otype, Parameter):
otype = otype()
else:
from .import ClassSelector
otype = ClassSelector(class_=otype)
elif isinstance(otype, tuple) and all(isinstance(t, type) for t in otype):
from .import ClassSelector
otype = ClassSelector(class_=otype)
if not isinstance(otype, Parameter):
raise ValueError('output type must be declared with a Parameter class, '
'instance or a Python object type.')
processed.append((name, otype, i))
names.append(name)
if len(set(names)) != len(names):
raise ValueError('When declaring multiple outputs each value '
'must be unique.')
_dinfo = getattr(func, '_dinfo', {})
_dinfo.update({'outputs': processed})
@wraps(func)
def _output(*args,**kw):
return func(*args,**kw)
_output._dinfo = _dinfo
return _output |
def make_noise_image(shape, type='gaussian', mean=None, stddev=None,
random_state=None):
"""
Make a noise image containing Gaussian or Poisson noise.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
type : {'gaussian', 'poisson'}
The distribution used to generate the random noise:
* ``'gaussian'``: Gaussian distributed noise.
* ``'poisson'``: Poisson distributed noise.
mean : float
The mean of the random distribution. Required for both Gaussian
and Poisson noise. The default is 0.
stddev : float, optional
The standard deviation of the Gaussian noise to add to the
output image. Required for Gaussian noise and ignored for
Poisson noise (the variance of the Poisson distribution is equal
to its mean).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Separate function calls with the same noise parameters and
``random_state`` will generate the identical noise image.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing random noise.
See Also
--------
apply_poisson_noise
Examples
--------
.. plot::
:include-source:
# make Gaussian and Poisson noise images
from photutils.datasets import make_noise_image
shape = (100, 100)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Poisson noise ($\\mu=5$)')
"""
if mean is None:
raise ValueError('"mean" must be input')
prng = check_random_state(random_state)
if type == 'gaussian':
if stddev is None:
raise ValueError('"stddev" must be input for Gaussian noise')
image = prng.normal(loc=mean, scale=stddev, size=shape)
elif type == 'poisson':
image = prng.poisson(lam=mean, size=shape)
else:
raise ValueError('Invalid type: {0}. Use one of '
'{"gaussian", "poisson"}.'.format(type))
return image | Make a noise image containing Gaussian or Poisson noise.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
type : {'gaussian', 'poisson'}
The distribution used to generate the random noise:
* ``'gaussian'``: Gaussian distributed noise.
* ``'poisson'``: Poisson distributed noise.
mean : float
The mean of the random distribution. Required for both Gaussian
and Poisson noise. The default is 0.
stddev : float, optional
The standard deviation of the Gaussian noise to add to the
output image. Required for Gaussian noise and ignored for
Poisson noise (the variance of the Poisson distribution is equal
to its mean).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Separate function calls with the same noise parameters and
``random_state`` will generate the identical noise image.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing random noise.
See Also
--------
apply_poisson_noise
Examples
--------
.. plot::
:include-source:
# make Gaussian and Poisson noise images
from photutils.datasets import make_noise_image
shape = (100, 100)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Poisson noise ($\\mu=5$)') | Below is the the instruction that describes the task:
### Input:
Make a noise image containing Gaussian or Poisson noise.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
type : {'gaussian', 'poisson'}
The distribution used to generate the random noise:
* ``'gaussian'``: Gaussian distributed noise.
* ``'poisson'``: Poisson distributed noise.
mean : float
The mean of the random distribution. Required for both Gaussian
and Poisson noise. The default is 0.
stddev : float, optional
The standard deviation of the Gaussian noise to add to the
output image. Required for Gaussian noise and ignored for
Poisson noise (the variance of the Poisson distribution is equal
to its mean).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Separate function calls with the same noise parameters and
``random_state`` will generate the identical noise image.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing random noise.
See Also
--------
apply_poisson_noise
Examples
--------
.. plot::
:include-source:
# make Gaussian and Poisson noise images
from photutils.datasets import make_noise_image
shape = (100, 100)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Poisson noise ($\\mu=5$)')
### Response:
def make_noise_image(shape, type='gaussian', mean=None, stddev=None,
random_state=None):
"""
Make a noise image containing Gaussian or Poisson noise.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
type : {'gaussian', 'poisson'}
The distribution used to generate the random noise:
* ``'gaussian'``: Gaussian distributed noise.
* ``'poisson'``: Poisson distributed noise.
mean : float
The mean of the random distribution. Required for both Gaussian
and Poisson noise. The default is 0.
stddev : float, optional
The standard deviation of the Gaussian noise to add to the
output image. Required for Gaussian noise and ignored for
Poisson noise (the variance of the Poisson distribution is equal
to its mean).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Separate function calls with the same noise parameters and
``random_state`` will generate the identical noise image.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing random noise.
See Also
--------
apply_poisson_noise
Examples
--------
.. plot::
:include-source:
# make Gaussian and Poisson noise images
from photutils.datasets import make_noise_image
shape = (100, 100)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Poisson noise ($\\mu=5$)')
"""
if mean is None:
raise ValueError('"mean" must be input')
prng = check_random_state(random_state)
if type == 'gaussian':
if stddev is None:
raise ValueError('"stddev" must be input for Gaussian noise')
image = prng.normal(loc=mean, scale=stddev, size=shape)
elif type == 'poisson':
image = prng.poisson(lam=mean, size=shape)
else:
raise ValueError('Invalid type: {0}. Use one of '
'{"gaussian", "poisson"}.'.format(type))
return image |
def read(path, encoding="UTF-8"):
"""Read and return content from file *path*"""
with OPEN_FUNC(path, 'rb') as _file:
cont = _file.read()
return cont.decode(encoding) | Read and return content from file *path* | Below is the the instruction that describes the task:
### Input:
Read and return content from file *path*
### Response:
def read(path, encoding="UTF-8"):
"""Read and return content from file *path*"""
with OPEN_FUNC(path, 'rb') as _file:
cont = _file.read()
return cont.decode(encoding) |
def sinwave(n=4,inc=.25):
"""
Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
n_y : int
Size of increment along the axis
"""
x=np.arange(-n,n,inc)
y=np.arange(-n,n,inc)
X,Y=np.meshgrid(x,y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)/(.5*R)
return pd.DataFrame(Z,index=x,columns=y) | Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
n_y : int
Size of increment along the axis | Below is the the instruction that describes the task:
### Input:
Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
n_y : int
Size of increment along the axis
### Response:
def sinwave(n=4,inc=.25):
"""
Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
n_y : int
Size of increment along the axis
"""
x=np.arange(-n,n,inc)
y=np.arange(-n,n,inc)
X,Y=np.meshgrid(x,y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)/(.5*R)
return pd.DataFrame(Z,index=x,columns=y) |
def to_dict(self):
"""
Recusively exports object values to a dict
:return: `dict`of values
"""
if not hasattr(self, '_fields'):
return self.__dict__
result = dict()
for field_name, field in self._fields.items():
if isinstance(field, xmlmap.NodeField):
obj = getattr(self, field_name)
if obj is None:
result[field_name] = None
elif not hasattr(obj, 'to_dict'):
result[field_name] = obj.__dict__
else:
result[field_name] = obj.to_dict()
elif isinstance(field, xmlmap.NodeListField):
objs = getattr(self, field_name)
result[field_name] = list()
for obj in objs:
if not hasattr(obj, 'to_dict'):
result[field_name].append(obj.__dict__)
else:
result[field_name].append(obj.to_dict())
else:
result[field_name] = getattr(self, field_name)
return result | Recusively exports object values to a dict
:return: `dict`of values | Below is the the instruction that describes the task:
### Input:
Recusively exports object values to a dict
:return: `dict`of values
### Response:
def to_dict(self):
"""
Recusively exports object values to a dict
:return: `dict`of values
"""
if not hasattr(self, '_fields'):
return self.__dict__
result = dict()
for field_name, field in self._fields.items():
if isinstance(field, xmlmap.NodeField):
obj = getattr(self, field_name)
if obj is None:
result[field_name] = None
elif not hasattr(obj, 'to_dict'):
result[field_name] = obj.__dict__
else:
result[field_name] = obj.to_dict()
elif isinstance(field, xmlmap.NodeListField):
objs = getattr(self, field_name)
result[field_name] = list()
for obj in objs:
if not hasattr(obj, 'to_dict'):
result[field_name].append(obj.__dict__)
else:
result[field_name].append(obj.to_dict())
else:
result[field_name] = getattr(self, field_name)
return result |
def from_buffer(buffer, mime=False):
"""
Accepts a binary string and returns the detected filetype. Return
value is the mimetype if mime=True, otherwise a human readable
name.
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
"""
m = _get_magic_type(mime)
return m.from_buffer(buffer) | Accepts a binary string and returns the detected filetype. Return
value is the mimetype if mime=True, otherwise a human readable
name.
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2' | Below is the the instruction that describes the task:
### Input:
Accepts a binary string and returns the detected filetype. Return
value is the mimetype if mime=True, otherwise a human readable
name.
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
### Response:
def from_buffer(buffer, mime=False):
"""
Accepts a binary string and returns the detected filetype. Return
value is the mimetype if mime=True, otherwise a human readable
name.
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
"""
m = _get_magic_type(mime)
return m.from_buffer(buffer) |
def _true_anom_to_phase(true_anom, period, ecc, per0):
"""
TODO: add documentation
"""
phshift = 0
mean_anom = true_anom - (ecc*sin(true_anom))*u.deg
Phi = (mean_anom + per0) / (360*u.deg) - 1./4
# phase = Phi - (phshift - 0.25 + per0/(360*u.deg)) * period
phase = (Phi*u.d - (phshift - 0.25 + per0/(360*u.deg)) * period)*(u.cycle/u.d)
return phase | TODO: add documentation | Below is the the instruction that describes the task:
### Input:
TODO: add documentation
### Response:
def _true_anom_to_phase(true_anom, period, ecc, per0):
"""
TODO: add documentation
"""
phshift = 0
mean_anom = true_anom - (ecc*sin(true_anom))*u.deg
Phi = (mean_anom + per0) / (360*u.deg) - 1./4
# phase = Phi - (phshift - 0.25 + per0/(360*u.deg)) * period
phase = (Phi*u.d - (phshift - 0.25 + per0/(360*u.deg)) * period)*(u.cycle/u.d)
return phase |
def putall(self, items, on_dup_key=RAISE, on_dup_val=RAISE, on_dup_kv=None):
"""
Like a bulk :meth:`put`.
If one of the given items causes an exception to be raised,
none of the items is inserted.
"""
if items:
on_dup = self._get_on_dup((on_dup_key, on_dup_val, on_dup_kv))
self._update(False, on_dup, items) | Like a bulk :meth:`put`.
If one of the given items causes an exception to be raised,
none of the items is inserted. | Below is the the instruction that describes the task:
### Input:
Like a bulk :meth:`put`.
If one of the given items causes an exception to be raised,
none of the items is inserted.
### Response:
def putall(self, items, on_dup_key=RAISE, on_dup_val=RAISE, on_dup_kv=None):
"""
Like a bulk :meth:`put`.
If one of the given items causes an exception to be raised,
none of the items is inserted.
"""
if items:
on_dup = self._get_on_dup((on_dup_key, on_dup_val, on_dup_kv))
self._update(False, on_dup, items) |
def add(self, child):
"""
Adds a typed child object to the component type.
@param child: Child object to be added.
"""
if isinstance(child, FatComponent):
self.add_child_component(child)
else:
Fat.add(self, child) | Adds a typed child object to the component type.
@param child: Child object to be added. | Below is the the instruction that describes the task:
### Input:
Adds a typed child object to the component type.
@param child: Child object to be added.
### Response:
def add(self, child):
"""
Adds a typed child object to the component type.
@param child: Child object to be added.
"""
if isinstance(child, FatComponent):
self.add_child_component(child)
else:
Fat.add(self, child) |
def DeserializeExclusiveData(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.Nonce = reader.ReadUInt32()
self.Type = TransactionType.MinerTransaction | Deserialize full object.
Args:
reader (neo.IO.BinaryReader): | Below is the the instruction that describes the task:
### Input:
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
### Response:
def DeserializeExclusiveData(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.Nonce = reader.ReadUInt32()
self.Type = TransactionType.MinerTransaction |
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist) | Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases). | Below is the the instruction that describes the task:
### Input:
Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases).
### Response:
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist) |
def update(self):
"""Updates the various instanced modules"""
if self._utilisation is not None:
api = "SYNO.Core.System.Utilization"
url = "%s/entry.cgi?api=%s&version=1&method=get&_sid=%s" % (
self.base_url,
api,
self.access_token)
self._utilisation.update(self._get_url(url))
if self._storage is not None:
api = "SYNO.Storage.CGI.Storage"
url = "%s/entry.cgi?api=%s&version=1&method=load_info&_sid=%s" % (
self.base_url,
api,
self.access_token)
self._storage.update(self._get_url(url)) | Updates the various instanced modules | Below is the the instruction that describes the task:
### Input:
Updates the various instanced modules
### Response:
def update(self):
"""Updates the various instanced modules"""
if self._utilisation is not None:
api = "SYNO.Core.System.Utilization"
url = "%s/entry.cgi?api=%s&version=1&method=get&_sid=%s" % (
self.base_url,
api,
self.access_token)
self._utilisation.update(self._get_url(url))
if self._storage is not None:
api = "SYNO.Storage.CGI.Storage"
url = "%s/entry.cgi?api=%s&version=1&method=load_info&_sid=%s" % (
self.base_url,
api,
self.access_token)
self._storage.update(self._get_url(url)) |
def find_oxygen_reactions(model):
"""Return list of oxygen-producing/-consuming reactions."""
o2_in_model = helpers.find_met_in_model(model, "MNXM4")
return set([rxn for met in model.metabolites for
rxn in met.reactions if met.formula == "O2" or
met in o2_in_model]) | Return list of oxygen-producing/-consuming reactions. | Below is the the instruction that describes the task:
### Input:
Return list of oxygen-producing/-consuming reactions.
### Response:
def find_oxygen_reactions(model):
"""Return list of oxygen-producing/-consuming reactions."""
o2_in_model = helpers.find_met_in_model(model, "MNXM4")
return set([rxn for met in model.metabolites for
rxn in met.reactions if met.formula == "O2" or
met in o2_in_model]) |
def iter_and_close(file_like, block_size):
"""Yield file contents by block then close the file."""
while 1:
try:
block = file_like.read(block_size)
if block:
yield block
else:
raise StopIteration
except StopIteration:
file_like.close()
return | Yield file contents by block then close the file. | Below is the the instruction that describes the task:
### Input:
Yield file contents by block then close the file.
### Response:
def iter_and_close(file_like, block_size):
"""Yield file contents by block then close the file."""
while 1:
try:
block = file_like.read(block_size)
if block:
yield block
else:
raise StopIteration
except StopIteration:
file_like.close()
return |
def calc_sp_wc_v1(self):
"""Add throughfall to the snow layer.
Required control parameters:
|NmbZones|
|ZoneType|
Required flux sequences:
|TF|
|RfC|
|SfC|
Updated state sequences:
|WC|
|SP|
Basic equations:
:math:`\\frac{dSP}{dt} = TF \\cdot \\frac{SfC}{SfC+RfC}` \n
:math:`\\frac{dWC}{dt} = TF \\cdot \\frac{RfC}{SfC+RfC}`
Exemples:
Consider the following setting, in which eight zones of
different type receive a throughfall of 10mm:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(8)
>>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD, FIELD, FIELD)
>>> fluxes.tf = 10.0
>>> fluxes.sfc = 0.5, 0.5, 0.5, 0.5, 0.2, 0.8, 1.0, 4.0
>>> fluxes.rfc = 0.5, 0.5, 0.5, 0.5, 0.8, 0.2, 4.0, 1.0
>>> states.sp = 0.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 5.0, 5.0, 5.0, 2.0, 8.0, 2.0, 8.0)
>>> states.wc
wc(0.0, 5.0, 5.0, 5.0, 8.0, 2.0, 8.0, 2.0)
The snow routine does not apply for internal lakes, which is why
both the ice storage and the water storage of the first zone
remain unchanged. The snow routine is identical for glaciers,
fields and forests in the current context, which is why the
results of the second, third, and fourth zone are equal. The
last four zones illustrate that the corrected snowfall fraction
as well as the corrected rainfall fraction are applied in a
relative manner, as the total amount of water yield has been
corrected in the interception module already.
When both factors are zero, the neither the water nor the ice
content of the snow layer changes:
>>> fluxes.sfc = 0.0
>>> fluxes.rfc = 0.0
>>> states.sp = 2.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nmbzones):
if con.zonetype[k] != ILAKE:
if (flu.rfc[k]+flu.sfc[k]) > 0.:
sta.wc[k] += flu.tf[k]*flu.rfc[k]/(flu.rfc[k]+flu.sfc[k])
sta.sp[k] += flu.tf[k]*flu.sfc[k]/(flu.rfc[k]+flu.sfc[k])
else:
sta.wc[k] = 0.
sta.sp[k] = 0. | Add throughfall to the snow layer.
Required control parameters:
|NmbZones|
|ZoneType|
Required flux sequences:
|TF|
|RfC|
|SfC|
Updated state sequences:
|WC|
|SP|
Basic equations:
:math:`\\frac{dSP}{dt} = TF \\cdot \\frac{SfC}{SfC+RfC}` \n
:math:`\\frac{dWC}{dt} = TF \\cdot \\frac{RfC}{SfC+RfC}`
Exemples:
Consider the following setting, in which eight zones of
different type receive a throughfall of 10mm:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(8)
>>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD, FIELD, FIELD)
>>> fluxes.tf = 10.0
>>> fluxes.sfc = 0.5, 0.5, 0.5, 0.5, 0.2, 0.8, 1.0, 4.0
>>> fluxes.rfc = 0.5, 0.5, 0.5, 0.5, 0.8, 0.2, 4.0, 1.0
>>> states.sp = 0.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 5.0, 5.0, 5.0, 2.0, 8.0, 2.0, 8.0)
>>> states.wc
wc(0.0, 5.0, 5.0, 5.0, 8.0, 2.0, 8.0, 2.0)
The snow routine does not apply for internal lakes, which is why
both the ice storage and the water storage of the first zone
remain unchanged. The snow routine is identical for glaciers,
fields and forests in the current context, which is why the
results of the second, third, and fourth zone are equal. The
last four zones illustrate that the corrected snowfall fraction
as well as the corrected rainfall fraction are applied in a
relative manner, as the total amount of water yield has been
corrected in the interception module already.
When both factors are zero, the neither the water nor the ice
content of the snow layer changes:
>>> fluxes.sfc = 0.0
>>> fluxes.rfc = 0.0
>>> states.sp = 2.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) | Below is the the instruction that describes the task:
### Input:
Add throughfall to the snow layer.
Required control parameters:
|NmbZones|
|ZoneType|
Required flux sequences:
|TF|
|RfC|
|SfC|
Updated state sequences:
|WC|
|SP|
Basic equations:
:math:`\\frac{dSP}{dt} = TF \\cdot \\frac{SfC}{SfC+RfC}` \n
:math:`\\frac{dWC}{dt} = TF \\cdot \\frac{RfC}{SfC+RfC}`
Exemples:
Consider the following setting, in which eight zones of
different type receive a throughfall of 10mm:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(8)
>>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD, FIELD, FIELD)
>>> fluxes.tf = 10.0
>>> fluxes.sfc = 0.5, 0.5, 0.5, 0.5, 0.2, 0.8, 1.0, 4.0
>>> fluxes.rfc = 0.5, 0.5, 0.5, 0.5, 0.8, 0.2, 4.0, 1.0
>>> states.sp = 0.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 5.0, 5.0, 5.0, 2.0, 8.0, 2.0, 8.0)
>>> states.wc
wc(0.0, 5.0, 5.0, 5.0, 8.0, 2.0, 8.0, 2.0)
The snow routine does not apply for internal lakes, which is why
both the ice storage and the water storage of the first zone
remain unchanged. The snow routine is identical for glaciers,
fields and forests in the current context, which is why the
results of the second, third, and fourth zone are equal. The
last four zones illustrate that the corrected snowfall fraction
as well as the corrected rainfall fraction are applied in a
relative manner, as the total amount of water yield has been
corrected in the interception module already.
When both factors are zero, the neither the water nor the ice
content of the snow layer changes:
>>> fluxes.sfc = 0.0
>>> fluxes.rfc = 0.0
>>> states.sp = 2.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
### Response:
def calc_sp_wc_v1(self):
"""Add throughfall to the snow layer.
Required control parameters:
|NmbZones|
|ZoneType|
Required flux sequences:
|TF|
|RfC|
|SfC|
Updated state sequences:
|WC|
|SP|
Basic equations:
:math:`\\frac{dSP}{dt} = TF \\cdot \\frac{SfC}{SfC+RfC}` \n
:math:`\\frac{dWC}{dt} = TF \\cdot \\frac{RfC}{SfC+RfC}`
Exemples:
Consider the following setting, in which eight zones of
different type receive a throughfall of 10mm:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(8)
>>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD, FIELD, FIELD)
>>> fluxes.tf = 10.0
>>> fluxes.sfc = 0.5, 0.5, 0.5, 0.5, 0.2, 0.8, 1.0, 4.0
>>> fluxes.rfc = 0.5, 0.5, 0.5, 0.5, 0.8, 0.2, 4.0, 1.0
>>> states.sp = 0.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 5.0, 5.0, 5.0, 2.0, 8.0, 2.0, 8.0)
>>> states.wc
wc(0.0, 5.0, 5.0, 5.0, 8.0, 2.0, 8.0, 2.0)
The snow routine does not apply for internal lakes, which is why
both the ice storage and the water storage of the first zone
remain unchanged. The snow routine is identical for glaciers,
fields and forests in the current context, which is why the
results of the second, third, and fourth zone are equal. The
last four zones illustrate that the corrected snowfall fraction
as well as the corrected rainfall fraction are applied in a
relative manner, as the total amount of water yield has been
corrected in the interception module already.
When both factors are zero, the neither the water nor the ice
content of the snow layer changes:
>>> fluxes.sfc = 0.0
>>> fluxes.rfc = 0.0
>>> states.sp = 2.0
>>> states.wc = 0.0
>>> model.calc_sp_wc_v1()
>>> states.sp
sp(0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nmbzones):
if con.zonetype[k] != ILAKE:
if (flu.rfc[k]+flu.sfc[k]) > 0.:
sta.wc[k] += flu.tf[k]*flu.rfc[k]/(flu.rfc[k]+flu.sfc[k])
sta.sp[k] += flu.tf[k]*flu.sfc[k]/(flu.rfc[k]+flu.sfc[k])
else:
sta.wc[k] = 0.
sta.sp[k] = 0. |
def get_pin_width(self):
"""
:return: the PIN width
"""
command = const.CMD_GET_PINWIDTH
command_string = b' P'
response_size = 9
cmd_response = self.__send_command(command, command_string, response_size)
if cmd_response.get('status'):
width = self.__data.split(b'\x00')[0]
return bytearray(width)[0]
else:
raise ZKErrorResponse("can0t get pin width") | :return: the PIN width | Below is the the instruction that describes the task:
### Input:
:return: the PIN width
### Response:
def get_pin_width(self):
"""
:return: the PIN width
"""
command = const.CMD_GET_PINWIDTH
command_string = b' P'
response_size = 9
cmd_response = self.__send_command(command, command_string, response_size)
if cmd_response.get('status'):
width = self.__data.split(b'\x00')[0]
return bytearray(width)[0]
else:
raise ZKErrorResponse("can0t get pin width") |
def ustep(self):
"""Dual variable update."""
self.U += self.rsdl_r(self.AX, self.Y) | Dual variable update. | Below is the the instruction that describes the task:
### Input:
Dual variable update.
### Response:
def ustep(self):
"""Dual variable update."""
self.U += self.rsdl_r(self.AX, self.Y) |
def _create_show_definitions_action(self):
"""Create action for showing definitions / help."""
icon = resources_path('img', 'icons', 'show-inasafe-help.svg')
self.action_show_definitions = QAction(
QIcon(icon),
self.tr('InaSAFE Help'),
self.iface.mainWindow())
self.action_show_definitions.setStatusTip(self.tr(
'Show InaSAFE Help'))
self.action_show_definitions.setWhatsThis(self.tr(
'Use this to show a document describing all InaSAFE concepts.'))
self.action_show_definitions.triggered.connect(
self.show_definitions)
self.add_action(
self.action_show_definitions,
add_to_toolbar=True) | Create action for showing definitions / help. | Below is the the instruction that describes the task:
### Input:
Create action for showing definitions / help.
### Response:
def _create_show_definitions_action(self):
"""Create action for showing definitions / help."""
icon = resources_path('img', 'icons', 'show-inasafe-help.svg')
self.action_show_definitions = QAction(
QIcon(icon),
self.tr('InaSAFE Help'),
self.iface.mainWindow())
self.action_show_definitions.setStatusTip(self.tr(
'Show InaSAFE Help'))
self.action_show_definitions.setWhatsThis(self.tr(
'Use this to show a document describing all InaSAFE concepts.'))
self.action_show_definitions.triggered.connect(
self.show_definitions)
self.add_action(
self.action_show_definitions,
add_to_toolbar=True) |
def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response) | Get a single item
:param obj_id: int
:return: dict|str | Below is the the instruction that describes the task:
### Input:
Get a single item
:param obj_id: int
:return: dict|str
### Response:
def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response) |
def reqMktDepth(
self, contract: Contract, numRows: int = 5,
isSmartDepth: bool = False, mktDepthOptions=None) -> Ticker:
"""
Subscribe to market depth data (a.k.a. DOM, L2 or order book).
https://interactivebrokers.github.io/tws-api/market_depth.html
Args:
contract: Contract of interest.
numRows: Number of depth level on each side of the order book
(5 max).
isSmartDepth: Consolidate the order book across exchanges.
mktDepthOptions: Unknown.
Returns:
The Ticker that holds the market depth in ``ticker.domBids``
and ``ticker.domAsks`` and the list of MktDepthData in
``ticker.domTicks``.
"""
reqId = self.client.getReqId()
ticker = self.wrapper.startTicker(reqId, contract, 'mktDepth')
self.client.reqMktDepth(
reqId, contract, numRows, isSmartDepth, mktDepthOptions)
return ticker | Subscribe to market depth data (a.k.a. DOM, L2 or order book).
https://interactivebrokers.github.io/tws-api/market_depth.html
Args:
contract: Contract of interest.
numRows: Number of depth level on each side of the order book
(5 max).
isSmartDepth: Consolidate the order book across exchanges.
mktDepthOptions: Unknown.
Returns:
The Ticker that holds the market depth in ``ticker.domBids``
and ``ticker.domAsks`` and the list of MktDepthData in
``ticker.domTicks``. | Below is the the instruction that describes the task:
### Input:
Subscribe to market depth data (a.k.a. DOM, L2 or order book).
https://interactivebrokers.github.io/tws-api/market_depth.html
Args:
contract: Contract of interest.
numRows: Number of depth level on each side of the order book
(5 max).
isSmartDepth: Consolidate the order book across exchanges.
mktDepthOptions: Unknown.
Returns:
The Ticker that holds the market depth in ``ticker.domBids``
and ``ticker.domAsks`` and the list of MktDepthData in
``ticker.domTicks``.
### Response:
def reqMktDepth(
self, contract: Contract, numRows: int = 5,
isSmartDepth: bool = False, mktDepthOptions=None) -> Ticker:
"""
Subscribe to market depth data (a.k.a. DOM, L2 or order book).
https://interactivebrokers.github.io/tws-api/market_depth.html
Args:
contract: Contract of interest.
numRows: Number of depth level on each side of the order book
(5 max).
isSmartDepth: Consolidate the order book across exchanges.
mktDepthOptions: Unknown.
Returns:
The Ticker that holds the market depth in ``ticker.domBids``
and ``ticker.domAsks`` and the list of MktDepthData in
``ticker.domTicks``.
"""
reqId = self.client.getReqId()
ticker = self.wrapper.startTicker(reqId, contract, 'mktDepth')
self.client.reqMktDepth(
reqId, contract, numRows, isSmartDepth, mktDepthOptions)
return ticker |
def Write(self, output_writer):
"""Writes the table to output writer.
Args:
output_writer (CLIOutputWriter): output writer.
"""
# Round up the column sizes to the nearest tab.
for column_index, column_size in enumerate(self._column_sizes):
column_size, _ = divmod(column_size, self._NUMBER_OF_SPACES_IN_TAB)
column_size = (column_size + 1) * self._NUMBER_OF_SPACES_IN_TAB
self._column_sizes[column_index] = column_size
if self._columns:
self._WriteRow(output_writer, self._columns, in_bold=True)
for values in self._rows:
self._WriteRow(output_writer, values) | Writes the table to output writer.
Args:
output_writer (CLIOutputWriter): output writer. | Below is the the instruction that describes the task:
### Input:
Writes the table to output writer.
Args:
output_writer (CLIOutputWriter): output writer.
### Response:
def Write(self, output_writer):
"""Writes the table to output writer.
Args:
output_writer (CLIOutputWriter): output writer.
"""
# Round up the column sizes to the nearest tab.
for column_index, column_size in enumerate(self._column_sizes):
column_size, _ = divmod(column_size, self._NUMBER_OF_SPACES_IN_TAB)
column_size = (column_size + 1) * self._NUMBER_OF_SPACES_IN_TAB
self._column_sizes[column_index] = column_size
if self._columns:
self._WriteRow(output_writer, self._columns, in_bold=True)
for values in self._rows:
self._WriteRow(output_writer, values) |
def _copy_template_to_config(self, template_path,
config_path, overwrite=False):
"""Write the default config from a template.
:type template_path: str
:param template_path: The template config file path.
:type config_path: str
:param config_path: The user's config file path.
:type overwrite: bool
:param overwrite: (Optional) Determines whether to overwrite the
existing config file, if it exists.
:raises: :class:`OSError <exceptions.OSError>`
"""
config_path = os.path.expanduser(config_path)
if not overwrite and os.path.isfile(config_path):
return
else:
try:
config_path_dir_name = os.path.dirname(config_path)
os.makedirs(config_path_dir_name)
except OSError:
if not os.path.isdir(config_path_dir_name):
raise
shutil.copyfile(template_path, config_path) | Write the default config from a template.
:type template_path: str
:param template_path: The template config file path.
:type config_path: str
:param config_path: The user's config file path.
:type overwrite: bool
:param overwrite: (Optional) Determines whether to overwrite the
existing config file, if it exists.
:raises: :class:`OSError <exceptions.OSError>` | Below is the the instruction that describes the task:
### Input:
Write the default config from a template.
:type template_path: str
:param template_path: The template config file path.
:type config_path: str
:param config_path: The user's config file path.
:type overwrite: bool
:param overwrite: (Optional) Determines whether to overwrite the
existing config file, if it exists.
:raises: :class:`OSError <exceptions.OSError>`
### Response:
def _copy_template_to_config(self, template_path,
config_path, overwrite=False):
"""Write the default config from a template.
:type template_path: str
:param template_path: The template config file path.
:type config_path: str
:param config_path: The user's config file path.
:type overwrite: bool
:param overwrite: (Optional) Determines whether to overwrite the
existing config file, if it exists.
:raises: :class:`OSError <exceptions.OSError>`
"""
config_path = os.path.expanduser(config_path)
if not overwrite and os.path.isfile(config_path):
return
else:
try:
config_path_dir_name = os.path.dirname(config_path)
os.makedirs(config_path_dir_name)
except OSError:
if not os.path.isdir(config_path_dir_name):
raise
shutil.copyfile(template_path, config_path) |
def line_count(fn):
""" Get line count of file
Args:
fn (str): Path to file
Return:
Number of lines in file (int)
"""
with open(fn) as f:
for i, l in enumerate(f):
pass
return i + 1 | Get line count of file
Args:
fn (str): Path to file
Return:
Number of lines in file (int) | Below is the the instruction that describes the task:
### Input:
Get line count of file
Args:
fn (str): Path to file
Return:
Number of lines in file (int)
### Response:
def line_count(fn):
""" Get line count of file
Args:
fn (str): Path to file
Return:
Number of lines in file (int)
"""
with open(fn) as f:
for i, l in enumerate(f):
pass
return i + 1 |
def windows_df(self):
"""Get Windows (W) W-row, W-col and W-index of windows e.g. loaded with :meth:`block_windows` as a dataframe.
Returns:
[dataframe] -- A dataframe with the window information and indices (row, col, index).
"""
import pandas as pd
if self.windows is None:
raise Exception("You need to call the block_windows or windows before.")
df_wins = []
for row, col, win in zip(self.windows_row, self.windows_col, self.windows):
df_wins.append(pd.DataFrame({"row":[row], "col":[col], "Window":[win]}))
df_wins = pd.concat(df_wins).set_index(["row", "col"])
df_wins["window_index"] = range(df_wins.shape[0])
df_wins = df_wins.sort_index()
return df_wins | Get Windows (W) W-row, W-col and W-index of windows e.g. loaded with :meth:`block_windows` as a dataframe.
Returns:
[dataframe] -- A dataframe with the window information and indices (row, col, index). | Below is the the instruction that describes the task:
### Input:
Get Windows (W) W-row, W-col and W-index of windows e.g. loaded with :meth:`block_windows` as a dataframe.
Returns:
[dataframe] -- A dataframe with the window information and indices (row, col, index).
### Response:
def windows_df(self):
"""Get Windows (W) W-row, W-col and W-index of windows e.g. loaded with :meth:`block_windows` as a dataframe.
Returns:
[dataframe] -- A dataframe with the window information and indices (row, col, index).
"""
import pandas as pd
if self.windows is None:
raise Exception("You need to call the block_windows or windows before.")
df_wins = []
for row, col, win in zip(self.windows_row, self.windows_col, self.windows):
df_wins.append(pd.DataFrame({"row":[row], "col":[col], "Window":[win]}))
df_wins = pd.concat(df_wins).set_index(["row", "col"])
df_wins["window_index"] = range(df_wins.shape[0])
df_wins = df_wins.sort_index()
return df_wins |
def kfolds(n, k, sz, p_testset=None, seed=7238):
"""
return train, valid [,test]
testset if p_testset
:param n:
:param k:
:param sz:
:param p_testset:
:param seed:
:return:
"""
trains, tests = split_rand(sz, p_testset, seed)
ntrain = len(trains)
# np.random.seed(seed)
with np_seed(seed):
np.random.shuffle(trains)
if n == k:
# no split
train, valid = trains, trains
else:
foldsz = ntrain // k
itrain = np.arange(ntrain) // foldsz != n
train = trains[itrain]
valid = trains[~itrain]
if not p_testset:
return train, valid
else:
return train, valid, tests | return train, valid [,test]
testset if p_testset
:param n:
:param k:
:param sz:
:param p_testset:
:param seed:
:return: | Below is the the instruction that describes the task:
### Input:
return train, valid [,test]
testset if p_testset
:param n:
:param k:
:param sz:
:param p_testset:
:param seed:
:return:
### Response:
def kfolds(n, k, sz, p_testset=None, seed=7238):
"""
return train, valid [,test]
testset if p_testset
:param n:
:param k:
:param sz:
:param p_testset:
:param seed:
:return:
"""
trains, tests = split_rand(sz, p_testset, seed)
ntrain = len(trains)
# np.random.seed(seed)
with np_seed(seed):
np.random.shuffle(trains)
if n == k:
# no split
train, valid = trains, trains
else:
foldsz = ntrain // k
itrain = np.arange(ntrain) // foldsz != n
train = trains[itrain]
valid = trains[~itrain]
if not p_testset:
return train, valid
else:
return train, valid, tests |
def _validate_table_row_counts(self):
"""
Imports source .txt files, checks row counts and then compares the rowcounts with the gtfsobject
:return:
"""
for db_table_name in DB_TABLE_NAME_TO_SOURCE_FILE.keys():
table_name_source_file = DB_TABLE_NAME_TO_SOURCE_FILE[db_table_name]
row_warning_str = DB_TABLE_NAME_TO_ROWS_MISSING_WARNING[db_table_name]
# Row count in GTFS object:
database_row_count = self.gtfs.get_row_count(db_table_name)
# Row counts in source files:
source_row_count = 0
for gtfs_source in self.gtfs_sources:
frequencies_in_source = source_csv_to_pandas(gtfs_source, 'frequencies.txt')
try:
if table_name_source_file == 'trips' and not frequencies_in_source.empty:
source_row_count += self._frequency_generated_trips_rows(gtfs_source)
elif table_name_source_file == 'stop_times' and not frequencies_in_source.empty:
source_row_count += self._compute_number_of_frequency_generated_stop_times(gtfs_source)
else:
df = source_csv_to_pandas(gtfs_source, table_name_source_file)
source_row_count += len(df.index)
except IOError as e:
if hasattr(e, "filename") and db_table_name in e.filename:
pass
else:
raise e
if source_row_count == database_row_count and self.verbose:
print("Row counts match for " + table_name_source_file + " between the source and database ("
+ str(database_row_count) + ")")
else:
difference = database_row_count - source_row_count
('Row counts do not match for ' + str(table_name_source_file) + ': (source=' + str(source_row_count) +
', database=' + str(database_row_count) + ")")
if table_name_source_file == "calendar" and difference > 0:
query = "SELECT count(*) FROM (SELECT * FROM calendar ORDER BY service_I DESC LIMIT " \
+ str(int(difference)) + \
") WHERE start_date=end_date AND m=0 AND t=0 AND w=0 AND th=0 AND f=0 AND s=0 AND su=0"
number_of_entries_added_by_calendar_dates_loader = self.gtfs.execute_custom_query(query).fetchone()[
0]
if number_of_entries_added_by_calendar_dates_loader == difference and self.verbose:
print(" But don't worry, the extra entries seem to just dummy entries due to calendar_dates")
else:
if self.verbose:
print(" Reason for this is unknown.")
self.warnings_container.add_warning(row_warning_str, self.location, difference)
else:
self.warnings_container.add_warning(row_warning_str, self.location, difference) | Imports source .txt files, checks row counts and then compares the rowcounts with the gtfsobject
:return: | Below is the the instruction that describes the task:
### Input:
Imports source .txt files, checks row counts and then compares the rowcounts with the gtfsobject
:return:
### Response:
def _validate_table_row_counts(self):
"""
Imports source .txt files, checks row counts and then compares the rowcounts with the gtfsobject
:return:
"""
for db_table_name in DB_TABLE_NAME_TO_SOURCE_FILE.keys():
table_name_source_file = DB_TABLE_NAME_TO_SOURCE_FILE[db_table_name]
row_warning_str = DB_TABLE_NAME_TO_ROWS_MISSING_WARNING[db_table_name]
# Row count in GTFS object:
database_row_count = self.gtfs.get_row_count(db_table_name)
# Row counts in source files:
source_row_count = 0
for gtfs_source in self.gtfs_sources:
frequencies_in_source = source_csv_to_pandas(gtfs_source, 'frequencies.txt')
try:
if table_name_source_file == 'trips' and not frequencies_in_source.empty:
source_row_count += self._frequency_generated_trips_rows(gtfs_source)
elif table_name_source_file == 'stop_times' and not frequencies_in_source.empty:
source_row_count += self._compute_number_of_frequency_generated_stop_times(gtfs_source)
else:
df = source_csv_to_pandas(gtfs_source, table_name_source_file)
source_row_count += len(df.index)
except IOError as e:
if hasattr(e, "filename") and db_table_name in e.filename:
pass
else:
raise e
if source_row_count == database_row_count and self.verbose:
print("Row counts match for " + table_name_source_file + " between the source and database ("
+ str(database_row_count) + ")")
else:
difference = database_row_count - source_row_count
('Row counts do not match for ' + str(table_name_source_file) + ': (source=' + str(source_row_count) +
', database=' + str(database_row_count) + ")")
if table_name_source_file == "calendar" and difference > 0:
query = "SELECT count(*) FROM (SELECT * FROM calendar ORDER BY service_I DESC LIMIT " \
+ str(int(difference)) + \
") WHERE start_date=end_date AND m=0 AND t=0 AND w=0 AND th=0 AND f=0 AND s=0 AND su=0"
number_of_entries_added_by_calendar_dates_loader = self.gtfs.execute_custom_query(query).fetchone()[
0]
if number_of_entries_added_by_calendar_dates_loader == difference and self.verbose:
print(" But don't worry, the extra entries seem to just dummy entries due to calendar_dates")
else:
if self.verbose:
print(" Reason for this is unknown.")
self.warnings_container.add_warning(row_warning_str, self.location, difference)
else:
self.warnings_container.add_warning(row_warning_str, self.location, difference) |
def get_installed_extensions(self, include_disabled_extensions=None, include_errors=None, asset_types=None, include_installation_issues=None):
"""GetInstalledExtensions.
[Preview API] List the installed extensions in the account / project collection.
:param bool include_disabled_extensions: If true (the default), include disabled extensions in the results.
:param bool include_errors: If true, include installed extensions with errors.
:param [str] asset_types:
:param bool include_installation_issues:
:rtype: [InstalledExtension]
"""
query_parameters = {}
if include_disabled_extensions is not None:
query_parameters['includeDisabledExtensions'] = self._serialize.query('include_disabled_extensions', include_disabled_extensions, 'bool')
if include_errors is not None:
query_parameters['includeErrors'] = self._serialize.query('include_errors', include_errors, 'bool')
if asset_types is not None:
asset_types = ":".join(asset_types)
query_parameters['assetTypes'] = self._serialize.query('asset_types', asset_types, 'str')
if include_installation_issues is not None:
query_parameters['includeInstallationIssues'] = self._serialize.query('include_installation_issues', include_installation_issues, 'bool')
response = self._send(http_method='GET',
location_id='275424d0-c844-4fe2-bda6-04933a1357d8',
version='5.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('[InstalledExtension]', self._unwrap_collection(response)) | GetInstalledExtensions.
[Preview API] List the installed extensions in the account / project collection.
:param bool include_disabled_extensions: If true (the default), include disabled extensions in the results.
:param bool include_errors: If true, include installed extensions with errors.
:param [str] asset_types:
:param bool include_installation_issues:
:rtype: [InstalledExtension] | Below is the the instruction that describes the task:
### Input:
GetInstalledExtensions.
[Preview API] List the installed extensions in the account / project collection.
:param bool include_disabled_extensions: If true (the default), include disabled extensions in the results.
:param bool include_errors: If true, include installed extensions with errors.
:param [str] asset_types:
:param bool include_installation_issues:
:rtype: [InstalledExtension]
### Response:
def get_installed_extensions(self, include_disabled_extensions=None, include_errors=None, asset_types=None, include_installation_issues=None):
"""GetInstalledExtensions.
[Preview API] List the installed extensions in the account / project collection.
:param bool include_disabled_extensions: If true (the default), include disabled extensions in the results.
:param bool include_errors: If true, include installed extensions with errors.
:param [str] asset_types:
:param bool include_installation_issues:
:rtype: [InstalledExtension]
"""
query_parameters = {}
if include_disabled_extensions is not None:
query_parameters['includeDisabledExtensions'] = self._serialize.query('include_disabled_extensions', include_disabled_extensions, 'bool')
if include_errors is not None:
query_parameters['includeErrors'] = self._serialize.query('include_errors', include_errors, 'bool')
if asset_types is not None:
asset_types = ":".join(asset_types)
query_parameters['assetTypes'] = self._serialize.query('asset_types', asset_types, 'str')
if include_installation_issues is not None:
query_parameters['includeInstallationIssues'] = self._serialize.query('include_installation_issues', include_installation_issues, 'bool')
response = self._send(http_method='GET',
location_id='275424d0-c844-4fe2-bda6-04933a1357d8',
version='5.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('[InstalledExtension]', self._unwrap_collection(response)) |
def zip(value=data, mu=mu, psi=psi):
""" Zero-inflated Poisson likelihood """
# Initialize likeihood
like = 0.0
# Loop over data
for x in value:
if not x:
# Zero values
like += np.log((1. - psi) + psi * np.exp(-mu))
else:
# Non-zero values
like += np.log(psi) + poisson_like(x, mu)
return like | Zero-inflated Poisson likelihood | Below is the the instruction that describes the task:
### Input:
Zero-inflated Poisson likelihood
### Response:
def zip(value=data, mu=mu, psi=psi):
""" Zero-inflated Poisson likelihood """
# Initialize likeihood
like = 0.0
# Loop over data
for x in value:
if not x:
# Zero values
like += np.log((1. - psi) + psi * np.exp(-mu))
else:
# Non-zero values
like += np.log(psi) + poisson_like(x, mu)
return like |
def _query_uncompressed(options, collection_name, num_to_skip,
num_to_return, query, field_selector, opts, check_keys=False):
"""Internal query message helper."""
op_query, max_bson_size = _query(
options,
collection_name,
num_to_skip,
num_to_return,
query,
field_selector,
opts,
check_keys)
rid, msg = __pack_message(2004, op_query)
return rid, msg, max_bson_size | Internal query message helper. | Below is the the instruction that describes the task:
### Input:
Internal query message helper.
### Response:
def _query_uncompressed(options, collection_name, num_to_skip,
num_to_return, query, field_selector, opts, check_keys=False):
"""Internal query message helper."""
op_query, max_bson_size = _query(
options,
collection_name,
num_to_skip,
num_to_return,
query,
field_selector,
opts,
check_keys)
rid, msg = __pack_message(2004, op_query)
return rid, msg, max_bson_size |
def prepare(self):
"""Un-serialize data from data attribute and add instance_id key if necessary
:return: None
"""
# Maybe the Brok is a old daemon one or was already prepared
# if so, the data is already ok
if hasattr(self, 'prepared') and not self.prepared:
self.data = unserialize(self.data)
if self.instance_id:
self.data['instance_id'] = self.instance_id
self.prepared = True | Un-serialize data from data attribute and add instance_id key if necessary
:return: None | Below is the the instruction that describes the task:
### Input:
Un-serialize data from data attribute and add instance_id key if necessary
:return: None
### Response:
def prepare(self):
"""Un-serialize data from data attribute and add instance_id key if necessary
:return: None
"""
# Maybe the Brok is a old daemon one or was already prepared
# if so, the data is already ok
if hasattr(self, 'prepared') and not self.prepared:
self.data = unserialize(self.data)
if self.instance_id:
self.data['instance_id'] = self.instance_id
self.prepared = True |
def authorize_ip_permission(
self, group_name, ip_protocol, from_port, to_port, cidr_ip):
"""
This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
"""
d = self.authorize_security_group(
group_name,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip)
return d | This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}. | Below is the the instruction that describes the task:
### Input:
This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
### Response:
def authorize_ip_permission(
self, group_name, ip_protocol, from_port, to_port, cidr_ip):
"""
This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
"""
d = self.authorize_security_group(
group_name,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip)
return d |
def normalize(expr):
"""No elimination, but normalize arguments."""
args = [normalize(arg) for arg in expr.args]
return type(expr)(expr.func, *args, start=expr.start, end=expr.end) | No elimination, but normalize arguments. | Below is the the instruction that describes the task:
### Input:
No elimination, but normalize arguments.
### Response:
def normalize(expr):
"""No elimination, but normalize arguments."""
args = [normalize(arg) for arg in expr.args]
return type(expr)(expr.func, *args, start=expr.start, end=expr.end) |
def _get_kdjd(cls, df, n_days):
""" Get the D of KDJ
D = 2/3 × (prev. D) +1/3 × (curr. K)
2/3 and 1/3 are the smooth parameters.
:param df: data
:param n_days: calculation range
:return: None
"""
k_column = 'kdjk_{}'.format(n_days)
d_column = 'kdjd_{}'.format(n_days)
df[d_column] = list(cls._calc_kd(df.get(k_column))) | Get the D of KDJ
D = 2/3 × (prev. D) +1/3 × (curr. K)
2/3 and 1/3 are the smooth parameters.
:param df: data
:param n_days: calculation range
:return: None | Below is the the instruction that describes the task:
### Input:
Get the D of KDJ
D = 2/3 × (prev. D) +1/3 × (curr. K)
2/3 and 1/3 are the smooth parameters.
:param df: data
:param n_days: calculation range
:return: None
### Response:
def _get_kdjd(cls, df, n_days):
""" Get the D of KDJ
D = 2/3 × (prev. D) +1/3 × (curr. K)
2/3 and 1/3 are the smooth parameters.
:param df: data
:param n_days: calculation range
:return: None
"""
k_column = 'kdjk_{}'.format(n_days)
d_column = 'kdjd_{}'.format(n_days)
df[d_column] = list(cls._calc_kd(df.get(k_column))) |
def validate(self, raise_unsupported=False):
"""
Checks if the Entry instance includes all the required fields of its
type. If ``raise_unsupported`` is set to ``True`` it will also check
for potentially unsupported types.
If a problem is found, an InvalidStructure exception is raised.
"""
fields = set(self.keys())
flattened_required_fields = set()
required_errors = []
for field in self.required_fields:
found = False
if isinstance(field, (list, tuple)):
# Check all alternatives
for real_f in field:
if real_f in fields:
flattened_required_fields.add(real_f)
found = True
else:
flattened_required_fields.add(field)
if field in fields:
found = True
if not found:
required_errors.append(field)
unsupported_fields = fields - flattened_required_fields \
- set(self.optional_fields)
if len(required_errors) or (raise_unsupported
and len(unsupported_fields)):
raise exceptions.InvalidStructure("Missing or unsupported fields found",
required_fields=required_errors,
unsupported_fields=unsupported_fields) | Checks if the Entry instance includes all the required fields of its
type. If ``raise_unsupported`` is set to ``True`` it will also check
for potentially unsupported types.
If a problem is found, an InvalidStructure exception is raised. | Below is the the instruction that describes the task:
### Input:
Checks if the Entry instance includes all the required fields of its
type. If ``raise_unsupported`` is set to ``True`` it will also check
for potentially unsupported types.
If a problem is found, an InvalidStructure exception is raised.
### Response:
def validate(self, raise_unsupported=False):
"""
Checks if the Entry instance includes all the required fields of its
type. If ``raise_unsupported`` is set to ``True`` it will also check
for potentially unsupported types.
If a problem is found, an InvalidStructure exception is raised.
"""
fields = set(self.keys())
flattened_required_fields = set()
required_errors = []
for field in self.required_fields:
found = False
if isinstance(field, (list, tuple)):
# Check all alternatives
for real_f in field:
if real_f in fields:
flattened_required_fields.add(real_f)
found = True
else:
flattened_required_fields.add(field)
if field in fields:
found = True
if not found:
required_errors.append(field)
unsupported_fields = fields - flattened_required_fields \
- set(self.optional_fields)
if len(required_errors) or (raise_unsupported
and len(unsupported_fields)):
raise exceptions.InvalidStructure("Missing or unsupported fields found",
required_fields=required_errors,
unsupported_fields=unsupported_fields) |
def detect_protocol(cls, data, default=None):
""" TODO: support fbthrift, finagle-thrift, finagle-mux, CORBA """
if cls.is_compact_protocol(data):
return TCompactProtocol
elif cls.is_binary_protocol(data):
return TBinaryProtocol
elif cls.is_json_protocol(data):
return TJSONProtocol
if default is None:
raise ValueError('Unknown protocol')
return default | TODO: support fbthrift, finagle-thrift, finagle-mux, CORBA | Below is the the instruction that describes the task:
### Input:
TODO: support fbthrift, finagle-thrift, finagle-mux, CORBA
### Response:
def detect_protocol(cls, data, default=None):
    """ TODO: support fbthrift, finagle-thrift, finagle-mux, CORBA """
    # Try each known sniffer in order; first match wins.
    sniffers = (
        (cls.is_compact_protocol, TCompactProtocol),
        (cls.is_binary_protocol, TBinaryProtocol),
        (cls.is_json_protocol, TJSONProtocol),
    )
    for matches, protocol in sniffers:
        if matches(data):
            return protocol
    if default is None:
        raise ValueError('Unknown protocol')
    return default
def p_type(self, p):
'''type : term
| array_type opt_order
| pointer_type
| type LIST
| type SET
| type LPAREN opt_types RPAREN
| type COLUMN type DICT
| LPAREN types RPAREN
| LARRAY type RARRAY
| type OR type
'''
if len(p) == 2:
p[0] = p[1],
elif len(p) == 3 and p[2] == 'list':
p[0] = tuple(List[t] for t in p[1])
elif len(p) == 3 and p[2] == 'set':
p[0] = tuple(Set[t] for t in p[1])
elif len(p) == 3:
if p[2] is None:
expanded = []
for nd in p[1]:
expanded.append(nd)
if isinstance(nd, NDArray) and len(nd.__args__) == 3:
expanded.append(NDArray[nd.__args__[0], -1::, -1::])
p[0] = tuple(expanded)
elif p[2] == "F":
for nd in p[1]:
if len(nd.__args__) != 3:
raise PythranSyntaxError("Invalid Pythran spec. "
"F order is only valid for 2D arrays")
p[0] = tuple(NDArray[nd.__args__[0], -1::, -1::] for nd in p[1])
else:
p[0] = p[1]
elif len(p) == 5 and p[4] == ')':
p[0] = tuple(Fun[args, r]
for r in p[1]
for args in (product(*p[3])
if len(p[3]) > 1 else p[3]))
elif len(p) == 5:
p[0] = tuple(Dict[k, v] for k in p[1] for v in p[3])
elif len(p) == 4 and p[2] == 'or':
p[0] = p[1] + p[3]
elif len(p) == 4 and p[3] == ')':
p[0] = tuple(Tuple[t] for t in p[2])
elif len(p) == 4 and p[3] == ']':
p[0] = p[2]
else:
raise PythranSyntaxError("Invalid Pythran spec. "
"Unknown text '{0}'".format(p.value)) | type : term
| array_type opt_order
| pointer_type
| type LIST
| type SET
| type LPAREN opt_types RPAREN
| type COLUMN type DICT
| LPAREN types RPAREN
| LARRAY type RARRAY
| type OR type | Below is the the instruction that describes the task:
### Input:
type : term
| array_type opt_order
| pointer_type
| type LIST
| type SET
| type LPAREN opt_types RPAREN
| type COLUMN type DICT
| LPAREN types RPAREN
| LARRAY type RARRAY
| type OR type
### Response:
def p_type(self, p):
        '''type : term
                | array_type opt_order
                | pointer_type
                | type LIST
                | type SET
                | type LPAREN opt_types RPAREN
                | type COLUMN type DICT
                | LPAREN types RPAREN
                | LARRAY type RARRAY
                | type OR type
        '''
        # NOTE: the docstring above is the PLY grammar for this rule -- it is
        # parsed by yacc and must not be edited as documentation.
        # Each p[i] holds a tuple of candidate types; every production builds
        # a new candidate tuple from its children.
        if len(p) == 2:
            # Bare term -> a 1-tuple containing just that term.
            p[0] = p[1],
        elif len(p) == 3 and p[2] == 'list':
            p[0] = tuple(List[t] for t in p[1])
        elif len(p) == 3 and p[2] == 'set':
            p[0] = tuple(Set[t] for t in p[1])
        elif len(p) == 3:
            # array_type opt_order
            if p[2] is None:
                # No explicit order: also accept the F-ordered (-1 strides)
                # variant of any 2D array candidate.
                expanded = []
                for nd in p[1]:
                    expanded.append(nd)
                    if isinstance(nd, NDArray) and len(nd.__args__) == 3:
                        expanded.append(NDArray[nd.__args__[0], -1::, -1::])
                p[0] = tuple(expanded)
            elif p[2] == "F":
                for nd in p[1]:
                    if len(nd.__args__) != 3:
                        raise PythranSyntaxError("Invalid Pythran spec. "
                                                 "F order is only valid for 2D arrays")
                p[0] = tuple(NDArray[nd.__args__[0], -1::, -1::] for nd in p[1])
            else:
                p[0] = p[1]
        elif len(p) == 5 and p[4] == ')':
            # Callable spec: cartesian product of the argument candidates.
            p[0] = tuple(Fun[args, r]
                         for r in p[1]
                         for args in (product(*p[3])
                                      if len(p[3]) > 1 else p[3]))
        elif len(p) == 5:
            p[0] = tuple(Dict[k, v] for k in p[1] for v in p[3])
        elif len(p) == 4 and p[2] == 'or':
            p[0] = p[1] + p[3]
        elif len(p) == 4 and p[3] == ')':
            p[0] = tuple(Tuple[t] for t in p[2])
        elif len(p) == 4 and p[3] == ']':
            p[0] = p[2]
        else:
            # NOTE(review): PLY's YaccProduction does not obviously expose a
            # ``.value`` attribute -- confirm this error path actually formats.
            raise PythranSyntaxError("Invalid Pythran spec. "
                                     "Unknown text '{0}'".format(p.value))
def subclasses(self, sort_by=None, reverse=False):
"""Get all nested Constant class instance and it's name pair.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attributre
... b = 2 # non-class attributre
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.subclasses()
[("C", my_class.C), ("D", my_class.D)]
.. versionadded:: 0.0.4
"""
l = list()
for attr, _ in self.Subclasses(sort_by, reverse):
value = getattr(self, attr)
l.append((attr, value))
return l | Get all nested Constant class instance and it's name pair.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attributre
... b = 2 # non-class attributre
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.subclasses()
[("C", my_class.C), ("D", my_class.D)]
.. versionadded:: 0.0.4 | Below is the instruction that describes the task:
### Input:
Get all nested Constant class instance and it's name pair.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attributre
... b = 2 # non-class attributre
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.subclasses()
[("C", my_class.C), ("D", my_class.D)]
.. versionadded:: 0.0.4
### Response:
def subclasses(self, sort_by=None, reverse=False):
    """Get all nested Constant class instances paired with their names.

    :param sort_by: the attribute name used for sorting.
    :param reverse: if True, return in descending order.
    :returns: [(attr, value), ...] pairs.

    ::

        >>> class MyClass(Constant):
        ...     a = 1  # non-class attribute
        ...     b = 2  # non-class attribute
        ...
        ...     class C(Constant):
        ...         pass
        ...
        ...     class D(Constant):
        ...         pass
        >>> my_class = MyClass()
        >>> my_class.subclasses()
        [("C", my_class.C), ("D", my_class.D)]

    .. versionadded:: 0.0.4
    """
    # Improvement over the original: the manual append loop used the
    # ambiguous local name ``l`` (pycodestyle E741); a comprehension is
    # clearer and behaviorally identical.  ``Subclasses`` yields
    # (name, class) pairs; resolving each name on the instance returns the
    # bound nested-Constant instances.
    return [(attr, getattr(self, attr))
            for attr, _ in self.Subclasses(sort_by, reverse)]
def rl_set_prompt(prompt: str) -> None: # pragma: no cover
"""
Sets readline's prompt
:param prompt: the new prompt value
"""
safe_prompt = rl_make_safe_prompt(prompt)
if rl_type == RlType.GNU:
encoded_prompt = bytes(safe_prompt, encoding='utf-8')
readline_lib.rl_set_prompt(encoded_prompt)
elif rl_type == RlType.PYREADLINE:
readline.rl._set_prompt(safe_prompt) | Sets readline's prompt
:param prompt: the new prompt value | Below is the the instruction that describes the task:
### Input:
Sets readline's prompt
:param prompt: the new prompt value
### Response:
def rl_set_prompt(prompt: str) -> None: # pragma: no cover
    """
    Sets readline's prompt
    :param prompt: the new prompt value
    """
    # Presumably rl_make_safe_prompt wraps zero-width/ANSI sequences so
    # readline measures the prompt width correctly -- TODO confirm.
    safe_prompt = rl_make_safe_prompt(prompt)
    if rl_type == RlType.GNU:
        # GNU readline is driven through ctypes and expects a bytes prompt.
        encoded_prompt = bytes(safe_prompt, encoding='utf-8')
        readline_lib.rl_set_prompt(encoded_prompt)
    elif rl_type == RlType.PYREADLINE:
        # pyreadline (Windows) only exposes a private setter for the prompt.
        readline.rl._set_prompt(safe_prompt)
def override_span_name(self, name):
"""Overrides the current span name.
This is useful if you don't know the span name yet when you create the
zipkin_span object. i.e. pyramid_zipkin doesn't know which route the
request matched until the function wrapped by the context manager
completes.
:param name: New span name
:type name: str
"""
self.span_name = name
if self.logging_context:
self.logging_context.span_name = name | Overrides the current span name.
This is useful if you don't know the span name yet when you create the
zipkin_span object. i.e. pyramid_zipkin doesn't know which route the
request matched until the function wrapped by the context manager
completes.
:param name: New span name
:type name: str | Below is the instruction that describes the task:
### Input:
Overrides the current span name.
This is useful if you don't know the span name yet when you create the
zipkin_span object. i.e. pyramid_zipkin doesn't know which route the
request matched until the function wrapped by the context manager
completes.
:param name: New span name
:type name: str
### Response:
def override_span_name(self, name):
    """Replace the current span name after the span was created.

    Useful when the final name is only known later: e.g. pyramid_zipkin
    only learns which route a request matched once the function wrapped by
    the context manager completes.

    :param name: New span name
    :type name: str
    """
    self.span_name = name
    context = self.logging_context
    if context:
        context.span_name = name
def monitor():
"""Wrapper to call console with a loop."""
log = logging.getLogger(__name__)
loop = asyncio.get_event_loop()
asyncio.ensure_future(console(loop, log))
loop.run_forever() | Wrapper to call console with a loop. | Below is the the instruction that describes the task:
### Input:
Wrapper to call console with a loop.
### Response:
def monitor():
    """Wrapper to call console with a loop."""
    log = logging.getLogger(__name__)
    # NOTE(review): asyncio.get_event_loop() is deprecated for this usage
    # since Python 3.10 -- consider new_event_loop() if this ever breaks.
    loop = asyncio.get_event_loop()
    # Schedule the console coroutine, then block forever servicing it.
    asyncio.ensure_future(console(loop, log))
    loop.run_forever()
def _get_file_event_handler(self, file_path, save_name):
"""Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory)
"""
self._file_pusher.update_file(save_name, file_path) # track upload progress
if save_name not in self._file_event_handlers:
if save_name == 'wandb-history.jsonl':
self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-history.jsonl', self._api)
elif save_name == 'wandb-events.jsonl':
self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-events.jsonl', self._api)
elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:
# overwrite the tensorboard but not every reload -- just
# frequently enough to resemble realtime
self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(
file_path, save_name, self._api, self._file_pusher)
# Don't try to stream tensorboard files for now.
# elif 'tfevents' in save_name:
# # TODO: This is hard-coded, but we want to give users control
# # over streaming files (or detect them).
# self._api.get_file_stream_api().set_file_policy(save_name,
# BinaryFilePolicy())
# self._file_event_handlers[save_name] = FileEventHandlerBinaryStream(
# file_path, save_name, self._api)
# Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
# during upload, the request to Google hangs (at least, this is my working
# theory). So for now we defer uploading everything til the end of the run.
# TODO: send wandb-summary during run. One option is to copy to a temporary
# file before uploading.
elif save_name == config.FNAME:
self._file_event_handlers[save_name] = FileEventHandlerConfig(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name == 'wandb-summary.json':
# Load the summary into the syncer process for meta etc to work
self._run.summary.load()
self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())
self._file_event_handlers[save_name] = FileEventHandlerSummary(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name.startswith('media/'):
# Save media files immediately
self._file_event_handlers[save_name] = FileEventHandlerOverwrite(
file_path, save_name, self._api, self._file_pusher)
else:
Handler = FileEventHandlerOverwriteDeferred
for policy, globs in six.iteritems(self._user_file_policies):
if policy == "end":
continue
for g in globs:
if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):
if policy == "live":
Handler = FileEventHandlerThrottledOverwriteMinWait
self._file_event_handlers[save_name] = Handler(
file_path, save_name, self._api, self._file_pusher)
return self._file_event_handlers[save_name] | Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory) | Below is the the instruction that describes the task:
### Input:
Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory)
### Response:
def _get_file_event_handler(self, file_path, save_name):
        """Get or create an event handler for a particular file.
        file_path: the file's actual path
        save_name: its path relative to the run directory (aka the watch directory)
        Returns the cached handler for ``save_name``, creating one whose
        type depends on what kind of file it is.
        """
        self._file_pusher.update_file(save_name, file_path)  # track upload progress
        # Handlers are cached per save_name; the first call decides the type.
        if save_name not in self._file_event_handlers:
            if save_name == 'wandb-history.jsonl':
                self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(
                    file_path, 'wandb-history.jsonl', self._api)
            elif save_name == 'wandb-events.jsonl':
                self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(
                    file_path, 'wandb-events.jsonl', self._api)
            elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:
                # overwrite the tensorboard but not every reload -- just
                # frequently enough to resemble realtime
                self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(
                    file_path, save_name, self._api, self._file_pusher)
            # Don't try to stream tensorboard files for now.
            # elif 'tfevents' in save_name:
            #    # TODO: This is hard-coded, but we want to give users control
            #    # over streaming files (or detect them).
            #    self._api.get_file_stream_api().set_file_policy(save_name,
            #                                                    BinaryFilePolicy())
            #    self._file_event_handlers[save_name] = FileEventHandlerBinaryStream(
            #        file_path, save_name, self._api)
            # Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
            # during upload, the request to Google hangs (at least, this is my working
            # theory). So for now we defer uploading everything til the end of the run.
            # TODO: send wandb-summary during run. One option is to copy to a temporary
            # file before uploading.
            elif save_name == config.FNAME:
                self._file_event_handlers[save_name] = FileEventHandlerConfig(
                    file_path, save_name, self._api, self._file_pusher, self._run)
            elif save_name == 'wandb-summary.json':
                # Load the summary into the syncer process for meta etc to work
                self._run.summary.load()
                self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())
                self._file_event_handlers[save_name] = FileEventHandlerSummary(
                    file_path, save_name, self._api, self._file_pusher, self._run)
            elif save_name.startswith('media/'):
                # Save media files immediately
                self._file_event_handlers[save_name] = FileEventHandlerOverwrite(
                    file_path, save_name, self._api, self._file_pusher)
            else:
                # Fall back to a user-policy-driven handler: "live" globs get
                # throttled overwrites, everything else is deferred to run end.
                Handler = FileEventHandlerOverwriteDeferred
                for policy, globs in six.iteritems(self._user_file_policies):
                    if policy == "end":
                        continue
                    for g in globs:
                        if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):
                            if policy == "live":
                                Handler = FileEventHandlerThrottledOverwriteMinWait
                self._file_event_handlers[save_name] = Handler(
                    file_path, save_name, self._api, self._file_pusher)
        return self._file_event_handlers[save_name]
def followed_streams(self, limit=25, offset=0):
"""Return the streams the current user follows.
Needs authorization ``user_read``.
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list`of :class:`models.Stream` instances
:raises: :class:`exceptions.NotAuthorizedError`
"""
r = self.kraken_request('GET', 'streams/followed',
params={'limit': limit,
'offset': offset})
return models.Stream.wrap_search(r) | Return the streams the current user follows.
Needs authorization ``user_read``.
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list`of :class:`models.Stream` instances
:raises: :class:`exceptions.NotAuthorizedError` | Below is the the instruction that describes the task:
### Input:
Return the streams the current user follows.
Needs authorization ``user_read``.
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list`of :class:`models.Stream` instances
:raises: :class:`exceptions.NotAuthorizedError`
### Response:
def followed_streams(self, limit=25, offset=0):
    """Return the streams the current user follows.

    Requires the ``user_read`` authorization scope.

    :param limit: maximum number of results
    :type limit: :class:`int`
    :param offset: offset for pagination
    :type offset: :class:`int`
    :returns: A list of streams
    :rtype: :class:`list` of :class:`models.Stream` instances
    :raises: :class:`exceptions.NotAuthorizedError`
    """
    pagination = {'limit': limit, 'offset': offset}
    response = self.kraken_request('GET', 'streams/followed',
                                   params=pagination)
    return models.Stream.wrap_search(response)
def _isclose(a: Any, b: Any, *, atol: Union[int, float]) -> bool:
"""Convenience wrapper around np.isclose."""
return True if np.isclose([a], [b], atol=atol, rtol=0.0)[0] else False | Convenience wrapper around np.isclose. | Below is the the instruction that describes the task:
### Input:
Convenience wrapper around np.isclose.
### Response:
def _isclose(a: Any, b: Any, *, atol: Union[int, float]) -> bool:
    """Convenience wrapper around np.isclose.

    Compares two scalars using an absolute tolerance only (rtol is pinned
    to 0.0) and returns a plain Python bool.
    """
    # bool() normalises numpy.bool_ to a real bool so callers can rely on
    # `is True` / `is False`; the original `True if ... else False` was a
    # redundant conditional expression.
    return bool(np.isclose([a], [b], atol=atol, rtol=0.0)[0])
def p_field_optional2_3(self, p):
"""
field : name arguments directives
"""
p[0] = Field(name=p[1], arguments=p[2], directives=p[3]) | field : name arguments directives | Below is the the instruction that describes the task:
### Input:
field : name arguments directives
### Response:
def p_field_optional2_3(self, p):
    """
    field : name arguments directives
    """
    # The docstring above is the PLY grammar production for this rule --
    # it is parsed by yacc and must not be edited as documentation.
    # p[1] = name, p[2] = arguments, p[3] = directives.
    p[0] = Field(name=p[1], arguments=p[2], directives=p[3])
def contamination_finder(self, input_path=None, report_path=None):
"""
Helper function to get confindr integrated into the assembly pipeline
"""
logging.info('Calculating contamination in reads')
if input_path is not None:
input_dir = input_path
else:
input_dir = self.path
if report_path is not None:
reportpath = report_path
else:
reportpath = os.path.join(input_dir, 'confindr')
confindr_report = os.path.join(input_dir, 'confindr', 'confindr_report.csv')
pipeline_report = os.path.join(reportpath, 'confindr_report.csv')
# Only proceed if the confindr report doesn't exist
if not os.path.isfile(confindr_report):
# # Create an object to store attributes to pass to confinder
# Clear and recreate the output folder
try:
shutil.rmtree(reportpath)
except IOError:
pass
make_path(reportpath)
# Run confindr
systemcall = 'confindr.py -i {input_dir} -o {output_dir} -d {database_dir} -bf 0.05'\
.format(input_dir=input_dir,
output_dir=os.path.join(input_dir, 'confindr'),
database_dir=os.path.join(self.reffilepath, 'ConFindr', 'databases'))
# Run the call
out, err = run_subprocess(systemcall)
write_to_logfile(systemcall, systemcall, self.logfile, None, None, None, None)
write_to_logfile(out, err, self.logfile, None, None, None, None)
logging.info('Contamination detection complete!')
# Load the confindr report into a dictionary using pandas
# https://stackoverflow.com/questions/33620982/reading-csv-file-as-dictionary-using-pandas
confindr_results = pandas.read_csv(confindr_report, index_col=0).T.to_dict()
# Find the results for each of the samples
for sample in self.metadata:
# Create a GenObject to store the results
sample.confindr = GenObject()
# Iterate through the dictionary to find the outputs for each sample
for line in confindr_results:
# If the current line corresponds to the sample of interest
if sample.name in line:
# Set the values using the appropriate keys as the attributes
sample.confindr.genus = confindr_results[line]['Genus'] if type(confindr_results[line]['Genus']) \
is not float else 'ND'
sample.confindr.num_contaminated_snvs = confindr_results[line]['NumContamSNVs']
sample.confindr.contam_status = confindr_results[line]['ContamStatus']
# Don't break parsing previous ConFindr reports that lack the percent contamination calculations
try:
sample.confindr.percent_contam = confindr_results[line]['PercentContam'] if \
str(confindr_results[line]['PercentContam']) != 'nan' else 0
except KeyError:
sample.confindr.percent_contam = 'ND'
try:
sample.confindr.percent_contam_std = \
confindr_results[line]['PercentContamStandardDeviation'] if \
str(confindr_results[line]['PercentContamStandardDeviation']) != 'nan' else 0
except KeyError:
sample.confindr.percent_contam_std = 'ND'
if sample.confindr.contam_status is True:
sample.confindr.contam_status = 'Contaminated'
elif sample.confindr.contam_status is False:
sample.confindr.contam_status = 'Clean'
# Re-write the output to be consistent with the rest of the pipeline
with open(pipeline_report, 'w') as csv:
data = 'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\n'
for sample in self.metadata:
data += '{str},{genus},{numcontamsnv},{status},{pc},{pcs}\n'.format(
str=sample.name,
genus=sample.confindr.genus,
numcontamsnv=sample.confindr.num_contaminated_snvs,
status=sample.confindr.contam_status,
pc=sample.confindr.percent_contam,
pcs=sample.confindr.percent_contam_std
)
csv.write(data) | Helper function to get confindr integrated into the assembly pipeline | Below is the the instruction that describes the task:
### Input:
Helper function to get confindr integrated into the assembly pipeline
### Response:
def contamination_finder(self, input_path=None, report_path=None):
        """
        Helper function to get confindr integrated into the assembly pipeline

        :param input_path: optional directory of reads to scan; defaults to
            ``self.path``.
        :param report_path: optional directory for the re-written report;
            defaults to ``<input_dir>/confindr``.
        """
        logging.info('Calculating contamination in reads')
        if input_path is not None:
            input_dir = input_path
        else:
            input_dir = self.path
        if report_path is not None:
            reportpath = report_path
        else:
            reportpath = os.path.join(input_dir, 'confindr')
        confindr_report = os.path.join(input_dir, 'confindr', 'confindr_report.csv')
        pipeline_report = os.path.join(reportpath, 'confindr_report.csv')
        # Only proceed if the confindr report doesn't exist
        if not os.path.isfile(confindr_report):
            # # Create an object to store attributes to pass to confinder
            # Clear and recreate the output folder
            try:
                shutil.rmtree(reportpath)
            except IOError:
                pass
            make_path(reportpath)
            # Run confindr
            systemcall = 'confindr.py -i {input_dir} -o {output_dir} -d {database_dir} -bf 0.05'\
                .format(input_dir=input_dir,
                        output_dir=os.path.join(input_dir, 'confindr'),
                        database_dir=os.path.join(self.reffilepath, 'ConFindr', 'databases'))
            # Run the call
            out, err = run_subprocess(systemcall)
            write_to_logfile(systemcall, systemcall, self.logfile, None, None, None, None)
            write_to_logfile(out, err, self.logfile, None, None, None, None)
            logging.info('Contamination detection complete!')
        # Load the confindr report into a dictionary using pandas
        # https://stackoverflow.com/questions/33620982/reading-csv-file-as-dictionary-using-pandas
        confindr_results = pandas.read_csv(confindr_report, index_col=0).T.to_dict()
        # Find the results for each of the samples
        for sample in self.metadata:
            # Create a GenObject to store the results
            sample.confindr = GenObject()
            # Iterate through the dictionary to find the outputs for each sample
            for line in confindr_results:
                # If the current line corresponds to the sample of interest
                if sample.name in line:
                    # Set the values using the appropriate keys as the attributes
                    sample.confindr.genus = confindr_results[line]['Genus'] if type(confindr_results[line]['Genus']) \
                        is not float else 'ND'
                    sample.confindr.num_contaminated_snvs = confindr_results[line]['NumContamSNVs']
                    sample.confindr.contam_status = confindr_results[line]['ContamStatus']
                    # Don't break parsing previous ConFindr reports that lack the percent contamination calculations
                    try:
                        sample.confindr.percent_contam = confindr_results[line]['PercentContam'] if \
                            str(confindr_results[line]['PercentContam']) != 'nan' else 0
                    except KeyError:
                        sample.confindr.percent_contam = 'ND'
                    try:
                        sample.confindr.percent_contam_std = \
                            confindr_results[line]['PercentContamStandardDeviation'] if \
                            str(confindr_results[line]['PercentContamStandardDeviation']) != 'nan' else 0
                    except KeyError:
                        sample.confindr.percent_contam_std = 'ND'
                    # Normalise the boolean status to a human-readable label.
                    if sample.confindr.contam_status is True:
                        sample.confindr.contam_status = 'Contaminated'
                    elif sample.confindr.contam_status is False:
                        sample.confindr.contam_status = 'Clean'
        # Re-write the output to be consistent with the rest of the pipeline
        # NOTE(review): the local name ``csv`` shadows the stdlib csv module.
        with open(pipeline_report, 'w') as csv:
            data = 'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\n'
            for sample in self.metadata:
                data += '{str},{genus},{numcontamsnv},{status},{pc},{pcs}\n'.format(
                    str=sample.name,
                    genus=sample.confindr.genus,
                    numcontamsnv=sample.confindr.num_contaminated_snvs,
                    status=sample.confindr.contam_status,
                    pc=sample.confindr.percent_contam,
                    pcs=sample.confindr.percent_contam_std
                )
            csv.write(data)
def index():
"""Show the landing page."""
gene_lists = app.db.gene_lists() if app.config['STORE_ENABLED'] else []
queries = app.db.gemini_queries() if app.config['STORE_ENABLED'] else []
case_groups = {}
for case in app.db.cases():
key = (case.variant_source, case.variant_type, case.variant_mode)
if key not in case_groups:
case_groups[key] = []
case_groups[key].append(case)
return render_template('index.html', case_groups=case_groups,
gene_lists=gene_lists, queries=queries) | Show the landing page. | Below is the the instruction that describes the task:
### Input:
Show the landing page.
### Response:
def index():
    """Show the landing page, with cases grouped by (source, type, mode)."""
    store_enabled = app.config['STORE_ENABLED']
    gene_lists = app.db.gene_lists() if store_enabled else []
    queries = app.db.gemini_queries() if store_enabled else []
    case_groups = {}
    for case in app.db.cases():
        group_key = (case.variant_source, case.variant_type, case.variant_mode)
        case_groups.setdefault(group_key, []).append(case)
    return render_template('index.html', case_groups=case_groups,
                           gene_lists=gene_lists, queries=queries)
def get(equity):
"""
Retrieve all current options chains for given equity.
.. versionchanged:: 0.5.0
Eliminate special exception handling.
Parameters
-------------
equity : str
Equity for which to retrieve options data.
Returns
-------------
optdata : :class:`~pynance.opt.core.Options`
All options data for given equity currently available
from Yahoo! Finance.
Examples
-------------
Basic usage::
>>> fopt = pn.opt.get('f')
To show useful information (expiration dates, stock price, quote time)
when retrieving options data, you can chain the call to
:func:`get` with :meth:`~pynance.opt.core.Options.info`::
>>> fopt = pn.opt.get('f').info()
Expirations:
...
Stock: 15.93
Quote time: 2015-03-07 16:00
"""
_optmeta = pdr.data.Options(equity, 'yahoo')
_optdata = _optmeta.get_all_data()
return Options(_optdata) | Retrieve all current options chains for given equity.
.. versionchanged:: 0.5.0
Eliminate special exception handling.
Parameters
-------------
equity : str
Equity for which to retrieve options data.
Returns
-------------
optdata : :class:`~pynance.opt.core.Options`
All options data for given equity currently available
from Yahoo! Finance.
Examples
-------------
Basic usage::
>>> fopt = pn.opt.get('f')
To show useful information (expiration dates, stock price, quote time)
when retrieving options data, you can chain the call to
:func:`get` with :meth:`~pynance.opt.core.Options.info`::
>>> fopt = pn.opt.get('f').info()
Expirations:
...
Stock: 15.93
Quote time: 2015-03-07 16:00 | Below is the the instruction that describes the task:
### Input:
Retrieve all current options chains for given equity.
.. versionchanged:: 0.5.0
Eliminate special exception handling.
Parameters
-------------
equity : str
Equity for which to retrieve options data.
Returns
-------------
optdata : :class:`~pynance.opt.core.Options`
All options data for given equity currently available
from Yahoo! Finance.
Examples
-------------
Basic usage::
>>> fopt = pn.opt.get('f')
To show useful information (expiration dates, stock price, quote time)
when retrieving options data, you can chain the call to
:func:`get` with :meth:`~pynance.opt.core.Options.info`::
>>> fopt = pn.opt.get('f').info()
Expirations:
...
Stock: 15.93
Quote time: 2015-03-07 16:00
### Response:
def get(equity):
    """
    Retrieve all current options chains for given equity.
    .. versionchanged:: 0.5.0
       Eliminate special exception handling.
    Parameters
    -------------
    equity : str
        Equity for which to retrieve options data.
    Returns
    -------------
    optdata : :class:`~pynance.opt.core.Options`
        All options data for given equity currently available
        from Yahoo! Finance.
    Examples
    -------------
    Basic usage::
        >>> fopt = pn.opt.get('f')
    To show useful information (expiration dates, stock price, quote time)
    when retrieving options data, you can chain the call to
    :func:`get` with :meth:`~pynance.opt.core.Options.info`::
        >>> fopt = pn.opt.get('f').info()
        Expirations:
        ...
        Stock: 15.93
        Quote time: 2015-03-07 16:00
    """
    # pandas-datareader's Options object wraps the Yahoo! Finance endpoint;
    # get_all_data() downloads every expiry in one call.
    _optmeta = pdr.data.Options(equity, 'yahoo')
    _optdata = _optmeta.get_all_data()
    return Options(_optdata)
def isValidClass(self, class_):
"""
Needs to be its own method so it can be called from both wantClass and
registerGoodClass.
"""
module = inspect.getmodule(class_)
valid = (
module in self._valid_modules
or (
hasattr(module, '__file__')
and module.__file__ in self._valid_named_modules
)
)
return valid and not private(class_) | Needs to be its own method so it can be called from both wantClass and
registerGoodClass. | Below is the the instruction that describes the task:
### Input:
Needs to be its own method so it can be called from both wantClass and
registerGoodClass.
### Response:
def isValidClass(self, class_):
    """
    Shared validity test, called from both wantClass and
    registerGoodClass.
    """
    module = inspect.getmodule(class_)
    # A class is acceptable when its module is either registered directly
    # or registered by file path -- and the class itself is not private.
    known = module in self._valid_modules
    if not known:
        known = (
            hasattr(module, '__file__')
            and module.__file__ in self._valid_named_modules
        )
    return known and not private(class_)
def _credit_card_type(self, card_type=None):
""" Returns a random credit card type instance. """
if card_type is None:
card_type = self.random_element(self.credit_card_types.keys())
elif isinstance(card_type, CreditCard):
return card_type
return self.credit_card_types[card_type] | Returns a random credit card type instance. | Below is the the instruction that describes the task:
### Input:
Returns a random credit card type instance.
### Response:
def _credit_card_type(self, card_type=None):
    """Resolve *card_type* to a credit card type instance.

    A ``CreditCard`` instance is returned as-is; ``None`` selects a random
    type; any other value is used as a lookup key.
    """
    if isinstance(card_type, CreditCard):
        return card_type
    if card_type is None:
        card_type = self.random_element(self.credit_card_types.keys())
    return self.credit_card_types[card_type]
def _patch_file(path, content):
"""Will backup the file then patch it"""
f = open(path)
existing_content = f.read()
f.close()
if existing_content == content:
# already patched
log.warn('Already patched.')
return False
log.warn('Patching...')
_rename_path(path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
return True | Will backup the file then patch it | Below is the the instruction that describes the task:
### Input:
Will backup the file then patch it
### Response:
def _patch_file(path, content):
    """Back up the file at *path*, then overwrite it with *content*.

    Returns False (leaving the file untouched) when it already contains
    *content*; returns True after patching.
    """
    # ``with`` guarantees the handle is closed even if read()/write()
    # raises -- the original manual open/close leaked on error.
    with open(path) as f:
        existing_content = f.read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # Back up the original before overwriting in place.
    _rename_path(path)
    with open(path, 'w') as f:
        f.write(content)
    return True
def rlist_modules(mname):
"""
Attempts to the submodules under a module recursively. This function
works for modules located in the default path as well as extended paths
via the sys.meta_path hooks.
This function carries the expectation that the hidden module variable
'__path__' has been set correctly.
:param mname: the module name to descend into
"""
module = import_module(mname)
if not module:
raise ImportError('Unable to load module {}'.format(mname))
found = list()
if _should_use_module_path(module):
mpath = module.__path__[0]
else:
mpaths = sys.path
mpath = _scan_paths_for(mname, mpaths)
if mpath:
for pmname in _search_for_modules(mpath, recursive=True):
found_mod = MODULE_PATH_SEP.join((mname, pmname))
found.append(found_mod)
return found | Attempts to the submodules under a module recursively. This function
works for modules located in the default path as well as extended paths
via the sys.meta_path hooks.
This function carries the expectation that the hidden module variable
'__path__' has been set correctly.
:param mname: the module name to descend into | Below is the instruction that describes the task:
### Input:
Attempts to the submodules under a module recursively. This function
works for modules located in the default path as well as extended paths
via the sys.meta_path hooks.
This function carries the expectation that the hidden module variable
'__path__' has been set correctly.
:param mname: the module name to descend into
### Response:
def rlist_modules(mname):
"""
Attempts to the submodules under a module recursively. This function
works for modules located in the default path as well as extended paths
via the sys.meta_path hooks.
This function carries the expectation that the hidden module variable
'__path__' has been set correctly.
:param mname: the module name to descend into
"""
module = import_module(mname)
if not module:
raise ImportError('Unable to load module {}'.format(mname))
found = list()
if _should_use_module_path(module):
mpath = module.__path__[0]
else:
mpaths = sys.path
mpath = _scan_paths_for(mname, mpaths)
if mpath:
for pmname in _search_for_modules(mpath, recursive=True):
found_mod = MODULE_PATH_SEP.join((mname, pmname))
found.append(found_mod)
return found |
def _glob_to_regexp(pat):
"""Compile a glob pattern into a regexp.
We need to do this because fnmatch allows * to match /, which we
don't want. E.g. an MANIFEST.in exclude of 'dirname/*css' should
match 'dirname/foo.css' but not 'dirname/subdir/bar.css'.
"""
pat = fnmatch.translate(pat)
# Note that distutils in Python 2.6 has a buggy glob_to_re in
# distutils.filelist -- it converts '*.cfg' to '[^/]*cfg' instead
# of '[^\\]*cfg' on Windows.
sep = r'\\\\' if os.path.sep == '\\' else os.path.sep
return re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^%s]' % sep, pat) | Compile a glob pattern into a regexp.
We need to do this because fnmatch allows * to match /, which we
don't want. E.g. an MANIFEST.in exclude of 'dirname/*css' should
match 'dirname/foo.css' but not 'dirname/subdir/bar.css'. | Below is the instruction that describes the task:
### Input:
Compile a glob pattern into a regexp.
We need to do this because fnmatch allows * to match /, which we
don't want. E.g. an MANIFEST.in exclude of 'dirname/*css' should
match 'dirname/foo.css' but not 'dirname/subdir/bar.css'.
### Response:
def _glob_to_regexp(pat):
"""Compile a glob pattern into a regexp.
We need to do this because fnmatch allows * to match /, which we
don't want. E.g. an MANIFEST.in exclude of 'dirname/*css' should
match 'dirname/foo.css' but not 'dirname/subdir/bar.css'.
"""
pat = fnmatch.translate(pat)
# Note that distutils in Python 2.6 has a buggy glob_to_re in
# distutils.filelist -- it converts '*.cfg' to '[^/]*cfg' instead
# of '[^\\]*cfg' on Windows.
sep = r'\\\\' if os.path.sep == '\\' else os.path.sep
return re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^%s]' % sep, pat) |
def apply(self, fun=None, axis=0):
"""
Apply a lambda expression to an H2OFrame.
:param fun: a lambda expression to be applied per row or per column.
:param axis: 0 = apply to each column; 1 = apply to each row
:returns: a new H2OFrame with the results of applying ``fun`` to the current frame.
"""
from .astfun import lambda_to_expr
assert_is_type(axis, 0, 1)
assert_is_type(fun, FunctionType)
assert_satisfies(fun, fun.__name__ == "<lambda>")
res = lambda_to_expr(fun)
return H2OFrame._expr(expr=ExprNode("apply", self, 1 + (axis == 0), *res)) | Apply a lambda expression to an H2OFrame.
:param fun: a lambda expression to be applied per row or per column.
:param axis: 0 = apply to each column; 1 = apply to each row
:returns: a new H2OFrame with the results of applying ``fun`` to the current frame. | Below is the instruction that describes the task:
### Input:
Apply a lambda expression to an H2OFrame.
:param fun: a lambda expression to be applied per row or per column.
:param axis: 0 = apply to each column; 1 = apply to each row
:returns: a new H2OFrame with the results of applying ``fun`` to the current frame.
### Response:
def apply(self, fun=None, axis=0):
"""
Apply a lambda expression to an H2OFrame.
:param fun: a lambda expression to be applied per row or per column.
:param axis: 0 = apply to each column; 1 = apply to each row
:returns: a new H2OFrame with the results of applying ``fun`` to the current frame.
"""
from .astfun import lambda_to_expr
assert_is_type(axis, 0, 1)
assert_is_type(fun, FunctionType)
assert_satisfies(fun, fun.__name__ == "<lambda>")
res = lambda_to_expr(fun)
return H2OFrame._expr(expr=ExprNode("apply", self, 1 + (axis == 0), *res)) |
def _my_pdf_formatter(data, format, ordered_alphabets) :
""" Generate a logo in PDF format.
Modified from weblogo version 3.4 source code.
"""
eps = _my_eps_formatter(data, format, ordered_alphabets).decode()
gs = weblogolib.GhostscriptAPI()
return gs.convert('pdf', eps, format.logo_width, format.logo_height) | Generate a logo in PDF format.
Modified from weblogo version 3.4 source code. | Below is the instruction that describes the task:
### Input:
Generate a logo in PDF format.
Modified from weblogo version 3.4 source code.
### Response:
def _my_pdf_formatter(data, format, ordered_alphabets) :
""" Generate a logo in PDF format.
Modified from weblogo version 3.4 source code.
"""
eps = _my_eps_formatter(data, format, ordered_alphabets).decode()
gs = weblogolib.GhostscriptAPI()
return gs.convert('pdf', eps, format.logo_width, format.logo_height) |
def delete(self):
"""
Override delete in parent class, this will also delete
the routing configuration referencing this interface.
::
engine = Engine('vm')
interface = engine.interface.get(2)
interface.delete()
"""
super(Interface, self).delete()
for route in self._engine.routing:
if route.to_delete:
route.delete()
self._engine._del_cache() | Override delete in parent class, this will also delete
the routing configuration referencing this interface.
::
engine = Engine('vm')
interface = engine.interface.get(2)
interface.delete() | Below is the instruction that describes the task:
### Input:
Override delete in parent class, this will also delete
the routing configuration referencing this interface.
::
engine = Engine('vm')
interface = engine.interface.get(2)
interface.delete()
### Response:
def delete(self):
"""
Override delete in parent class, this will also delete
the routing configuration referencing this interface.
::
engine = Engine('vm')
interface = engine.interface.get(2)
interface.delete()
"""
super(Interface, self).delete()
for route in self._engine.routing:
if route.to_delete:
route.delete()
self._engine._del_cache() |
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it.
"""
out = cls._find_packages_iter(convert_path(where))
out = cls.require_parents(out)
includes = cls._build_filter(*include)
excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude)
out = filter(includes, out)
out = filterfalse(excludes, out)
return list(out) | Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it. | Below is the instruction that describes the task:
### Input:
Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it.
### Response:
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it.
"""
out = cls._find_packages_iter(convert_path(where))
out = cls.require_parents(out)
includes = cls._build_filter(*include)
excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude)
out = filter(includes, out)
out = filterfalse(excludes, out)
return list(out) |
def generate(self, request, **kwargs):
""" proxy for the tileset.generate method """
# method check to avoid bad requests
self.method_check(request, allowed=['get'])
# create a basic bundle object for self.get_cached_obj_get.
basic_bundle = self.build_bundle(request=request)
# using the primary key defined in the url, obtain the tileset
tileset = self.cached_obj_get(
bundle=basic_bundle,
**self.remove_api_resource_names(kwargs))
# Return what the method output, tastypie will handle the serialization
return self.create_response(request, tileset.generate()) | proxy for the tileset.generate method | Below is the instruction that describes the task:
### Input:
proxy for the tileset.generate method
### Response:
def generate(self, request, **kwargs):
""" proxy for the tileset.generate method """
# method check to avoid bad requests
self.method_check(request, allowed=['get'])
# create a basic bundle object for self.get_cached_obj_get.
basic_bundle = self.build_bundle(request=request)
# using the primary key defined in the url, obtain the tileset
tileset = self.cached_obj_get(
bundle=basic_bundle,
**self.remove_api_resource_names(kwargs))
# Return what the method output, tastypie will handle the serialization
return self.create_response(request, tileset.generate()) |
def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Deal with oddly named and structured data returned by the server.
For more information, see `Bugzilla #1235019
<https://bugzilla.redhat.com/show_bug.cgi?id=1235019>`_
and `Bugzilla #1449749
<https://bugzilla.redhat.com/show_bug.cgi?id=1449749>`_.
`content_facet_attributes` are returned only in case any of facet
attributes were actually set.
Also add image to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
image.
"""
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
if 'parameters' in attrs:
attrs['host_parameters_attributes'] = attrs.pop('parameters')
else:
ignore.add('host_parameters_attributes')
if 'content_facet_attributes' not in attrs:
ignore.add('content_facet_attributes')
ignore.add('compute_attributes')
ignore.add('interfaces_attributes')
ignore.add('root_pass')
# Image entity requires compute_resource_id to initialize as it is
# part of its path. The thing is that entity_mixins.read() initializes
# entities by id only.
# Workaround is to add image to ignore, call entity_mixins.read()
# and then add 'manually' initialized image to the result.
# If image_id is None set image to None as it is done by default.
ignore.add('image')
# host id is required for interface initialization
ignore.add('interface')
ignore.add('build_status_label')
result = super(Host, self).read(entity, attrs, ignore, params)
if attrs.get('image_id'):
result.image = Image(
server_config=self._server_config,
id=attrs.get('image_id'),
compute_resource=attrs.get('compute_resource_id'),
)
else:
result.image = None
if 'interfaces' in attrs and attrs['interfaces']:
result.interface = [
Interface(
self._server_config,
host=result.id,
id=interface['id'],
)
for interface in attrs['interfaces']
]
if 'build_status_label' in attrs:
result.build_status_label = attrs['build_status_label']
return result | Deal with oddly named and structured data returned by the server.
For more information, see `Bugzilla #1235019
<https://bugzilla.redhat.com/show_bug.cgi?id=1235019>`_
and `Bugzilla #1449749
<https://bugzilla.redhat.com/show_bug.cgi?id=1449749>`_.
`content_facet_attributes` are returned only in case any of facet
attributes were actually set.
Also add image to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
image. | Below is the instruction that describes the task:
### Input:
Deal with oddly named and structured data returned by the server.
For more information, see `Bugzilla #1235019
<https://bugzilla.redhat.com/show_bug.cgi?id=1235019>`_
and `Bugzilla #1449749
<https://bugzilla.redhat.com/show_bug.cgi?id=1449749>`_.
`content_facet_attributes` are returned only in case any of facet
attributes were actually set.
Also add image to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
image.
### Response:
def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Deal with oddly named and structured data returned by the server.
For more information, see `Bugzilla #1235019
<https://bugzilla.redhat.com/show_bug.cgi?id=1235019>`_
and `Bugzilla #1449749
<https://bugzilla.redhat.com/show_bug.cgi?id=1449749>`_.
`content_facet_attributes` are returned only in case any of facet
attributes were actually set.
Also add image to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
image.
"""
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
if 'parameters' in attrs:
attrs['host_parameters_attributes'] = attrs.pop('parameters')
else:
ignore.add('host_parameters_attributes')
if 'content_facet_attributes' not in attrs:
ignore.add('content_facet_attributes')
ignore.add('compute_attributes')
ignore.add('interfaces_attributes')
ignore.add('root_pass')
# Image entity requires compute_resource_id to initialize as it is
# part of its path. The thing is that entity_mixins.read() initializes
# entities by id only.
# Workaround is to add image to ignore, call entity_mixins.read()
# and then add 'manually' initialized image to the result.
# If image_id is None set image to None as it is done by default.
ignore.add('image')
# host id is required for interface initialization
ignore.add('interface')
ignore.add('build_status_label')
result = super(Host, self).read(entity, attrs, ignore, params)
if attrs.get('image_id'):
result.image = Image(
server_config=self._server_config,
id=attrs.get('image_id'),
compute_resource=attrs.get('compute_resource_id'),
)
else:
result.image = None
if 'interfaces' in attrs and attrs['interfaces']:
result.interface = [
Interface(
self._server_config,
host=result.id,
id=interface['id'],
)
for interface in attrs['interfaces']
]
if 'build_status_label' in attrs:
result.build_status_label = attrs['build_status_label']
return result |
def read_input(self, filename, has_header=True):
"""
filename is any filename, or something on which open() can be called
for example:
csv_input = CSVInput()
csv_input.read_input("csvfile.csv")
"""
stream = open(filename)
reader = csv.reader(stream)
csv_data = []
for (i, row) in enumerate(reader):
if i==0:
if not has_header:
csv_data.append([str(i) for i in xrange(0,len(row))])
csv_data.append(row)
self.data = csv_data | filename is any filename, or something on which open() can be called
for example:
csv_input = CSVInput()
csv_input.read_input("csvfile.csv") | Below is the instruction that describes the task:
### Input:
filename is any filename, or something on which open() can be called
for example:
csv_input = CSVInput()
csv_input.read_input("csvfile.csv")
### Response:
def read_input(self, filename, has_header=True):
"""
filename is any filename, or something on which open() can be called
for example:
csv_input = CSVInput()
csv_input.read_input("csvfile.csv")
"""
stream = open(filename)
reader = csv.reader(stream)
csv_data = []
for (i, row) in enumerate(reader):
if i==0:
if not has_header:
csv_data.append([str(i) for i in xrange(0,len(row))])
csv_data.append(row)
self.data = csv_data |
def sign_ssh_challenge(self, blob, identity):
"""Sign given blob using a private key on the device."""
msg = _parse_ssh_blob(blob)
log.debug('%s: user %r via %r (%r)',
msg['conn'], msg['user'], msg['auth'], msg['key_type'])
log.debug('nonce: %r', msg['nonce'])
fp = msg['public_key']['fingerprint']
log.debug('fingerprint: %s', fp)
log.debug('hidden challenge size: %d bytes', len(blob))
log.info('please confirm user "%s" login to "%s" using %s...',
msg['user'].decode('ascii'), identity.to_string(),
self.device)
with self.device:
return self.device.sign(blob=blob, identity=identity) | Sign given blob using a private key on the device. | Below is the instruction that describes the task:
### Input:
Sign given blob using a private key on the device.
### Response:
def sign_ssh_challenge(self, blob, identity):
"""Sign given blob using a private key on the device."""
msg = _parse_ssh_blob(blob)
log.debug('%s: user %r via %r (%r)',
msg['conn'], msg['user'], msg['auth'], msg['key_type'])
log.debug('nonce: %r', msg['nonce'])
fp = msg['public_key']['fingerprint']
log.debug('fingerprint: %s', fp)
log.debug('hidden challenge size: %d bytes', len(blob))
log.info('please confirm user "%s" login to "%s" using %s...',
msg['user'].decode('ascii'), identity.to_string(),
self.device)
with self.device:
return self.device.sign(blob=blob, identity=identity) |
def output_positions(positions, positions_path):
"""Output the positions of an image to a positions.dat file.
Positions correspond to a set of pixels in the lensed source galaxy that are anticipated to come from the same \
multiply-imaged region of the source-plane. Mass models which do not trace the pixels within a threshold value of \
one another are resampled during the non-linear search.
Positions are stored in a .dat file, where each line of the file gives a list of list of (y,x) positions which \
correspond to the same region of the source-plane. Thus, multiple source-plane regions can be input over multiple \
lines of the same positions file.
Parameters
----------
positions : [[[]]]
The lists of positions (e.g. [[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]]])
positions_path : str
The path to the positions .dat file containing the positions (e.g. '/path/to/positions.dat')
"""
with open(positions_path, 'w') as f:
for position in positions:
f.write("%s\n" % position) | Output the positions of an image to a positions.dat file.
Positions correspond to a set of pixels in the lensed source galaxy that are anticipated to come from the same \
multiply-imaged region of the source-plane. Mass models which do not trace the pixels within a threshold value of \
one another are resampled during the non-linear search.
Positions are stored in a .dat file, where each line of the file gives a list of list of (y,x) positions which \
correspond to the same region of the source-plane. Thus, multiple source-plane regions can be input over multiple \
lines of the same positions file.
Parameters
----------
positions : [[[]]]
The lists of positions (e.g. [[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]]])
positions_path : str
The path to the positions .dat file containing the positions (e.g. '/path/to/positions.dat') | Below is the instruction that describes the task:
### Input:
Output the positions of an image to a positions.dat file.
Positions correspond to a set of pixels in the lensed source galaxy that are anticipated to come from the same \
multiply-imaged region of the source-plane. Mass models which do not trace the pixels within a threshold value of \
one another are resampled during the non-linear search.
Positions are stored in a .dat file, where each line of the file gives a list of list of (y,x) positions which \
correspond to the same region of the source-plane. Thus, multiple source-plane regions can be input over multiple \
lines of the same positions file.
Parameters
----------
positions : [[[]]]
The lists of positions (e.g. [[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]]])
positions_path : str
The path to the positions .dat file containing the positions (e.g. '/path/to/positions.dat')
### Response:
def output_positions(positions, positions_path):
"""Output the positions of an image to a positions.dat file.
Positions correspond to a set of pixels in the lensed source galaxy that are anticipated to come from the same \
multiply-imaged region of the source-plane. Mass models which do not trace the pixels within a threshold value of \
one another are resampled during the non-linear search.
Positions are stored in a .dat file, where each line of the file gives a list of list of (y,x) positions which \
correspond to the same region of the source-plane. Thus, multiple source-plane regions can be input over multiple \
lines of the same positions file.
Parameters
----------
positions : [[[]]]
The lists of positions (e.g. [[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]]])
positions_path : str
The path to the positions .dat file containing the positions (e.g. '/path/to/positions.dat')
"""
with open(positions_path, 'w') as f:
for position in positions:
f.write("%s\n" % position) |
def send(self, data_to_send):
""" Immediately sends the data passed in to :func:`service_endpoint_uri`. If the service request fails, the
passed in items are pushed back to the :func:`queue`.
Args:
data_to_send (Array): an array of :class:`contracts.Envelope` objects to send to the service.
"""
request_payload = json.dumps([ a.write() for a in data_to_send ])
request = HTTPClient.Request(self._service_endpoint_uri, bytearray(request_payload, 'utf-8'), { 'Accept': 'application/json', 'Content-Type' : 'application/json; charset=utf-8' })
try:
response = HTTPClient.urlopen(request, timeout=self._timeout)
status_code = response.getcode()
if 200 <= status_code < 300:
return
except HTTPError as e:
if e.getcode() == 400:
return
except Exception as e:
pass
# Add our unsent data back on to the queue
for data in data_to_send:
self._queue.put(data) | Immediately sends the data passed in to :func:`service_endpoint_uri`. If the service request fails, the
passed in items are pushed back to the :func:`queue`.
Args:
data_to_send (Array): an array of :class:`contracts.Envelope` objects to send to the service. | Below is the instruction that describes the task:
### Input:
Immediately sends the data passed in to :func:`service_endpoint_uri`. If the service request fails, the
passed in items are pushed back to the :func:`queue`.
Args:
data_to_send (Array): an array of :class:`contracts.Envelope` objects to send to the service.
### Response:
def send(self, data_to_send):
""" Immediately sends the data passed in to :func:`service_endpoint_uri`. If the service request fails, the
passed in items are pushed back to the :func:`queue`.
Args:
data_to_send (Array): an array of :class:`contracts.Envelope` objects to send to the service.
"""
request_payload = json.dumps([ a.write() for a in data_to_send ])
request = HTTPClient.Request(self._service_endpoint_uri, bytearray(request_payload, 'utf-8'), { 'Accept': 'application/json', 'Content-Type' : 'application/json; charset=utf-8' })
try:
response = HTTPClient.urlopen(request, timeout=self._timeout)
status_code = response.getcode()
if 200 <= status_code < 300:
return
except HTTPError as e:
if e.getcode() == 400:
return
except Exception as e:
pass
# Add our unsent data back on to the queue
for data in data_to_send:
self._queue.put(data) |
def getData(self, n):
"""Returns the next n values for the distribution as a list."""
records = [self.getNext() for x in range(n)]
return records | Returns the next n values for the distribution as a list. | Below is the instruction that describes the task:
### Input:
Returns the next n values for the distribution as a list.
### Response:
def getData(self, n):
"""Returns the next n values for the distribution as a list."""
records = [self.getNext() for x in range(n)]
return records |
def print_row(self, row, rstrip=True):
""" Format and print the pre-rendered data to the output device. """
line = ''.join(map(str, row))
print(line.rstrip() if rstrip else line, file=self.table.file) | Format and print the pre-rendered data to the output device. | Below is the instruction that describes the task:
### Input:
Format and print the pre-rendered data to the output device.
### Response:
def print_row(self, row, rstrip=True):
""" Format and print the pre-rendered data to the output device. """
line = ''.join(map(str, row))
print(line.rstrip() if rstrip else line, file=self.table.file) |
def relink_all(cls, old_file, new_file):
"""Relink all object versions (for a given file) to a new file.
.. warning::
Use this method with great care.
"""
assert old_file.checksum == new_file.checksum
assert old_file.id
assert new_file.id
with db.session.begin_nested():
ObjectVersion.query.filter_by(file_id=str(old_file.id)).update({
ObjectVersion.file_id: str(new_file.id)}) | Relink all object versions (for a given file) to a new file.
.. warning::
Use this method with great care. | Below is the instruction that describes the task:
### Input:
Relink all object versions (for a given file) to a new file.
.. warning::
Use this method with great care.
### Response:
def relink_all(cls, old_file, new_file):
"""Relink all object versions (for a given file) to a new file.
.. warning::
Use this method with great care.
"""
assert old_file.checksum == new_file.checksum
assert old_file.id
assert new_file.id
with db.session.begin_nested():
ObjectVersion.query.filter_by(file_id=str(old_file.id)).update({
ObjectVersion.file_id: str(new_file.id)}) |
def fit(self, df, duration_col, event_col=None, weights_col=None, show_progress=False):
"""
Parameters
----------
Fit the Aalen Additive model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights.
show_progress: boolean, optional (default=False)
Since the fitter is iterative, show iteration number.
Returns
-------
self: AalenAdditiveFitter
self with additional new properties: ``cumulative_hazards_``, etc.
Examples
--------
>>> from lifelines import AalenAdditiveFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aaf = AalenAdditiveFitter()
>>> aaf.fit(df, 'T', 'E')
>>> aaf.predict_median(df)
>>> aaf.print_summary()
"""
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self._censoring_type = CensoringType.RIGHT
df = df.copy()
self.duration_col = duration_col
self.event_col = event_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
X, T, E, weights = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
self._norm_std = X.std(0)
# if we included an intercept, we need to fix not divide by zero.
if self.fit_intercept:
self._norm_std["_intercept"] = 1.0
else:
# a _intercept was provided
self._norm_std[self._norm_std < 1e-8] = 1.0
self.hazards_, self.cumulative_hazards_, self.cumulative_variance_ = self._fit_model(
normalize(X, 0, self._norm_std), T, E, weights, show_progress
)
self.hazards_ /= self._norm_std
self.cumulative_hazards_ /= self._norm_std
self.cumulative_variance_ /= self._norm_std
self.confidence_intervals_ = self._compute_confidence_intervals()
self._index = self.hazards_.index
self._predicted_hazards_ = self.predict_cumulative_hazard(X).iloc[-1].values.ravel()
return self | Parameters
----------
Fit the Aalen Additive model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights.
show_progress: boolean, optional (default=False)
Since the fitter is iterative, show iteration number.
Returns
-------
self: AalenAdditiveFitter
self with additional new properties: ``cumulative_hazards_``, etc.
Examples
--------
>>> from lifelines import AalenAdditiveFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aaf = AalenAdditiveFitter()
>>> aaf.fit(df, 'T', 'E')
>>> aaf.predict_median(df)
>>> aaf.print_summary() | Below is the instruction that describes the task:
### Input:
Parameters
----------
Fit the Aalen Additive model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights.
show_progress: boolean, optional (default=False)
Since the fitter is iterative, show iteration number.
Returns
-------
self: AalenAdditiveFitter
self with additional new properties: ``cumulative_hazards_``, etc.
Examples
--------
>>> from lifelines import AalenAdditiveFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aaf = AalenAdditiveFitter()
>>> aaf.fit(df, 'T', 'E')
>>> aaf.predict_median(df)
>>> aaf.print_summary()
### Response:
def fit(self, df, duration_col, event_col=None, weights_col=None, show_progress=False):
"""
Fit the Aalen Additive model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights.
show_progress: boolean, optional (default=False)
Since the fitter is iterative, show iteration number.
Returns
-------
self: AalenAdditiveFitter
self with additional new properties: ``cumulative_hazards_``, etc.
Examples
--------
>>> from lifelines import AalenAdditiveFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aaf = AalenAdditiveFitter()
>>> aaf.fit(df, 'T', 'E')
>>> aaf.predict_median(df)
>>> aaf.print_summary()
"""
# Record fit metadata: UTC timestamp and the censoring type (right censoring).
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self._censoring_type = CensoringType.RIGHT
# Work on a copy so the caller's DataFrame is never mutated.
df = df.copy()
self.duration_col = duration_col
self.event_col = event_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
# Split the frame into covariates X, durations T, event indicators E, and weights.
X, T, E, weights = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
# Per-column standard deviations used to normalize X before fitting.
self._norm_std = X.std(0)
# if we included an intercept, we need to fix not divide by zero.
if self.fit_intercept:
self._norm_std["_intercept"] = 1.0
else:
# a _intercept column was provided by the user; guard near-constant columns too.
self._norm_std[self._norm_std < 1e-8] = 1.0
# Fit on normalized covariates; the estimates are un-scaled immediately below.
self.hazards_, self.cumulative_hazards_, self.cumulative_variance_ = self._fit_model(
normalize(X, 0, self._norm_std), T, E, weights, show_progress
)
self.hazards_ /= self._norm_std
self.cumulative_hazards_ /= self._norm_std
self.cumulative_variance_ /= self._norm_std
self.confidence_intervals_ = self._compute_confidence_intervals()
self._index = self.hazards_.index
# Cache each subject's cumulative hazard at the last observed timepoint.
# NOTE(review): assumes the final row of predict_cumulative_hazard is the
# largest time -- confirm against predict_cumulative_hazard's ordering.
self._predicted_hazards_ = self.predict_cumulative_hazard(X).iloc[-1].values.ravel()
return self
def evaluate_method(method, events, aux=0.):
"""Evaluate a TMVA::MethodBase over a NumPy array.
.. warning:: TMVA::Reader has known problems with thread safety in versions
of ROOT earlier than 6.03. There will potentially be a crash if you call
``method = reader.FindMVA(name)`` in Python and then pass this
``method`` here. Consider using ``evaluate_reader`` instead if you are
affected by this crash.
Parameters
----------
method : TMVA::MethodBase
A TMVA::MethodBase instance with variables booked in exactly the same
order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
aux : float, optional (default=0.)
Auxiliary value used by MethodCuts to set the desired signal
efficiency.
Returns
-------
output : numpy array of shape [n_events]
The method output value for each event
See Also
--------
evaluate_reader
"""
if not isinstance(method, TMVA.MethodBase):
raise TypeError("reader must be a TMVA.MethodBase instance")
# Coerce to a C-contiguous float64 array, as required by the C extension below.
events = np.ascontiguousarray(events, dtype=np.float64)
if events.ndim == 1:
# convert to 2D: treat a flat vector as a single-variable column
events = events[:, np.newaxis]
elif events.ndim != 2:
raise ValueError(
"events must be a two-dimensional array "
"with one event per row")
# The evaluation itself happens in the compiled _libtmvanumpy extension.
return _libtmvanumpy.evaluate_method(ROOT.AsCObject(method), events, aux) | Evaluate a TMVA::MethodBase over a NumPy array.
.. warning:: TMVA::Reader has known problems with thread safety in versions
of ROOT earlier than 6.03. There will potentially be a crash if you call
``method = reader.FindMVA(name)`` in Python and then pass this
``method`` here. Consider using ``evaluate_reader`` instead if you are
affected by this crash.
Parameters
----------
method : TMVA::MethodBase
A TMVA::MethodBase instance with variables booked in exactly the same
order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
aux : float, optional (default=0.)
Auxiliary value used by MethodCuts to set the desired signal
efficiency.
Returns
-------
output : numpy array of shape [n_events]
The method output value for each event
See Also
--------
evaluate_reader | Below is the instruction that describes the task:
### Input:
Evaluate a TMVA::MethodBase over a NumPy array.
.. warning:: TMVA::Reader has known problems with thread safety in versions
of ROOT earlier than 6.03. There will potentially be a crash if you call
``method = reader.FindMVA(name)`` in Python and then pass this
``method`` here. Consider using ``evaluate_reader`` instead if you are
affected by this crash.
Parameters
----------
method : TMVA::MethodBase
A TMVA::MethodBase instance with variables booked in exactly the same
order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
aux : float, optional (default=0.)
Auxiliary value used by MethodCuts to set the desired signal
efficiency.
Returns
-------
output : numpy array of shape [n_events]
The method output value for each event
See Also
--------
evaluate_reader
### Response:
def evaluate_method(method, events, aux=0.):
    """Evaluate a TMVA::MethodBase over a NumPy array.

    ``method`` must be a TMVA::MethodBase whose variables were booked in
    exactly the same order as the columns of ``events``. A 1D ``events``
    input is treated as a single-variable column; otherwise it must be a
    two-dimensional array with one event per row. ``aux`` is the auxiliary
    value used by MethodCuts to set the desired signal efficiency.

    Returns a 1D numpy array of shape [n_events] with the method output
    value for each event.

    .. warning:: TMVA::Reader has thread-safety problems in ROOT versions
       earlier than 6.03; consider ``evaluate_reader`` if you are affected.

    See Also
    --------
    evaluate_reader
    """
    if not isinstance(method, TMVA.MethodBase):
        raise TypeError("reader must be a TMVA.MethodBase instance")
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim not in (1, 2):
        raise ValueError(
            "events must be a two-dimensional array "
            "with one event per row")
    if events.ndim == 1:
        # Promote a flat vector to a single-column 2D array.
        events = events[:, np.newaxis]
    return _libtmvanumpy.evaluate_method(ROOT.AsCObject(method), events, aux)
def key_callback(self, window, key, scancode, action, mods):
"""Press ESCAPE to quit the application (standard GLFW key-callback signature)."""
# React only to the initial press of ESC; flag the window so the main loop exits.
if key == glfw.KEY_ESCAPE and action == glfw.PRESS:
glfw.SetWindowShouldClose(self.window, True) | press ESCAPE to quite the application | Below is the the instruction that describes the task:
### Input:
press ESCAPE to quit the application
### Response:
def key_callback(self, window, key, scancode, action, mods):
    """GLFW key callback: request window close when ESCAPE is pressed."""
    # Ignore everything except the initial press of the ESC key.
    if key != glfw.KEY_ESCAPE or action != glfw.PRESS:
        return
    glfw.SetWindowShouldClose(self.window, True)
def _pca_scores(
scores,
pc1=0,
pc2=1,
fcol=None,
ecol=None,
marker='o',
markersize=30,
label_scores=None,
show_covariance_ellipse=True,
optimize_label_iter=OPTIMIZE_LABEL_ITER_DEFAULT,
**kwargs
):
"""
Plot a scores plot for two principal components as AxB scatter plot.
Returns the plotted axis.
:param scores: DataFrame containing scores
:param pc1: Column indexer into scores for PC1
:param pc2: Column indexer into scores for PC2
:param fcol: Face (fill) color definition
:param ecol: Edge color definition
:param marker: Marker style (matplotlib; default 'o')
:param markersize: int Size of the marker
:param label_scores: Index level to label markers with
:param show_covariance_ellipse: Plot covariance (2*std) ellipse around each grouping
:param optimize_label_iter: Number of iterations to run label adjustment algorithm
:return: Generated axes
"""
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1,1,1)
# NOTE(review): `levels` is never used below; candidate for removal.
levels = [0,1]
# One scatter series per unique top-level column grouping.
for c in set(scores.columns.values):
try:
data = scores[c].values.reshape(2,-1)
# NOTE(review): bare except silently skips groupings that cannot be reshaped
# to 2 rows; consider narrowing to `except Exception:`.
except:
continue
# Resolve face/edge colors for this grouping; edge falls back to the face color.
fc = hierarchical_match(fcol, c, 'k')
ec = hierarchical_match(ecol, c)
if ec is None:
ec = fc
# markersize may be a column-level name, a callable, or a plain number.
if type(markersize) == str:
# Use as a key vs. index value in this levels
idx = scores.columns.names.index(markersize)
s = c[idx]
elif callable(markersize):
s = markersize(c)
else:
s = markersize
ax.scatter(data[pc1,:], data[pc2,:], s=s, marker=marker, edgecolors=ec, c=fc)
# Draw a 2*std covariance ellipse only when the grouping has more than 2 points.
if show_covariance_ellipse and data.shape[1] > 2:
cov = data[[pc1, pc2], :].T
ellip = plot_point_cov(cov, nstd=2, linestyle='dashed', linewidth=0.5, edgecolor=ec or fc,
alpha=0.8) #**kwargs for ellipse styling
ax.add_artist(ellip)
# Optionally annotate every point with a label built from the chosen index level(s).
if label_scores:
scores_f = scores.iloc[ [pc1, pc2] ]
idxs = get_index_list( scores_f.columns.names, label_scores )
texts = []
for n, (x, y) in enumerate(scores_f.T.values):
t = ax.text(x, y, build_combined_label( scores_f.columns.values[n], idxs, ', '), bbox=dict(boxstyle='round,pad=0.3', fc='#ffffff', ec='none', alpha=0.6))
texts.append(t)
# Nudge overlapping labels apart (adjustText), capped at optimize_label_iter passes.
if texts and optimize_label_iter:
adjust_text(
texts,
lim=optimize_label_iter
)
ax.set_xlabel(scores.index[pc1], fontsize=16)
ax.set_ylabel(scores.index[pc2], fontsize=16)
fig.tight_layout()
return ax | Plot a scores plot for two principal components as AxB scatter plot.
Returns the plotted axis.
:param scores: DataFrame containing scores
:param pc1: Column indexer into scores for PC1
:param pc2: Column indexer into scores for PC2
:param fcol: Face (fill) color definition
:param ecol: Edge color definition
:param marker: Marker style (matplotlib; default 'o')
:param markersize: int Size of the marker
:param label_scores: Index level to label markers with
:param show_covariance_ellipse: Plot covariance (2*std) ellipse around each grouping
:param optimize_label_iter: Number of iterations to run label adjustment algorithm
:return: Generated axes | Below is the instruction that describes the task:
### Input:
Plot a scores plot for two principal components as AxB scatter plot.
Returns the plotted axis.
:param scores: DataFrame containing scores
:param pc1: Column indexer into scores for PC1
:param pc2: Column indexer into scores for PC2
:param fcol: Face (fill) color definition
:param ecol: Edge color definition
:param marker: Marker style (matplotlib; default 'o')
:param markersize: int Size of the marker
:param label_scores: Index level to label markers with
:param show_covariance_ellipse: Plot covariance (2*std) ellipse around each grouping
:param optimize_label_iter: Number of iterations to run label adjustment algorithm
:return: Generated axes
### Response:
def _pca_scores(
    scores,
    pc1=0,
    pc2=1,
    fcol=None,
    ecol=None,
    marker='o',
    markersize=30,
    label_scores=None,
    show_covariance_ellipse=True,
    optimize_label_iter=OPTIMIZE_LABEL_ITER_DEFAULT,
    **kwargs
):
    """
    Plot a scores plot for two principal components as AxB scatter plot.
    Returns the plotted axis.
    :param scores: DataFrame containing scores
    :param pc1: Column indexer into scores for PC1
    :param pc2: Column indexer into scores for PC2
    :param fcol: Face (fill) color definition
    :param ecol: Edge color definition
    :param marker: Marker style (matplotlib; default 'o')
    :param markersize: int, column-level name (str), or callable giving the marker size
    :param label_scores: Index level to label markers with
    :param show_covariance_ellipse: Plot covariance (2*std) ellipse around each grouping
    :param optimize_label_iter: Number of iterations to run label adjustment algorithm
    :return: Generated axes
    """
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1)
    # One scatter series per unique top-level column grouping.
    for c in set(scores.columns.values):
        try:
            data = scores[c].values.reshape(2, -1)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; groupings that cannot be reshaped are skipped.
            continue
        # Face color falls back to black; edge color falls back to the face color.
        fc = hierarchical_match(fcol, c, 'k')
        ec = hierarchical_match(ecol, c)
        if ec is None:
            ec = fc
        # markersize may be a column-level name, a callable, or a plain number.
        if isinstance(markersize, str):
            idx = scores.columns.names.index(markersize)
            s = c[idx]
        elif callable(markersize):
            s = markersize(c)
        else:
            s = markersize
        ax.scatter(data[pc1, :], data[pc2, :], s=s, marker=marker, edgecolors=ec, c=fc)
        # A 2*std covariance ellipse is only meaningful with more than 2 points.
        if show_covariance_ellipse and data.shape[1] > 2:
            cov = data[[pc1, pc2], :].T
            ellip = plot_point_cov(cov, nstd=2, linestyle='dashed', linewidth=0.5,
                                   edgecolor=ec or fc, alpha=0.8)
            ax.add_artist(ellip)
    # Optionally annotate every point with a label built from the chosen index level(s).
    if label_scores:
        scores_f = scores.iloc[[pc1, pc2]]
        idxs = get_index_list(scores_f.columns.names, label_scores)
        texts = []
        for n, (x, y) in enumerate(scores_f.T.values):
            t = ax.text(x, y, build_combined_label(scores_f.columns.values[n], idxs, ', '),
                        bbox=dict(boxstyle='round,pad=0.3', fc='#ffffff', ec='none', alpha=0.6))
            texts.append(t)
        # Nudge overlapping labels apart (adjustText), capped at optimize_label_iter passes.
        if texts and optimize_label_iter:
            adjust_text(
                texts,
                lim=optimize_label_iter
            )
    ax.set_xlabel(scores.index[pc1], fontsize=16)
    ax.set_ylabel(scores.index[pc2], fontsize=16)
    fig.tight_layout()
    return ax
def from_array(a, mode=None, info={}):
    """Create a PNG :class:`Image` object from a 2- or 3-dimensional
    array. One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.
    .. note :
    The use of the term *3-dimensional* is for marketing purposes
    only. It doesn't actually work. Please bear with us. Meanwhile
    enjoy the complimentary snacks (on request) and please use a
    2-dimensional array.
    Unless they are specified using the *info* parameter, the PNG's
    height and width are taken from the array size. For a 3 dimensional
    array the first axis is the height; the second axis is the width;
    and the third axis is the channel number. Thus an RGB image that is
    16 pixels high and 8 wide will use an array that is 16x8x3. For 2
    dimensional arrays the first axis is the height, but the second axis
    is ``width*channels``, so an RGB image that is 16 pixels high and 8
    wide will use a 2-dimensional array that is 16x24 (each row will be
    8*3==24 sample values).
    *mode* is a string that specifies the image colour format in a
    PIL-style mode. It can be:
    ``'L'``
    greyscale (1 channel)
    ``'LA'``
    greyscale with alpha (2 channel)
    ``'RGB'``
    colour image (3 channel)
    ``'RGBA'``
    colour image with alpha (4 channel)
    The mode string can also specify the bit depth (overriding how this
    function normally derives the bit depth, see below). Appending
    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.
    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.
    The array is expected to be a ``numpy`` array, but it can be any
    suitable Python sequence. For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
    rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
    third dimension, unless an exception is raised in which case a
    2-dimensional array is assumed. It's slightly more complicated than
    that because an iterator of rows can be used, and it all still
    works. Using an iterator allows data to be streamed efficiently.
    The bit depth of the PNG is normally taken from the array element's
    datatype (but if *mode* specifies a bitdepth then that is used
    instead). The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects. A 1 byte datatype will give a bit depth of
    8, a 2 byte datatype will give a bit depth of 16. If the datatype
    does not have an implicit size, for example it is a plain Python
    list of lists, as above, then a default of 8 is used.
    The *info* parameter is a dictionary that can be used to specify
    metadata (in the same style as the arguments to the
    :class:``png.Writer`` class). For this function the keys that are
    useful are:
    height
    overrides the height derived from the array dimensions and allows
    *a* to be an iterable.
    width
    overrides the width derived from the array dimensions.
    bitdepth
    overrides the bit depth derived from the element datatype (but
    must match *mode* if that also specifies a bit depth).
    Generally anything specified in the
    *info* dictionary will override any implicit choices that this
    function would otherwise make, but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and false when
    mode is ``'RGB'`` or ``'RGBA'``.
    """
    # We abuse the *info* parameter by modifying it. Take a copy here.
    # (Also typechecks *info* to some extent).
    info = dict(info)
    # Syntax check mode string.
    bitdepth = None
    try:
        # Assign the 'L' or 'RGBA' part to `gotmode`.
        if mode.startswith('L'):
            gotmode = 'L'
            mode = mode[1:]
        elif mode.startswith('RGB'):
            gotmode = 'RGB'
            mode = mode[3:]
        else:
            raise Error()
        if mode.startswith('A'):
            gotmode += 'A'
            mode = mode[1:]
        # Skip any optional ';'
        while mode.startswith(';'):
            mode = mode[1:]
        # Parse optional bitdepth
        if mode:
            try:
                bitdepth = int(mode)
            except (TypeError, ValueError):
                raise Error()
    except Error:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode = gotmode
    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get('bitdepth') and bitdepth != info['bitdepth']:
            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
                        (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth
    # Fill in and/or check entries in *info*.
    # Dimensions.
    if 'size' in info:
        # Check width, height, size all match where used.
        for dimension, axis in [('width', 0), ('height', 1)]:
            if dimension in info:
                if info[dimension] != info['size'][axis]:
                    raise Error(
                        "info[%r] should match info['size'][%r]." %
                        (dimension, axis))
        info['width'], info['height'] = info['size']
    if 'height' not in info:
        try:
            l = len(a)
        except TypeError:
            raise Error(
                "len(a) does not work, supply info['height'] instead.")
        info['height'] = l
    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode
    if 'alpha' in info:
        if bool(info['alpha']) != ('A' in mode):
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = 'A' in mode
    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")
    # In order to work out whether we the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a, t = itertools.tee(a)
    # Fixed: was `t.next()`, which is Python 2-only; the builtin next()
    # works on both Python 2.6+ and Python 3.
    row = next(t)
    del t
    try:
        row[0][0]
        threed = True
        testelement = row[0]
    except (IndexError, TypeError):
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            width = len(row) // planes
        info['width'] = width
    # Not implemented yet
    assert not threed
    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause. Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth
    for thing in 'width height bitdepth greyscale alpha'.split():
        assert thing in info
    # The enclosing file returns Image(a, info) on the line immediately
    # following this block.
return Image(a, info) | Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
.. note :
The use of the term *3-dimensional* is for marketing purposes
only. It doesn't actually work. Please bear with us. Meanwhile
enjoy the complimentary snacks (on request) and please use a
2-dimensional array.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3==24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:``png.Writer`` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``. | Below is the instruction that describes the task:
### Input:
Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
.. note :
The use of the term *3-dimensional* is for marketing purposes
only. It doesn't actually work. Please bear with us. Meanwhile
enjoy the complimentary snacks (on request) and please use a
2-dimensional array.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3==24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:``png.Writer`` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``.
### Response:
def from_array(a, mode=None, info={}):
"""Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
.. note :
The use of the term *3-dimensional* is for marketing purposes
only. It doesn't actually work. Please bear with us. Meanwhile
enjoy the complimentary snacks (on request) and please use a
2-dimensional array.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3==24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:``png.Writer`` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
bitdepth = None
try:
# Assign the 'L' or 'RGBA' part to `gotmode`.
if mode.startswith('L'):
gotmode = 'L'
mode = mode[1:]
elif mode.startswith('RGB'):
gotmode = 'RGB'
mode = mode[3:]
else:
raise Error()
if mode.startswith('A'):
gotmode += 'A'
mode = mode[1:]
# Skip any optional ';'
while mode.startswith(';'):
mode = mode[1:]
# Parse optional bitdepth
if mode:
try:
bitdepth = int(mode)
except (TypeError, ValueError):
raise Error()
except Error:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode = gotmode
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get('bitdepth') and bitdepth != info['bitdepth']:
raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
(bitdepth, info['bitdepth']))
info['bitdepth'] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
if 'size' in info:
# Check width, height, size all match where used.
for dimension,axis in [('width', 0), ('height', 1)]:
if dimension in info:
if info[dimension] != info['size'][axis]:
raise Error(
"info[%r] should match info['size'][%r]." %
(dimension, axis))
info['width'],info['height'] = info['size']
if 'height' not in info:
try:
l = len(a)
except TypeError:
raise Error(
"len(a) does not work, supply info['height'] instead.")
info['height'] = l
# Colour format.
if 'greyscale' in info:
if bool(info['greyscale']) != ('L' in mode):
raise Error("info['greyscale'] should match mode.")
info['greyscale'] = 'L' in mode
if 'alpha' in info:
if bool(info['alpha']) != ('A' in mode):
raise Error("info['alpha'] should match mode.")
info['alpha'] = 'A' in mode
planes = len(mode)
if 'planes' in info:
if info['planes'] != planes:
raise Error("info['planes'] should match mode.")
# In order to work out whether we the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a,t = itertools.tee(a)
row = t.next()
del t
try:
row[0][0]
threed = True
testelement = row[0]
except (IndexError, TypeError):
threed = False
testelement = row
if 'width' not in info:
if threed:
width = len(row)
else:
width = len(row) // planes
info['width'] = width
# Not implemented yet
assert not threed
if 'bitdepth' not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's
# datatype, use a default of 8.
bitdepth = 8
else:
# If we got here without exception, we now assume that
# the array is a numpy array.
if dtype.kind == 'b':
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info['bitdepth'] = bitdepth
for thing in 'width height bitdepth greyscale alpha'.split():
assert thing in info
return Image(a, info) |
def deserialize_current_record_to_durable_model(record, current_model, durable_model):
"""
Utility function that will take a dynamo event record and turn it into the proper pynamo object.
This will properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:param durable_model:
:return:
"""
# Was the item in question too big for SNS? If so, then we need to fetch the item from the current Dynamo table:
if record.get(EVENT_TOO_BIG_FLAG):
record = get_full_current_object(record['dynamodb']['Keys']['arn']['S'], current_model)
if not record:
return None
serialized = record._serialize() # pylint: disable=W0212
record = {
'dynamodb': {
'NewImage': serialized['attributes']
}
}
# The ARN isn't added because it's in the HASH key section:
record['dynamodb']['NewImage']['arn'] = {'S': serialized['HASH']}
new_image = remove_current_specific_fields(record['dynamodb']['NewImage'])
data = {}
for item, value in new_image.items():
# This could end up as loss of precision
data[item] = DESER.deserialize(value)
return durable_model(**data) | Utility function that will take a dynamo event record and turn it into the proper pynamo object.
This will properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:param durable_model:
:return: | Below is the the instruction that describes the task:
### Input:
Utility function that will take a dynamo event record and turn it into the proper pynamo object.
This will properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:param durable_model:
:return:
### Response:
def deserialize_current_record_to_durable_model(record, current_model, durable_model):
"""
Utility function that will take a dynamo event record and turn it into the proper pynamo object.
This will properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:param durable_model:
:return:
"""
# Was the item in question too big for SNS? If so, then we need to fetch the item from the current Dynamo table:
if record.get(EVENT_TOO_BIG_FLAG):
record = get_full_current_object(record['dynamodb']['Keys']['arn']['S'], current_model)
if not record:
return None
serialized = record._serialize() # pylint: disable=W0212
record = {
'dynamodb': {
'NewImage': serialized['attributes']
}
}
# The ARN isn't added because it's in the HASH key section:
record['dynamodb']['NewImage']['arn'] = {'S': serialized['HASH']}
new_image = remove_current_specific_fields(record['dynamodb']['NewImage'])
data = {}
for item, value in new_image.items():
# This could end up as loss of precision
data[item] = DESER.deserialize(value)
return durable_model(**data) |
def power_chisq(template, data, num_bins, psd,
low_frequency_cutoff=None,
high_frequency_cutoff=None,
return_bins=False):
"""Calculate the chisq timeseries
Parameters
----------
template: FrequencySeries or TimeSeries
A time or frequency series that contains the filter template.
data: FrequencySeries or TimeSeries
A time or frequency series that contains the data to filter. The length
must be commensurate with the template.
(EXPLAINME - does this mean 'the same as' or something else?)
num_bins: int
The number of bins in the chisq. Note that the dof goes as 2*num_bins-2.
psd: FrequencySeries
The psd of the data.
low_frequency_cutoff: {None, float}, optional
The low frequency cutoff for the filter
high_frequency_cutoff: {None, float}, optional
The high frequency cutoff for the filter
return_bins: {boolean, False}, optional
Return a list of the individual chisq bins
Returns
-------
chisq: TimeSeries
TimeSeries containing the chisq values for all times.
"""
htilde = make_frequency_series(template)
stilde = make_frequency_series(data)
bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff,
high_frequency_cutoff)
corra = zeros((len(htilde)-1)*2, dtype=htilde.dtype)
total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd,
low_frequency_cutoff, high_frequency_cutoff,
corr_out=corra)
return power_chisq_from_precomputed(corr, total_snr, tnorm, bins, return_bins=return_bins) | Calculate the chisq timeseries
Parameters
----------
template: FrequencySeries or TimeSeries
A time or frequency series that contains the filter template.
data: FrequencySeries or TimeSeries
A time or frequency series that contains the data to filter. The length
must be commensurate with the template.
(EXPLAINME - does this mean 'the same as' or something else?)
num_bins: int
The number of bins in the chisq. Note that the dof goes as 2*num_bins-2.
psd: FrequencySeries
The psd of the data.
low_frequency_cutoff: {None, float}, optional
The low frequency cutoff for the filter
high_frequency_cutoff: {None, float}, optional
The high frequency cutoff for the filter
return_bins: {boolean, False}, optional
Return a list of the individual chisq bins
Returns
-------
chisq: TimeSeries
TimeSeries containing the chisq values for all times. | Below is the the instruction that describes the task:
### Input:
Calculate the chisq timeseries
Parameters
----------
template: FrequencySeries or TimeSeries
A time or frequency series that contains the filter template.
data: FrequencySeries or TimeSeries
A time or frequency series that contains the data to filter. The length
must be commensurate with the template.
(EXPLAINME - does this mean 'the same as' or something else?)
num_bins: int
The number of bins in the chisq. Note that the dof goes as 2*num_bins-2.
psd: FrequencySeries
The psd of the data.
low_frequency_cutoff: {None, float}, optional
The low frequency cutoff for the filter
high_frequency_cutoff: {None, float}, optional
The high frequency cutoff for the filter
return_bins: {boolean, False}, optional
Return a list of the individual chisq bins
Returns
-------
chisq: TimeSeries
TimeSeries containing the chisq values for all times.
### Response:
def power_chisq(template, data, num_bins, psd,
low_frequency_cutoff=None,
high_frequency_cutoff=None,
return_bins=False):
"""Calculate the chisq timeseries
Parameters
----------
template: FrequencySeries or TimeSeries
A time or frequency series that contains the filter template.
data: FrequencySeries or TimeSeries
A time or frequency series that contains the data to filter. The length
must be commensurate with the template.
(EXPLAINME - does this mean 'the same as' or something else?)
num_bins: int
The number of bins in the chisq. Note that the dof goes as 2*num_bins-2.
psd: FrequencySeries
The psd of the data.
low_frequency_cutoff: {None, float}, optional
The low frequency cutoff for the filter
high_frequency_cutoff: {None, float}, optional
The high frequency cutoff for the filter
return_bins: {boolean, False}, optional
Return a list of the individual chisq bins
Returns
-------
chisq: TimeSeries
TimeSeries containing the chisq values for all times.
"""
htilde = make_frequency_series(template)
stilde = make_frequency_series(data)
bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff,
high_frequency_cutoff)
corra = zeros((len(htilde)-1)*2, dtype=htilde.dtype)
total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd,
low_frequency_cutoff, high_frequency_cutoff,
corr_out=corra)
return power_chisq_from_precomputed(corr, total_snr, tnorm, bins, return_bins=return_bins) |
def allocate_resource_id(self):
"""id = d.allocate_resource_id()
Allocate a new X resource id number ID.
Raises ResourceIDError if there are no free resource ids.
"""
self.resource_id_lock.acquire()
try:
i = self.last_resource_id
while i in self.resource_ids:
i = i + 1
if i > self.info.resource_id_mask:
i = 0
if i == self.last_resource_id:
raise error.ResourceIDError('out of resource ids')
self.resource_ids[i] = None
self.last_resource_id = i
return self.info.resource_id_base | i
finally:
self.resource_id_lock.release() | id = d.allocate_resource_id()
Allocate a new X resource id number ID.
Raises ResourceIDError if there are no free resource ids. | Below is the the instruction that describes the task:
### Input:
id = d.allocate_resource_id()
Allocate a new X resource id number ID.
Raises ResourceIDError if there are no free resource ids.
### Response:
def allocate_resource_id(self):
"""id = d.allocate_resource_id()
Allocate a new X resource id number ID.
Raises ResourceIDError if there are no free resource ids.
"""
self.resource_id_lock.acquire()
try:
i = self.last_resource_id
while i in self.resource_ids:
i = i + 1
if i > self.info.resource_id_mask:
i = 0
if i == self.last_resource_id:
raise error.ResourceIDError('out of resource ids')
self.resource_ids[i] = None
self.last_resource_id = i
return self.info.resource_id_base | i
finally:
self.resource_id_lock.release() |
def get_last_row(dbconn, tablename, n=1, uuid=None):
"""
Returns the last `n` rows in the table
"""
return fetch(dbconn, tablename, n, uuid, end=True) | Returns the last `n` rows in the table | Below is the the instruction that describes the task:
### Input:
Returns the last `n` rows in the table
### Response:
def get_last_row(dbconn, tablename, n=1, uuid=None):
"""
Returns the last `n` rows in the table
"""
return fetch(dbconn, tablename, n, uuid, end=True) |
def pkcs_mgf1(mgfSeed, maskLen, h):
"""
Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
valid values are 'md2', 'md4', 'md5', 'sha1', 'tls, 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen
"""
# steps are those of Appendix B.2.1
if not h in _hashFuncParams:
warning("pkcs_mgf1: invalid hash (%s) provided")
return None
hLen = _hashFuncParams[h][0]
hFunc = _hashFuncParams[h][1]
if maskLen > 2**32 * hLen: # 1)
warning("pkcs_mgf1: maskLen > 2**32 * hLen")
return None
T = "" # 2)
maxCounter = math.ceil(float(maskLen) / float(hLen)) # 3)
counter = 0
while counter < maxCounter:
C = pkcs_i2osp(counter, 4)
T += hFunc(mgfSeed + C)
counter += 1
return T[:maskLen] | Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
valid values are 'md2', 'md4', 'md5', 'sha1', 'tls, 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen | Below is the the instruction that describes the task:
### Input:
Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
valid values are 'md2', 'md4', 'md5', 'sha1', 'tls, 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen
### Response:
def pkcs_mgf1(mgfSeed, maskLen, h):
"""
Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
valid values are 'md2', 'md4', 'md5', 'sha1', 'tls, 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen
"""
# steps are those of Appendix B.2.1
if not h in _hashFuncParams:
warning("pkcs_mgf1: invalid hash (%s) provided")
return None
hLen = _hashFuncParams[h][0]
hFunc = _hashFuncParams[h][1]
if maskLen > 2**32 * hLen: # 1)
warning("pkcs_mgf1: maskLen > 2**32 * hLen")
return None
T = "" # 2)
maxCounter = math.ceil(float(maskLen) / float(hLen)) # 3)
counter = 0
while counter < maxCounter:
C = pkcs_i2osp(counter, 4)
T += hFunc(mgfSeed + C)
counter += 1
return T[:maskLen] |
def getPointsForInterpolation(self,EndOfPrdvP,aLvlNow):
'''
Finds endogenous interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
'''
cLvlNow = self.uPinv(EndOfPrdvP)
mLvlNow = cLvlNow + aLvlNow
# Limiting consumption is zero as m approaches mNrmMin
c_for_interpolation = np.concatenate((np.zeros((self.pLvlGrid.size,1)),cLvlNow),axis=-1)
m_for_interpolation = np.concatenate((self.BoroCnstNat(np.reshape(self.pLvlGrid,(self.pLvlGrid.size,1))),mLvlNow),axis=-1)
# Limiting consumption is MPCmin*mLvl as p approaches 0
m_temp = np.reshape(m_for_interpolation[0,:],(1,m_for_interpolation.shape[1]))
m_for_interpolation = np.concatenate((m_temp,m_for_interpolation),axis=0)
c_for_interpolation = np.concatenate((self.MPCminNow*m_temp,c_for_interpolation),axis=0)
return c_for_interpolation, m_for_interpolation | Finds endogenous interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation. | Below is the the instruction that describes the task:
### Input:
Finds endogenous interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
### Response:
def getPointsForInterpolation(self,EndOfPrdvP,aLvlNow):
'''
Finds endogenous interpolation points (c,m) for the consumption function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
c_for_interpolation : np.array
Consumption points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
'''
cLvlNow = self.uPinv(EndOfPrdvP)
mLvlNow = cLvlNow + aLvlNow
# Limiting consumption is zero as m approaches mNrmMin
c_for_interpolation = np.concatenate((np.zeros((self.pLvlGrid.size,1)),cLvlNow),axis=-1)
m_for_interpolation = np.concatenate((self.BoroCnstNat(np.reshape(self.pLvlGrid,(self.pLvlGrid.size,1))),mLvlNow),axis=-1)
# Limiting consumption is MPCmin*mLvl as p approaches 0
m_temp = np.reshape(m_for_interpolation[0,:],(1,m_for_interpolation.shape[1]))
m_for_interpolation = np.concatenate((m_temp,m_for_interpolation),axis=0)
c_for_interpolation = np.concatenate((self.MPCminNow*m_temp,c_for_interpolation),axis=0)
return c_for_interpolation, m_for_interpolation |
def qhalf(options, halfspaces, interior_point):
"""
Similar to qvoronoi command in command-line qhull.
Args:
option:
An options string. Up to two options separated by spaces
are supported. See Qhull's qhalf help for info. Typically
used options are:
Fp
halfspaces:
List of Halfspaces as input.
interior_point:
An interior point (see qhalf documentation)
Returns:
Output as a list of strings.
E.g., ['3', '4', ' 1 1 0 ', ' 1 -1 2 ',
' -1 1 2 ', ' 1 1 2 ']
"""
points = [list(h.normal) + [h.offset] for h in halfspaces]
data = [[len(interior_point), 1]]
data.append(map(repr, interior_point))
data.append([len(points[0])])
data.append([len(points)])
data.extend([map(repr, row) for row in points])
prep_str = [" ".join(map(str, line)) for line in data]
output = getattr(hull, "qhalf")(options, "\n".join(prep_str))
return list(map(str.strip, output.strip().split("\n"))) | Similar to qvoronoi command in command-line qhull.
Args:
option:
An options string. Up to two options separated by spaces
are supported. See Qhull's qhalf help for info. Typically
used options are:
Fp
halfspaces:
List of Halfspaces as input.
interior_point:
An interior point (see qhalf documentation)
Returns:
Output as a list of strings.
E.g., ['3', '4', ' 1 1 0 ', ' 1 -1 2 ',
' -1 1 2 ', ' 1 1 2 '] | Below is the the instruction that describes the task:
### Input:
Similar to qvoronoi command in command-line qhull.
Args:
option:
An options string. Up to two options separated by spaces
are supported. See Qhull's qhalf help for info. Typically
used options are:
Fp
halfspaces:
List of Halfspaces as input.
interior_point:
An interior point (see qhalf documentation)
Returns:
Output as a list of strings.
E.g., ['3', '4', ' 1 1 0 ', ' 1 -1 2 ',
' -1 1 2 ', ' 1 1 2 ']
### Response:
def qhalf(options, halfspaces, interior_point):
"""
Similar to qvoronoi command in command-line qhull.
Args:
option:
An options string. Up to two options separated by spaces
are supported. See Qhull's qhalf help for info. Typically
used options are:
Fp
halfspaces:
List of Halfspaces as input.
interior_point:
An interior point (see qhalf documentation)
Returns:
Output as a list of strings.
E.g., ['3', '4', ' 1 1 0 ', ' 1 -1 2 ',
' -1 1 2 ', ' 1 1 2 ']
"""
points = [list(h.normal) + [h.offset] for h in halfspaces]
data = [[len(interior_point), 1]]
data.append(map(repr, interior_point))
data.append([len(points[0])])
data.append([len(points)])
data.extend([map(repr, row) for row in points])
prep_str = [" ".join(map(str, line)) for line in data]
output = getattr(hull, "qhalf")(options, "\n".join(prep_str))
return list(map(str.strip, output.strip().split("\n"))) |
def _normalize_hparams(hparams):
"""Normalize a dict keyed by `HParam`s and/or raw strings.
Args:
hparams: A `dict` whose keys are `HParam` objects and/or strings
representing hyperparameter names, and whose values are
hyperparameter values. No two keys may have the same name.
Returns:
A `dict` whose keys are hyperparameter names (as strings) and whose
values are the corresponding hyperparameter values.
Raises:
ValueError: If two entries in `hparams` share the same
hyperparameter name.
"""
result = {}
for (k, v) in six.iteritems(hparams):
if isinstance(k, HParam):
k = k.name
if k in result:
raise ValueError("multiple values specified for hparam %r" % (k,))
result[k] = v
return result | Normalize a dict keyed by `HParam`s and/or raw strings.
Args:
hparams: A `dict` whose keys are `HParam` objects and/or strings
representing hyperparameter names, and whose values are
hyperparameter values. No two keys may have the same name.
Returns:
A `dict` whose keys are hyperparameter names (as strings) and whose
values are the corresponding hyperparameter values.
Raises:
ValueError: If two entries in `hparams` share the same
hyperparameter name. | Below is the the instruction that describes the task:
### Input:
Normalize a dict keyed by `HParam`s and/or raw strings.
Args:
hparams: A `dict` whose keys are `HParam` objects and/or strings
representing hyperparameter names, and whose values are
hyperparameter values. No two keys may have the same name.
Returns:
A `dict` whose keys are hyperparameter names (as strings) and whose
values are the corresponding hyperparameter values.
Raises:
ValueError: If two entries in `hparams` share the same
hyperparameter name.
### Response:
def _normalize_hparams(hparams):
"""Normalize a dict keyed by `HParam`s and/or raw strings.
Args:
hparams: A `dict` whose keys are `HParam` objects and/or strings
representing hyperparameter names, and whose values are
hyperparameter values. No two keys may have the same name.
Returns:
A `dict` whose keys are hyperparameter names (as strings) and whose
values are the corresponding hyperparameter values.
Raises:
ValueError: If two entries in `hparams` share the same
hyperparameter name.
"""
result = {}
for (k, v) in six.iteritems(hparams):
if isinstance(k, HParam):
k = k.name
if k in result:
raise ValueError("multiple values specified for hparam %r" % (k,))
result[k] = v
return result |
def copy(self):
"""Make a copy of current operator."""
# pylint: disable=no-value-for-parameter
# The constructor of subclasses from raw data should be a copy
return self.__class__(self.data, self.input_dims(), self.output_dims()) | Make a copy of current operator. | Below is the the instruction that describes the task:
### Input:
Make a copy of current operator.
### Response:
def copy(self):
"""Make a copy of current operator."""
# pylint: disable=no-value-for-parameter
# The constructor of subclasses from raw data should be a copy
return self.__class__(self.data, self.input_dims(), self.output_dims()) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.