code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def load_configs(files, cwd=os.getcwd()):
"""Return repos from a list of files.
:todo: Validate scheme, check for duplciate destinations, VCS urls
:param files: paths to config file
:type files: list
:param cwd: current path (pass down for :func:`extract_repos`
:type cwd: str
:returns: expanded config dict item
:rtype: list of dict
"""
repos = []
for f in files:
_, ext = os.path.splitext(f)
conf = kaptan.Kaptan(handler=ext.lstrip('.')).import_config(f)
newrepos = extract_repos(conf.export('dict'), cwd)
if not repos:
repos.extend(newrepos)
continue
dupes = detect_duplicate_repos(repos, newrepos)
if dupes:
msg = ('repos with same path + different VCS detected!', dupes)
raise exc.VCSPullException(msg)
repos.extend(newrepos)
return repos | Return repos from a list of files.
:todo: Validate scheme, check for duplciate destinations, VCS urls
:param files: paths to config file
:type files: list
:param cwd: current path (pass down for :func:`extract_repos`
:type cwd: str
:returns: expanded config dict item
:rtype: list of dict | Below is the the instruction that describes the task:
### Input:
Return repos from a list of files.
:todo: Validate scheme, check for duplciate destinations, VCS urls
:param files: paths to config file
:type files: list
:param cwd: current path (pass down for :func:`extract_repos`
:type cwd: str
:returns: expanded config dict item
:rtype: list of dict
### Response:
def load_configs(files, cwd=os.getcwd()):
"""Return repos from a list of files.
:todo: Validate scheme, check for duplciate destinations, VCS urls
:param files: paths to config file
:type files: list
:param cwd: current path (pass down for :func:`extract_repos`
:type cwd: str
:returns: expanded config dict item
:rtype: list of dict
"""
repos = []
for f in files:
_, ext = os.path.splitext(f)
conf = kaptan.Kaptan(handler=ext.lstrip('.')).import_config(f)
newrepos = extract_repos(conf.export('dict'), cwd)
if not repos:
repos.extend(newrepos)
continue
dupes = detect_duplicate_repos(repos, newrepos)
if dupes:
msg = ('repos with same path + different VCS detected!', dupes)
raise exc.VCSPullException(msg)
repos.extend(newrepos)
return repos |
def _restriction_string(self):
"""Get a string explaining the expectation currently set
e.g `at least 5 times`, `at most 1 time`, or `2 times`
:rtype string
"""
if self.has_minimum:
string = 'at least '
value = self._minimum
elif self.has_maximum:
string = 'at most '
value = self._maximum
elif self.has_exact:
string = ''
value = self._exact
return (string + '{} {}').format(
value,
pluralize('time', value)
) | Get a string explaining the expectation currently set
e.g `at least 5 times`, `at most 1 time`, or `2 times`
:rtype string | Below is the the instruction that describes the task:
### Input:
Get a string explaining the expectation currently set
e.g `at least 5 times`, `at most 1 time`, or `2 times`
:rtype string
### Response:
def _restriction_string(self):
"""Get a string explaining the expectation currently set
e.g `at least 5 times`, `at most 1 time`, or `2 times`
:rtype string
"""
if self.has_minimum:
string = 'at least '
value = self._minimum
elif self.has_maximum:
string = 'at most '
value = self._maximum
elif self.has_exact:
string = ''
value = self._exact
return (string + '{} {}').format(
value,
pluralize('time', value)
) |
def stream_uploadfactory(content_md5=None, content_length=None,
content_type=None):
"""Get default put factory.
If Content-Type is ``'multipart/form-data'`` then the stream is aborted.
:param content_md5: The content MD5. (Default: ``None``)
:param content_length: The content length. (Default: ``None``)
:param content_type: The HTTP Content-Type. (Default: ``None``)
:returns: The stream, content length, MD5 of the content.
"""
if content_type.startswith('multipart/form-data'):
abort(422)
return request.stream, content_length, content_md5, parse_header_tags() | Get default put factory.
If Content-Type is ``'multipart/form-data'`` then the stream is aborted.
:param content_md5: The content MD5. (Default: ``None``)
:param content_length: The content length. (Default: ``None``)
:param content_type: The HTTP Content-Type. (Default: ``None``)
:returns: The stream, content length, MD5 of the content. | Below is the the instruction that describes the task:
### Input:
Get default put factory.
If Content-Type is ``'multipart/form-data'`` then the stream is aborted.
:param content_md5: The content MD5. (Default: ``None``)
:param content_length: The content length. (Default: ``None``)
:param content_type: The HTTP Content-Type. (Default: ``None``)
:returns: The stream, content length, MD5 of the content.
### Response:
def stream_uploadfactory(content_md5=None, content_length=None,
content_type=None):
"""Get default put factory.
If Content-Type is ``'multipart/form-data'`` then the stream is aborted.
:param content_md5: The content MD5. (Default: ``None``)
:param content_length: The content length. (Default: ``None``)
:param content_type: The HTTP Content-Type. (Default: ``None``)
:returns: The stream, content length, MD5 of the content.
"""
if content_type.startswith('multipart/form-data'):
abort(422)
return request.stream, content_length, content_md5, parse_header_tags() |
def partial_fit(self, X, y, classes=None):
"""
Runs a single epoch using the provided data
:return: This instance
"""
return self.fit(X, y, epochs=1) | Runs a single epoch using the provided data
:return: This instance | Below is the the instruction that describes the task:
### Input:
Runs a single epoch using the provided data
:return: This instance
### Response:
def partial_fit(self, X, y, classes=None):
"""
Runs a single epoch using the provided data
:return: This instance
"""
return self.fit(X, y, epochs=1) |
def download_file(url, destpath, filename=None, baseurl=None, subpath=None, middleware_callbacks=None, middleware_kwargs=None, request_fn=sess.get):
"""
Download a file from a URL, into a destination folder, with optional use of relative paths and middleware processors.
- If `filename` is set, that will be used as the name of the file when it's written to the destpath.
- If `baseurl` is specified, the file will be put into subdirectory of destpath per the url's path relative to the baseurl.
- If `subpath` is specified, it will be appended to destpath before deciding where to write the file.
- If `middleware_callbacks` is specified, the returned content will be passed through those function(s) before being returned.
- If `middleware_kwargs` are also specified, they will also be passed in to each function in middleware_callbacks.
"""
relative_file_url, subpath, filename = calculate_relative_url(url, filename=filename, baseurl=baseurl, subpath=subpath)
# ensure that the destination directory exists
fulldestpath = os.path.join(destpath, *subpath)
os.makedirs(fulldestpath, exist_ok=True)
# make the actual request to the URL
response = request_fn(url)
content = response.content
# if there are any middleware callbacks, apply them to the content
if middleware_callbacks:
content = content.decode()
if not isinstance(middleware_callbacks, list):
middleware_callbacks = [middleware_callbacks]
kwargs = {
"url": url,
"destpath": destpath,
"filename": filename,
"baseurl": baseurl,
"subpath": subpath,
"fulldestpath": fulldestpath,
"response": response,
}
kwargs.update(middleware_kwargs or {})
for callback in middleware_callbacks:
content = callback(content, **kwargs)
# ensure content is encoded, as we're doing a binary write
if isinstance(content, str):
content = content.encode()
# calculate the final destination for the file, and write the content out to there
dest = os.path.join(fulldestpath, filename)
with open(dest, "wb") as f:
f.write(content)
return relative_file_url, response | Download a file from a URL, into a destination folder, with optional use of relative paths and middleware processors.
- If `filename` is set, that will be used as the name of the file when it's written to the destpath.
- If `baseurl` is specified, the file will be put into subdirectory of destpath per the url's path relative to the baseurl.
- If `subpath` is specified, it will be appended to destpath before deciding where to write the file.
- If `middleware_callbacks` is specified, the returned content will be passed through those function(s) before being returned.
- If `middleware_kwargs` are also specified, they will also be passed in to each function in middleware_callbacks. | Below is the the instruction that describes the task:
### Input:
Download a file from a URL, into a destination folder, with optional use of relative paths and middleware processors.
- If `filename` is set, that will be used as the name of the file when it's written to the destpath.
- If `baseurl` is specified, the file will be put into subdirectory of destpath per the url's path relative to the baseurl.
- If `subpath` is specified, it will be appended to destpath before deciding where to write the file.
- If `middleware_callbacks` is specified, the returned content will be passed through those function(s) before being returned.
- If `middleware_kwargs` are also specified, they will also be passed in to each function in middleware_callbacks.
### Response:
def download_file(url, destpath, filename=None, baseurl=None, subpath=None, middleware_callbacks=None, middleware_kwargs=None, request_fn=sess.get):
"""
Download a file from a URL, into a destination folder, with optional use of relative paths and middleware processors.
- If `filename` is set, that will be used as the name of the file when it's written to the destpath.
- If `baseurl` is specified, the file will be put into subdirectory of destpath per the url's path relative to the baseurl.
- If `subpath` is specified, it will be appended to destpath before deciding where to write the file.
- If `middleware_callbacks` is specified, the returned content will be passed through those function(s) before being returned.
- If `middleware_kwargs` are also specified, they will also be passed in to each function in middleware_callbacks.
"""
relative_file_url, subpath, filename = calculate_relative_url(url, filename=filename, baseurl=baseurl, subpath=subpath)
# ensure that the destination directory exists
fulldestpath = os.path.join(destpath, *subpath)
os.makedirs(fulldestpath, exist_ok=True)
# make the actual request to the URL
response = request_fn(url)
content = response.content
# if there are any middleware callbacks, apply them to the content
if middleware_callbacks:
content = content.decode()
if not isinstance(middleware_callbacks, list):
middleware_callbacks = [middleware_callbacks]
kwargs = {
"url": url,
"destpath": destpath,
"filename": filename,
"baseurl": baseurl,
"subpath": subpath,
"fulldestpath": fulldestpath,
"response": response,
}
kwargs.update(middleware_kwargs or {})
for callback in middleware_callbacks:
content = callback(content, **kwargs)
# ensure content is encoded, as we're doing a binary write
if isinstance(content, str):
content = content.encode()
# calculate the final destination for the file, and write the content out to there
dest = os.path.join(fulldestpath, filename)
with open(dest, "wb") as f:
f.write(content)
return relative_file_url, response |
def Stop(self):
"""Signals the worker threads to shut down and waits until it exits."""
self._shutdown = True
self._new_updates.set() # Wake up the transmission thread.
if self._main_thread is not None:
self._main_thread.join()
self._main_thread = None
if self._transmission_thread is not None:
self._transmission_thread.join()
self._transmission_thread = None | Signals the worker threads to shut down and waits until it exits. | Below is the the instruction that describes the task:
### Input:
Signals the worker threads to shut down and waits until it exits.
### Response:
def Stop(self):
"""Signals the worker threads to shut down and waits until it exits."""
self._shutdown = True
self._new_updates.set() # Wake up the transmission thread.
if self._main_thread is not None:
self._main_thread.join()
self._main_thread = None
if self._transmission_thread is not None:
self._transmission_thread.join()
self._transmission_thread = None |
def filtered(self, efilter):
'''
Applies a filter to the search
'''
if not self.params:
self.params={'filter' : efilter}
return self
if not self.params.has_key('filter'):
self.params['filter'] = efilter
return self
self.params['filter'].update(efilter)
return self | Applies a filter to the search | Below is the the instruction that describes the task:
### Input:
Applies a filter to the search
### Response:
def filtered(self, efilter):
'''
Applies a filter to the search
'''
if not self.params:
self.params={'filter' : efilter}
return self
if not self.params.has_key('filter'):
self.params['filter'] = efilter
return self
self.params['filter'].update(efilter)
return self |
def main_init():
"""Parse command line arguments and start main script for analysis."""
parser = ArgumentParser(prog="PLIP", description=descript)
pdbstructure = parser.add_mutually_exclusive_group(required=True) # Needs either PDB ID or file
# '-' as file name reads from stdin
pdbstructure.add_argument("-f", "--file", dest="input", nargs="+", help="Set input file, '-' reads from stdin")
pdbstructure.add_argument("-i", "--input", dest="pdbid", nargs="+")
outputgroup = parser.add_mutually_exclusive_group(required=False) # Needs either outpath or stdout
outputgroup.add_argument("-o", "--out", dest="outpath", default="./")
outputgroup.add_argument("-O", "--stdout", dest="stdout", action="store_true", default=False, help="Write to stdout instead of file")
parser.add_argument("--rawstring", dest="use_raw_string", default=False, action="store_true", help="Use Python raw strings for stdout and stdin")
parser.add_argument("-v", "--verbose", dest="verbose", default=False, help="Set verbose mode", action="store_true")
parser.add_argument("-p", "--pics", dest="pics", default=False, help="Additional pictures", action="store_true")
parser.add_argument("-x", "--xml", dest="xml", default=False, help="Generate report file in XML format",
action="store_true")
parser.add_argument("-t", "--txt", dest="txt", default=False, help="Generate report file in TXT (RST) format",
action="store_true")
parser.add_argument("-y", "--pymol", dest="pymol", default=False, help="Additional PyMOL session files",
action="store_true")
parser.add_argument("--maxthreads", dest="maxthreads", default=multiprocessing.cpu_count(),
help="Set maximum number of main threads (number of binding sites processed simultaneously)."
"If not set, PLIP uses all available CPUs if possible.",
type=int)
parser.add_argument("--breakcomposite", dest="breakcomposite", default=False,
help="Don't combine ligand fragments with covalent bonds but treat them as single ligands for the analysis.",
action="store_true")
parser.add_argument("--altlocation", dest="altlocation", default=False,
help="Also consider alternate locations for atoms (e.g. alternate conformations).",
action="store_true")
parser.add_argument("--debug", dest="debug", default=False,
help="Turn on DEBUG mode with extended log.",
action="store_true")
parser.add_argument("--nofix", dest="nofix", default=False,
help="Turns off fixing of PDB files.",
action="store_true")
parser.add_argument("--nofixfile", dest="nofixfile", default=False,
help="Turns off writing files for fixed PDB files.",
action="store_true")
parser.add_argument("--nopdbcanmap", dest="nopdbcanmap", default=False,
help="Turns off calculation of mapping between canonical and PDB atom order for ligands.",
action="store_true")
parser.add_argument("--dnareceptor", dest="dnareceptor", default=False,
help="Uses the DNA instead of the protein as a receptor for interactions.",
action="store_true")
parser.add_argument("--name", dest="outputfilename", default="report",
help="Set a filename for the report TXT and XML files. Will only work when processing single structures.")
ligandtype = parser.add_mutually_exclusive_group() # Either peptide/inter or intra mode
ligandtype.add_argument("--peptides", "--inter", dest="peptides", default=[],
help="Allows to define one or multiple chains as peptide ligands or to detect inter-chain contacts",
nargs="+")
ligandtype.add_argument("--intra", dest="intra", help="Allows to define one chain to analyze intra-chain contacts.")
parser.add_argument("--keepmod", dest="keepmod", default=False,
help="Keep modified residues as ligands",
action="store_true")
# Optional threshold arguments, not shown in help
thr = namedtuple('threshold', 'name type')
thresholds = [thr(name='aromatic_planarity', type='angle'),
thr(name='hydroph_dist_max', type='distance'), thr(name='hbond_dist_max', type='distance'),
thr(name='hbond_don_angle_min', type='angle'), thr(name='pistack_dist_max', type='distance'),
thr(name='pistack_ang_dev', type='other'), thr(name='pistack_offset_max', type='distance'),
thr(name='pication_dist_max', type='distance'), thr(name='saltbridge_dist_max', type='distance'),
thr(name='halogen_dist_max', type='distance'), thr(name='halogen_acc_angle', type='angle'),
thr(name='halogen_don_angle', type='angle'), thr(name='halogen_angle_dev', type='other'),
thr(name='water_bridge_mindist', type='distance'), thr(name='water_bridge_maxdist', type='distance'),
thr(name='water_bridge_omega_min', type='angle'), thr(name='water_bridge_omega_max', type='angle'),
thr(name='water_bridge_theta_min', type='angle')]
for t in thresholds:
parser.add_argument('--%s' % t.name, dest=t.name, type=lambda val: threshold_limiter(parser, val),
help=argparse.SUPPRESS)
arguments = parser.parse_args()
config.VERBOSE = True if (arguments.verbose or arguments.debug) else False
config.DEBUG = True if arguments.debug else False
config.MAXTHREADS = arguments.maxthreads
config.XML = arguments.xml
config.TXT = arguments.txt
config.PICS = arguments.pics
config.PYMOL = arguments.pymol
config.STDOUT = arguments.stdout
config.RAWSTRING = arguments.use_raw_string
config.OUTPATH = arguments.outpath
config.OUTPATH = tilde_expansion("".join([config.OUTPATH, '/'])
if not config.OUTPATH.endswith('/') else config.OUTPATH)
config.BASEPATH = config.OUTPATH # Used for batch processing
config.BREAKCOMPOSITE = arguments.breakcomposite
config.ALTLOC = arguments.altlocation
config.PEPTIDES = arguments.peptides
config.INTRA = arguments.intra
config.NOFIX = arguments.nofix
config.NOFIXFILE = arguments.nofixfile
config.NOPDBCANMAP = arguments.nopdbcanmap
config.KEEPMOD = arguments.keepmod
config.DNARECEPTOR = arguments.dnareceptor
config.OUTPUTFILENAME = arguments.outputfilename
# Make sure we have pymol with --pics and --pymol
if config.PICS or config.PYMOL:
try:
import pymol
except ImportError:
write_message("PyMOL is required for --pics and --pymol.\n", mtype='error')
raise
# Assign values to global thresholds
for t in thresholds:
tvalue = getattr(arguments, t.name)
if tvalue is not None:
if t.type == 'angle' and not 0 < tvalue < 180: # Check value for angle thresholds
parser.error("Threshold for angles need to have values within 0 and 180.")
if t.type == 'distance':
if tvalue > 10: # Check value for angle thresholds
parser.error("Threshold for distances must not be larger than 10 Angstrom.")
elif tvalue > config.BS_DIST + 1: # Dynamically adapt the search space for binding site residues
config.BS_DIST = tvalue + 1
setattr(config, t.name.upper(), tvalue)
# Check additional conditions for interdependent thresholds
if not config.HALOGEN_ACC_ANGLE > config.HALOGEN_ANGLE_DEV:
parser.error("The halogen acceptor angle has to be larger than the halogen angle deviation.")
if not config.HALOGEN_DON_ANGLE > config.HALOGEN_ANGLE_DEV:
parser.error("The halogen donor angle has to be larger than the halogen angle deviation.")
if not config.WATER_BRIDGE_MINDIST < config.WATER_BRIDGE_MAXDIST:
parser.error("The water bridge minimum distance has to be smaller than the water bridge maximum distance.")
if not config.WATER_BRIDGE_OMEGA_MIN < config.WATER_BRIDGE_OMEGA_MAX:
parser.error("The water bridge omega minimum angle has to be smaller than the water bridge omega maximum angle")
expanded_path = tilde_expansion(arguments.input) if arguments.input is not None else None
main(expanded_path, arguments.pdbid) | Parse command line arguments and start main script for analysis. | Below is the the instruction that describes the task:
### Input:
Parse command line arguments and start main script for analysis.
### Response:
def main_init():
"""Parse command line arguments and start main script for analysis."""
parser = ArgumentParser(prog="PLIP", description=descript)
pdbstructure = parser.add_mutually_exclusive_group(required=True) # Needs either PDB ID or file
# '-' as file name reads from stdin
pdbstructure.add_argument("-f", "--file", dest="input", nargs="+", help="Set input file, '-' reads from stdin")
pdbstructure.add_argument("-i", "--input", dest="pdbid", nargs="+")
outputgroup = parser.add_mutually_exclusive_group(required=False) # Needs either outpath or stdout
outputgroup.add_argument("-o", "--out", dest="outpath", default="./")
outputgroup.add_argument("-O", "--stdout", dest="stdout", action="store_true", default=False, help="Write to stdout instead of file")
parser.add_argument("--rawstring", dest="use_raw_string", default=False, action="store_true", help="Use Python raw strings for stdout and stdin")
parser.add_argument("-v", "--verbose", dest="verbose", default=False, help="Set verbose mode", action="store_true")
parser.add_argument("-p", "--pics", dest="pics", default=False, help="Additional pictures", action="store_true")
parser.add_argument("-x", "--xml", dest="xml", default=False, help="Generate report file in XML format",
action="store_true")
parser.add_argument("-t", "--txt", dest="txt", default=False, help="Generate report file in TXT (RST) format",
action="store_true")
parser.add_argument("-y", "--pymol", dest="pymol", default=False, help="Additional PyMOL session files",
action="store_true")
parser.add_argument("--maxthreads", dest="maxthreads", default=multiprocessing.cpu_count(),
help="Set maximum number of main threads (number of binding sites processed simultaneously)."
"If not set, PLIP uses all available CPUs if possible.",
type=int)
parser.add_argument("--breakcomposite", dest="breakcomposite", default=False,
help="Don't combine ligand fragments with covalent bonds but treat them as single ligands for the analysis.",
action="store_true")
parser.add_argument("--altlocation", dest="altlocation", default=False,
help="Also consider alternate locations for atoms (e.g. alternate conformations).",
action="store_true")
parser.add_argument("--debug", dest="debug", default=False,
help="Turn on DEBUG mode with extended log.",
action="store_true")
parser.add_argument("--nofix", dest="nofix", default=False,
help="Turns off fixing of PDB files.",
action="store_true")
parser.add_argument("--nofixfile", dest="nofixfile", default=False,
help="Turns off writing files for fixed PDB files.",
action="store_true")
parser.add_argument("--nopdbcanmap", dest="nopdbcanmap", default=False,
help="Turns off calculation of mapping between canonical and PDB atom order for ligands.",
action="store_true")
parser.add_argument("--dnareceptor", dest="dnareceptor", default=False,
help="Uses the DNA instead of the protein as a receptor for interactions.",
action="store_true")
parser.add_argument("--name", dest="outputfilename", default="report",
help="Set a filename for the report TXT and XML files. Will only work when processing single structures.")
ligandtype = parser.add_mutually_exclusive_group() # Either peptide/inter or intra mode
ligandtype.add_argument("--peptides", "--inter", dest="peptides", default=[],
help="Allows to define one or multiple chains as peptide ligands or to detect inter-chain contacts",
nargs="+")
ligandtype.add_argument("--intra", dest="intra", help="Allows to define one chain to analyze intra-chain contacts.")
parser.add_argument("--keepmod", dest="keepmod", default=False,
help="Keep modified residues as ligands",
action="store_true")
# Optional threshold arguments, not shown in help
thr = namedtuple('threshold', 'name type')
thresholds = [thr(name='aromatic_planarity', type='angle'),
thr(name='hydroph_dist_max', type='distance'), thr(name='hbond_dist_max', type='distance'),
thr(name='hbond_don_angle_min', type='angle'), thr(name='pistack_dist_max', type='distance'),
thr(name='pistack_ang_dev', type='other'), thr(name='pistack_offset_max', type='distance'),
thr(name='pication_dist_max', type='distance'), thr(name='saltbridge_dist_max', type='distance'),
thr(name='halogen_dist_max', type='distance'), thr(name='halogen_acc_angle', type='angle'),
thr(name='halogen_don_angle', type='angle'), thr(name='halogen_angle_dev', type='other'),
thr(name='water_bridge_mindist', type='distance'), thr(name='water_bridge_maxdist', type='distance'),
thr(name='water_bridge_omega_min', type='angle'), thr(name='water_bridge_omega_max', type='angle'),
thr(name='water_bridge_theta_min', type='angle')]
for t in thresholds:
parser.add_argument('--%s' % t.name, dest=t.name, type=lambda val: threshold_limiter(parser, val),
help=argparse.SUPPRESS)
arguments = parser.parse_args()
config.VERBOSE = True if (arguments.verbose or arguments.debug) else False
config.DEBUG = True if arguments.debug else False
config.MAXTHREADS = arguments.maxthreads
config.XML = arguments.xml
config.TXT = arguments.txt
config.PICS = arguments.pics
config.PYMOL = arguments.pymol
config.STDOUT = arguments.stdout
config.RAWSTRING = arguments.use_raw_string
config.OUTPATH = arguments.outpath
config.OUTPATH = tilde_expansion("".join([config.OUTPATH, '/'])
if not config.OUTPATH.endswith('/') else config.OUTPATH)
config.BASEPATH = config.OUTPATH # Used for batch processing
config.BREAKCOMPOSITE = arguments.breakcomposite
config.ALTLOC = arguments.altlocation
config.PEPTIDES = arguments.peptides
config.INTRA = arguments.intra
config.NOFIX = arguments.nofix
config.NOFIXFILE = arguments.nofixfile
config.NOPDBCANMAP = arguments.nopdbcanmap
config.KEEPMOD = arguments.keepmod
config.DNARECEPTOR = arguments.dnareceptor
config.OUTPUTFILENAME = arguments.outputfilename
# Make sure we have pymol with --pics and --pymol
if config.PICS or config.PYMOL:
try:
import pymol
except ImportError:
write_message("PyMOL is required for --pics and --pymol.\n", mtype='error')
raise
# Assign values to global thresholds
for t in thresholds:
tvalue = getattr(arguments, t.name)
if tvalue is not None:
if t.type == 'angle' and not 0 < tvalue < 180: # Check value for angle thresholds
parser.error("Threshold for angles need to have values within 0 and 180.")
if t.type == 'distance':
if tvalue > 10: # Check value for angle thresholds
parser.error("Threshold for distances must not be larger than 10 Angstrom.")
elif tvalue > config.BS_DIST + 1: # Dynamically adapt the search space for binding site residues
config.BS_DIST = tvalue + 1
setattr(config, t.name.upper(), tvalue)
# Check additional conditions for interdependent thresholds
if not config.HALOGEN_ACC_ANGLE > config.HALOGEN_ANGLE_DEV:
parser.error("The halogen acceptor angle has to be larger than the halogen angle deviation.")
if not config.HALOGEN_DON_ANGLE > config.HALOGEN_ANGLE_DEV:
parser.error("The halogen donor angle has to be larger than the halogen angle deviation.")
if not config.WATER_BRIDGE_MINDIST < config.WATER_BRIDGE_MAXDIST:
parser.error("The water bridge minimum distance has to be smaller than the water bridge maximum distance.")
if not config.WATER_BRIDGE_OMEGA_MIN < config.WATER_BRIDGE_OMEGA_MAX:
parser.error("The water bridge omega minimum angle has to be smaller than the water bridge omega maximum angle")
expanded_path = tilde_expansion(arguments.input) if arguments.input is not None else None
main(expanded_path, arguments.pdbid) |
def stem(self, word):
"""
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
word = word.replace("\xDF", "ss")
# Every occurrence of 'u' and 'y'
# between vowels is put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i-1] in self.__vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) == 0:
return word
break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if (suffix in ("en", "es", "e") and
word[-len(suffix)-4:-len(suffix)] == "niss"):
word = word[:-len(suffix)-1]
r1 = r1[:-len(suffix)-1]
r2 = r2[:-len(suffix)-1]
elif suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "st":
if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 3: Derivational suffixes
for suffix in self.__step3_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ung"):
if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
"e" not in r2[-len(suffix)-3:-len(suffix)-2]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif (suffix in ("ig", "ik", "isch") and
"e" not in r2[-len(suffix)-1:-len(suffix)]):
word = word[:-len(suffix)]
elif suffix in ("lich", "heit"):
if ("er" in r1[-len(suffix)-2:-len(suffix)] or
"en" in r1[-len(suffix)-2:-len(suffix)]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif suffix == "keit":
if "lich" in r2[-len(suffix)-4:-len(suffix)]:
word = word[:-len(suffix)-4]
elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
break
# Umlaut accents are removed and
# 'u' and 'y' are put back into lower case.
word = (word.replace("\xE4", "a").replace("\xF6", "o")
.replace("\xFC", "u").replace("U", "u")
.replace("Y", "y"))
return word | Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode | Below is the the instruction that describes the task:
### Input:
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
### Response:
def stem(self, word):
"""
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
word = word.replace("\xDF", "ss")
# Every occurrence of 'u' and 'y'
# between vowels is put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i-1] in self.__vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) == 0:
return word
break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if (suffix in ("en", "es", "e") and
word[-len(suffix)-4:-len(suffix)] == "niss"):
word = word[:-len(suffix)-1]
r1 = r1[:-len(suffix)-1]
r2 = r2[:-len(suffix)-1]
elif suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "st":
if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 3: Derivational suffixes
for suffix in self.__step3_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ung"):
if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
"e" not in r2[-len(suffix)-3:-len(suffix)-2]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif (suffix in ("ig", "ik", "isch") and
"e" not in r2[-len(suffix)-1:-len(suffix)]):
word = word[:-len(suffix)]
elif suffix in ("lich", "heit"):
if ("er" in r1[-len(suffix)-2:-len(suffix)] or
"en" in r1[-len(suffix)-2:-len(suffix)]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif suffix == "keit":
if "lich" in r2[-len(suffix)-4:-len(suffix)]:
word = word[:-len(suffix)-4]
elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
break
# Umlaut accents are removed and
# 'u' and 'y' are put back into lower case.
word = (word.replace("\xE4", "a").replace("\xF6", "o")
.replace("\xFC", "u").replace("U", "u")
.replace("Y", "y"))
return word |
def check_nonce(self, request, oauth_request):
"""
Checks nonce of request, and return True if valid.
"""
oauth_nonce = oauth_request['oauth_nonce']
oauth_timestamp = oauth_request['oauth_timestamp']
return check_nonce(request, oauth_request, oauth_nonce, oauth_timestamp) | Checks nonce of request, and return True if valid. | Below is the instruction that describes the task:
### Input:
Checks nonce of request, and return True if valid.
### Response:
def check_nonce(self, request, oauth_request):
    """
    Check the nonce of the request; return True if it is valid.
    """
    # Delegate to the module-level check_nonce helper, passing the nonce
    # and timestamp extracted from the signed OAuth parameters.
    oauth_nonce = oauth_request['oauth_nonce']
    oauth_timestamp = oauth_request['oauth_timestamp']
    return check_nonce(request, oauth_request, oauth_nonce, oauth_timestamp)
def __get_neighbors(self, cell):
"""!
@brief Returns neighbors for specified CLIQUE block as clique_block objects.
@return (list) Neighbors as clique_block objects.
"""
neighbors = []
location_neighbors = cell.get_location_neighbors(self.__amount_intervals)
for i in range(len(location_neighbors)):
key = self.__location_to_key(location_neighbors[i])
candidate_neighbor = self.__cell_map[key]
if not candidate_neighbor.visited:
candidate_neighbor.visited = True
neighbors.append(candidate_neighbor)
return neighbors | !
@brief Returns neighbors for specified CLIQUE block as clique_block objects.
@return (list) Neighbors as clique_block objects. | Below is the instruction that describes the task:
### Input:
!
@brief Returns neighbors for specified CLIQUE block as clique_block objects.
@return (list) Neighbors as clique_block objects.
### Response:
def __get_neighbors(self, cell):
    """!
    @brief Returns neighbors for specified CLIQUE block as clique_block objects.

    @details Each not-yet-visited neighbor is marked as visited before it
             is returned (side effect relied on by the block expansion).

    @return (list) Neighbors as clique_block objects.
    """
    neighbors = []
    # Iterate directly over the neighbor locations instead of indexing.
    for location in cell.get_location_neighbors(self.__amount_intervals):
        key = self.__location_to_key(location)
        candidate_neighbor = self.__cell_map[key]
        if not candidate_neighbor.visited:
            candidate_neighbor.visited = True
            neighbors.append(candidate_neighbor)
    return neighbors
def reorient_image2(image, orientation='RAS'):
"""
Reorient an image.
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni2 = mni.reorient_image2()
"""
if image.dimension != 3:
raise ValueError('image must have 3 dimensions')
inpixeltype = image.pixeltype
ndim = image.dimension
if image.pixeltype != 'float':
image = image.clone('float')
libfn = utils.get_lib_fn('reorientImage2')
itkimage = libfn(image.pointer, orientation)
new_img = iio.ANTsImage(pixeltype='float', dimension=ndim,
components=image.components, pointer=itkimage)#.clone(inpixeltype)
if inpixeltype != 'float':
new_img = new_img.clone(inpixeltype)
return new_img | Reorient an image.
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni2 = mni.reorient_image2() | Below is the instruction that describes the task:
### Input:
Reorient an image.
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni2 = mni.reorient_image2()
### Response:
def reorient_image2(image, orientation='RAS'):
    """
    Reorient an image.

    Arguments
    ---------
    image : ANTsImage
        image to reorient (must be 3-dimensional)
    orientation : string
        three-letter orientation code such as 'RAS'

    Example
    -------
    >>> import ants
    >>> mni = ants.image_read(ants.get_data('mni'))
    >>> mni2 = mni.reorient_image2()
    """
    if image.dimension != 3:
        raise ValueError('image must have 3 dimensions')

    inpixeltype = image.pixeltype
    ndim = image.dimension
    # The underlying library routine only operates on float images, so
    # work on a float clone and convert back afterwards.
    if image.pixeltype != 'float':
        image = image.clone('float')

    libfn = utils.get_lib_fn('reorientImage2')
    itkimage = libfn(image.pointer, orientation)

    new_img = iio.ANTsImage(pixeltype='float', dimension=ndim,
                            components=image.components, pointer=itkimage)
    # Restore the caller's original pixel type if it was not float.
    if inpixeltype != 'float':
        new_img = new_img.clone(inpixeltype)
    return new_img
def run(self): # Thread for receiving data from pilight
"""Receiver thread function called on Client.start()."""
logging.debug('Pilight receiver thread started')
if not self.callback:
raise RuntimeError('No callback function set, cancel readout thread')
def handle_messages(messages):
"""Call callback on each receive message."""
for message in messages: # Loop over received messages
if message: # Can be empty due to splitlines
message_dict = json.loads(message.decode())
if self.recv_codes_only:
# Filter: Only use receiver messages
if 'receiver' in message_dict['origin']:
if self.veto_repeats:
if message_dict.get('repeats', 1) == 1:
self.callback(message_dict)
else:
self.callback(message_dict)
else:
self.callback(message_dict)
while not self._stop_thread.isSet():
try: # Read socket in a non blocking call and interpret data
# Sometimes more than one JSON object is in the stream thus
# split at \n
with self._lock:
messages = self.receive_socket.recv(1024).splitlines()
handle_messages(messages)
except (socket.timeout, ValueError): # No data
pass
logging.debug('Pilight receiver thread stopped') | Receiver thread function called on Client.start(). | Below is the instruction that describes the task:
### Input:
Receiver thread function called on Client.start().
### Response:
def run(self):  # Thread for receiving data from pilight
    """Receiver thread function called on Client.start()."""
    logging.debug('Pilight receiver thread started')
    if not self.callback:
        raise RuntimeError('No callback function set, cancel readout thread')

    def handle_messages(messages):
        """Call callback on each receive message."""
        for message in messages:  # Loop over received messages
            if message:  # Can be empty due to splitlines
                message_dict = json.loads(message.decode())
                if self.recv_codes_only:
                    # Filter: Only use receiver messages
                    if 'receiver' in message_dict['origin']:
                        if self.veto_repeats:
                            # Suppress hardware repeat transmissions.
                            if message_dict.get('repeats', 1) == 1:
                                self.callback(message_dict)
                        else:
                            self.callback(message_dict)
                else:
                    self.callback(message_dict)

    while not self._stop_thread.isSet():
        try:  # Read socket in a non blocking call and interpret data
            # Sometimes more than one JSON object is in the stream thus
            # split at \n
            with self._lock:
                messages = self.receive_socket.recv(1024).splitlines()
                handle_messages(messages)
        except (socket.timeout, ValueError):  # No data
            pass
    logging.debug('Pilight receiver thread stopped')
def first_field(self):
""" Returns the first :class:`Field` in the `Sequence` or ``None``
for an empty `Sequence`.
"""
for name, item in enumerate(self):
# Container
if is_container(item):
field = item.first_field()
# Container is not empty
if field is not None:
return field
# Field
elif is_field(item):
return item
else:
raise MemberTypeError(self, item, name)
return None | Returns the first :class:`Field` in the `Sequence` or ``None``
for an empty `Sequence`. | Below is the instruction that describes the task:
### Input:
Returns the first :class:`Field` in the `Sequence` or ``None``
for an empty `Sequence`.
### Response:
def first_field(self):
    """ Returns the first :class:`Field` in the `Sequence` or ``None``
    for an empty `Sequence`.
    """
    for name, item in enumerate(self):
        # Container: recurse and skip containers that hold no fields.
        if is_container(item):
            field = item.first_field()
            # Container is not empty
            if field is not None:
                return field
        # Field
        elif is_field(item):
            return item
        else:
            raise MemberTypeError(self, item, name)
    return None
def get_default_collection_converters(conversion_finder: ConversionFinder) -> List[Union[Converter[Any, dict], Converter[dict, Any]]]:
"""
Utility method to return the default converters associated to dict (from dict to other type,
and from other type to dict)
:return:
"""
return [ConverterFunction(from_type=List, to_type=Set, conversion_method=list_to_set, custom_name='list_to_set',
function_args={'conversion_finder': conversion_finder}),
ConverterFunction(from_type=List, to_type=Tuple, conversion_method=list_to_tuple,
custom_name='list_to_tuple', function_args={'conversion_finder': conversion_finder})] | Utility method to return the default converters associated to dict (from dict to other type,
and from other type to dict)
:return: | Below is the instruction that describes the task:
### Input:
Utility method to return the default converters associated to dict (from dict to other type,
and from other type to dict)
:return:
### Response:
def get_default_collection_converters(conversion_finder: ConversionFinder) -> List[Union[Converter[Any, dict], Converter[dict, Any]]]:
    """
    Utility method to return the default converters associated to dict (from dict to other type,
    and from other type to dict)

    :param conversion_finder: finder passed through to the converters for inner elements
    :return: the list of default collection converters
    """
    return [ConverterFunction(from_type=List, to_type=Set, conversion_method=list_to_set,
                              custom_name='list_to_set',
                              function_args={'conversion_finder': conversion_finder}),
            ConverterFunction(from_type=List, to_type=Tuple, conversion_method=list_to_tuple,
                              custom_name='list_to_tuple',
                              function_args={'conversion_finder': conversion_finder})]
def get_all_recurrings(self, params=None):
"""
Get all recurrings
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
"""
if not params:
params = {}
return self._iterate_through_pages(self.get_recurrings_per_page, resource=RECURRINGS, **{'params': params}) | Get all recurrings
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list | Below is the instruction that describes the task:
### Input:
Get all recurrings
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
### Response:
def get_all_recurrings(self, params=None):
    """
    Get all recurrings

    This will iterate over all pages until it gets all elements.
    So if the rate limit exceeded it will throw an Exception and you will get nothing

    :param params: search params
    :return: list
    """
    if not params:
        params = {}
    # Pass the search params straight through as a keyword argument.
    return self._iterate_through_pages(self.get_recurrings_per_page,
                                       resource=RECURRINGS,
                                       params=params)
async def save(self, db=None):
'''
If object has _id, then object will be created or fully rewritten.
If not, object will be inserted and _id will be assigned.
'''
self._db = db or self.db
data = self.prepare_data()
# validate object
self.validate()
# connect to DB to save the model
for i in self.connection_retries():
try:
created = False if '_id' in data else True
result = await self.db[self.get_collection_name()].insert_one(data)
self._id = result.inserted_id
# emit post save
asyncio.ensure_future(post_save.send(
sender=self.__class__,
db=self.db,
instance=self,
created=created)
)
break
except ConnectionFailure as ex:
exceed = await self.check_reconnect_tries_and_wait(i, 'save')
if exceed:
raise ex | If object has _id, then object will be created or fully rewritten.
If not, object will be inserted and _id will be assigned. | Below is the instruction that describes the task:
### Input:
If object has _id, then object will be created or fully rewritten.
If not, object will be inserted and _id will be assigned.
### Response:
async def save(self, db=None):
    '''
    If object has _id, then object will be created or fully rewritten.
    If not, object will be inserted and _id will be assigned.

    :param db: optional database handle overriding ``self.db``
    '''
    self._db = db or self.db
    data = self.prepare_data()
    # validate object
    self.validate()
    # connect to DB to save the model
    for i in self.connection_retries():
        try:
            # Whether this call inserts a brand-new document.
            created = '_id' not in data
            result = await self.db[self.get_collection_name()].insert_one(data)
            self._id = result.inserted_id
            # emit post save signal asynchronously
            asyncio.ensure_future(post_save.send(
                sender=self.__class__,
                db=self.db,
                instance=self,
                created=created)
            )
            break
        except ConnectionFailure:
            # Wait/backoff; re-raise once the retry budget is exhausted.
            exceed = await self.check_reconnect_tries_and_wait(i, 'save')
            if exceed:
                raise
def set_error_handler(codec, handler, data=None):
"""Wraps openjp2 library function opj_set_error_handler.
Set the error handler use by openjpeg.
Parameters
----------
codec : CODEC_TYPE
Codec initialized by create_compress function.
handler : python function
The callback function to be used.
user_data : anything
User/client data.
Raises
------
RuntimeError
If the OpenJPEG library routine opj_set_error_handler fails.
"""
OPENJP2.opj_set_error_handler.argtypes = [CODEC_TYPE,
ctypes.c_void_p,
ctypes.c_void_p]
OPENJP2.opj_set_error_handler.restype = check_error
OPENJP2.opj_set_error_handler(codec, handler, data) | Wraps openjp2 library function opj_set_error_handler.
Set the error handler use by openjpeg.
Parameters
----------
codec : CODEC_TYPE
Codec initialized by create_compress function.
handler : python function
The callback function to be used.
user_data : anything
User/client data.
Raises
------
RuntimeError
If the OpenJPEG library routine opj_set_error_handler fails. | Below is the instruction that describes the task:
### Input:
Wraps openjp2 library function opj_set_error_handler.
Set the error handler use by openjpeg.
Parameters
----------
codec : CODEC_TYPE
Codec initialized by create_compress function.
handler : python function
The callback function to be used.
user_data : anything
User/client data.
Raises
------
RuntimeError
If the OpenJPEG library routine opj_set_error_handler fails.
### Response:
def set_error_handler(codec, handler, data=None):
    """Wraps openjp2 library function opj_set_error_handler.

    Set the error handler use by openjpeg.

    Parameters
    ----------
    codec : CODEC_TYPE
        Codec initialized by create_compress function.
    handler : python function
        The callback function to be used.
    data : anything
        User/client data passed through to the callback.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_set_error_handler fails.
    """
    OPENJP2.opj_set_error_handler.argtypes = [CODEC_TYPE,
                                              ctypes.c_void_p,
                                              ctypes.c_void_p]
    # check_error converts a failed library status into a RuntimeError.
    OPENJP2.opj_set_error_handler.restype = check_error
    OPENJP2.opj_set_error_handler(codec, handler, data)
def make_success_response(self, result):
"""
Makes the python dict corresponding to the
JSON that needs to be sent for a successful
response. Result is the actual payload
that gets sent.
"""
response = self.make_response(constants.RESPONSE_STATUS_SUCCESS)
response[constants.RESPONSE_KEY_RESULT] = result
return response | Makes the python dict corresponding to the
JSON that needs to be sent for a successful
response. Result is the actual payload
that gets sent. | Below is the the instruction that describes the task:
### Input:
Makes the python dict corresponding to the
JSON that needs to be sent for a successful
response. Result is the actual payload
that gets sent.
### Response:
def make_success_response(self, result):
    """
    Makes the python dict corresponding to the
    JSON that needs to be sent for a successful
    response. Result is the actual payload
    that gets sent.
    """
    response = self.make_response(constants.RESPONSE_STATUS_SUCCESS)
    response[constants.RESPONSE_KEY_RESULT] = result
    return response
def get_flux(self, energies):
"""Get the total flux of this particle source at the given energies (summed over the components)"""
results = [component.shape(energies) for component in self.components.values()]
return numpy.sum(results, 0) | Get the total flux of this particle source at the given energies (summed over the components) | Below is the instruction that describes the task:
### Input:
Get the total flux of this particle source at the given energies (summed over the components)
### Response:
def get_flux(self, energies):
    """Get the total flux of this particle source at the given energies (summed over the components)"""
    # Evaluate every component's spectral shape, then sum element-wise.
    results = [component.shape(energies) for component in self.components.values()]
    return numpy.sum(results, 0)
def update(self, values, force=False):
""" Update this task dictionary
:returns: A dictionary mapping field names specified to be updated
and a boolean value indicating whether the field was changed.
"""
results = {}
for k, v in six.iteritems(values):
results[k] = self.__setitem__(k, v, force=force)
return results | Update this task dictionary
:returns: A dictionary mapping field names specified to be updated
and a boolean value indicating whether the field was changed. | Below is the instruction that describes the task:
### Input:
Update this task dictionary
:returns: A dictionary mapping field names specified to be updated
and a boolean value indicating whether the field was changed.
### Response:
def update(self, values, force=False):
    """ Update this task dictionary

    :param values: mapping of field names to new values
    :param force: forwarded to ``__setitem__`` to force the assignment
    :returns: A dictionary mapping field names specified to be updated
        and a boolean value indicating whether the field was changed.
    """
    results = {}
    for k, v in six.iteritems(values):
        # __setitem__ reports whether the assignment changed the field.
        results[k] = self.__setitem__(k, v, force=force)
    return results
def handle(self, *args, **options):
"""This function is called by the Django API to specify how this object
will be saved to the database.
"""
taxonomy_id = options['taxonomy_id']
# Remove leading and trailing blank characters in "common_name"
# and "scientific_name
common_name = options['common_name'].strip()
scientific_name = options['scientific_name'].strip()
if common_name and scientific_name:
# A 'slug' is a label for an object in django, which only contains
# letters, numbers, underscores, and hyphens, thus making it URL-
# usable. The slugify method in django takes any string and
# converts it to this format. For more information, see:
# http://stackoverflow.com/questions/427102/what-is-a-slug-in-django
slug = slugify(scientific_name)
logger.info("Slug generated: %s", slug)
# If organism exists, update with passed parameters
try:
org = Organism.objects.get(taxonomy_id=taxonomy_id)
org.common_name = common_name
org.scientific_name = scientific_name
org.slug = slug
# If organism doesn't exist, construct an organism object
# (see organisms/models.py).
except Organism.DoesNotExist:
org = Organism(taxonomy_id=taxonomy_id,
common_name=common_name,
scientific_name=scientific_name,
slug=slug
)
org.save() # Save to the database.
else:
# Report an error if the user did not fill out all fields.
logger.error(
"Failed to add or update organism. "
"Please check that all fields are filled correctly."
) | This function is called by the Django API to specify how this object
will be saved to the database. | Below is the instruction that describes the task:
### Input:
This function is called by the Django API to specify how this object
will be saved to the database.
### Response:
def handle(self, *args, **options):
    """This function is called by the Django API to specify how this object
    will be saved to the database.
    """
    taxonomy_id = options['taxonomy_id']
    # Remove leading and trailing blank characters in "common_name"
    # and "scientific_name".
    common_name = options['common_name'].strip()
    scientific_name = options['scientific_name'].strip()

    if common_name and scientific_name:
        # A 'slug' is a label for an object in django, which only contains
        # letters, numbers, underscores, and hyphens, thus making it URL-
        # usable. For more information, see:
        # http://stackoverflow.com/questions/427102/what-is-a-slug-in-django
        slug = slugify(scientific_name)
        logger.info("Slug generated: %s", slug)

        try:
            # If organism exists, update it with the passed parameters.
            org = Organism.objects.get(taxonomy_id=taxonomy_id)
            org.common_name = common_name
            org.scientific_name = scientific_name
            org.slug = slug
        except Organism.DoesNotExist:
            # If organism doesn't exist, construct an organism object
            # (see organisms/models.py).
            org = Organism(taxonomy_id=taxonomy_id,
                           common_name=common_name,
                           scientific_name=scientific_name,
                           slug=slug
                           )
        org.save()  # Save to the database.
    else:
        # Report an error if the user did not fill out all fields.
        logger.error(
            "Failed to add or update organism. "
            "Please check that all fields are filled correctly."
        )
def get_lastfunction_header(self, header, default_return_value=None):
"""Returns a specific header from the last API call
This will return None if the header is not present
:param header: (required) The name of the header you want to get
the value of
Most useful for the following header information:
x-rate-limit-limit,
x-rate-limit-remaining,
x-rate-limit-class,
x-rate-limit-reset
"""
if self._last_call is None:
raise TwythonError('This function must be called after an API call. \
It delivers header information.')
return self._last_call['headers'].get(header, default_return_value) | Returns a specific header from the last API call
This will return None if the header is not present
:param header: (required) The name of the header you want to get
the value of
Most useful for the following header information:
x-rate-limit-limit,
x-rate-limit-remaining,
x-rate-limit-class,
x-rate-limit-reset | Below is the instruction that describes the task:
### Input:
Returns a specific header from the last API call
This will return None if the header is not present
:param header: (required) The name of the header you want to get
the value of
Most useful for the following header information:
x-rate-limit-limit,
x-rate-limit-remaining,
x-rate-limit-class,
x-rate-limit-reset
### Response:
def get_lastfunction_header(self, header, default_return_value=None):
    """Returns a specific header from the last API call

    This will return None if the header is not present

    :param header: (required) The name of the header you want to get
                   the value of

    Most useful for the following header information:
        x-rate-limit-limit,
        x-rate-limit-remaining,
        x-rate-limit-class,
        x-rate-limit-reset
    """
    if self._last_call is None:
        # Implicit string concatenation avoids the run of indentation
        # spaces the old backslash line-continuation embedded in the text.
        raise TwythonError('This function must be called after an API call. '
                           'It delivers header information.')
    return self._last_call['headers'].get(header, default_return_value)
def signature(secret, parts):
"""Generates a signature. All strings are assumed to be utf-8
"""
if not isinstance(secret, six.binary_type):
secret = secret.encode('utf-8')
newparts = []
for part in parts:
if not isinstance(part, six.binary_type):
part = part.encode('utf-8')
newparts.append(part)
parts = newparts
if sys.version_info >= (2, 5):
csum = hmac.new(secret, digestmod=hashlib.sha1)
else:
csum = hmac.new(secret, digestmod=sha)
for part in parts:
csum.update(part)
return csum.hexdigest() | Generates a signature. All strings are assumed to be utf-8 | Below is the instruction that describes the task:
### Input:
Generates a signature. All strings are assumed to be utf-8
### Response:
def signature(secret, parts):
    """Generates a signature. All strings are assumed to be utf-8
    """
    if not isinstance(secret, six.binary_type):
        secret = secret.encode('utf-8')

    # Normalize every part to bytes before feeding it to the digest.
    newparts = []
    for part in parts:
        if not isinstance(part, six.binary_type):
            part = part.encode('utf-8')
        newparts.append(part)
    parts = newparts

    if sys.version_info >= (2, 5):
        csum = hmac.new(secret, digestmod=hashlib.sha1)
    else:
        # Very old Pythons lack hashlib; fall back to the legacy sha module.
        csum = hmac.new(secret, digestmod=sha)
    for part in parts:
        csum.update(part)
    return csum.hexdigest()
def forward_sample(self, size=1, return_type='dataframe'):
"""
Generates sample(s) from joint distribution of the bayesian network.
Parameters
----------
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> inference.forward_sample(size=2, return_type='recarray')
rec.array([(0, 0, 1), (1, 0, 2)], dtype=
[('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8')])
"""
types = [(var_name, 'int') for var_name in self.topological_order]
sampled = np.zeros(size, dtype=types).view(np.recarray)
for node in self.topological_order:
cpd = self.model.get_cpds(node)
states = range(self.cardinality[node])
evidence = cpd.variables[:0:-1]
if evidence:
cached_values = self.pre_compute_reduce(variable=node)
evidence = np.vstack([sampled[i] for i in evidence])
weights = list(map(lambda t: cached_values[tuple(t)], evidence.T))
else:
weights = cpd.values
sampled[node] = sample_discrete(states, weights, size)
return _return_samples(return_type, sampled) | Generates sample(s) from joint distribution of the bayesian network.
Parameters
----------
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> inference.forward_sample(size=2, return_type='recarray')
rec.array([(0, 0, 1), (1, 0, 2)], dtype=
[('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8')]) | Below is the instruction that describes the task:
### Input:
Generates sample(s) from joint distribution of the bayesian network.
Parameters
----------
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> inference.forward_sample(size=2, return_type='recarray')
rec.array([(0, 0, 1), (1, 0, 2)], dtype=
[('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8')])
### Response:
def forward_sample(self, size=1, return_type='dataframe'):
    """
    Generates sample(s) from joint distribution of the bayesian network.

    Parameters
    ----------
    size: int
        size of sample to be generated

    return_type: string (dataframe | recarray)
        Return type for samples, either of 'dataframe' or 'recarray'.
        Defaults to 'dataframe'

    Returns
    -------
    sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
        the generated samples

    Examples
    --------
    >>> from pgmpy.models.BayesianModel import BayesianModel
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> from pgmpy.sampling import BayesianModelSampling
    >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
    >>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
    >>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
    >>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
    ...                0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
    ...                ['intel', 'diff'], [2, 2])
    >>> student.add_cpds(cpd_d, cpd_i, cpd_g)
    >>> inference = BayesianModelSampling(student)
    >>> inference.forward_sample(size=2, return_type='recarray')
    rec.array([(0, 0, 1), (1, 0, 2)], dtype=
    [('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8')])
    """
    # One integer column per variable; filling in topological order
    # guarantees a node's parents are sampled before the node itself.
    types = [(var_name, 'int') for var_name in self.topological_order]
    sampled = np.zeros(size, dtype=types).view(np.recarray)
    for node in self.topological_order:
        cpd = self.model.get_cpds(node)
        states = range(self.cardinality[node])
        evidence = cpd.variables[:0:-1]
        if evidence:
            # Look up the reduced CPD weights for each sampled parent
            # configuration (precomputed once per node).
            cached_values = self.pre_compute_reduce(variable=node)
            evidence = np.vstack([sampled[i] for i in evidence])
            weights = list(map(lambda t: cached_values[tuple(t)], evidence.T))
        else:
            weights = cpd.values
        sampled[node] = sample_discrete(states, weights, size)
    return _return_samples(return_type, sampled)
def print_info_signal_entry(self, signame):
"""Print status for a single signal name (signame)"""
if signame in signal_description:
description=signal_description[signame]
else:
description=""
pass
if signame not in list(self.sigs.keys()):
# Fake up an entry as though signame were in sigs.
self.dbgr.intf[-1].msg(self.info_fmt
% (signame, 'No', 'No', 'No', 'Yes',
description))
return
sig_obj = self.sigs[signame]
self.dbgr.intf[-1].msg(self.info_fmt %
(signame,
YN(sig_obj.b_stop),
YN(sig_obj.print_method is not None),
YN(sig_obj.print_stack),
YN(sig_obj.pass_along),
description))
return | Print status for a single signal name (signame) | Below is the instruction that describes the task:
### Input:
Print status for a single signal name (signame)
### Response:
def print_info_signal_entry(self, signame):
"""Print status for a single signal name (signame)"""
if signame in signal_description:
description=signal_description[signame]
else:
description=""
pass
if signame not in list(self.sigs.keys()):
# Fake up an entry as though signame were in sigs.
self.dbgr.intf[-1].msg(self.info_fmt
% (signame, 'No', 'No', 'No', 'Yes',
description))
return
sig_obj = self.sigs[signame]
self.dbgr.intf[-1].msg(self.info_fmt %
(signame,
YN(sig_obj.b_stop),
YN(sig_obj.print_method is not None),
YN(sig_obj.print_stack),
YN(sig_obj.pass_along),
description))
return |
def _partialParseUnits(self, s, sourceTime):
"""
test if giving C{s} matched CRE_UNITS, used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of remained date/time text, datetime object and
an boolean value to describ if matched or not
"""
parseStr = None
chunk1 = chunk2 = ''
# Quantity + Units
m = self.ptc.CRE_UNITS.search(s)
if m is not None:
debug and log.debug('CRE_UNITS matched')
if self._UnitsTrapped(s, m, 'units'):
debug and log.debug('day suffix trapped by unit match')
else:
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug('found (units) [%s][%s][%s]',
parseStr, chunk1, chunk2)
sourceTime = self._evalUnits(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) | test if giving C{s} matched CRE_UNITS, used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of remained date/time text, datetime object and
         an boolean value to describ if matched or not | Below is the instruction that describes the task:
### Input:
test if giving C{s} matched CRE_UNITS, used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of remained date/time text, datetime object and
an boolean value to describ if matched or not
### Response:
def _partialParseUnits(self, s, sourceTime):
"""
test if giving C{s} matched CRE_UNITS, used by L{parse()}
@type s: string
@param s: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of remained date/time text, datetime object and
an boolean value to describ if matched or not
"""
parseStr = None
chunk1 = chunk2 = ''
# Quantity + Units
m = self.ptc.CRE_UNITS.search(s)
if m is not None:
debug and log.debug('CRE_UNITS matched')
if self._UnitsTrapped(s, m, 'units'):
debug and log.debug('day suffix trapped by unit match')
else:
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
else:
parseStr = s
s = ''
if parseStr:
debug and log.debug('found (units) [%s][%s][%s]',
parseStr, chunk1, chunk2)
sourceTime = self._evalUnits(parseStr, sourceTime)
return s, sourceTime, bool(parseStr) |
def simulate(args):
"""
%prog simulate run_dir 1 300
Simulate BAMs with varying inserts with dwgsim. The above command will
simulate between 1 to 300 CAGs in the HD region, in a directory called
`run_dir`.
"""
p = OptionParser(simulate.__doc__)
p.add_option("--method", choices=("wgsim", "eagle"), default="eagle",
help="Read simulator")
p.add_option("--ref", default="hg38", choices=("hg38", "hg19"),
help="Reference genome version")
p.add_option("--tred", default="HD", help="TRED locus")
add_simulate_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
rundir, startunits, endunits = args
ref = opts.ref
ref_fasta = "/mnt/ref/{}.upper.fa".format(ref)
startunits, endunits = int(startunits), int(endunits)
basecwd = os.getcwd()
mkdir(rundir)
os.chdir(rundir)
cwd = os.getcwd()
# TRED region (e.g. Huntington)
pad_left, pad_right = 1000, 10000
repo = TREDsRepo(ref=ref)
tred = repo[opts.tred]
chr, start, end = tred.chr, tred.repeat_start, tred.repeat_end
logging.debug("Simulating {}".format(tred))
fasta = Fasta(ref_fasta)
seq_left = fasta[chr][start - pad_left:start - 1]
seq_right = fasta[chr][end: end + pad_right]
motif = tred.repeat
simulate_method = wgsim if opts.method == "wgsim" else eagle
# Write fake sequence
for units in range(startunits, endunits + 1):
pf = str(units)
mkdir(pf)
os.chdir(pf)
seq = str(seq_left) + motif * units + str(seq_right)
fastafile = pf + ".fasta"
make_fasta(seq, fastafile, id=chr.upper())
# Simulate reads on it
simulate_method([fastafile, "--depth={}".format(opts.depth),
"--readlen={}".format(opts.readlen),
"--distance={}".format(opts.distance),
"--outfile={}".format(pf)])
read1 = pf + ".bwa.read1.fastq"
read2 = pf + ".bwa.read2.fastq"
samfile, _ = align([ref_fasta, read1, read2])
indexed_samfile = index([samfile])
sh("mv {} ../{}.bam".format(indexed_samfile, pf))
sh("mv {}.bai ../{}.bam.bai".format(indexed_samfile, pf))
os.chdir(cwd)
shutil.rmtree(pf)
os.chdir(basecwd) | %prog simulate run_dir 1 300
Simulate BAMs with varying inserts with dwgsim. The above command will
simulate between 1 to 300 CAGs in the HD region, in a directory called
    `run_dir`. | Below is the instruction that describes the task:
### Input:
%prog simulate run_dir 1 300
Simulate BAMs with varying inserts with dwgsim. The above command will
simulate between 1 to 300 CAGs in the HD region, in a directory called
`run_dir`.
### Response:
def simulate(args):
"""
%prog simulate run_dir 1 300
Simulate BAMs with varying inserts with dwgsim. The above command will
simulate between 1 to 300 CAGs in the HD region, in a directory called
`run_dir`.
"""
p = OptionParser(simulate.__doc__)
p.add_option("--method", choices=("wgsim", "eagle"), default="eagle",
help="Read simulator")
p.add_option("--ref", default="hg38", choices=("hg38", "hg19"),
help="Reference genome version")
p.add_option("--tred", default="HD", help="TRED locus")
add_simulate_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
rundir, startunits, endunits = args
ref = opts.ref
ref_fasta = "/mnt/ref/{}.upper.fa".format(ref)
startunits, endunits = int(startunits), int(endunits)
basecwd = os.getcwd()
mkdir(rundir)
os.chdir(rundir)
cwd = os.getcwd()
# TRED region (e.g. Huntington)
pad_left, pad_right = 1000, 10000
repo = TREDsRepo(ref=ref)
tred = repo[opts.tred]
chr, start, end = tred.chr, tred.repeat_start, tred.repeat_end
logging.debug("Simulating {}".format(tred))
fasta = Fasta(ref_fasta)
seq_left = fasta[chr][start - pad_left:start - 1]
seq_right = fasta[chr][end: end + pad_right]
motif = tred.repeat
simulate_method = wgsim if opts.method == "wgsim" else eagle
# Write fake sequence
for units in range(startunits, endunits + 1):
pf = str(units)
mkdir(pf)
os.chdir(pf)
seq = str(seq_left) + motif * units + str(seq_right)
fastafile = pf + ".fasta"
make_fasta(seq, fastafile, id=chr.upper())
# Simulate reads on it
simulate_method([fastafile, "--depth={}".format(opts.depth),
"--readlen={}".format(opts.readlen),
"--distance={}".format(opts.distance),
"--outfile={}".format(pf)])
read1 = pf + ".bwa.read1.fastq"
read2 = pf + ".bwa.read2.fastq"
samfile, _ = align([ref_fasta, read1, read2])
indexed_samfile = index([samfile])
sh("mv {} ../{}.bam".format(indexed_samfile, pf))
sh("mv {}.bai ../{}.bam.bai".format(indexed_samfile, pf))
os.chdir(cwd)
shutil.rmtree(pf)
os.chdir(basecwd) |
def this_month(today: datetime=None, tz=None):
"""
Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = datetime(day=1, month=today.month, year=today.year)
end = begin + timedelta(days=32)
end = datetime(day=1, month=end.month, year=end.year)
return localize_time_range(begin, end, tz) | Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
    :return: begin (inclusive), end (exclusive) | Below is the instruction that describes the task:
### Input:
Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
### Response:
def this_month(today: datetime=None, tz=None):
"""
Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = datetime(day=1, month=today.month, year=today.year)
end = begin + timedelta(days=32)
end = datetime(day=1, month=end.month, year=end.year)
return localize_time_range(begin, end, tz) |
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
    return(y) | Returns integral of the linear growth factor from z=200 to z=z | Below is the instruction that describes the task:
### Input:
Returns integral of the linear growth factor from z=200 to z=z
### Response:
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y) |
def layout(mtf_graph, mesh_shape, mtf_outputs=()):
"""Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules
"""
mesh_shape = mtf.convert_to_shape(mesh_shape)
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
mtf_outputs)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
return mtf.convert_to_layout_rules(optimizer.solve()) | Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
    a mtf.LayoutRules | Below is the instruction that describes the task:
### Input:
Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules
### Response:
def layout(mtf_graph, mesh_shape, mtf_outputs=()):
"""Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules
"""
mesh_shape = mtf.convert_to_shape(mesh_shape)
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
mtf_outputs)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
return mtf.convert_to_layout_rules(optimizer.solve()) |
def maf_somatic_variant_stats(variant, variant_metadata):
"""
Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
SomaticVariantStats
"""
tumor_stats = None
normal_stats = None
if "t_ref_count" in variant_metadata:
tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t")
if "n_ref_count" in variant_metadata:
normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n")
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats) | Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
    SomaticVariantStats | Below is the instruction that describes the task:
### Input:
Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
SomaticVariantStats
### Response:
def maf_somatic_variant_stats(variant, variant_metadata):
"""
Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
SomaticVariantStats
"""
tumor_stats = None
normal_stats = None
if "t_ref_count" in variant_metadata:
tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t")
if "n_ref_count" in variant_metadata:
normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n")
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats) |
def _build_named_object_ids(parameters):
"""Builds a list of NamedObjectId."""
if isinstance(parameters, str):
return [_build_named_object_id(parameters)]
    return [_build_named_object_id(parameter) for parameter in parameters] | Builds a list of NamedObjectId. | Below is the instruction that describes the task:
### Input:
Builds a list of NamedObjectId.
### Response:
def _build_named_object_ids(parameters):
"""Builds a list of NamedObjectId."""
if isinstance(parameters, str):
return [_build_named_object_id(parameters)]
return [_build_named_object_id(parameter) for parameter in parameters] |
def get(self, par_names=None, obs_names=None):
"""get a new pst object with subset of parameters and/or observations
Parameters
----------
par_names : list
a list of parameter names to have in the new Pst instance.
If None, all parameters are in the new Pst instance. Default
is None
obs_names : list
a list of observation names to have in the new Pst instance.
If None, all observations are in teh new Pst instance. Default
is None
Returns
-------
Pst : Pst
a new Pst instance
"""
pass
#if par_names is None and obs_names is None:
# return copy.deepcopy(self)
if par_names is None:
par_names = self.parameter_data.parnme
if obs_names is None:
obs_names = self.observation_data.obsnme
new_par = self.parameter_data.copy()
if par_names is not None:
new_par.index = new_par.parnme
new_par = new_par.loc[par_names, :]
new_obs = self.observation_data.copy()
new_res = None
if obs_names is not None:
new_obs.index = new_obs.obsnme
new_obs = new_obs.loc[obs_names]
if self.__res is not None:
new_res = copy.deepcopy(self.res)
new_res.index = new_res.name
new_res = new_res.loc[obs_names, :]
new_pargp = self.parameter_groups.copy()
new_pargp.index = new_pargp.pargpnme.apply(str.strip)
new_pargp_names = new_par.pargp.value_counts().index
new_pargp = new_pargp.loc[new_pargp_names,:]
new_pst = Pst(self.filename, resfile=self.resfile, load=False)
new_pst.parameter_data = new_par
new_pst.observation_data = new_obs
new_pst.parameter_groups = new_pargp
new_pst.__res = new_res
new_pst.prior_information = self.prior_information
new_pst.rectify_pi()
new_pst.control_data = self.control_data.copy()
new_pst.model_command = self.model_command
new_pst.template_files = self.template_files
new_pst.input_files = self.input_files
new_pst.instruction_files = self.instruction_files
new_pst.output_files = self.output_files
if self.tied is not None:
warnings.warn("Pst.get() not checking for tied parameter " +
"compatibility in new Pst instance",PyemuWarning)
#new_pst.tied = self.tied.copy()
new_pst.other_lines = self.other_lines
new_pst.pestpp_options = self.pestpp_options
new_pst.regul_lines = self.regul_lines
return new_pst | get a new pst object with subset of parameters and/or observations
Parameters
----------
par_names : list
a list of parameter names to have in the new Pst instance.
If None, all parameters are in the new Pst instance. Default
is None
obs_names : list
a list of observation names to have in the new Pst instance.
If None, all observations are in teh new Pst instance. Default
is None
Returns
-------
Pst : Pst
        a new Pst instance | Below is the instruction that describes the task:
### Input:
get a new pst object with subset of parameters and/or observations
Parameters
----------
par_names : list
a list of parameter names to have in the new Pst instance.
If None, all parameters are in the new Pst instance. Default
is None
obs_names : list
a list of observation names to have in the new Pst instance.
If None, all observations are in teh new Pst instance. Default
is None
Returns
-------
Pst : Pst
a new Pst instance
### Response:
def get(self, par_names=None, obs_names=None):
"""get a new pst object with subset of parameters and/or observations
Parameters
----------
par_names : list
a list of parameter names to have in the new Pst instance.
If None, all parameters are in the new Pst instance. Default
is None
obs_names : list
a list of observation names to have in the new Pst instance.
If None, all observations are in teh new Pst instance. Default
is None
Returns
-------
Pst : Pst
a new Pst instance
"""
pass
#if par_names is None and obs_names is None:
# return copy.deepcopy(self)
if par_names is None:
par_names = self.parameter_data.parnme
if obs_names is None:
obs_names = self.observation_data.obsnme
new_par = self.parameter_data.copy()
if par_names is not None:
new_par.index = new_par.parnme
new_par = new_par.loc[par_names, :]
new_obs = self.observation_data.copy()
new_res = None
if obs_names is not None:
new_obs.index = new_obs.obsnme
new_obs = new_obs.loc[obs_names]
if self.__res is not None:
new_res = copy.deepcopy(self.res)
new_res.index = new_res.name
new_res = new_res.loc[obs_names, :]
new_pargp = self.parameter_groups.copy()
new_pargp.index = new_pargp.pargpnme.apply(str.strip)
new_pargp_names = new_par.pargp.value_counts().index
new_pargp = new_pargp.loc[new_pargp_names,:]
new_pst = Pst(self.filename, resfile=self.resfile, load=False)
new_pst.parameter_data = new_par
new_pst.observation_data = new_obs
new_pst.parameter_groups = new_pargp
new_pst.__res = new_res
new_pst.prior_information = self.prior_information
new_pst.rectify_pi()
new_pst.control_data = self.control_data.copy()
new_pst.model_command = self.model_command
new_pst.template_files = self.template_files
new_pst.input_files = self.input_files
new_pst.instruction_files = self.instruction_files
new_pst.output_files = self.output_files
if self.tied is not None:
warnings.warn("Pst.get() not checking for tied parameter " +
"compatibility in new Pst instance",PyemuWarning)
#new_pst.tied = self.tied.copy()
new_pst.other_lines = self.other_lines
new_pst.pestpp_options = self.pestpp_options
new_pst.regul_lines = self.regul_lines
return new_pst |
def render(text, context=None):
"""
Depreceated call to render_markdown().
"""
warning = (
"wagtailmarkdown.utils.render() is deprecated. Use "
"wagtailmarkdown.utils.render_markdown() instead."
)
warnings.warn(warning, WagtailMarkdownDeprecationWarning, stacklevel=2)
    return render_markdown(text, context) | Depreceated call to render_markdown(). | Below is the instruction that describes the task:
### Input:
Depreceated call to render_markdown().
### Response:
def render(text, context=None):
"""
Depreceated call to render_markdown().
"""
warning = (
"wagtailmarkdown.utils.render() is deprecated. Use "
"wagtailmarkdown.utils.render_markdown() instead."
)
warnings.warn(warning, WagtailMarkdownDeprecationWarning, stacklevel=2)
return render_markdown(text, context) |
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
)) | Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
        "max_val" | Below is the instruction that describes the task:
### Input:
Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
### Response:
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
)) |
def _get_base_command(self):
""" Returns the full command string
input_arg: the argument to the command which represents the input
to the program, this will be a string, either
representing input or a filename to get input from
tI"""
command_parts = []
# Append a change directory to the beginning of the command to change
# to self.WorkingDir before running the command
# WorkingDir should be in quotes -- filenames might contain spaces
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
parameters = self.Parameters
command_parts.append(cd_command)
command_parts.append(command)
command_parts.append(self._command_delimiter.join(filter(
None, (map(str, parameters.values())))))
return self._command_delimiter.join(command_parts).strip() | Returns the full command string
input_arg: the argument to the command which represents the input
to the program, this will be a string, either
representing input or a filename to get input from
        tI | Below is the instruction that describes the task:
### Input:
Returns the full command string
input_arg: the argument to the command which represents the input
to the program, this will be a string, either
representing input or a filename to get input from
tI
### Response:
def _get_base_command(self):
""" Returns the full command string
input_arg: the argument to the command which represents the input
to the program, this will be a string, either
representing input or a filename to get input from
tI"""
command_parts = []
# Append a change directory to the beginning of the command to change
# to self.WorkingDir before running the command
# WorkingDir should be in quotes -- filenames might contain spaces
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
parameters = self.Parameters
command_parts.append(cd_command)
command_parts.append(command)
command_parts.append(self._command_delimiter.join(filter(
None, (map(str, parameters.values())))))
return self._command_delimiter.join(command_parts).strip() |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CallContext for this CallInstance
:rtype: twilio.rest.api.v2010.account.call.CallContext
"""
if self._context is None:
self._context = CallContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CallContext for this CallInstance
        :rtype: twilio.rest.api.v2010.account.call.CallContext | Below is the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CallContext for this CallInstance
:rtype: twilio.rest.api.v2010.account.call.CallContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CallContext for this CallInstance
:rtype: twilio.rest.api.v2010.account.call.CallContext
"""
if self._context is None:
self._context = CallContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context |
def voronoi_regular_to_pix_from_grids_and_geometry(regular_grid, regular_to_nearest_pix, pixel_centres,
pixel_neighbors, pixel_neighbors_size):
""" Compute the mappings between a set of regular-grid pixels and pixelization pixels, using information on \
how regular pixels map to their closest pixelization pixel on the image-plane pix-grid and the pixelization's \
pixel centres.
To determine the complete set of regular-pixel to pixelization pixel mappings, we must pair every regular-pixel to \
its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
the Voronoi grid) are used to localize each nearest neighbor search via a graph search.
Parameters
----------
regular_grid : RegularGrid
The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \
to an irregular grid via lens.
regular_to_nearest_pix : ndarray
A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
2D array).
pixel_centres : ndarray
The (y,x) centre of every Voronoi pixel in arc-seconds.
pixel_neighbors : ndarray
An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
Voronoi grid.
"""
regular_to_pix = np.zeros((regular_grid.shape[0]))
for regular_index in range(regular_grid.shape[0]):
nearest_pix_pixel_index = regular_to_nearest_pix[regular_index]
while True:
nearest_pix_pixel_center = pixel_centres[nearest_pix_pixel_index]
sub_to_nearest_pix_distance = (regular_grid[regular_index, 0] - nearest_pix_pixel_center[0]) ** 2 + \
(regular_grid[regular_index, 1] - nearest_pix_pixel_center[1]) ** 2
closest_separation_from_pix_neighbor = 1.0e8
for neighbor_index in range(pixel_neighbors_size[nearest_pix_pixel_index]):
neighbor = pixel_neighbors[nearest_pix_pixel_index, neighbor_index]
separation_from_neighbor = (regular_grid[regular_index, 0] - pixel_centres[neighbor, 0]) ** 2 + \
(regular_grid[regular_index, 1] - pixel_centres[neighbor, 1]) ** 2
if separation_from_neighbor < closest_separation_from_pix_neighbor:
closest_separation_from_pix_neighbor = separation_from_neighbor
closest_neighbor_index = neighbor_index
neighboring_pix_pixel_index = pixel_neighbors[nearest_pix_pixel_index, closest_neighbor_index]
sub_to_neighboring_pix_distance = closest_separation_from_pix_neighbor
if sub_to_nearest_pix_distance <= sub_to_neighboring_pix_distance:
regular_to_pix[regular_index] = nearest_pix_pixel_index
break
else:
nearest_pix_pixel_index = neighboring_pix_pixel_index
return regular_to_pix | Compute the mappings between a set of regular-grid pixels and pixelization pixels, using information on \
how regular pixels map to their closest pixelization pixel on the image-plane pix-grid and the pixelization's \
pixel centres.
To determine the complete set of regular-pixel to pixelization pixel mappings, we must pair every regular-pixel to \
its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
the Voronoi grid) are used to localize each nearest neighbor search via a graph search.
Parameters
----------
regular_grid : RegularGrid
The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \
to an irregular grid via lens.
regular_to_nearest_pix : ndarray
A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
2D array).
pixel_centres : ndarray
The (y,x) centre of every Voronoi pixel in arc-seconds.
pixel_neighbors : ndarray
An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
        Voronoi grid. | Below is the instruction that describes the task:
### Input:
Compute the mappings between a set of regular-grid pixels and pixelization pixels, using information on \
how regular pixels map to their closest pixelization pixel on the image-plane pix-grid and the pixelization's \
pixel centres.
To determine the complete set of regular-pixel to pixelization pixel mappings, we must pair every regular-pixel to \
its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
the Voronoi grid) are used to localize each nearest neighbor search via a graph search.
Parameters
----------
regular_grid : RegularGrid
The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \
to an irregular grid via lens.
regular_to_nearest_pix : ndarray
A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
2D array).
pixel_centres : ndarray
The (y,x) centre of every Voronoi pixel in arc-seconds.
pixel_neighbors : ndarray
An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
Voronoi grid.
### Response:
def voronoi_regular_to_pix_from_grids_and_geometry(regular_grid, regular_to_nearest_pix, pixel_centres,
                                                   pixel_neighbors, pixel_neighbors_size):
    """ Compute the mappings between a set of regular-grid pixels and pixelization pixels, using information on \
    how regular pixels map to their closest pixelization pixel on the image-plane pix-grid and the pixelization's \
    pixel centres.
    To determine the complete set of regular-pixel to pixelization pixel mappings, we must pair every regular-pixel to \
    its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
    the Voronoi grid) are used to localize each nearest neighbor search via a graph search.
    Parameters
    ----------
    regular_grid : RegularGrid
        The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \
        to an irregular grid via lens.
    regular_to_nearest_pix : ndarray
        A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
        2D array).
    pixel_centres : ndarray
        The (y,x) centre of every Voronoi pixel in arc-seconds.
    pixel_neighbors : ndarray
        An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
        the Voronoi grid (entries of -1 correspond to no neighbor).
    pixel_neighbors_size : ndarray
        An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
        Voronoi grid.
    """
    regular_to_pix = np.zeros((regular_grid.shape[0]))
    for regular_index in range(regular_grid.shape[0]):
        # Start the graph search from the pixel that was nearest on the unlensed grid.
        nearest_pix_pixel_index = regular_to_nearest_pix[regular_index]
        while True:
            nearest_pix_pixel_center = pixel_centres[nearest_pix_pixel_index]
            # Squared distances are compared throughout — no sqrt needed since
            # the square is monotonic for non-negative distances.
            sub_to_nearest_pix_distance = (regular_grid[regular_index, 0] - nearest_pix_pixel_center[0]) ** 2 + \
                                          (regular_grid[regular_index, 1] - nearest_pix_pixel_center[1]) ** 2
            closest_separation_from_pix_neighbor = 1.0e8
            # Find the neighbor of the current pixel closest to the regular pixel.
            # NOTE(review): if a pixel ever has zero neighbors, closest_neighbor_index
            # below is unbound (or stale from a previous iteration) — confirm the
            # Voronoi grid guarantees at least one neighbor per pixel.
            for neighbor_index in range(pixel_neighbors_size[nearest_pix_pixel_index]):
                neighbor = pixel_neighbors[nearest_pix_pixel_index, neighbor_index]
                separation_from_neighbor = (regular_grid[regular_index, 0] - pixel_centres[neighbor, 0]) ** 2 + \
                                           (regular_grid[regular_index, 1] - pixel_centres[neighbor, 1]) ** 2
                if separation_from_neighbor < closest_separation_from_pix_neighbor:
                    closest_separation_from_pix_neighbor = separation_from_neighbor
                    closest_neighbor_index = neighbor_index
            neighboring_pix_pixel_index = pixel_neighbors[nearest_pix_pixel_index, closest_neighbor_index]
            sub_to_neighboring_pix_distance = closest_separation_from_pix_neighbor
            # If no neighbor is closer than the current pixel, the search has converged.
            if sub_to_nearest_pix_distance <= sub_to_neighboring_pix_distance:
                regular_to_pix[regular_index] = nearest_pix_pixel_index
                break
            else:
                # Walk to the closer neighbor and repeat the local search.
                nearest_pix_pixel_index = neighboring_pix_pixel_index
    return regular_to_pix
def send(self, user_id, total_amount, send_name, act_name,
         wishing, remark, total_num=1, client_ip=None,
         out_trade_no=None, scene_id=None, consume_mch_id=None):
    """
    Send a cash red packet.
    :param user_id: openid of the recipient under the official account
    :param total_amount: red packet amount, in cents
    :param send_name: merchant name
    :param act_name: activity name
    :param wishing: greeting message shown on the red packet
    :param remark: remark
    :param client_ip: optional, IP address of the calling machine
    :param total_num: optional, total number of recipients, defaults to 1
    :param out_trade_no: optional, merchant order number, auto-generated by default
    :param scene_id: optional, usage scene; required when the amount exceeds 200
    :param consume_mch_id: optional, funds-authorized merchant id, used when a
        service provider sends on behalf of a sub-merchant
    :return: dict with the response data
    """
    if not out_trade_no:
        # Auto-generate an order number: mch_id + timestamp + random suffix.
        now = datetime.now()
        out_trade_no = '{0}{1}{2}'.format(
            self.mch_id,
            now.strftime('%Y%m%d%H%M%S'),
            random.randint(1000, 10000)
        )
    data = {
        'wxappid': self.appid,
        're_openid': user_id,
        'total_amount': total_amount,
        'send_name': send_name,
        'act_name': act_name,
        'wishing': wishing,
        'remark': remark,
        # Fall back to the machine's external IP when none was supplied.
        'client_ip': client_ip or get_external_ip(),
        'total_num': total_num,
        'mch_billno': out_trade_no,
        'scene_id': scene_id,
        'risk_info': None,
        'consume_mch_id': consume_mch_id
    }
    return self._post('mmpaymkttransfers/sendredpack', data=data) | 发送现金红包
:param user_id: 接收红包的用户在公众号下的 openid
:param total_amount: 红包金额,单位分
:param send_name: 商户名称
:param act_name: 活动名称
:param wishing: 红包祝福语
:param remark: 备注
:param client_ip: 可选,调用接口的机器 IP 地址
:param total_num: 可选,红包发放总人数,默认为 1
:param out_trade_no: 可选,商户订单号,默认会自动生成
:param scene_id: 可选,发放红包使用场景,红包金额大于200时必传
:param consume_mch_id: 可选,资金授权商户号。服务商替特约商户发放时使用
:return: 返回的结果数据字典 | Below is the the instruction that describes the task:
### Input:
发送现金红包
:param user_id: 接收红包的用户在公众号下的 openid
:param total_amount: 红包金额,单位分
:param send_name: 商户名称
:param act_name: 活动名称
:param wishing: 红包祝福语
:param remark: 备注
:param client_ip: 可选,调用接口的机器 IP 地址
:param total_num: 可选,红包发放总人数,默认为 1
:param out_trade_no: 可选,商户订单号,默认会自动生成
:param scene_id: 可选,发放红包使用场景,红包金额大于200时必传
:param consume_mch_id: 可选,资金授权商户号。服务商替特约商户发放时使用
:return: 返回的结果数据字典
### Response:
def send(self, user_id, total_amount, send_name, act_name,
         wishing, remark, total_num=1, client_ip=None,
         out_trade_no=None, scene_id=None, consume_mch_id=None):
    """
    Send a cash red packet to a user.

    :param user_id: openid of the recipient under the official account
    :param total_amount: red packet amount, in cents
    :param send_name: merchant name
    :param act_name: activity name
    :param wishing: greeting message shown on the red packet
    :param remark: remark
    :param client_ip: optional, IP address of the calling machine
    :param total_num: optional, total number of recipients, defaults to 1
    :param out_trade_no: optional, merchant order number, auto-generated by default
    :param scene_id: optional, usage scene; required when the amount exceeds 200
    :param consume_mch_id: optional, funds-authorized merchant id, used when a
        service provider sends on behalf of a sub-merchant
    :return: dict with the response data
    """
    if not out_trade_no:
        # Default order number: mch_id + current timestamp + random numeric suffix.
        stamp = datetime.now().strftime('%Y%m%d%H%M%S')
        out_trade_no = '{0}{1}{2}'.format(self.mch_id, stamp,
                                          random.randint(1000, 10000))
    payload = {
        'wxappid': self.appid,
        're_openid': user_id,
        'total_amount': total_amount,
        'send_name': send_name,
        'act_name': act_name,
        'wishing': wishing,
        'remark': remark,
        # Fall back to the machine's external IP when none was supplied.
        'client_ip': client_ip or get_external_ip(),
        'total_num': total_num,
        'mch_billno': out_trade_no,
        'scene_id': scene_id,
        'risk_info': None,
        'consume_mch_id': consume_mch_id,
    }
    return self._post('mmpaymkttransfers/sendredpack', data=payload)
def position_windows(pos, size, start, stop, step):
    """Convenience function to construct windows for the
    :func:`windowed_statistic` and :func:`windowed_count` functions.
    """
    last = False
    # determine start and stop positions; default to the full extent of pos
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]
    if step is None:
        # non-overlapping
        step = size
    windows = []
    for window_start in range(start, stop, step):
        # determine window stop; windows are inclusive [start, stop] intervals
        window_stop = window_start + size
        if window_stop >= stop:
            # last window is clamped to the overall stop position
            window_stop = stop
            last = True
        else:
            window_stop -= 1
        windows.append([window_start, window_stop])
        if last:
            break
    return np.asarray(windows) | Convenience function to construct windows for the
:func:`windowed_statistic` and :func:`windowed_count` functions. | Below is the instruction that describes the task:
### Input:
Convenience function to construct windows for the
:func:`windowed_statistic` and :func:`windowed_count` functions.
### Response:
def position_windows(pos, size, start, stop, step):
    """Build inclusive [start, stop] windows over a position array.

    Convenience helper for the :func:`windowed_statistic` and
    :func:`windowed_count` functions. When *start*/*stop* are ``None`` the
    extent of *pos* is used; when *step* is ``None`` the windows are
    non-overlapping (step == size).
    """
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]
    if step is None:
        # non-overlapping windows by default
        step = size
    windows = []
    for begin in range(start, stop, step):
        end = begin + size
        if end >= stop:
            # final window is clamped to the overall stop position
            windows.append([begin, stop])
            break
        windows.append([begin, end - 1])
    return np.asarray(windows)
def filter_queryset(self, request, queryset, view):
    """Filter the queryset.
    This is the main entry-point to this class, and
    is called by DRF's list handler.
    """
    self.request = request
    self.view = view
    # enable addition of extra filters (i.e., a Q())
    # so custom filters can be added to the queryset without
    # running into https://code.djangoproject.com/ticket/18437
    # which, without this, would mean that filters added to the queryset
    # after this is called may not behave as expected
    extra_filters = self.view.get_extra_filters(request)
    # NOTE(review): prefetching is disabled for update requests — presumably to
    # avoid stale prefetched state during writes; confirm in _build_queryset.
    disable_prefetches = self.view.is_update()
    self.DEBUG = settings.DEBUG
    return self._build_queryset(
        queryset=queryset,
        extra_filters=extra_filters,
        disable_prefetches=disable_prefetches,
    ) | Filter the queryset.
This is the main entry-point to this class, and
is called by DRF's list handler. | Below is the instruction that describes the task:
### Input:
Filter the queryset.
This is the main entry-point to this class, and
is called by DRF's list handler.
### Response:
def filter_queryset(self, request, queryset, view):
    """Filter the queryset.

    Main entry-point to this class; invoked by DRF's list handler.
    """
    self.request = request
    self.view = view
    # Collect extra filters (e.g. a Q()) up front so custom filters can be
    # folded into the queryset without tripping over
    # https://code.djangoproject.com/ticket/18437 — filters added to the
    # queryset after this point may not behave as expected otherwise.
    q_filters = self.view.get_extra_filters(request)
    skip_prefetches = self.view.is_update()
    self.DEBUG = settings.DEBUG
    return self._build_queryset(
        queryset=queryset,
        extra_filters=q_filters,
        disable_prefetches=skip_prefetches,
    )
def average_colors(c1, c2):
    ''' Average the values of two colors together '''
    # Component-wise mean of two (r, g, b) triples; int() truncates toward zero.
    r = int((c1[0] + c2[0])/2)
    g = int((c1[1] + c2[1])/2)
    b = int((c1[2] + c2[2])/2)
    return (r, g, b) | Average the values of two colors together
### Input:
Average the values of two colors together
### Response:
def average_colors(c1, c2):
    '''Average the values of two colors together.

    Returns an (r, g, b) tuple whose channels are the truncated mean of the
    corresponding channels of ``c1`` and ``c2``.
    '''
    return tuple(int((c1[i] + c2[i]) / 2) for i in range(3))
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
    """
    For each sample in the metric, report it as a gauge with all labels as tags
    except if a labels dict is passed, in which case keys are label names we'll extract
    and corresponding values are tag names we'll use (eg: {'node': 'node'}).
    Histograms generate a set of values instead of a unique metric.
    send_histograms_buckets is used to specify if yes or no you want to
    send the buckets as tagged values when dealing with histograms.
    `custom_tags` is an array of 'tag:value' that will be added to the
    metric when sending the gauge to Datadog.
    """
    if metric.type in ["gauge", "counter", "rate"]:
        metric_name_with_namespace = '{}.{}'.format(scraper_config['namespace'], metric_name)
        for sample in metric.samples:
            val = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric {}".format(sample[self.SAMPLE_NAME]))
                continue
            custom_hostname = self._get_hostname(hostname, sample, scraper_config)
            # Determine the tags to send
            tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
            if metric.type == "counter" and scraper_config['send_monotonic_counter']:
                self.monotonic_count(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
            elif metric.type == "rate":
                self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
            else:
                # gauges, and counters when monotonic submission is disabled
                self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
    elif metric.type == "histogram":
        self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
    elif metric.type == "summary":
        self._submit_gauges_from_summary(metric_name, metric, scraper_config)
    else:
        self.log.error("Metric type {} unsupported for metric {}.".format(metric.type, metric_name)) | For each sample in the metric, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
send_histograms_buckets is used to specify if yes or no you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog. | Below is the the instruction that describes the task:
### Input:
For each sample in the metric, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
send_histograms_buckets is used to specify if yes or no you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog.
### Response:
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
    """
    For each sample in the metric, report it as a gauge with all labels as tags
    except if a labels dict is passed, in which case keys are label names we'll extract
    and corresponding values are tag names we'll use (eg: {'node': 'node'}).
    Histograms generate a set of values instead of a unique metric.
    send_histograms_buckets is used to specify if yes or no you want to
    send the buckets as tagged values when dealing with histograms.
    `custom_tags` is an array of 'tag:value' that will be added to the
    metric when sending the gauge to Datadog.
    """
    if metric.type in ("gauge", "counter", "rate"):
        full_name = '{}.{}'.format(scraper_config['namespace'], metric_name)
        for sample in metric.samples:
            value = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(value):
                self.log.debug("Metric value is not supported for metric {}".format(sample[self.SAMPLE_NAME]))
                continue
            host = self._get_hostname(hostname, sample, scraper_config)
            # Tags are derived per-sample from its labels.
            sample_tags = self._metric_tags(metric_name, value, sample, scraper_config, hostname=host)
            if metric.type == "counter" and scraper_config['send_monotonic_counter']:
                self.monotonic_count(full_name, value, tags=sample_tags, hostname=host)
            elif metric.type == "rate":
                self.rate(full_name, value, tags=sample_tags, hostname=host)
            else:
                # gauges, and counters when monotonic submission is disabled
                self.gauge(full_name, value, tags=sample_tags, hostname=host)
    elif metric.type == "histogram":
        self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
    elif metric.type == "summary":
        self._submit_gauges_from_summary(metric_name, metric, scraper_config)
    else:
        self.log.error("Metric type {} unsupported for metric {}.".format(metric.type, metric_name))
def delete(self, key):
    """
    Removes the specified key from the database.
    """
    obj = self._get_content()
    # pop with a default so deleting a missing key is a silent no-op
    obj.pop(key, None)
    self.write_data(self.path, obj) | Removes the specified key from the database.
### Input:
Removes the specified key from the database.
### Response:
def delete(self, key):
    """
    Remove ``key`` from the database and persist the result.

    Deleting a key that does not exist is a silent no-op.
    """
    contents = self._get_content()
    if key in contents:
        del contents[key]
    self.write_data(self.path, contents)
def __getAvatar(self, web):
    """Scrap the avatar from a GitHub profile.
    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    try:
        # [:-10] presumably strips a trailing size query from the URL — TODO confirm
        self.avatar = web.find("img", {"class": "avatar"})['src'][:-10]
    except IndexError as error:
        print("There was an error with the user " + self.name)
        print(error)
    except AttributeError as error:
        # find() returned None — no avatar <img> was present on the page
        print("There was an error with the user " + self.name)
        print(error) | Scrap the avatar from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node. | Below is the the instruction that describes the task:
### Input:
Scrap the avatar from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node.
### Response:
def __getAvatar(self, web):
    """Scrape the avatar URL from a parsed GitHub profile page.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    try:
        # Strip the trailing characters (size suffix) from the avatar URL.
        self.avatar = web.find("img", {"class": "avatar"})['src'][:-10]
    except (IndexError, AttributeError) as error:
        # Both failure modes are reported the same way: name the user, dump the error.
        print("There was an error with the user " + self.name)
        print(error)
def main(args=None):
    """Extract all exon annotations of protein-coding genes."""
    if args is None:
        parser = get_argument_parser()
        args = parser.parse_args()
    input_file = args.annotation_file
    output_file = args.output_file
    species = args.species
    chrom_pat = args.chromosome_pattern
    field_name = args.field_name
    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose
    # configure root logger
    log_stream = sys.stdout
    if output_file == '-':
        # if we print output to stdout, redirect log messages to stderr
        log_stream = sys.stderr
    logger = misc.get_logger(log_stream = log_stream, log_file = log_file,
                             quiet = quiet, verbose = verbose)
    # Chromosome names must match this pattern to be kept (species default
    # or user-supplied regular expression).
    if chrom_pat is None:
        chrom_pat = re.compile(ensembl.SPECIES_CHROMPAT[species])
    else:
        chrom_pat = re.compile(chrom_pat)
    logger.info('Regular expression used for filtering chromosome names: "%s"',
                chrom_pat.pattern)
    chromosomes = set()
    excluded_chromosomes = set()
    i = 0
    exons = 0
    logger.info('Parsing data...')
    if input_file == '-':
        # '-' means read from stdin (smart_open_read treats None as stdin)
        input_file = None
    with misc.smart_open_read(input_file, mode = 'rb', try_gzip = True) as fh, \
            misc.smart_open_write(output_file) as ofh:
        #if i >= 500000: break
        reader = csv.reader(fh, dialect = 'excel-tab')
        writer = csv.writer(ofh, dialect = 'excel-tab', lineterminator = os.linesep,
                            quoting = csv.QUOTE_NONE , quotechar = '|')
        for l in reader:
            i += 1
            #if i % int(1e5) == 0:
            #    print '\r%d...' %(i), ; sys.stdout.flush() # report progress
            # Columns appear to follow the GTF layout: l[2] is the feature
            # type and l[8] the attribute string — assumed; confirm format.
            if len(l) > 1 and l[2] == field_name:
                attr = parse_attributes(l[8])
                type_ = attr['gene_biotype']
                if type_ in ['protein_coding','polymorphic_pseudogene']:
                    # test whether chromosome is valid
                    chrom = l[0]
                    m = chrom_pat.match(chrom)
                    if m is None:
                        excluded_chromosomes.add(chrom)
                        continue
                    chromosomes.add(chrom)
                    writer.writerow(l)
                    exons += 1
    logger.info('Done! (Parsed %d lines.)', i)
    logger.info('')
    logger.info('Gene chromosomes (%d):', len(chromosomes))
    logger.info('\t' + ', '.join(sorted(chromosomes)))
    logger.info('')
    logger.info('Excluded chromosomes (%d):', len(excluded_chromosomes))
    logger.info('\t' + ', '.join(sorted(excluded_chromosomes)))
    logger.info('')
    logger.info('Total no. of exons: %d' %(exons))
    return 0 | Extract all exon annotations of protein-coding genes.
### Input:
Extract all exon annotations of protein-coding genes.
### Response:
def main(args=None):
    """Extract all exon annotations of protein-coding genes."""
    if args is None:
        parser = get_argument_parser()
        args = parser.parse_args()
    input_file = args.annotation_file
    output_file = args.output_file
    species = args.species
    chrom_pat = args.chromosome_pattern
    field_name = args.field_name
    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose
    # configure root logger
    log_stream = sys.stdout
    if output_file == '-':
        # if we print output to stdout, redirect log messages to stderr
        log_stream = sys.stderr
    logger = misc.get_logger(log_stream = log_stream, log_file = log_file,
                             quiet = quiet, verbose = verbose)
    # Chromosome names must match this pattern to be kept (species default
    # or user-supplied regular expression).
    if chrom_pat is None:
        chrom_pat = re.compile(ensembl.SPECIES_CHROMPAT[species])
    else:
        chrom_pat = re.compile(chrom_pat)
    logger.info('Regular expression used for filtering chromosome names: "%s"',
                chrom_pat.pattern)
    chromosomes = set()
    excluded_chromosomes = set()
    i = 0
    exons = 0
    logger.info('Parsing data...')
    if input_file == '-':
        # '-' means read from stdin (smart_open_read treats None as stdin)
        input_file = None
    with misc.smart_open_read(input_file, mode = 'rb', try_gzip = True) as fh, \
            misc.smart_open_write(output_file) as ofh:
        #if i >= 500000: break
        reader = csv.reader(fh, dialect = 'excel-tab')
        writer = csv.writer(ofh, dialect = 'excel-tab', lineterminator = os.linesep,
                            quoting = csv.QUOTE_NONE , quotechar = '|')
        for l in reader:
            i += 1
            #if i % int(1e5) == 0:
            #    print '\r%d...' %(i), ; sys.stdout.flush() # report progress
            # Columns appear to follow the GTF layout: l[2] is the feature
            # type and l[8] the attribute string — assumed; confirm format.
            if len(l) > 1 and l[2] == field_name:
                attr = parse_attributes(l[8])
                type_ = attr['gene_biotype']
                if type_ in ['protein_coding','polymorphic_pseudogene']:
                    # test whether chromosome is valid
                    chrom = l[0]
                    m = chrom_pat.match(chrom)
                    if m is None:
                        excluded_chromosomes.add(chrom)
                        continue
                    chromosomes.add(chrom)
                    writer.writerow(l)
                    exons += 1
    logger.info('Done! (Parsed %d lines.)', i)
    logger.info('')
    logger.info('Gene chromosomes (%d):', len(chromosomes))
    logger.info('\t' + ', '.join(sorted(chromosomes)))
    logger.info('')
    logger.info('Excluded chromosomes (%d):', len(excluded_chromosomes))
    logger.info('\t' + ', '.join(sorted(excluded_chromosomes)))
    logger.info('')
    logger.info('Total no. of exons: %d' %(exons))
    return 0
def query_directory(self, pattern, file_information_class, flags=None,
                    file_index=0, max_output=65536, send=True):
    """
    Run a Query/Find on an opened directory based on the params passed in.
    Supports out of band send function, call this function with send=False
    to return a tuple of (SMB2QueryDirectoryRequest, receive_func) instead
    of sending the the request and waiting for the response. The
    receive_func can be used to get the response from the server by passing
    in the Request that was used to sent it out of band.
    :param pattern: The string pattern to use for the query, this pattern
        format is based on the SMB server but * is usually a wildcard
    :param file_information_class: FileInformationClass that defines the
        format of the result that is returned
    :param flags: QueryDirectoryFlags that control how the operation must
        be processed.
    :param file_index: If the flags SMB2_INDEX_SPECIFIED, this is the index
        the query should resume on, otherwise should be 0
    :param max_output: The maximum output size, defaulted to the max credit
        size but can be increased to reduced round trip operations.
    :param send: Whether to send the request in the same call or return the
        message to the caller and the unpack function
    :return: A list of structures defined in query_info.py, the list entry
        structure is based on the value of file_information_class in the
        request message
    """
    query = SMB2QueryDirectoryRequest()
    query['file_information_class'] = file_information_class
    query['flags'] = flags
    query['file_index'] = file_index
    query['file_id'] = self.file_id
    query['output_buffer_length'] = max_output
    # The search pattern travels UTF-16-LE encoded on the wire.
    query['buffer'] = pattern.encode('utf-16-le')
    if not send:
        # Out-of-band mode: caller sends the request itself and later passes
        # it to the returned receive function.
        return query, self._query_directory_response
    log.info("Session: %s, Tree Connect: %s - sending SMB2 Query "
             "Directory Request for directory %s"
             % (self.tree_connect.session.username,
                self.tree_connect.share_name, self.file_name))
    log.debug(str(query))
    request = self.connection.send(query,
                                   self.tree_connect.session.session_id,
                                   self.tree_connect.tree_connect_id)
    return self._query_directory_response(request) | Run a Query/Find on an opened directory based on the params passed in.
Supports out of band send function, call this function with send=False
to return a tuple of (SMB2QueryDirectoryRequest, receive_func) instead
of sending the the request and waiting for the response. The
receive_func can be used to get the response from the server by passing
in the Request that was used to sent it out of band.
:param pattern: The string pattern to use for the query, this pattern
format is based on the SMB server but * is usually a wildcard
:param file_information_class: FileInformationClass that defines the
format of the result that is returned
:param flags: QueryDirectoryFlags that control how the operation must
be processed.
:param file_index: If the flags SMB2_INDEX_SPECIFIED, this is the index
the query should resume on, otherwise should be 0
:param max_output: The maximum output size, defaulted to the max credit
size but can be increased to reduced round trip operations.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: A list of structures defined in query_info.py, the list entry
structure is based on the value of file_information_class in the
request message | Below is the the instruction that describes the task:
### Input:
Run a Query/Find on an opened directory based on the params passed in.
Supports out of band send function, call this function with send=False
to return a tuple of (SMB2QueryDirectoryRequest, receive_func) instead
of sending the the request and waiting for the response. The
receive_func can be used to get the response from the server by passing
in the Request that was used to sent it out of band.
:param pattern: The string pattern to use for the query, this pattern
format is based on the SMB server but * is usually a wildcard
:param file_information_class: FileInformationClass that defines the
format of the result that is returned
:param flags: QueryDirectoryFlags that control how the operation must
be processed.
:param file_index: If the flags SMB2_INDEX_SPECIFIED, this is the index
the query should resume on, otherwise should be 0
:param max_output: The maximum output size, defaulted to the max credit
size but can be increased to reduced round trip operations.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: A list of structures defined in query_info.py, the list entry
structure is based on the value of file_information_class in the
request message
### Response:
def query_directory(self, pattern, file_information_class, flags=None,
                    file_index=0, max_output=65536, send=True):
    """
    Run a Query/Find on an opened directory based on the params passed in.
    Supports out of band send function, call this function with send=False
    to return a tuple of (SMB2QueryDirectoryRequest, receive_func) instead
    of sending the the request and waiting for the response. The
    receive_func can be used to get the response from the server by passing
    in the Request that was used to sent it out of band.
    :param pattern: The string pattern to use for the query, this pattern
        format is based on the SMB server but * is usually a wildcard
    :param file_information_class: FileInformationClass that defines the
        format of the result that is returned
    :param flags: QueryDirectoryFlags that control how the operation must
        be processed.
    :param file_index: If the flags SMB2_INDEX_SPECIFIED, this is the index
        the query should resume on, otherwise should be 0
    :param max_output: The maximum output size, defaulted to the max credit
        size but can be increased to reduced round trip operations.
    :param send: Whether to send the request in the same call or return the
        message to the caller and the unpack function
    :return: A list of structures defined in query_info.py, the list entry
        structure is based on the value of file_information_class in the
        request message
    """
    query = SMB2QueryDirectoryRequest()
    query['file_information_class'] = file_information_class
    query['flags'] = flags
    query['file_index'] = file_index
    query['file_id'] = self.file_id
    query['output_buffer_length'] = max_output
    # The search pattern travels UTF-16-LE encoded on the wire.
    query['buffer'] = pattern.encode('utf-16-le')
    if not send:
        # Out-of-band mode: caller sends the request itself and later passes
        # it to the returned receive function.
        return query, self._query_directory_response
    log.info("Session: %s, Tree Connect: %s - sending SMB2 Query "
             "Directory Request for directory %s"
             % (self.tree_connect.session.username,
                self.tree_connect.share_name, self.file_name))
    log.debug(str(query))
    request = self.connection.send(query,
                                   self.tree_connect.session.session_id,
                                   self.tree_connect.tree_connect_id)
    return self._query_directory_response(request)
def pb_for_delete(document_path, option):
    """Make a ``Write`` protobuf for ``delete()`` methods.
    Args:
        document_path (str): A fully-qualified document path.
        option (optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the server
            state of the document before applying changes.
    Returns:
        google.cloud.firestore_v1beta1.types.Write: A
        ``Write`` protobuf instance for the ``delete()``.
    """
    write_pb = write_pb2.Write(delete=document_path)
    if option is not None:
        # Attach the caller's precondition (e.g. last-update-time) in place.
        option.modify_write(write_pb)
    return write_pb | Make a ``Write`` protobuf for ``delete()`` methods.
Args:
document_path (str): A fully-qualified document path.
option (optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
Returns:
google.cloud.firestore_v1beta1.types.Write: A
``Write`` protobuf instance for the ``delete()``. | Below is the the instruction that describes the task:
### Input:
Make a ``Write`` protobuf for ``delete()`` methods.
Args:
document_path (str): A fully-qualified document path.
option (optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
Returns:
google.cloud.firestore_v1beta1.types.Write: A
``Write`` protobuf instance for the ``delete()``.
### Response:
def pb_for_delete(document_path, option):
    """Build the ``Write`` protobuf used by ``delete()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        option (optional[~.firestore_v1beta1.client.WriteOption]): A
            write option asserting preconditions on the server-side state
            of the document before the change is applied.

    Returns:
        google.cloud.firestore_v1beta1.types.Write: The ``Write`` protobuf
        for the ``delete()``.
    """
    delete_pb = write_pb2.Write(delete=document_path)
    if option is not None:
        # Let the write option attach its precondition to the message.
        option.modify_write(delete_pb)
    return delete_pb
def step(self, action, blocking=True):
    """Step the environment.
    Args:
        action: The action to apply to the environment.
        blocking: Whether to wait for the result.
    Returns:
        Transition tuple when blocking, otherwise callable that returns the
        transition tuple.
    """
    # call() returns a promise; resolve it immediately when blocking.
    promise = self.call('step', action)
    if blocking:
        return promise()
    else:
        return promise | Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple. | Below is the the instruction that describes the task:
### Input:
Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
### Response:
def step(self, action, blocking=True):
    """Step the environment.

    Args:
        action: The action to apply to the environment.
        blocking: Whether to wait for the result.

    Returns:
        The transition tuple when blocking; otherwise a callable that
        returns the transition tuple.
    """
    promise = self.call('step', action)
    return promise() if blocking else promise
def tool(self):
    """The tool that was in use during this event.
    If the caller keeps a reference to a tool, the tool object will
    compare equal to the previously obtained tool object.
    Note:
        Physical tool tracking requires hardware support. If unavailable,
        libinput creates one tool per type per tablet. See
        `Tracking unique tools`_ for more details.
    Returns:
        ~libinput.define.TabletTool: The new tool triggering this event.
    """
    # Wrap the raw libinput handle in the Python-side TabletTool object.
    htablettool = self._libinput.libinput_event_tablet_tool_get_tool(
        self._handle)
    return TabletTool(htablettool, self._libinput) | The tool that was in use during this event.
If the caller keeps a reference to a tool, the tool object will
compare equal to the previously obtained tool object.
Note:
Physical tool tracking requires hardware support. If unavailable,
libinput creates one tool per type per tablet. See
`Tracking unique tools`_ for more details.
Returns:
~libinput.define.TabletTool: The new tool triggering this event. | Below is the the instruction that describes the task:
### Input:
The tool that was in use during this event.
If the caller keeps a reference to a tool, the tool object will
compare equal to the previously obtained tool object.
Note:
Physical tool tracking requires hardware support. If unavailable,
libinput creates one tool per type per tablet. See
`Tracking unique tools`_ for more details.
Returns:
~libinput.define.TabletTool: The new tool triggering this event.
### Response:
def tool(self):
    """The tool that was in use during this event.

    If the caller keeps a reference to a tool, the tool object will
    compare equal to the previously obtained tool object.

    Note:
        Physical tool tracking requires hardware support. If unavailable,
        libinput creates one tool per type per tablet. See
        `Tracking unique tools`_ for more details.

    Returns:
        ~libinput.define.TabletTool: The new tool triggering this event.
    """
    # Fetch the raw tool handle from libinput and wrap it in the
    # high-level object so callers get comparable instances.
    handle = self._libinput.libinput_event_tablet_tool_get_tool(self._handle)
    return TabletTool(handle, self._libinput)
def get_newcommand_macros(tex_source):
r"""Get all ``\newcommand`` macro definition from TeX source.
Parameters
----------
tex_source : `str`
TeX source content.
Returns
-------
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros.
Notes
-----
``\newcommand`` macros with arguments are not supported.
"""
macros = {}
command = LatexCommand(
'newcommand',
{'name': 'name', 'required': True, 'bracket': '{'},
{'name': 'content', 'required': True, 'bracket': '{'})
for macro in command.parse(tex_source):
macros[macro['name']] = macro['content']
return macros | r"""Get all ``\newcommand`` macro definition from TeX source.
Parameters
----------
tex_source : `str`
TeX source content.
Returns
-------
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros.
Notes
-----
``\newcommand`` macros with arguments are not supported. | Below is the the instruction that describes the task:
### Input:
r"""Get all ``\newcommand`` macro definition from TeX source.
Parameters
----------
tex_source : `str`
TeX source content.
Returns
-------
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros.
Notes
-----
``\newcommand`` macros with arguments are not supported.
### Response:
def get_newcommand_macros(tex_source):
    r"""Get all ``\newcommand`` macro definitions from TeX source.

    Parameters
    ----------
    tex_source : `str`
        TeX source content.

    Returns
    -------
    macros : `dict`
        Keys are macro names (including leading ``\``) and values are the
        content (as `str`) of the macros.

    Notes
    -----
    ``\newcommand`` macros with arguments are not supported.
    """
    # \newcommand takes two required brace-delimited groups: the macro
    # name and its replacement content.
    parser = LatexCommand(
        'newcommand',
        {'name': 'name', 'required': True, 'bracket': '{'},
        {'name': 'content', 'required': True, 'bracket': '{'})
    return {parsed['name']: parsed['content']
            for parsed in parser.parse(tex_source)}
def getBuild(self, build_id, **kwargs):
"""
Load all information about a build and return a custom Build class.
Calls "getBuild" XML-RPC.
:param build_id: ``int``, for example 12345
:returns: deferred that when fired returns a Build (Munch, dict-like)
object representing this Koji build, or None if no build was
found.
"""
buildinfo = yield self.call('getBuild', build_id, **kwargs)
build = Build.fromDict(buildinfo)
if build:
build.connection = self
defer.returnValue(build) | Load all information about a build and return a custom Build class.
Calls "getBuild" XML-RPC.
:param build_id: ``int``, for example 12345
:returns: deferred that when fired returns a Build (Munch, dict-like)
object representing this Koji build, or None if no build was
found. | Below is the the instruction that describes the task:
### Input:
Load all information about a build and return a custom Build class.
Calls "getBuild" XML-RPC.
:param build_id: ``int``, for example 12345
:returns: deferred that when fired returns a Build (Munch, dict-like)
object representing this Koji build, or None if no build was
found.
### Response:
def getBuild(self, build_id, **kwargs):
    """
    Load all information about a build and return a custom Build class.

    Calls "getBuild" XML-RPC.

    :param build_id: ``int``, for example 12345
    :returns: deferred that when fired returns a Build (Munch, dict-like)
              object representing this Koji build, or None if no build was
              found.
    """
    # inlineCallbacks-style generator: ``yield`` suspends until the
    # XML-RPC round trip completes, and returnValue delivers the result.
    info = yield self.call('getBuild', build_id, **kwargs)
    result = Build.fromDict(info)
    if result:
        # Attach the live connection so the Build can make follow-up calls.
        result.connection = self
    defer.returnValue(result)
def GetParametro(self, clave, clave1=None, clave2=None, clave3=None, clave4=None):
"Devuelve un parámetro de salida (establecido por llamada anterior)"
# útil para parámetros de salida (por ej. campos de TransaccionPlainWS)
valor = self.params_out.get(clave)
# busco datos "anidados" (listas / diccionarios)
for clave in (clave1, clave2, clave3, clave4):
if clave is not None and valor is not None:
if isinstance(clave1, basestring) and clave.isdigit():
clave = int(clave)
try:
valor = valor[clave]
except (KeyError, IndexError):
valor = None
if valor is not None:
if isinstance(valor, basestring):
return valor
else:
return str(valor)
else:
return "" | Devuelve un parámetro de salida (establecido por llamada anterior) | Below is the the instruction that describes the task:
### Input:
Devuelve un parámetro de salida (establecido por llamada anterior)
### Response:
def GetParametro(self, clave, clave1=None, clave2=None, clave3=None, clave4=None):
    "Return an output parameter (set by a previous call)"
    # Useful for output parameters (e.g. TransaccionPlainWS fields).
    valor = self.params_out.get(clave)
    # Walk the optional sub-keys to reach nested data (lists / dicts).
    for subclave in (clave1, clave2, clave3, clave4):
        if subclave is not None and valor is not None:
            # BUG FIX: the original tested ``isinstance(clave1, ...)`` for
            # every sub-key; each sub-key must be checked itself so digit
            # strings become list indices at any nesting level.
            if isinstance(subclave, basestring) and subclave.isdigit():
                subclave = int(subclave)
            try:
                valor = valor[subclave]
            except (KeyError, IndexError):
                # Missing key/index means "no value" rather than an error.
                valor = None
    if valor is not None:
        if isinstance(valor, basestring):
            return valor
        else:
            # Non-string payloads (ints, dicts, ...) are stringified.
            return str(valor)
    else:
        return ""
def train_supervised(
input,
lr=0.1,
dim=100,
ws=5,
epoch=5,
minCount=1,
minCountLabel=0,
minn=0,
maxn=0,
neg=5,
wordNgrams=1,
loss="softmax",
bucket=2000000,
thread=multiprocessing.cpu_count() - 1,
lrUpdateRate=100,
t=1e-4,
label="__label__",
verbose=2,
pretrainedVectors="",
):
"""
Train a supervised model and return a model object.
input must be a filepath. The input text does not need to be tokenized
as per the tokenize function, but it must be preprocessed and encoded
as UTF-8. You might want to consult standard preprocessing scripts such
as tokenizer.perl mentioned here: http://www.statmt.org/wmt07/baseline.html
The input file must must contain at least one label per line. For an
example consult the example datasets which are part of the fastText
repository such as the dataset pulled by classification-example.sh.
"""
model = "supervised"
a = _build_args(locals())
ft = _FastText()
fasttext.train(ft.f, a)
return ft | Train a supervised model and return a model object.
input must be a filepath. The input text does not need to be tokenized
as per the tokenize function, but it must be preprocessed and encoded
as UTF-8. You might want to consult standard preprocessing scripts such
as tokenizer.perl mentioned here: http://www.statmt.org/wmt07/baseline.html
The input file must must contain at least one label per line. For an
example consult the example datasets which are part of the fastText
repository such as the dataset pulled by classification-example.sh. | Below is the the instruction that describes the task:
### Input:
Train a supervised model and return a model object.
input must be a filepath. The input text does not need to be tokenized
as per the tokenize function, but it must be preprocessed and encoded
as UTF-8. You might want to consult standard preprocessing scripts such
as tokenizer.perl mentioned here: http://www.statmt.org/wmt07/baseline.html
The input file must must contain at least one label per line. For an
example consult the example datasets which are part of the fastText
repository such as the dataset pulled by classification-example.sh.
### Response:
def train_supervised(
    input,
    lr=0.1,
    dim=100,
    ws=5,
    epoch=5,
    minCount=1,
    minCountLabel=0,
    minn=0,
    maxn=0,
    neg=5,
    wordNgrams=1,
    loss="softmax",
    bucket=2000000,
    thread=multiprocessing.cpu_count() - 1,
    lrUpdateRate=100,
    t=1e-4,
    label="__label__",
    verbose=2,
    pretrainedVectors="",
):
    """
    Train a supervised model and return a model object.

    input must be a filepath. The input text does not need to be tokenized
    as per the tokenize function, but it must be preprocessed and encoded
    as UTF-8. You might want to consult standard preprocessing scripts such
    as tokenizer.perl mentioned here: http://www.statmt.org/wmt07/baseline.html

    The input file must contain at least one label per line. For an
    example consult the example datasets which are part of the fastText
    repository such as the dataset pulled by classification-example.sh.
    """
    # NOTE: every parameter above is deliberately a plain local variable;
    # _build_args(locals()) snapshots all of them (including ``model``)
    # into the argument object handed to the fasttext extension. Renaming
    # any local here would silently change the training configuration.
    model = "supervised"
    a = _build_args(locals())
    ft = _FastText()
    # Train in place; the C layer populates ft.f with the learned model.
    fasttext.train(ft.f, a)
    return ft
def transform(self, X, lenscale=None):
"""
Apply the Fast Food RBF basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X).If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, 2*nbases) where nbases is number of random bases to
use, given in the constructor (to nearest larger two power).
"""
lenscale = self._check_dim(X.shape[1], lenscale)
VX = self._makeVX(X / lenscale)
Phi = np.hstack((np.cos(VX), np.sin(VX))) / np.sqrt(self.n)
return Phi | Apply the Fast Food RBF basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X).If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, 2*nbases) where nbases is number of random bases to
use, given in the constructor (to nearest larger two power). | Below is the the instruction that describes the task:
### Input:
Apply the Fast Food RBF basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X).If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, 2*nbases) where nbases is number of random bases to
use, given in the constructor (to nearest larger two power).
### Response:
def transform(self, X, lenscale=None):
    """
    Apply the Fast Food RBF basis to X.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale: scalar or ndarray, optional
        scalar or array of shape (d,) length scales (one for each dimension
        of X). If not input, this uses the value of the initial length
        scale.

    Returns
    -------
    ndarray:
        of shape (N, 2*nbases) where nbases is number of random bases to
        use, given in the constructor (to nearest larger two power).
    """
    # Validate/normalise the length scale against X's dimensionality.
    lenscale = self._check_dim(X.shape[1], lenscale)
    # Project the scaled inputs through the FastFood transform, then pair
    # cos/sin features, normalised by sqrt(n).
    projections = self._makeVX(X / lenscale)
    return np.hstack((np.cos(projections), np.sin(projections))) / np.sqrt(self.n)
return Phi |
def get_strain(cryst, refcell=None):
'''Calculate strain tensor in the Voight notation
Computes the strain tensor in the Voight notation as a conventional
6-vector. The calculation is done with respect to the crystal
geometry passed in refcell parameter.
:param cryst: deformed structure
:param refcell: reference, undeformed structure
:returns: 6-vector of strain tensor in the Voight notation
'''
if refcell is None:
refcell = cryst
du = cryst.get_cell()-refcell.get_cell()
m = refcell.get_cell()
m = inv(m)
u = dot(m, du)
u = (u+u.T)/2
return array([u[0, 0], u[1, 1], u[2, 2], u[2, 1], u[2, 0], u[1, 0]]) | Calculate strain tensor in the Voight notation
Computes the strain tensor in the Voight notation as a conventional
6-vector. The calculation is done with respect to the crystal
geometry passed in refcell parameter.
:param cryst: deformed structure
:param refcell: reference, undeformed structure
:returns: 6-vector of strain tensor in the Voight notation | Below is the the instruction that describes the task:
### Input:
Calculate strain tensor in the Voight notation
Computes the strain tensor in the Voight notation as a conventional
6-vector. The calculation is done with respect to the crystal
geometry passed in refcell parameter.
:param cryst: deformed structure
:param refcell: reference, undeformed structure
:returns: 6-vector of strain tensor in the Voight notation
### Response:
def get_strain(cryst, refcell=None):
    '''Calculate strain tensor in the Voight notation

    Computes the strain tensor in the Voight notation as a conventional
    6-vector. The calculation is done with respect to the crystal
    geometry passed in refcell parameter.

    :param cryst: deformed structure
    :param refcell: reference, undeformed structure

    :returns: 6-vector of strain tensor in the Voight notation
    '''
    # Default to measuring the crystal against itself (zero strain).
    reference = refcell if refcell is not None else cryst
    # Displacement of the cell vectors relative to the reference cell.
    delta = cryst.get_cell() - reference.get_cell()
    # Deformation gradient u = inv(ref) . delta, symmetrised to discard
    # the rotational part.
    grad = dot(inv(reference.get_cell()), delta)
    sym = (grad + grad.T) / 2
    # Voigt ordering: xx, yy, zz, yz, xz, xy.
    return array([sym[0, 0], sym[1, 1], sym[2, 2],
                  sym[2, 1], sym[2, 0], sym[1, 0]])
def get_locid(session, location):
"""
Make a request to locquery resource to translate a string location
search into an int locid.
Returns
----------
int
An int that OKCupid maps to a particular geographical location.
"""
locid = 0
query_parameters = {
'func': 'query',
'query': location,
}
loc_query = session.get('http://www.okcupid.com/locquery',
params=query_parameters)
p = html.fromstring(loc_query.content.decode('utf8'))
js = loads(p.text)
if 'results' in js and len(js['results']):
locid = js['results'][0]['locid']
return locid | Make a request to locquery resource to translate a string location
search into an int locid.
Returns
----------
int
An int that OKCupid maps to a particular geographical location. | Below is the the instruction that describes the task:
### Input:
Make a request to locquery resource to translate a string location
search into an int locid.
Returns
----------
int
An int that OKCupid maps to a particular geographical location.
### Response:
def get_locid(session, location):
    """
    Make a request to locquery resource to translate a string location
    search into an int locid.

    Returns
    ----------
    int
        An int that OKCupid maps to a particular geographical location.
    """
    # Default to 0 ("no match") unless the server returns results.
    locid = 0
    response = session.get(
        'http://www.okcupid.com/locquery',
        params={'func': 'query', 'query': location},
    )
    # The endpoint returns JSON embedded in the page body; parse the HTML
    # first, then decode the JSON text node.
    page = html.fromstring(response.content.decode('utf8'))
    payload = loads(page.text)
    if 'results' in payload and len(payload['results']):
        # First result is the best match.
        locid = payload['results'][0]['locid']
    return locid
def encode(self, V, P, X, CC, seqNum, M, PT, SSRC, payload):
"""Encode the RTP packet with header fields and payload."""
timestamp = int(time())
header = bytearray(HEADER_SIZE)
# Fill the header bytearray with RTP header fields
# ...
header[0] = header[0] | V << 6;
header[0] = header[0] | P << 5;
header[0] = header[0] | X << 4;
header[0] = header[0] | CC;
header[1] = header[1] | M << 7;
header[1] = header[1] | PT;
header[2] = (seqNum >> 8) & 0xFF;
header[3] = seqNum & 0xFF;
header[4] = (timestamp >> 24) & 0xFF;
header[5] = (timestamp >> 16) & 0xFF;
header[6] = (timestamp >> 8) & 0xFF;
header[7] = timestamp & 0xFF;
header[8] = (SSRC >> 24) & 0xFF;
header[9] = (SSRC >> 16) & 0xFF;
header[10] = (SSRC >> 8) & 0xFF;
header[11] = SSRC & 0xFF
self.header = header
# Get the payload
# ...
self.payload = payload | Encode the RTP packet with header fields and payload. | Below is the the instruction that describes the task:
### Input:
Encode the RTP packet with header fields and payload.
### Response:
def encode(self, V, P, X, CC, seqNum, M, PT, SSRC, payload):
    """Encode the RTP packet with header fields and payload."""
    timestamp = int(time())
    header = bytearray(HEADER_SIZE)
    # Byte 0: version (2 bits), padding, extension, CSRC count.
    header[0] = (V << 6) | (P << 5) | (X << 4) | CC
    # Byte 1: marker bit and payload type.
    header[1] = (M << 7) | PT
    # Bytes 2-3: sequence number, big-endian.
    header[2] = (seqNum >> 8) & 0xFF
    header[3] = seqNum & 0xFF
    # Bytes 4-7: timestamp, big-endian.
    for offset, shift in enumerate((24, 16, 8, 0)):
        header[4 + offset] = (timestamp >> shift) & 0xFF
    # Bytes 8-11: synchronization source identifier, big-endian.
    for offset, shift in enumerate((24, 16, 8, 0)):
        header[8 + offset] = (SSRC >> shift) & 0xFF
    self.header = header
    # Keep the payload as given; it is appended separately on send.
    self.payload = payload
def _remove_person_from_group(person, group):
""" Call datastores after removing a person from a group. """
from karaage.datastores import remove_accounts_from_group
from karaage.datastores import remove_accounts_from_project
from karaage.datastores import remove_accounts_from_institute
a_list = person.account_set
remove_accounts_from_group(a_list, group)
for project in group.project_set.all():
remove_accounts_from_project(a_list, project)
for institute in group.institute_set.all():
remove_accounts_from_institute(a_list, institute) | Call datastores after removing a person from a group. | Below is the the instruction that describes the task:
### Input:
Call datastores after removing a person from a group.
### Response:
def _remove_person_from_group(person, group):
    """ Call datastores after removing a person from a group. """
    # Imported locally — presumably to avoid circular imports at module
    # load time; confirm against the karaage package layout.
    from karaage.datastores import (
        remove_accounts_from_group,
        remove_accounts_from_institute,
        remove_accounts_from_project,
    )
    accounts = person.account_set
    # Drop the group membership first, then revoke access to every project
    # and institute tied to this group.
    remove_accounts_from_group(accounts, group)
    for related_project in group.project_set.all():
        remove_accounts_from_project(accounts, related_project)
    for related_institute in group.institute_set.all():
        remove_accounts_from_institute(accounts, related_institute)
def to_json(self):
"""Convert the Design Day to a dictionary."""
return {
'name': self.name,
'day_type': self.day_type,
'location': self.location.to_json(),
'dry_bulb_condition': self.dry_bulb_condition.to_json(),
'humidity_condition': self.humidity_condition.to_json(),
'wind_condition': self.wind_condition.to_json(),
'sky_condition': self.sky_condition.to_json()
} | Convert the Design Day to a dictionary. | Below is the the instruction that describes the task:
### Input:
Convert the Design Day to a dictionary.
### Response:
def to_json(self):
    """Convert the Design Day to a dictionary."""
    # Scalars first, then each nested condition object serialised through
    # its own to_json method. Insertion order matches the original literal.
    result = {'name': self.name, 'day_type': self.day_type}
    for key in ('location', 'dry_bulb_condition', 'humidity_condition',
                'wind_condition', 'sky_condition'):
        result[key] = getattr(self, key).to_json()
    return result
def mock_django_connection(disabled_features=None):
""" Overwrite the Django database configuration with a mocked version.
This is a helper function that does the actual monkey patching.
"""
db = connections.databases['default']
db['PASSWORD'] = '****'
db['USER'] = '**Database disabled for unit tests**'
ConnectionHandler.__getitem__ = MagicMock(name='mock_connection')
# noinspection PyUnresolvedReferences
mock_connection = ConnectionHandler.__getitem__.return_value
if disabled_features:
for feature in disabled_features:
setattr(mock_connection.features, feature, False)
mock_ops = mock_connection.ops
# noinspection PyUnusedLocal
def compiler(queryset, connection, using, **kwargs):
result = MagicMock(name='mock_connection.ops.compiler()')
# noinspection PyProtectedMember
result.execute_sql.side_effect = NotSupportedError(
"Mock database tried to execute SQL for {} model.".format(
queryset.model._meta.object_name))
result.has_results.side_effect = result.execute_sql.side_effect
return result
mock_ops.compiler.return_value.side_effect = compiler
mock_ops.integer_field_range.return_value = (-sys.maxsize - 1, sys.maxsize)
mock_ops.max_name_length.return_value = sys.maxsize
Model.refresh_from_db = Mock() | Overwrite the Django database configuration with a mocked version.
This is a helper function that does the actual monkey patching. | Below is the the instruction that describes the task:
### Input:
Overwrite the Django database configuration with a mocked version.
This is a helper function that does the actual monkey patching.
### Response:
def mock_django_connection(disabled_features=None):
    """ Overwrite the Django database configuration with a mocked version.

    This is a helper function that does the actual monkey patching.

    :param disabled_features: optional iterable of Django database-feature
        attribute names to force to ``False`` on the mocked connection.
    """
    # Scrub the real credentials so nothing can reach the actual database.
    db = connections.databases['default']
    db['PASSWORD'] = '****'
    db['USER'] = '**Database disabled for unit tests**'
    # Patch connection lookup (connections['default'], etc.) globally so
    # every handler access yields the same MagicMock connection.
    ConnectionHandler.__getitem__ = MagicMock(name='mock_connection')
    # noinspection PyUnresolvedReferences
    mock_connection = ConnectionHandler.__getitem__.return_value
    if disabled_features:
        for feature in disabled_features:
            setattr(mock_connection.features, feature, False)
    mock_ops = mock_connection.ops
    # Any attempt to actually run SQL through the mocked connection raises
    # NotSupportedError naming the offending model, making accidental DB
    # access fail loudly in tests.
    # noinspection PyUnusedLocal
    def compiler(queryset, connection, using, **kwargs):
        result = MagicMock(name='mock_connection.ops.compiler()')
        # noinspection PyProtectedMember
        result.execute_sql.side_effect = NotSupportedError(
            "Mock database tried to execute SQL for {} model.".format(
                queryset.model._meta.object_name))
        result.has_results.side_effect = result.execute_sql.side_effect
        return result
    # NOTE(review): this sets side_effect on compiler's *return value*
    # rather than on mock_ops.compiler itself — confirm this wiring is
    # intended and actually routes queryset compilation through compiler().
    mock_ops.compiler.return_value.side_effect = compiler
    # Give integer fields the widest plausible range so validation passes.
    mock_ops.integer_field_range.return_value = (-sys.maxsize - 1, sys.maxsize)
    mock_ops.max_name_length.return_value = sys.maxsize
    # Prevent model instances from trying to reload from the (absent) DB.
    Model.refresh_from_db = Mock()
def plot_summary_axes(graph: BELGraph, lax, rax, logx=True):
"""Plots your graph summary statistics on the given axes.
After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param pybel.BELGraph graph: A BEL graph
:param lax: An axis object from matplotlib
:param rax: An axis object from matplotlib
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary_axes
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))
>>> plot_summary_axes(graph, axes[0], axes[1])
>>> plt.tight_layout()
>>> plt.show()
"""
ntc = count_functions(graph)
etc = count_relations(graph)
df = pd.DataFrame.from_dict(dict(ntc), orient='index')
df_ec = pd.DataFrame.from_dict(dict(etc), orient='index')
df.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=lax)
lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes()))
df_ec.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=rax)
rax.set_title('Number of edges: {}'.format(graph.number_of_edges())) | Plots your graph summary statistics on the given axes.
After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param pybel.BELGraph graph: A BEL graph
:param lax: An axis object from matplotlib
:param rax: An axis object from matplotlib
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary_axes
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))
>>> plot_summary_axes(graph, axes[0], axes[1])
>>> plt.tight_layout()
>>> plt.show() | Below is the the instruction that describes the task:
### Input:
Plots your graph summary statistics on the given axes.
After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param pybel.BELGraph graph: A BEL graph
:param lax: An axis object from matplotlib
:param rax: An axis object from matplotlib
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary_axes
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))
>>> plot_summary_axes(graph, axes[0], axes[1])
>>> plt.tight_layout()
>>> plt.show()
### Response:
def plot_summary_axes(graph: BELGraph, lax, rax, logx=True):
    """Plot summary statistics for a BEL graph on the given axes.

    After calling, you should run :func:`plt.tight_layout` and you must run
    :func:`plt.show` to view.

    Shows:
    1. Count of nodes, grouped by function type
    2. Count of edges, grouped by relation type

    :param pybel.BELGraph graph: A BEL graph
    :param lax: An axis object from matplotlib
    :param rax: An axis object from matplotlib

    Example usage:

    >>> import matplotlib.pyplot as plt
    >>> from pybel import from_pickle
    >>> from pybel_tools.summary import plot_summary_axes
    >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
    >>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    >>> plot_summary_axes(graph, axes[0], axes[1])
    >>> plt.tight_layout()
    >>> plt.show()
    """
    # One DataFrame per summary: node counts by function, edge counts by
    # relation; each is drawn as a sorted horizontal bar chart.
    node_counts = pd.DataFrame.from_dict(dict(count_functions(graph)), orient='index')
    edge_counts = pd.DataFrame.from_dict(dict(count_relations(graph)), orient='index')
    node_counts.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=lax)
    lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes()))
    edge_counts.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=rax)
    rax.set_title('Number of edges: {}'.format(graph.number_of_edges()))
def are_all_matches_terminal(self,
predicate: Callable[[ops.Operation], bool]):
"""Check whether all of the ops that satisfy a predicate are terminal.
Args:
predicate: A predicate on ops.Operations which is being checked.
Returns:
Whether or not all `Operation` s in a circuit that satisfy the
given predicate are terminal.
"""
return all(
self.next_moment_operating_on(op.qubits, i + 1) is None for
(i, op) in self.findall_operations(predicate)
) | Check whether all of the ops that satisfy a predicate are terminal.
Args:
predicate: A predicate on ops.Operations which is being checked.
Returns:
Whether or not all `Operation` s in a circuit that satisfy the
given predicate are terminal. | Below is the the instruction that describes the task:
### Input:
Check whether all of the ops that satisfy a predicate are terminal.
Args:
predicate: A predicate on ops.Operations which is being checked.
Returns:
Whether or not all `Operation` s in a circuit that satisfy the
given predicate are terminal.
### Response:
def are_all_matches_terminal(self,
                             predicate: Callable[[ops.Operation], bool]):
    """Check whether all of the ops that satisfy a predicate are terminal.

    Args:
        predicate: A predicate on ops.Operations which is being checked.

    Returns:
        Whether or not all `Operation` s in a circuit that satisfy the
        given predicate are terminal.
    """
    # An operation is terminal when no later moment touches its qubits.
    for moment_index, operation in self.findall_operations(predicate):
        later = self.next_moment_operating_on(operation.qubits, moment_index + 1)
        if later is not None:
            return False
    return True
def reformat_python_docstrings(top_dirs: List[str],
correct_copyright_lines: List[str],
show_only: bool = True,
rewrite: bool = False,
process_only_filenum: int = None) -> None:
"""
Walk a directory, finding Python files and rewriting them.
Args:
top_dirs: list of directories to descend into
correct_copyright_lines:
list of lines (without newlines) representing the copyright
docstring block, including the transition lines of equals
symbols
show_only: show results (to stdout) only; don't rewrite
rewrite: write the changes
process_only_filenum: only process this file number (1-based index);
for debugging only
"""
filenum = 0
for top_dir in top_dirs:
for dirpath, dirnames, filenames in walk(top_dir):
for filename in filenames:
fullname = join(dirpath, filename)
extension = splitext(filename)[1]
if extension != PYTHON_EXTENSION:
# log.debug("Skipping non-Python file: {}", fullname)
continue
filenum += 1
if process_only_filenum and filenum != process_only_filenum:
continue
log.info("Processing file {}: {}", filenum, fullname)
proc = PythonProcessor(
full_path=fullname,
top_dir=top_dir,
correct_copyright_lines=correct_copyright_lines)
if show_only:
proc.show()
elif rewrite:
proc.rewrite_file() | Walk a directory, finding Python files and rewriting them.
Args:
top_dirs: list of directories to descend into
correct_copyright_lines:
list of lines (without newlines) representing the copyright
docstring block, including the transition lines of equals
symbols
show_only: show results (to stdout) only; don't rewrite
rewrite: write the changes
process_only_filenum: only process this file number (1-based index);
for debugging only | Below is the the instruction that describes the task:
### Input:
Walk a directory, finding Python files and rewriting them.
Args:
top_dirs: list of directories to descend into
correct_copyright_lines:
list of lines (without newlines) representing the copyright
docstring block, including the transition lines of equals
symbols
show_only: show results (to stdout) only; don't rewrite
rewrite: write the changes
process_only_filenum: only process this file number (1-based index);
for debugging only
### Response:
def reformat_python_docstrings(top_dirs: List[str],
                               correct_copyright_lines: List[str],
                               show_only: bool = True,
                               rewrite: bool = False,
                               process_only_filenum: int = None) -> None:
    """
    Walk a directory, finding Python files and rewriting them.

    Args:
        top_dirs: list of directories to descend into
        correct_copyright_lines:
            list of lines (without newlines) representing the copyright
            docstring block, including the transition lines of equals
            symbols
        show_only: show results (to stdout) only; don't rewrite
        rewrite: write the changes
        process_only_filenum: only process this file number (1-based index);
            for debugging only
    """
    file_count = 0
    for base_dir in top_dirs:
        for dirpath, _dirnames, filenames in walk(base_dir):
            for candidate in filenames:
                # Only Python sources participate; note the counter below
                # advances only for matching files.
                if splitext(candidate)[1] != PYTHON_EXTENSION:
                    continue
                full_path = join(dirpath, candidate)
                file_count += 1
                # Debug hook: restrict processing to one specific file.
                if process_only_filenum and file_count != process_only_filenum:
                    continue
                log.info("Processing file {}: {}", file_count, full_path)
                processor = PythonProcessor(
                    full_path=full_path,
                    top_dir=base_dir,
                    correct_copyright_lines=correct_copyright_lines)
                if show_only:
                    processor.show()
                elif rewrite:
                    processor.rewrite_file()
def handle(self, key, value):
'''
Processes a vaild action info request
@param key: The key that matched the request
@param value: The value associated with the key
'''
# the master dict to return
master = {}
master['uuid'] = value
master['total_pending'] = 0
master['server_time'] = int(self.get_current_time())
# break down key
elements = key.split(":")
dict = {}
dict['spiderid'] = elements[1]
dict['appid'] = elements[2]
# log we received the info message
extras = self.get_log_dict('info', dict['appid'],
dict['spiderid'], master['uuid'])
if len(elements) == 4:
dict['crawlid'] = elements[3]
extras = self.get_log_dict('info', dict['appid'],
dict['spiderid'], master['uuid'],
elements[3])
self.logger.info('Received info request', extra=extras)
# generate the information requested
if 'crawlid' in dict:
master = self._build_crawlid_info(master, dict)
else:
master = self._build_appid_info(master, dict)
if self._send_to_kafka(master):
extras['success'] = True
self.logger.info('Sent info to kafka', extra=extras)
else:
extras['success'] = False
self.logger.error('Failed to send info to kafka',
extra=extras) | Processes a vaild action info request
@param key: The key that matched the request
@param value: The value associated with the key | Below is the the instruction that describes the task:
### Input:
Processes a vaild action info request
@param key: The key that matched the request
@param value: The value associated with the key
### Response:
def handle(self, key, value):
    '''
    Processes a valid action info request

    @param key: The key that matched the request
    @param value: The value associated with the key
    '''
    # The master dict returned to the requester via kafka.
    master = {}
    master['uuid'] = value
    master['total_pending'] = 0
    master['server_time'] = int(self.get_current_time())
    # Break down the key: "<prefix>:<spiderid>:<appid>[:<crawlid>]".
    elements = key.split(":")
    # Renamed from ``dict`` in the original to stop shadowing the builtin.
    request = {}
    request['spiderid'] = elements[1]
    request['appid'] = elements[2]
    # Build the log context once (the original built it twice, discarding
    # the first result whenever a crawlid was present).
    if len(elements) == 4:
        request['crawlid'] = elements[3]
        extras = self.get_log_dict('info', request['appid'],
                                   request['spiderid'], master['uuid'],
                                   elements[3])
    else:
        extras = self.get_log_dict('info', request['appid'],
                                   request['spiderid'], master['uuid'])
    self.logger.info('Received info request', extra=extras)
    # Generate the information requested: crawl-level if a crawlid was
    # supplied, otherwise application-level.
    if 'crawlid' in request:
        master = self._build_crawlid_info(master, request)
    else:
        master = self._build_appid_info(master, request)
    if self._send_to_kafka(master):
        extras['success'] = True
        self.logger.info('Sent info to kafka', extra=extras)
    else:
        extras['success'] = False
        self.logger.error('Failed to send info to kafka',
                          extra=extras)
def save(self, validate):
"""
Save the current values in all the widgets back to the persistent data storage.
:param validate: whether to validate the saved data or not.
:raises: InvalidFields if any invalid data is found.
"""
invalid = []
for column in self._columns:
for widget in column:
if widget.is_valid or not validate:
if widget.name is not None:
# This relies on the fact that we are passed the actual
# dict and so can edit it directly. In this case, that
# is all we want - no need to update the widgets.
self._frame._data[widget.name] = widget.value
else:
invalid.append(widget.name)
if len(invalid) > 0:
raise InvalidFields(invalid) | Save the current values in all the widgets back to the persistent data storage.
:param validate: whether to validate the saved data or not.
:raises: InvalidFields if any invalid data is found. | Below is the the instruction that describes the task:
### Input:
Save the current values in all the widgets back to the persistent data storage.
:param validate: whether to validate the saved data or not.
:raises: InvalidFields if any invalid data is found.
### Response:
def save(self, validate):
"""
Save the current values in all the widgets back to the persistent data storage.
:param validate: whether to validate the saved data or not.
:raises: InvalidFields if any invalid data is found.
"""
invalid = []
for column in self._columns:
for widget in column:
if widget.is_valid or not validate:
if widget.name is not None:
# This relies on the fact that we are passed the actual
# dict and so can edit it directly. In this case, that
# is all we want - no need to update the widgets.
self._frame._data[widget.name] = widget.value
else:
invalid.append(widget.name)
if len(invalid) > 0:
raise InvalidFields(invalid) |
def create_bucket(self, instance, bucket_name):
"""
Create a new bucket in the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
"""
req = rest_pb2.CreateBucketRequest()
req.name = bucket_name
url = '/buckets/{}'.format(instance)
self._client.post_proto(url, data=req.SerializeToString()) | Create a new bucket in the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket. | Below is the the instruction that describes the task:
### Input:
Create a new bucket in the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
### Response:
def create_bucket(self, instance, bucket_name):
"""
Create a new bucket in the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
"""
req = rest_pb2.CreateBucketRequest()
req.name = bucket_name
url = '/buckets/{}'.format(instance)
self._client.post_proto(url, data=req.SerializeToString()) |
def from_board(cls, board):
'''
:param Board board: board to represent
:return: SkinnyBoard to represent the given Board
'''
if len(board):
left = board.left_end()
right = board.right_end()
else:
left = None
right = None
return cls(left, right, len(board)) | :param Board board: board to represent
:return: SkinnyBoard to represent the given Board | Below is the the instruction that describes the task:
### Input:
:param Board board: board to represent
:return: SkinnyBoard to represent the given Board
### Response:
def from_board(cls, board):
'''
:param Board board: board to represent
:return: SkinnyBoard to represent the given Board
'''
if len(board):
left = board.left_end()
right = board.right_end()
else:
left = None
right = None
return cls(left, right, len(board)) |
def _get_object(self, name):
"""
Helper function to retrieve the requested Object.
"""
if self.use_pyrax:
try:
return self.container.get_object(name)
except pyrax.exceptions.NoSuchObject:
return None
elif swiftclient:
try:
return self.container.get_object(name)
except swiftclient.exceptions.ClientException:
return None
else:
return self.container.get_object(name) | Helper function to retrieve the requested Object. | Below is the the instruction that describes the task:
### Input:
Helper function to retrieve the requested Object.
### Response:
def _get_object(self, name):
"""
Helper function to retrieve the requested Object.
"""
if self.use_pyrax:
try:
return self.container.get_object(name)
except pyrax.exceptions.NoSuchObject:
return None
elif swiftclient:
try:
return self.container.get_object(name)
except swiftclient.exceptions.ClientException:
return None
else:
return self.container.get_object(name) |
def process_text(self, t:str, tok:BaseTokenizer) -> List[str]:
"Process one text `t` with tokenizer `tok`."
for rule in self.pre_rules: t = rule(t)
toks = tok.tokenizer(t)
for rule in self.post_rules: toks = rule(toks)
return toks | Process one text `t` with tokenizer `tok`. | Below is the the instruction that describes the task:
### Input:
Process one text `t` with tokenizer `tok`.
### Response:
def process_text(self, t:str, tok:BaseTokenizer) -> List[str]:
"Process one text `t` with tokenizer `tok`."
for rule in self.pre_rules: t = rule(t)
toks = tok.tokenizer(t)
for rule in self.post_rules: toks = rule(toks)
return toks |
def create_h5py_with_large_cache(filename, cache_size_mb):
"""
Allows to open the hdf5 file with specified cache size
"""
# h5py does not allow to control the cache size from the high level
# we employ the workaround
# sources:
#http://stackoverflow.com/questions/14653259/how-to-set-cache-settings-while-using-h5py-high-level-interface
#https://groups.google.com/forum/#!msg/h5py/RVx1ZB6LpE4/KH57vq5yw2AJ
propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS)
settings = list(propfaid.get_cache())
settings[2] = 1024 * 1024 * cache_size_mb
propfaid.set_cache(*settings)
fid = h5py.h5f.create(filename, flags=h5py.h5f.ACC_EXCL, fapl=propfaid)
fin = h5py.File(fid)
return fin | Allows to open the hdf5 file with specified cache size | Below is the the instruction that describes the task:
### Input:
Allows to open the hdf5 file with specified cache size
### Response:
def create_h5py_with_large_cache(filename, cache_size_mb):
"""
Allows to open the hdf5 file with specified cache size
"""
# h5py does not allow to control the cache size from the high level
# we employ the workaround
# sources:
#http://stackoverflow.com/questions/14653259/how-to-set-cache-settings-while-using-h5py-high-level-interface
#https://groups.google.com/forum/#!msg/h5py/RVx1ZB6LpE4/KH57vq5yw2AJ
propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS)
settings = list(propfaid.get_cache())
settings[2] = 1024 * 1024 * cache_size_mb
propfaid.set_cache(*settings)
fid = h5py.h5f.create(filename, flags=h5py.h5f.ACC_EXCL, fapl=propfaid)
fin = h5py.File(fid)
return fin |
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
"""Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one
"""
import matplotlib
# specify 'agg' renderer, Mac renderer does not support what we want to do below
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
# we can only do polygons right now
if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
raise ValueError("Cannot handle feature of type " + feature.geometry.type)
# fictional dpi - don't matter in the end
dpi = 100
# -- start documentation include: poly-setup
# make a new figure with no frame, no axes, with the correct size, black background
fig = plt.figure(frameon=False, dpi=dpi, )
fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
# noinspection PyTypeChecker
ax.set_xlim([0, shape[1]])
# noinspection PyTypeChecker
ax.set_ylim([0, shape[0]])
fig.add_axes(ax)
# -- end documentation include: poly-setup
# for normal polygons make coordinates iterable
if feature.geometry.type == 'Polygon':
coords = [feature.geometry.coordinates]
else:
coords = feature.geometry.coordinates
for poly_coords in coords:
# the polygon may contain multiple outlines; the first is
# always the outer one, the others are 'holes'
for i, outline in enumerate(poly_coords):
# inside/outside fill value: figure background is white by
# default, draw inverted polygon and invert again later
value = 0. if i == 0 else 1.
# convert lats/lons to row/column indices in the array
outline = np.array(outline)
xs = lon_idx(outline[:, 0])
ys = lat_idx(outline[:, 1])
# draw the polygon
poly = patches.Polygon(list(zip(xs, ys)),
facecolor=(value, value, value),
edgecolor='none',
antialiased=True)
ax.add_patch(poly)
# -- start documentation include: poly-extract
# extract the figure to a numpy array,
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
# reshape to a proper numpy array, keep one channel only
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
# -- end documentation include: poly-extract
# make sure we get the right shape back
assert data.shape[0] == shape[0]
assert data.shape[1] == shape[1]
# convert from uints back to floats and invert to get black background
data = 1. - data.astype(float) / 255. # type: np.array
# image is flipped horizontally w.r.t. map
data = data[::-1, :]
# done, clean up
plt.close('all')
return data | Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one | Below is the the instruction that describes the task:
### Input:
Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one
### Response:
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
"""Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one
"""
import matplotlib
# specify 'agg' renderer, Mac renderer does not support what we want to do below
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
# we can only do polygons right now
if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
raise ValueError("Cannot handle feature of type " + feature.geometry.type)
# fictional dpi - don't matter in the end
dpi = 100
# -- start documentation include: poly-setup
# make a new figure with no frame, no axes, with the correct size, black background
fig = plt.figure(frameon=False, dpi=dpi, )
fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
# noinspection PyTypeChecker
ax.set_xlim([0, shape[1]])
# noinspection PyTypeChecker
ax.set_ylim([0, shape[0]])
fig.add_axes(ax)
# -- end documentation include: poly-setup
# for normal polygons make coordinates iterable
if feature.geometry.type == 'Polygon':
coords = [feature.geometry.coordinates]
else:
coords = feature.geometry.coordinates
for poly_coords in coords:
# the polygon may contain multiple outlines; the first is
# always the outer one, the others are 'holes'
for i, outline in enumerate(poly_coords):
# inside/outside fill value: figure background is white by
# default, draw inverted polygon and invert again later
value = 0. if i == 0 else 1.
# convert lats/lons to row/column indices in the array
outline = np.array(outline)
xs = lon_idx(outline[:, 0])
ys = lat_idx(outline[:, 1])
# draw the polygon
poly = patches.Polygon(list(zip(xs, ys)),
facecolor=(value, value, value),
edgecolor='none',
antialiased=True)
ax.add_patch(poly)
# -- start documentation include: poly-extract
# extract the figure to a numpy array,
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
# reshape to a proper numpy array, keep one channel only
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
# -- end documentation include: poly-extract
# make sure we get the right shape back
assert data.shape[0] == shape[0]
assert data.shape[1] == shape[1]
# convert from uints back to floats and invert to get black background
data = 1. - data.astype(float) / 255. # type: np.array
# image is flipped horizontally w.r.t. map
data = data[::-1, :]
# done, clean up
plt.close('all')
return data |
def get(self, *args, **kwargs):
'''
/label/s/view
'''
url_arr = self.parse_url(args[0])
if len(url_arr) == 2:
if url_arr[0] == 'remove':
self.remove_redis_keyword(url_arr[1])
else:
self.list(url_arr[0], url_arr[1])
elif len(url_arr) == 3:
self.list(url_arr[0], url_arr[1], url_arr[2])
else:
return False | /label/s/view | Below is the the instruction that describes the task:
### Input:
/label/s/view
### Response:
def get(self, *args, **kwargs):
'''
/label/s/view
'''
url_arr = self.parse_url(args[0])
if len(url_arr) == 2:
if url_arr[0] == 'remove':
self.remove_redis_keyword(url_arr[1])
else:
self.list(url_arr[0], url_arr[1])
elif len(url_arr) == 3:
self.list(url_arr[0], url_arr[1], url_arr[2])
else:
return False |
def get_highlights(self):
'''Get highlights for Bookmark instance.
:return: list of ``Highlight`` objects
:rtype: list
'''
# NOTE: all Instapaper API methods use POST except this one!
path = '/'.join([self.RESOURCE, str(self.object_id), 'highlights'])
response = self.client.request(path, method='GET', api_version='1.1')
items = response['data']
highlights = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'highlight':
highlights.append(Highlight(self, **item))
return highlights | Get highlights for Bookmark instance.
:return: list of ``Highlight`` objects
:rtype: list | Below is the the instruction that describes the task:
### Input:
Get highlights for Bookmark instance.
:return: list of ``Highlight`` objects
:rtype: list
### Response:
def get_highlights(self):
'''Get highlights for Bookmark instance.
:return: list of ``Highlight`` objects
:rtype: list
'''
# NOTE: all Instapaper API methods use POST except this one!
path = '/'.join([self.RESOURCE, str(self.object_id), 'highlights'])
response = self.client.request(path, method='GET', api_version='1.1')
items = response['data']
highlights = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'highlight':
highlights.append(Highlight(self, **item))
return highlights |
def section_term_lengths(neurites, neurite_type=NeuriteType.all):
'''Termination section lengths in a collection of neurites'''
return map_sections(_section_length, neurites, neurite_type=neurite_type,
iterator_type=Tree.ileaf) | Termination section lengths in a collection of neurites | Below is the the instruction that describes the task:
### Input:
Termination section lengths in a collection of neurites
### Response:
def section_term_lengths(neurites, neurite_type=NeuriteType.all):
'''Termination section lengths in a collection of neurites'''
return map_sections(_section_length, neurites, neurite_type=neurite_type,
iterator_type=Tree.ileaf) |
def _getSyntaxByXmlFileName(self, xmlFileName):
"""Get syntax by its xml file name
"""
import qutepart.syntax.loader # delayed import for avoid cross-imports problem
with self._loadedSyntaxesLock:
if not xmlFileName in self._loadedSyntaxes:
xmlFilePath = os.path.join(os.path.dirname(__file__), "data", "xml", xmlFileName)
syntax = Syntax(self)
self._loadedSyntaxes[xmlFileName] = syntax
qutepart.syntax.loader.loadSyntax(syntax, xmlFilePath)
return self._loadedSyntaxes[xmlFileName] | Get syntax by its xml file name | Below is the the instruction that describes the task:
### Input:
Get syntax by its xml file name
### Response:
def _getSyntaxByXmlFileName(self, xmlFileName):
"""Get syntax by its xml file name
"""
import qutepart.syntax.loader # delayed import for avoid cross-imports problem
with self._loadedSyntaxesLock:
if not xmlFileName in self._loadedSyntaxes:
xmlFilePath = os.path.join(os.path.dirname(__file__), "data", "xml", xmlFileName)
syntax = Syntax(self)
self._loadedSyntaxes[xmlFileName] = syntax
qutepart.syntax.loader.loadSyntax(syntax, xmlFilePath)
return self._loadedSyntaxes[xmlFileName] |
def add_data_point(self, x, y, number_format=None):
"""
Return an XyDataPoint object newly created with values *x* and *y*,
and appended to this sequence.
"""
data_point = XyDataPoint(self, x, y, number_format)
self.append(data_point)
return data_point | Return an XyDataPoint object newly created with values *x* and *y*,
and appended to this sequence. | Below is the the instruction that describes the task:
### Input:
Return an XyDataPoint object newly created with values *x* and *y*,
and appended to this sequence.
### Response:
def add_data_point(self, x, y, number_format=None):
"""
Return an XyDataPoint object newly created with values *x* and *y*,
and appended to this sequence.
"""
data_point = XyDataPoint(self, x, y, number_format)
self.append(data_point)
return data_point |
def _process_gxd_genotype_summary_view(self, limit=None):
"""
Add the genotype internal id to mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
aggregate the vslcs into a hashmap into the genotype
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno_hash = {}
raw = '/'.join((self.rawdir, 'gxd_genotype_summary_view'))
LOG.info("building labels for genotypes")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
for line in f:
line = line.rstrip("\n")
line_counter += 1
(object_key, preferred, mgiid, subtype,
short_description) = line.split('\t')
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('genotype'):
continue
# add the internal genotype to mgi mapping
self.idhash['genotype'][object_key] = mgiid
if preferred == '1':
d = re.sub(r'\,', '/', short_description.strip())
if mgiid not in geno_hash:
geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
'key': object_key}
else:
vslcs = geno_hash[mgiid].get('vslcs')
vslcs.append(d)
else:
pass
# TODO what to do with != preferred
if not self.test_mode and limit is not None and line_counter > limit:
break
# now, loop through the hash and add the genotypes as individuals
# we add the mgi genotype as a synonym
# (we generate our own label later)
geno = Genotype(graph)
for gt in geno_hash:
genotype = geno_hash.get(gt)
gvc = sorted(genotype.get('vslcs'))
label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
geno.addGenotype(gt, None)
model.addComment(gt, self._makeInternalIdentifier(
'genotype', genotype.get('key')))
model.addSynonym(gt, label.strip())
return | Add the genotype internal id to mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
aggregate the vslcs into a hashmap into the genotype
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return: | Below is the the instruction that describes the task:
### Input:
Add the genotype internal id to mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
aggregate the vslcs into a hashmap into the genotype
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
### Response:
def _process_gxd_genotype_summary_view(self, limit=None):
"""
Add the genotype internal id to mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
aggregate the vslcs into a hashmap into the genotype
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno_hash = {}
raw = '/'.join((self.rawdir, 'gxd_genotype_summary_view'))
LOG.info("building labels for genotypes")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
for line in f:
line = line.rstrip("\n")
line_counter += 1
(object_key, preferred, mgiid, subtype,
short_description) = line.split('\t')
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('genotype'):
continue
# add the internal genotype to mgi mapping
self.idhash['genotype'][object_key] = mgiid
if preferred == '1':
d = re.sub(r'\,', '/', short_description.strip())
if mgiid not in geno_hash:
geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
'key': object_key}
else:
vslcs = geno_hash[mgiid].get('vslcs')
vslcs.append(d)
else:
pass
# TODO what to do with != preferred
if not self.test_mode and limit is not None and line_counter > limit:
break
# now, loop through the hash and add the genotypes as individuals
# we add the mgi genotype as a synonym
# (we generate our own label later)
geno = Genotype(graph)
for gt in geno_hash:
genotype = geno_hash.get(gt)
gvc = sorted(genotype.get('vslcs'))
label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
geno.addGenotype(gt, None)
model.addComment(gt, self._makeInternalIdentifier(
'genotype', genotype.get('key')))
model.addSynonym(gt, label.strip())
return |
def check_solution(self, tx_context, flags=None, traceback_f=None):
"""
tx_context: information about the transaction that the VM may need
flags: gives the VM hints about which additional constraints to check
"""
for t in self.puzzle_and_solution_iterator(tx_context, flags=flags, traceback_f=traceback_f):
puzzle_script, solution_stack, flags, sighash_f = t
vm = self.VM(puzzle_script, tx_context, sighash_f, flags=flags, initial_stack=solution_stack[:])
vm.is_solution_script = False
vm.traceback_f = traceback_f
stack = vm.eval_script()
if len(stack) == 0 or not vm.bool_from_script_bytes(stack[-1]):
raise self.ScriptError("eval false", errno.EVAL_FALSE)
if flags & VERIFY_CLEANSTACK and len(stack) != 1:
raise self.ScriptError("stack not clean after evaluation", errno.CLEANSTACK) | tx_context: information about the transaction that the VM may need
flags: gives the VM hints about which additional constraints to check | Below is the the instruction that describes the task:
### Input:
tx_context: information about the transaction that the VM may need
flags: gives the VM hints about which additional constraints to check
### Response:
def check_solution(self, tx_context, flags=None, traceback_f=None):
"""
tx_context: information about the transaction that the VM may need
flags: gives the VM hints about which additional constraints to check
"""
for t in self.puzzle_and_solution_iterator(tx_context, flags=flags, traceback_f=traceback_f):
puzzle_script, solution_stack, flags, sighash_f = t
vm = self.VM(puzzle_script, tx_context, sighash_f, flags=flags, initial_stack=solution_stack[:])
vm.is_solution_script = False
vm.traceback_f = traceback_f
stack = vm.eval_script()
if len(stack) == 0 or not vm.bool_from_script_bytes(stack[-1]):
raise self.ScriptError("eval false", errno.EVAL_FALSE)
if flags & VERIFY_CLEANSTACK and len(stack) != 1:
raise self.ScriptError("stack not clean after evaluation", errno.CLEANSTACK) |
def get_tokens_list(self, registry_address: PaymentNetworkID):
"""Returns a list of tokens the node knows about"""
tokens_list = views.get_token_identifiers(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
)
return tokens_list | Returns a list of tokens the node knows about | Below is the the instruction that describes the task:
### Input:
Returns a list of tokens the node knows about
### Response:
def get_tokens_list(self, registry_address: PaymentNetworkID):
"""Returns a list of tokens the node knows about"""
tokens_list = views.get_token_identifiers(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
)
return tokens_list |
def wrap(self, message):
"""
NTM GSSwrap()
:param message: The message to be encrypted
:return: A Tuple containing the signature and the encrypted messaging
"""
cipher_text = _Ntlm2Session.encrypt(self, message)
signature = _Ntlm2Session.sign(self, message)
return cipher_text, signature | NTM GSSwrap()
:param message: The message to be encrypted
:return: A Tuple containing the signature and the encrypted messaging | Below is the the instruction that describes the task:
### Input:
NTM GSSwrap()
:param message: The message to be encrypted
:return: A Tuple containing the signature and the encrypted messaging
### Response:
def wrap(self, message):
"""
NTM GSSwrap()
:param message: The message to be encrypted
:return: A Tuple containing the signature and the encrypted messaging
"""
cipher_text = _Ntlm2Session.encrypt(self, message)
signature = _Ntlm2Session.sign(self, message)
return cipher_text, signature |
def run():
"""
Runs flake8 lint
:return:
A bool - if flake8 did not find any errors
"""
print('Running flake8 %s' % flake8.__version__)
flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini'))
paths = []
for _dir in [package_name, 'dev', 'tests']:
for root, _, filenames in os.walk(_dir):
for filename in filenames:
if not filename.endswith('.py'):
continue
paths.append(os.path.join(root, filename))
report = flake8_style.check_files(paths)
success = report.total_errors == 0
if success:
print('OK')
return success | Runs flake8 lint
:return:
A bool - if flake8 did not find any errors | Below is the the instruction that describes the task:
### Input:
Runs flake8 lint
:return:
A bool - if flake8 did not find any errors
### Response:
def run():
"""
Runs flake8 lint
:return:
A bool - if flake8 did not find any errors
"""
print('Running flake8 %s' % flake8.__version__)
flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini'))
paths = []
for _dir in [package_name, 'dev', 'tests']:
for root, _, filenames in os.walk(_dir):
for filename in filenames:
if not filename.endswith('.py'):
continue
paths.append(os.path.join(root, filename))
report = flake8_style.check_files(paths)
success = report.total_errors == 0
if success:
print('OK')
return success |
def copy(self, zero=None):
"""
Returns a Poly instance with the same terms, but as a "T" (tee) copy
when they're Stream instances, allowing maths using a polynomial more
than once.
"""
return Poly(OrderedDict((k, v.copy() if isinstance(v, Stream) else v)
for k, v in iteritems(self._data)),
zero=self.zero if zero is None else zero) | Returns a Poly instance with the same terms, but as a "T" (tee) copy
when they're Stream instances, allowing maths using a polynomial more
than once. | Below is the the instruction that describes the task:
### Input:
Returns a Poly instance with the same terms, but as a "T" (tee) copy
when they're Stream instances, allowing maths using a polynomial more
than once.
### Response:
def copy(self, zero=None):
"""
Returns a Poly instance with the same terms, but as a "T" (tee) copy
when they're Stream instances, allowing maths using a polynomial more
than once.
"""
return Poly(OrderedDict((k, v.copy() if isinstance(v, Stream) else v)
for k, v in iteritems(self._data)),
zero=self.zero if zero is None else zero) |
def add_worksheet(self):
"""Add a worksheet to the workbook."""
wsh = self.workbook.add_worksheet()
if self.vars.fld2col_widths is not None:
self.set_xlsx_colwidths(wsh, self.vars.fld2col_widths, self.wbfmtobj.get_prt_flds())
return wsh | Add a worksheet to the workbook. | Below is the the instruction that describes the task:
### Input:
Add a worksheet to the workbook.
### Response:
def add_worksheet(self):
"""Add a worksheet to the workbook."""
wsh = self.workbook.add_worksheet()
if self.vars.fld2col_widths is not None:
self.set_xlsx_colwidths(wsh, self.vars.fld2col_widths, self.wbfmtobj.get_prt_flds())
return wsh |
def load_entry_point_group(self, entry_point_group):
"""Load actions from an entry point group.
:param entry_point_group: The entrypoint group name to load plugins.
"""
for ep in pkg_resources.iter_entry_points(group=entry_point_group):
self.register_scope(ep.load()) | Load actions from an entry point group.
:param entry_point_group: The entrypoint group name to load plugins. | Below is the the instruction that describes the task:
### Input:
Load actions from an entry point group.
:param entry_point_group: The entrypoint group name to load plugins.
### Response:
def load_entry_point_group(self, entry_point_group):
"""Load actions from an entry point group.
:param entry_point_group: The entrypoint group name to load plugins.
"""
for ep in pkg_resources.iter_entry_points(group=entry_point_group):
self.register_scope(ep.load()) |
def post(self, request, format=None):
"""
Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
serializer = BotSerializer(data=request.data)
if serializer.is_valid():
bot = Bot.objects.create(owner=request.user,
name=serializer.data['name'])
return Response(BotSerializer(bot).data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request | Below is the the instruction that describes the task:
### Input:
Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
### Response:
def post(self, request, format=None):
"""
Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
serializer = BotSerializer(data=request.data)
if serializer.is_valid():
bot = Bot.objects.create(owner=request.user,
name=serializer.data['name'])
return Response(BotSerializer(bot).data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) |
def verify_string_dxid(dxid, expected_classes):
'''
:param dxid: Value to verify as a DNAnexus ID of class *expected_class*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class
'''
if isinstance(expected_classes, basestring):
expected_classes = [expected_classes]
if not isinstance(expected_classes, list) or len(expected_classes) == 0:
raise DXError('verify_string_dxid: expected_classes should be a string or list of strings')
if not (isinstance(dxid, basestring) and
re.match('^(' + '|'.join(expected_classes) + ')-[0-9a-zA-Z]{24}$', dxid)):
if len(expected_classes) == 1:
str_expected_classes = expected_classes[0]
elif len(expected_classes) == 2:
str_expected_classes = ' or '.join(expected_classes)
else:
str_expected_classes = ', '.join(expected_classes[:-1]) + ', or ' + expected_classes[-1]
raise DXError('Invalid ID of class %s: %r' % (str_expected_classes, dxid)) | :param dxid: Value to verify as a DNAnexus ID of class *expected_class*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class | Below is the the instruction that describes the task:
### Input:
:param dxid: Value to verify as a DNAnexus ID of class *expected_class*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class
### Response:
def verify_string_dxid(dxid, expected_classes):
'''
:param dxid: Value to verify as a DNAnexus ID of class *expected_class*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class
'''
if isinstance(expected_classes, basestring):
expected_classes = [expected_classes]
if not isinstance(expected_classes, list) or len(expected_classes) == 0:
raise DXError('verify_string_dxid: expected_classes should be a string or list of strings')
if not (isinstance(dxid, basestring) and
re.match('^(' + '|'.join(expected_classes) + ')-[0-9a-zA-Z]{24}$', dxid)):
if len(expected_classes) == 1:
str_expected_classes = expected_classes[0]
elif len(expected_classes) == 2:
str_expected_classes = ' or '.join(expected_classes)
else:
str_expected_classes = ', '.join(expected_classes[:-1]) + ', or ' + expected_classes[-1]
raise DXError('Invalid ID of class %s: %r' % (str_expected_classes, dxid)) |
def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict):
"""
Updates a delivery note item
:param delivery_note_item_id: delivery note item id
:param delivery_note_item_dict: dict
:return: dict
"""
return self._create_put_request(
resource=DELIVERY_NOTE_ITEMS,
billomat_id=delivery_note_item_id,
send_data=delivery_note_item_dict
) | Updates a delivery note item
:param delivery_note_item_id: delivery note item id
:param delivery_note_item_dict: dict
:return: dict | Below is the the instruction that describes the task:
### Input:
Updates a delivery note item
:param delivery_note_item_id: delivery note item id
:param delivery_note_item_dict: dict
:return: dict
### Response:
def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict):
"""
Updates a delivery note item
:param delivery_note_item_id: delivery note item id
:param delivery_note_item_dict: dict
:return: dict
"""
return self._create_put_request(
resource=DELIVERY_NOTE_ITEMS,
billomat_id=delivery_note_item_id,
send_data=delivery_note_item_dict
) |
def _check(value, message):
"""
Checks the libsbml return value and logs error messages.
If 'value' is None, logs an error message constructed using
'message' and then exits with status code 1. If 'value' is an integer,
it assumes it is a libSBML return status code. If the code value is
LIBSBML_OPERATION_SUCCESS, returns without further action; if it is not,
prints an error message constructed using 'message' along with text from
libSBML explaining the meaning of the code, and exits with status code 1.
"""
if value is None:
LOGGER.error('Error: LibSBML returned a null value trying '
'to <' + message + '>.')
elif type(value) is int:
if value == libsbml.LIBSBML_OPERATION_SUCCESS:
return
else:
LOGGER.error('Error encountered trying to <' + message + '>.')
LOGGER.error('LibSBML error code {}: {}'.format(str(value),
libsbml.OperationReturnValue_toString(value).strip()))
else:
return | Checks the libsbml return value and logs error messages.
If 'value' is None, logs an error message constructed using
'message' and then exits with status code 1. If 'value' is an integer,
it assumes it is a libSBML return status code. If the code value is
LIBSBML_OPERATION_SUCCESS, returns without further action; if it is not,
prints an error message constructed using 'message' along with text from
libSBML explaining the meaning of the code, and exits with status code 1. | Below is the the instruction that describes the task:
### Input:
Checks the libsbml return value and logs error messages.
If 'value' is None, logs an error message constructed using
'message' and then exits with status code 1. If 'value' is an integer,
it assumes it is a libSBML return status code. If the code value is
LIBSBML_OPERATION_SUCCESS, returns without further action; if it is not,
prints an error message constructed using 'message' along with text from
libSBML explaining the meaning of the code, and exits with status code 1.
### Response:
def _check(value, message):
"""
Checks the libsbml return value and logs error messages.
If 'value' is None, logs an error message constructed using
'message' and then exits with status code 1. If 'value' is an integer,
it assumes it is a libSBML return status code. If the code value is
LIBSBML_OPERATION_SUCCESS, returns without further action; if it is not,
prints an error message constructed using 'message' along with text from
libSBML explaining the meaning of the code, and exits with status code 1.
"""
if value is None:
LOGGER.error('Error: LibSBML returned a null value trying '
'to <' + message + '>.')
elif type(value) is int:
if value == libsbml.LIBSBML_OPERATION_SUCCESS:
return
else:
LOGGER.error('Error encountered trying to <' + message + '>.')
LOGGER.error('LibSBML error code {}: {}'.format(str(value),
libsbml.OperationReturnValue_toString(value).strip()))
else:
return |
def __isListOfTexts(self, docs):
""" Checks whether the input is a list of strings or Text-s;
"""
return isinstance(docs, list) and \
all(isinstance(d, (basestring, Text)) for d in docs) | Checks whether the input is a list of strings or Text-s; | Below is the the instruction that describes the task:
### Input:
Checks whether the input is a list of strings or Text-s;
### Response:
def __isListOfTexts(self, docs):
""" Checks whether the input is a list of strings or Text-s;
"""
return isinstance(docs, list) and \
all(isinstance(d, (basestring, Text)) for d in docs) |
def delete_identity(db, identity_id):
"""Remove an identity from the registry.
This function removes from the registry, the identity which its identifier
matches with id. Take into account that this function does not remove
unique identities.
When the given identity is not found in the registry a 'NotFoundError'
exception is raised.
:param db: database manager
:param identity_id: identifier assigned to the identity that will
be removed
:raises NotFoundError: raised when the identity does not exist in the
registry.
"""
with db.connect() as session:
identity = find_identity(session, identity_id)
if not identity:
raise NotFoundError(entity=identity_id)
delete_identity_db(session, identity) | Remove an identity from the registry.
This function removes from the registry, the identity which its identifier
matches with id. Take into account that this function does not remove
unique identities.
When the given identity is not found in the registry a 'NotFoundError'
exception is raised.
:param db: database manager
:param identity_id: identifier assigned to the identity that will
be removed
:raises NotFoundError: raised when the identity does not exist in the
registry. | Below is the the instruction that describes the task:
### Input:
Remove an identity from the registry.
This function removes from the registry, the identity which its identifier
matches with id. Take into account that this function does not remove
unique identities.
When the given identity is not found in the registry a 'NotFoundError'
exception is raised.
:param db: database manager
:param identity_id: identifier assigned to the identity that will
be removed
:raises NotFoundError: raised when the identity does not exist in the
registry.
### Response:
def delete_identity(db, identity_id):
"""Remove an identity from the registry.
This function removes from the registry, the identity which its identifier
matches with id. Take into account that this function does not remove
unique identities.
When the given identity is not found in the registry a 'NotFoundError'
exception is raised.
:param db: database manager
:param identity_id: identifier assigned to the identity that will
be removed
:raises NotFoundError: raised when the identity does not exist in the
registry.
"""
with db.connect() as session:
identity = find_identity(session, identity_id)
if not identity:
raise NotFoundError(entity=identity_id)
delete_identity_db(session, identity) |
def getBothEdges(self, label=None):
"""Gets all the edges of the node. If label
parameter is provided, it only returns the edges of
the given label
@params label: Optional parameter to filter the edges
@returns A generator function with the incoming edges"""
if label:
for edge in self.neoelement.relationships.all(types=[label]):
yield Edge(edge)
else:
for edge in self.neoelement.relationships.all():
yield Edge(edge) | Gets all the edges of the node. If label
parameter is provided, it only returns the edges of
the given label
@params label: Optional parameter to filter the edges
@returns A generator function with the incoming edges | Below is the the instruction that describes the task:
### Input:
Gets all the edges of the node. If label
parameter is provided, it only returns the edges of
the given label
@params label: Optional parameter to filter the edges
@returns A generator function with the incoming edges
### Response:
def getBothEdges(self, label=None):
"""Gets all the edges of the node. If label
parameter is provided, it only returns the edges of
the given label
@params label: Optional parameter to filter the edges
@returns A generator function with the incoming edges"""
if label:
for edge in self.neoelement.relationships.all(types=[label]):
yield Edge(edge)
else:
for edge in self.neoelement.relationships.all():
yield Edge(edge) |
def reset(self, keep_state=False):
"""Reset the shared state and drain Django Channels.
:param keep_state: If ``True``, do not reset the shared manager
state (useful in tests, where the settings overrides need to
be kept). Defaults to ``False``.
"""
if not keep_state:
self.state = state.ManagerState(state.MANAGER_STATE_PREFIX)
self.state.reset()
async_to_sync(consumer.run_consumer)(timeout=1)
async_to_sync(self.sync_counter.reset)() | Reset the shared state and drain Django Channels.
:param keep_state: If ``True``, do not reset the shared manager
state (useful in tests, where the settings overrides need to
be kept). Defaults to ``False``. | Below is the the instruction that describes the task:
### Input:
Reset the shared state and drain Django Channels.
:param keep_state: If ``True``, do not reset the shared manager
state (useful in tests, where the settings overrides need to
be kept). Defaults to ``False``.
### Response:
def reset(self, keep_state=False):
"""Reset the shared state and drain Django Channels.
:param keep_state: If ``True``, do not reset the shared manager
state (useful in tests, where the settings overrides need to
be kept). Defaults to ``False``.
"""
if not keep_state:
self.state = state.ManagerState(state.MANAGER_STATE_PREFIX)
self.state.reset()
async_to_sync(consumer.run_consumer)(timeout=1)
async_to_sync(self.sync_counter.reset)() |
def _weld_unary(array, weld_type, operation):
"""Apply operation on each element in the array.
As mentioned by Weld, the operations follow the behavior of the equivalent C functions from math.h
Parameters
----------
array : numpy.ndarray or WeldObject
Data
weld_type : WeldType
Of the data
operation : {'exp', 'log', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'erf'}
Which unary operation to apply.
Returns
-------
WeldObject
Representation of this computation.
"""
if weld_type not in {WeldFloat(), WeldDouble()}:
raise TypeError('Unary operation supported only on scalar f32 or f64')
obj_id, weld_obj = create_weld_object(array)
weld_template = 'map({array}, |e: {type}| {op}(e))'
weld_obj.weld_code = weld_template.format(array=obj_id, type=weld_type, op=operation)
return weld_obj | Apply operation on each element in the array.
As mentioned by Weld, the operations follow the behavior of the equivalent C functions from math.h
Parameters
----------
array : numpy.ndarray or WeldObject
Data
weld_type : WeldType
Of the data
operation : {'exp', 'log', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'erf'}
Which unary operation to apply.
Returns
-------
WeldObject
Representation of this computation. | Below is the the instruction that describes the task:
### Input:
Apply operation on each element in the array.
As mentioned by Weld, the operations follow the behavior of the equivalent C functions from math.h
Parameters
----------
array : numpy.ndarray or WeldObject
Data
weld_type : WeldType
Of the data
operation : {'exp', 'log', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'erf'}
Which unary operation to apply.
Returns
-------
WeldObject
Representation of this computation.
### Response:
def _weld_unary(array, weld_type, operation):
"""Apply operation on each element in the array.
As mentioned by Weld, the operations follow the behavior of the equivalent C functions from math.h
Parameters
----------
array : numpy.ndarray or WeldObject
Data
weld_type : WeldType
Of the data
operation : {'exp', 'log', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'erf'}
Which unary operation to apply.
Returns
-------
WeldObject
Representation of this computation.
"""
if weld_type not in {WeldFloat(), WeldDouble()}:
raise TypeError('Unary operation supported only on scalar f32 or f64')
obj_id, weld_obj = create_weld_object(array)
weld_template = 'map({array}, |e: {type}| {op}(e))'
weld_obj.weld_code = weld_template.format(array=obj_id, type=weld_type, op=operation)
return weld_obj |
async def xclaim(self, name: str, group: str, consumer: str, min_idle_time: int, *stream_ids):
"""
[NOTICE] Not officially released yet
Gets ownership of one or multiple messages in the Pending Entries List of a given stream consumer group.
:param name: name of the stream
:param group: name of the consumer group
:param consumer: name of the consumer
:param min_idle_time: ms
If the message ID (among the specified ones) exists, and its idle time greater
or equal to min_idle_time, then the message new owner
becomes the specified <consumer>. If the minimum idle time specified
is zero, messages are claimed regardless of their idle time.
:param stream_ids:
"""
return await self.execute_command('XCLAIM', name, group, consumer, min_idle_time, *stream_ids) | [NOTICE] Not officially released yet
Gets ownership of one or multiple messages in the Pending Entries List of a given stream consumer group.
:param name: name of the stream
:param group: name of the consumer group
:param consumer: name of the consumer
:param min_idle_time: ms
If the message ID (among the specified ones) exists, and its idle time greater
or equal to min_idle_time, then the message new owner
becomes the specified <consumer>. If the minimum idle time specified
is zero, messages are claimed regardless of their idle time.
:param stream_ids: | Below is the the instruction that describes the task:
### Input:
[NOTICE] Not officially released yet
Gets ownership of one or multiple messages in the Pending Entries List of a given stream consumer group.
:param name: name of the stream
:param group: name of the consumer group
:param consumer: name of the consumer
:param min_idle_time: ms
If the message ID (among the specified ones) exists, and its idle time greater
or equal to min_idle_time, then the message new owner
becomes the specified <consumer>. If the minimum idle time specified
is zero, messages are claimed regardless of their idle time.
:param stream_ids:
### Response:
async def xclaim(self, name: str, group: str, consumer: str, min_idle_time: int, *stream_ids):
"""
[NOTICE] Not officially released yet
Gets ownership of one or multiple messages in the Pending Entries List of a given stream consumer group.
:param name: name of the stream
:param group: name of the consumer group
:param consumer: name of the consumer
:param min_idle_time: ms
If the message ID (among the specified ones) exists, and its idle time greater
or equal to min_idle_time, then the message new owner
becomes the specified <consumer>. If the minimum idle time specified
is zero, messages are claimed regardless of their idle time.
:param stream_ids:
"""
return await self.execute_command('XCLAIM', name, group, consumer, min_idle_time, *stream_ids) |
def put_group_policy(group_name, policy_name, policy_json, region=None, key=None,
keyid=None, profile=None):
'''
Adds or updates the specified policy document for the specified group.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.put_group_policy mygroup policyname policyrules
'''
group = get_group(group_name, region=region, key=key, keyid=keyid,
profile=profile)
if not group:
log.error('Group %s does not exist', group_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if not isinstance(policy_json, six.string_types):
policy_json = salt.utils.json.dumps(policy_json)
created = conn.put_group_policy(group_name, policy_name,
policy_json)
if created:
log.info('Created policy for IAM group %s.', group_name)
return True
log.error('Could not create policy for IAM group %s', group_name)
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to create policy for IAM group %s', group_name)
return False | Adds or updates the specified policy document for the specified group.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.put_group_policy mygroup policyname policyrules | Below is the the instruction that describes the task:
### Input:
Adds or updates the specified policy document for the specified group.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.put_group_policy mygroup policyname policyrules
### Response:
def put_group_policy(group_name, policy_name, policy_json, region=None, key=None,
keyid=None, profile=None):
'''
Adds or updates the specified policy document for the specified group.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.put_group_policy mygroup policyname policyrules
'''
group = get_group(group_name, region=region, key=key, keyid=keyid,
profile=profile)
if not group:
log.error('Group %s does not exist', group_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if not isinstance(policy_json, six.string_types):
policy_json = salt.utils.json.dumps(policy_json)
created = conn.put_group_policy(group_name, policy_name,
policy_json)
if created:
log.info('Created policy for IAM group %s.', group_name)
return True
log.error('Could not create policy for IAM group %s', group_name)
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to create policy for IAM group %s', group_name)
return False |
def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear() | Return a list of the tasks stored in a file | Below is the the instruction that describes the task:
### Input:
Return a list of the tasks stored in a file
### Response:
def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear() |
def disable_svc_check(self, service):
"""Disable checks for a service
Format of the line that triggers function call::
DISABLE_SVC_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.active_checks_enabled:
service.disable_active_checks(self.daemon.checks)
service.modified_attributes |= \
DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.send_an_element(service.get_update_status_brok()) | Disable checks for a service
Format of the line that triggers function call::
DISABLE_SVC_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None | Below is the the instruction that describes the task:
### Input:
Disable checks for a service
Format of the line that triggers function call::
DISABLE_SVC_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
### Response:
def disable_svc_check(self, service):
"""Disable checks for a service
Format of the line that triggers function call::
DISABLE_SVC_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.active_checks_enabled:
service.disable_active_checks(self.daemon.checks)
service.modified_attributes |= \
DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.send_an_element(service.get_update_status_brok()) |
def get_tile_image(imgs, tile_shape=None, result_img=None, margin_color=None):
"""Concatenate images whose sizes are different.
@param imgs: image list which should be concatenated
@param tile_shape: shape for which images should be concatenated
@param result_img: numpy array to put result image
"""
def resize(*args, **kwargs):
# anti_aliasing arg cannot be passed to skimage<0.14
# use LooseVersion to allow 0.14dev.
if LooseVersion(skimage.__version__) < LooseVersion('0.14'):
kwargs.pop('anti_aliasing', None)
return skimage.transform.resize(*args, **kwargs)
def get_tile_shape(img_num):
x_num = 0
y_num = int(math.sqrt(img_num))
while x_num * y_num < img_num:
x_num += 1
return y_num, x_num
if tile_shape is None:
tile_shape = get_tile_shape(len(imgs))
# get max tile size to which each image should be resized
max_height, max_width = np.inf, np.inf
for img in imgs:
max_height = min([max_height, img.shape[0]])
max_width = min([max_width, img.shape[1]])
# resize and concatenate images
for i, img in enumerate(imgs):
h, w = img.shape[:2]
dtype = img.dtype
h_scale, w_scale = max_height / h, max_width / w
scale = min([h_scale, w_scale])
h, w = int(scale * h), int(scale * w)
img = resize(
image=img,
output_shape=(h, w),
mode='reflect',
preserve_range=True,
anti_aliasing=True,
).astype(dtype)
if len(img.shape) == 3:
img = centerize(img, (max_height, max_width, 3), margin_color)
else:
img = centerize(img, (max_height, max_width), margin_color)
imgs[i] = img
return _tile_images(imgs, tile_shape, result_img) | Concatenate images whose sizes are different.
@param imgs: image list which should be concatenated
@param tile_shape: shape for which images should be concatenated
@param result_img: numpy array to put result image | Below is the the instruction that describes the task:
### Input:
Concatenate images whose sizes are different.
@param imgs: image list which should be concatenated
@param tile_shape: shape for which images should be concatenated
@param result_img: numpy array to put result image
### Response:
def get_tile_image(imgs, tile_shape=None, result_img=None, margin_color=None):
"""Concatenate images whose sizes are different.
@param imgs: image list which should be concatenated
@param tile_shape: shape for which images should be concatenated
@param result_img: numpy array to put result image
"""
def resize(*args, **kwargs):
# anti_aliasing arg cannot be passed to skimage<0.14
# use LooseVersion to allow 0.14dev.
if LooseVersion(skimage.__version__) < LooseVersion('0.14'):
kwargs.pop('anti_aliasing', None)
return skimage.transform.resize(*args, **kwargs)
def get_tile_shape(img_num):
x_num = 0
y_num = int(math.sqrt(img_num))
while x_num * y_num < img_num:
x_num += 1
return y_num, x_num
if tile_shape is None:
tile_shape = get_tile_shape(len(imgs))
# get max tile size to which each image should be resized
max_height, max_width = np.inf, np.inf
for img in imgs:
max_height = min([max_height, img.shape[0]])
max_width = min([max_width, img.shape[1]])
# resize and concatenate images
for i, img in enumerate(imgs):
h, w = img.shape[:2]
dtype = img.dtype
h_scale, w_scale = max_height / h, max_width / w
scale = min([h_scale, w_scale])
h, w = int(scale * h), int(scale * w)
img = resize(
image=img,
output_shape=(h, w),
mode='reflect',
preserve_range=True,
anti_aliasing=True,
).astype(dtype)
if len(img.shape) == 3:
img = centerize(img, (max_height, max_width, 3), margin_color)
else:
img = centerize(img, (max_height, max_width), margin_color)
imgs[i] = img
return _tile_images(imgs, tile_shape, result_img) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.