code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def checkArgs(args):
"""Checks the arguments and options.
:param args: a :py:class:`argparse.Namespace` object containing the options
of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Checking the input files
if not os.path.isfile(args.mds):
msg = "{}: no such file".format(args.mds)
raise ProgramError(msg)
if not os.path.isfile(args.population_file):
msg = "{}: no such file".format(args.population_file)
raise ProgramError(msg)
# Checking the chosen components
if args.xaxis == args.yaxis:
msg = "xaxis must be different than yaxis"
raise ProgramError(msg)
return True | Checks the arguments and options.
:param args: a :py:class:`argparse.Namespace` object containing the options
of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1. | Below is the instruction that describes the task:
### Input:
Checks the arguments and options.
:param args: a :py:class:`argparse.Namespace` object containing the options
of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
### Response:
def checkArgs(args):
"""Checks the arguments and options.
:param args: a :py:class:`argparse.Namespace` object containing the options
of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Checking the input files
if not os.path.isfile(args.mds):
msg = "{}: no such file".format(args.mds)
raise ProgramError(msg)
if not os.path.isfile(args.population_file):
msg = "{}: no such file".format(args.population_file)
raise ProgramError(msg)
# Checking the chosen components
if args.xaxis == args.yaxis:
msg = "xaxis must be different than yaxis"
raise ProgramError(msg)
return True |
def get_instance(self, payload):
"""
Build an instance of AvailableAddOnExtensionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
:rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
"""
return AvailableAddOnExtensionInstance(
self._version,
payload,
available_add_on_sid=self._solution['available_add_on_sid'],
) | Build an instance of AvailableAddOnExtensionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
:rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance | Below is the instruction that describes the task:
### Input:
Build an instance of AvailableAddOnExtensionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
:rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of AvailableAddOnExtensionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
:rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
"""
return AvailableAddOnExtensionInstance(
self._version,
payload,
available_add_on_sid=self._solution['available_add_on_sid'],
) |
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines) | String with the list of files and prefixes needed to execute ABINIT. | Below is the instruction that describes the task:
### Input:
String with the list of files and prefixes needed to execute ABINIT.
### Response:
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines) |
def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None):
"""
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
"""
from thunder.readers import get_parallel_reader
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions)
if spark and isinstance(engine, spark):
if accessor:
data = data.flatMap(accessor)
if recount:
nrecords = None
def switch(record):
ary, idx = record
return (idx,), ary
data = data.values().zipWithIndex().map(switch)
else:
nrecords = reader.nfiles
return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
data = [accessor(d) for d in data]
flattened = list(itertools.chain(*data))
values = [kv[1] for kv in flattened]
return fromarray(values, labels=labels) | Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting. | Below is the instruction that describes the task:
### Input:
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
### Response:
def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None):
"""
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
"""
from thunder.readers import get_parallel_reader
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions)
if spark and isinstance(engine, spark):
if accessor:
data = data.flatMap(accessor)
if recount:
nrecords = None
def switch(record):
ary, idx = record
return (idx,), ary
data = data.values().zipWithIndex().map(switch)
else:
nrecords = reader.nfiles
return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
data = [accessor(d) for d in data]
flattened = list(itertools.chain(*data))
values = [kv[1] for kv in flattened]
return fromarray(values, labels=labels) |
def _print_pgfplot_libs_message(data):
"""Prints message to screen indicating the use of PGFPlots and its
libraries."""
pgfplotslibs = ",".join(list(data["pgfplots libs"]))
tikzlibs = ",".join(list(data["tikz libs"]))
print(70 * "=")
print("Please add the following lines to your LaTeX preamble:\n")
print("\\usepackage[utf8]{inputenc}")
print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX")
print("\\usepackage{pgfplots}")
if tikzlibs:
print("\\usetikzlibrary{" + tikzlibs + "}")
if pgfplotslibs:
print("\\usepgfplotslibrary{" + pgfplotslibs + "}")
print(70 * "=")
return | Prints message to screen indicating the use of PGFPlots and its
libraries. | Below is the instruction that describes the task:
### Input:
Prints message to screen indicating the use of PGFPlots and its
libraries.
### Response:
def _print_pgfplot_libs_message(data):
"""Prints message to screen indicating the use of PGFPlots and its
libraries."""
pgfplotslibs = ",".join(list(data["pgfplots libs"]))
tikzlibs = ",".join(list(data["tikz libs"]))
print(70 * "=")
print("Please add the following lines to your LaTeX preamble:\n")
print("\\usepackage[utf8]{inputenc}")
print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX")
print("\\usepackage{pgfplots}")
if tikzlibs:
print("\\usetikzlibrary{" + tikzlibs + "}")
if pgfplotslibs:
print("\\usepgfplotslibrary{" + pgfplotslibs + "}")
print(70 * "=")
return |
def delete_group(self, group_id, keep_non_orphans=False, keep_orphans=False):
"""
Delete a group trigger
:param group_id: ID of the group trigger to delete
:param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
:param keep_orphans: if True converts the orphan member triggers to standard triggers
"""
params = {'keepNonOrphans': str(keep_non_orphans).lower(), 'keepOrphans': str(keep_orphans).lower()}
self._delete(self._service_url(['triggers', 'groups', group_id], params=params)) | Delete a group trigger
:param group_id: ID of the group trigger to delete
:param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
:param keep_orphans: if True converts the orphan member triggers to standard triggers | Below is the instruction that describes the task:
### Input:
Delete a group trigger
:param group_id: ID of the group trigger to delete
:param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
:param keep_orphans: if True converts the orphan member triggers to standard triggers
### Response:
def delete_group(self, group_id, keep_non_orphans=False, keep_orphans=False):
"""
Delete a group trigger
:param group_id: ID of the group trigger to delete
:param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
:param keep_orphans: if True converts the orphan member triggers to standard triggers
"""
params = {'keepNonOrphans': str(keep_non_orphans).lower(), 'keepOrphans': str(keep_orphans).lower()}
self._delete(self._service_url(['triggers', 'groups', group_id], params=params)) |
def valid_path(value):
"Validate a cookie path ASCII string"
# Generate UnicodeDecodeError if path can't store as ASCII.
value.encode("ascii")
# Cookies without leading slash will likely be ignored, raise ASAP.
if not (value and value[0] == "/"):
return False
if not Definitions.PATH_RE.match(value):
return False
return True | Validate a cookie path ASCII string | Below is the instruction that describes the task:
### Input:
Validate a cookie path ASCII string
### Response:
def valid_path(value):
"Validate a cookie path ASCII string"
# Generate UnicodeDecodeError if path can't store as ASCII.
value.encode("ascii")
# Cookies without leading slash will likely be ignored, raise ASAP.
if not (value and value[0] == "/"):
return False
if not Definitions.PATH_RE.match(value):
return False
return True |
def obsoleteas(self, to='name_short'):
"""
Return obsolete countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.obsolete > 0][to] | Return obsolete countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame | Below is the instruction that describes the task:
### Input:
Return obsolete countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
### Response:
def obsoleteas(self, to='name_short'):
"""
Return obsolete countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.obsolete > 0][to] |
def set_group(self, group):
"""Set group size of DMatrix (used for ranking).
Parameters
----------
group : array like
Group size of each group
"""
_check_call(_LIB.XGDMatrixSetGroup(self.handle,
c_array(ctypes.c_uint, group),
c_bst_ulong(len(group)))) | Set group size of DMatrix (used for ranking).
Parameters
----------
group : array like
Group size of each group | Below is the instruction that describes the task:
### Input:
Set group size of DMatrix (used for ranking).
Parameters
----------
group : array like
Group size of each group
### Response:
def set_group(self, group):
"""Set group size of DMatrix (used for ranking).
Parameters
----------
group : array like
Group size of each group
"""
_check_call(_LIB.XGDMatrixSetGroup(self.handle,
c_array(ctypes.c_uint, group),
c_bst_ulong(len(group)))) |
def _get_csv_cells_gen(self, line):
"""Generator of values in a csv line"""
digest_types = self.digest_types
for j, value in enumerate(line):
if self.first_line:
digest_key = None
digest = lambda x: x.decode(self.encoding)
else:
try:
digest_key = digest_types[j]
except IndexError:
digest_key = digest_types[0]
digest = Digest(acceptable_types=[digest_key],
encoding=self.encoding)
try:
digest_res = digest(value)
if digest_res == "\b":
digest_res = None
elif digest_key is not types.CodeType:
digest_res = repr(digest_res)
except Exception:
digest_res = ""
yield digest_res | Generator of values in a csv line | Below is the instruction that describes the task:
### Input:
Generator of values in a csv line
### Response:
def _get_csv_cells_gen(self, line):
"""Generator of values in a csv line"""
digest_types = self.digest_types
for j, value in enumerate(line):
if self.first_line:
digest_key = None
digest = lambda x: x.decode(self.encoding)
else:
try:
digest_key = digest_types[j]
except IndexError:
digest_key = digest_types[0]
digest = Digest(acceptable_types=[digest_key],
encoding=self.encoding)
try:
digest_res = digest(value)
if digest_res == "\b":
digest_res = None
elif digest_key is not types.CodeType:
digest_res = repr(digest_res)
except Exception:
digest_res = ""
yield digest_res |
def _load_instance(self, instance_id):
"""
Return instance with the given id.
For performance reasons, the instance ID is first searched for in the
collection of VM instances started by ElastiCluster
(`self._instances`), then in the list of all instances known to the
cloud provider at the time of the last update
(`self._cached_instances`), and finally the cloud provider is directly
queried.
:param str instance_id: instance identifier
:return: py:class:`boto.ec2.instance.Reservation` - instance
:raises: `InstanceError` is returned if the instance can't
be found in the local cache or in the cloud.
"""
# if instance is known, return it
if instance_id in self._instances:
return self._instances[instance_id]
# else, check (cached) list from provider
if instance_id not in self._cached_instances:
self._cached_instances = self._build_cached_instances()
if instance_id in self._cached_instances:
inst = self._cached_instances[instance_id]
self._instances[instance_id] = inst
return inst
# If we reached this point, the instance was not found neither
# in the caches nor on the website.
raise InstanceNotFoundError(
"Instance `{instance_id}` not found"
.format(instance_id=instance_id)) | Return instance with the given id.
For performance reasons, the instance ID is first searched for in the
collection of VM instances started by ElastiCluster
(`self._instances`), then in the list of all instances known to the
cloud provider at the time of the last update
(`self._cached_instances`), and finally the cloud provider is directly
queried.
:param str instance_id: instance identifier
:return: py:class:`boto.ec2.instance.Reservation` - instance
:raises: `InstanceError` is returned if the instance can't
be found in the local cache or in the cloud. | Below is the instruction that describes the task:
### Input:
Return instance with the given id.
For performance reasons, the instance ID is first searched for in the
collection of VM instances started by ElastiCluster
(`self._instances`), then in the list of all instances known to the
cloud provider at the time of the last update
(`self._cached_instances`), and finally the cloud provider is directly
queried.
:param str instance_id: instance identifier
:return: py:class:`boto.ec2.instance.Reservation` - instance
:raises: `InstanceError` is returned if the instance can't
be found in the local cache or in the cloud.
### Response:
def _load_instance(self, instance_id):
"""
Return instance with the given id.
For performance reasons, the instance ID is first searched for in the
collection of VM instances started by ElastiCluster
(`self._instances`), then in the list of all instances known to the
cloud provider at the time of the last update
(`self._cached_instances`), and finally the cloud provider is directly
queried.
:param str instance_id: instance identifier
:return: py:class:`boto.ec2.instance.Reservation` - instance
:raises: `InstanceError` is returned if the instance can't
be found in the local cache or in the cloud.
"""
# if instance is known, return it
if instance_id in self._instances:
return self._instances[instance_id]
# else, check (cached) list from provider
if instance_id not in self._cached_instances:
self._cached_instances = self._build_cached_instances()
if instance_id in self._cached_instances:
inst = self._cached_instances[instance_id]
self._instances[instance_id] = inst
return inst
# If we reached this point, the instance was not found neither
# in the caches nor on the website.
raise InstanceNotFoundError(
"Instance `{instance_id}` not found"
.format(instance_id=instance_id)) |
def findFilesWithPattern(
self, fileName, searchPattern, fillFindData, dokanFileInfo
):
"""Find files in a certain path that match the search pattern.
:param fileName: path to search
:type fileName: ctypes.c_wchar_p
:param searchPattern: pattern to search for
:type searchPattern: ctypes.c_wchar_p
:param fillFindData: function pointer for populating search results
:type fillFindData: PFillFindData
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
try:
ret = self.operations('findFilesWithPattern', fileName, searchPattern)
if ret is None:
return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR
for r in ret:
create_ft = self.python_timestamp_to_win32_filetime(r['ctime'])
last_access_ft = self.python_timestamp_to_win32_filetime(r['atime'])
last_write_ft = self.python_timestamp_to_win32_filetime(r['wtime'])
cft = ctypes.wintypes.FILETIME(create_ft[0], create_ft[1])
laft = ctypes.wintypes.FILETIME(last_access_ft[0], last_access_ft[1])
lwft = ctypes.wintypes.FILETIME(last_write_ft[0], last_write_ft[1])
size = self.pyint_to_double_dwords(r['size'])
File = ctypes.wintypes.WIN32_FIND_DATAW(
ctypes.c_ulong(r['attr']), # attributes
cft, # creation time
laft, # last access time
lwft, # last write time
size[1], # upper bits of size
size[0], # lower bits of size
ctypes.c_ulong(0), # reserved for FS
ctypes.c_ulong(0), # reserved for FS
r['name'], # file name
'',
) # alternate name
pFile = ctypes.wintypes.PWIN32_FIND_DATAW(File)
fillFindData(pFile, dokanFileInfo)
return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
except Exception as e:
logging.error('%s', e)
return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR | Find files in a certain path that match the search pattern.
:param fileName: path to search
:type fileName: ctypes.c_wchar_p
:param searchPattern: pattern to search for
:type searchPattern: ctypes.c_wchar_p
:param fillFindData: function pointer for populating search results
:type fillFindData: PFillFindData
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int | Below is the instruction that describes the task:
### Input:
Find files in a certain path that match the search pattern.
:param fileName: path to search
:type fileName: ctypes.c_wchar_p
:param searchPattern: pattern to search for
:type searchPattern: ctypes.c_wchar_p
:param fillFindData: function pointer for populating search results
:type fillFindData: PFillFindData
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
### Response:
def findFilesWithPattern(
self, fileName, searchPattern, fillFindData, dokanFileInfo
):
"""Find files in a certain path that match the search pattern.
:param fileName: path to search
:type fileName: ctypes.c_wchar_p
:param searchPattern: pattern to search for
:type searchPattern: ctypes.c_wchar_p
:param fillFindData: function pointer for populating search results
:type fillFindData: PFillFindData
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
try:
ret = self.operations('findFilesWithPattern', fileName, searchPattern)
if ret is None:
return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR
for r in ret:
create_ft = self.python_timestamp_to_win32_filetime(r['ctime'])
last_access_ft = self.python_timestamp_to_win32_filetime(r['atime'])
last_write_ft = self.python_timestamp_to_win32_filetime(r['wtime'])
cft = ctypes.wintypes.FILETIME(create_ft[0], create_ft[1])
laft = ctypes.wintypes.FILETIME(last_access_ft[0], last_access_ft[1])
lwft = ctypes.wintypes.FILETIME(last_write_ft[0], last_write_ft[1])
size = self.pyint_to_double_dwords(r['size'])
File = ctypes.wintypes.WIN32_FIND_DATAW(
ctypes.c_ulong(r['attr']), # attributes
cft, # creation time
laft, # last access time
lwft, # last write time
size[1], # upper bits of size
size[0], # lower bits of size
ctypes.c_ulong(0), # reserved for FS
ctypes.c_ulong(0), # reserved for FS
r['name'], # file name
'',
) # alternate name
pFile = ctypes.wintypes.PWIN32_FIND_DATAW(File)
fillFindData(pFile, dokanFileInfo)
return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
except Exception as e:
logging.error('%s', e)
return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR |
def _code(self):
"""
(internal) generates imports, code and runtime calls to save a pipeline.
"""
icode, tcode = '', '' # imports, task code
icall, pcall = '', '' # imap calls, piper calls
tdone, idone = [], [] # task done, imap done
for piper in self:
p = piper
w = piper.worker
i = piper.imap
in_ = i.name if hasattr(i, 'name') else False
if in_ and in_ not in idone:
icall += I_SIG % (in_, i.worker_type, i.worker_num, i.stride, \
i.buffer, i.ordered, i.skip, in_)
idone.append(in_)
ws = W_SIG % (",".join([t.__name__ for t in w.task]), w.args, w.kwargs)
pcall += P_SIG % (p.name, ws, in_, p.consume, p.produce, p.spawn, \
p.timeout, p.branch, p.debug, p.name, p.track)
for t in w.task:
if (t in tdone) or not t:
continue
tm, tn = t.__module__, t.__name__
if (tm == '__builtin__') or hasattr(p, tn):
continue
if tm == '__main__' or tm == self.filename:
tcode += getsource(t)
else:
icode += 'from %s import %s\n' % (tm, tn)
tdone.append(t)
pipers = [p.name for p in self]
pipers = '[%s]' % ", ".join(pipers)
pipes = [L_SIG % (d.name, s.name) for s, d in self.edges()]
pipes = '[%s]' % ", ".join(pipes) # pipes
xtras = [str(self[p].xtra) for p in self]
xtras = '[%s]' % ",".join(xtras) # node xtra
return (icode, tcode, icall, pcall, pipers, xtras, pipes) | (internal) generates imports, code and runtime calls to save a pipeline. | Below is the instruction that describes the task:
### Input:
(internal) generates imports, code and runtime calls to save a pipeline.
### Response:
def _code(self):
"""
(internal) generates imports, code and runtime calls to save a pipeline.
"""
icode, tcode = '', '' # imports, task code
icall, pcall = '', '' # imap calls, piper calls
tdone, idone = [], [] # task done, imap done
for piper in self:
p = piper
w = piper.worker
i = piper.imap
in_ = i.name if hasattr(i, 'name') else False
if in_ and in_ not in idone:
icall += I_SIG % (in_, i.worker_type, i.worker_num, i.stride, \
i.buffer, i.ordered, i.skip, in_)
idone.append(in_)
ws = W_SIG % (",".join([t.__name__ for t in w.task]), w.args, w.kwargs)
pcall += P_SIG % (p.name, ws, in_, p.consume, p.produce, p.spawn, \
p.timeout, p.branch, p.debug, p.name, p.track)
for t in w.task:
if (t in tdone) or not t:
continue
tm, tn = t.__module__, t.__name__
if (tm == '__builtin__') or hasattr(p, tn):
continue
if tm == '__main__' or tm == self.filename:
tcode += getsource(t)
else:
icode += 'from %s import %s\n' % (tm, tn)
tdone.append(t)
pipers = [p.name for p in self]
pipers = '[%s]' % ", ".join(pipers)
pipes = [L_SIG % (d.name, s.name) for s, d in self.edges()]
pipes = '[%s]' % ", ".join(pipes) # pipes
xtras = [str(self[p].xtra) for p in self]
xtras = '[%s]' % ",".join(xtras) # node xtra
return (icode, tcode, icall, pcall, pipers, xtras, pipes) |
def request_transfer(subject, recipient, comment):
'''Initiate a transfer request'''
TransferPermission(subject).test()
if recipient == (subject.organization or subject.owner):
raise ValueError(
'Recipient should be different than the current owner')
transfer = Transfer.objects.create(
owner=subject.organization or subject.owner,
recipient=recipient,
subject=subject,
comment=comment
)
return transfer | Initiate a transfer request | Below is the instruction that describes the task:
### Input:
Initiate a transfer request
### Response:
def request_transfer(subject, recipient, comment):
'''Initiate a transfer request'''
TransferPermission(subject).test()
if recipient == (subject.organization or subject.owner):
raise ValueError(
'Recipient should be different than the current owner')
transfer = Transfer.objects.create(
owner=subject.organization or subject.owner,
recipient=recipient,
subject=subject,
comment=comment
)
return transfer |
def _createStatsDict(self, headers, rows):
"""Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
"""
dbstats = {}
for row in rows:
dbstats[row[0]] = dict(zip(headers[1:], row[1:]))
return dbstats | Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name. | Below is the instruction that describes the task:
### Input:
Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
### Response:
def _createStatsDict(self, headers, rows):
"""Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
"""
dbstats = {}
for row in rows:
dbstats[row[0]] = dict(zip(headers[1:], row[1:]))
return dbstats |
def size(cls, pid):
"""Return the number of connections in the pool
:param str pid: The pool id
:rtype int
"""
with cls._lock:
cls._ensure_pool_exists(pid)
return len(cls._pools[pid]) | Return the number of connections in the pool
:param str pid: The pool id
:rtype int | Below is the instruction that describes the task:
### Input:
Return the number of connections in the pool
:param str pid: The pool id
:rtype int
### Response:
def size(cls, pid):
"""Return the number of connections in the pool
:param str pid: The pool id
:rtype int
"""
with cls._lock:
cls._ensure_pool_exists(pid)
return len(cls._pools[pid]) |
def is_stale(target, source):
"""Test whether the target file/directory is stale based on the source
file/directory.
"""
if not os.path.exists(target):
return True
target_mtime = recursive_mtime(target) or 0
return compare_recursive_mtime(source, cutoff=target_mtime) | Test whether the target file/directory is stale based on the source
file/directory. | Below is the the instruction that describes the task:
### Input:
Test whether the target file/directory is stale based on the source
file/directory.
### Response:
def is_stale(target, source):
"""Test whether the target file/directory is stale based on the source
file/directory.
"""
if not os.path.exists(target):
return True
target_mtime = recursive_mtime(target) or 0
return compare_recursive_mtime(source, cutoff=target_mtime) |
def build_vf_node(vf):
"""
Convert a VulnerabilityFunction object into a Node suitable
for XML conversion.
"""
nodes = [Node('imls', {'imt': vf.imt}, vf.imls),
Node('meanLRs', {}, vf.mean_loss_ratios),
Node('covLRs', {}, vf.covs)]
return Node(
'vulnerabilityFunction',
{'id': vf.id, 'dist': vf.distribution_name}, nodes=nodes) | Convert a VulnerabilityFunction object into a Node suitable
for XML conversion. | Below is the the instruction that describes the task:
### Input:
Convert a VulnerabilityFunction object into a Node suitable
for XML conversion.
### Response:
def build_vf_node(vf):
"""
Convert a VulnerabilityFunction object into a Node suitable
for XML conversion.
"""
nodes = [Node('imls', {'imt': vf.imt}, vf.imls),
Node('meanLRs', {}, vf.mean_loss_ratios),
Node('covLRs', {}, vf.covs)]
return Node(
'vulnerabilityFunction',
{'id': vf.id, 'dist': vf.distribution_name}, nodes=nodes) |
def _process_response(self, request, response):
"""Convert HttpResponseRedirect to HttpResponse if request is via ajax.
This is to allow ajax request to redirect url.
"""
if request.is_ajax() and hasattr(request, 'horizon'):
queued_msgs = request.horizon['async_messages']
if type(response) == http.HttpResponseRedirect:
# Drop our messages back into the session as per usual so they
# don't disappear during the redirect. Not that we explicitly
# use django's messages methods here.
for tag, message, extra_tags in queued_msgs:
getattr(django_messages, tag)(request, message, extra_tags)
if response['location'].startswith(settings.LOGOUT_URL):
redirect_response = http.HttpResponse(status=401)
# This header is used for handling the logout in JS
redirect_response['logout'] = True
if self.logout_reason is not None:
utils.add_logout_reason(
request, redirect_response, self.logout_reason,
'error')
else:
redirect_response = http.HttpResponse()
# Use a set while checking if we want a cookie's attributes
# copied
cookie_keys = {'max_age', 'expires', 'path', 'domain',
'secure', 'httponly', 'logout_reason'}
# Copy cookies from HttpResponseRedirect towards HttpResponse
for cookie_name, cookie in response.cookies.items():
cookie_kwargs = dict((
(key, value) for key, value in cookie.items()
if key in cookie_keys and value
))
redirect_response.set_cookie(
cookie_name, cookie.value, **cookie_kwargs)
redirect_response['X-Horizon-Location'] = response['location']
upload_url_key = 'X-File-Upload-URL'
if upload_url_key in response:
self._copy_headers(response, redirect_response,
(upload_url_key, 'X-Auth-Token'))
return redirect_response
if queued_msgs:
# TODO(gabriel): When we have an async connection to the
# client (e.g. websockets) this should be pushed to the
# socket queue rather than being sent via a header.
# The header method has notable drawbacks (length limits,
# etc.) and is not meant as a long-term solution.
response['X-Horizon-Messages'] = json.dumps(queued_msgs)
return response | Convert HttpResponseRedirect to HttpResponse if request is via ajax.
This is to allow ajax request to redirect url. | Below is the the instruction that describes the task:
### Input:
Convert HttpResponseRedirect to HttpResponse if request is via ajax.
This is to allow ajax request to redirect url.
### Response:
def _process_response(self, request, response):
"""Convert HttpResponseRedirect to HttpResponse if request is via ajax.
This is to allow ajax request to redirect url.
"""
if request.is_ajax() and hasattr(request, 'horizon'):
queued_msgs = request.horizon['async_messages']
if type(response) == http.HttpResponseRedirect:
# Drop our messages back into the session as per usual so they
# don't disappear during the redirect. Not that we explicitly
# use django's messages methods here.
for tag, message, extra_tags in queued_msgs:
getattr(django_messages, tag)(request, message, extra_tags)
if response['location'].startswith(settings.LOGOUT_URL):
redirect_response = http.HttpResponse(status=401)
# This header is used for handling the logout in JS
redirect_response['logout'] = True
if self.logout_reason is not None:
utils.add_logout_reason(
request, redirect_response, self.logout_reason,
'error')
else:
redirect_response = http.HttpResponse()
# Use a set while checking if we want a cookie's attributes
# copied
cookie_keys = {'max_age', 'expires', 'path', 'domain',
'secure', 'httponly', 'logout_reason'}
# Copy cookies from HttpResponseRedirect towards HttpResponse
for cookie_name, cookie in response.cookies.items():
cookie_kwargs = dict((
(key, value) for key, value in cookie.items()
if key in cookie_keys and value
))
redirect_response.set_cookie(
cookie_name, cookie.value, **cookie_kwargs)
redirect_response['X-Horizon-Location'] = response['location']
upload_url_key = 'X-File-Upload-URL'
if upload_url_key in response:
self._copy_headers(response, redirect_response,
(upload_url_key, 'X-Auth-Token'))
return redirect_response
if queued_msgs:
# TODO(gabriel): When we have an async connection to the
# client (e.g. websockets) this should be pushed to the
# socket queue rather than being sent via a header.
# The header method has notable drawbacks (length limits,
# etc.) and is not meant as a long-term solution.
response['X-Horizon-Messages'] = json.dumps(queued_msgs)
return response |
def device_info(soapy_args=''):
"""Returns info about selected SoapySDR device"""
text = []
try:
device = simplesoapy.SoapyDevice(soapy_args)
text.append('Selected device: {}'.format(device.hardware))
text.append(' Available RX channels:')
text.append(' {}'.format(', '.join(str(x) for x in device.list_channels())))
text.append(' Available antennas:')
text.append(' {}'.format(', '.join(device.list_antennas())))
text.append(' Available tunable elements:')
text.append(' {}'.format(', '.join(device.list_frequencies())))
text.append(' Available amplification elements:')
text.append(' {}'.format(', '.join(device.list_gains())))
text.append(' Available device settings:')
for key, s in device.list_settings().items():
text.append(wrap('{} ... {} - {} (default: {})'.format(key, s['name'], s['description'], s['value'])))
text.append(' Available stream arguments:')
for key, s in device.list_stream_args().items():
text.append(wrap('{} ... {} - {} (default: {})'.format(key, s['name'], s['description'], s['value'])))
text.append(' Allowed gain range [dB]:')
text.append(' {:.2f} - {:.2f}'.format(*device.get_gain_range()))
text.append(' Allowed frequency range [MHz]:')
text.append(' {:.2f} - {:.2f}'.format(*[x / 1e6 for x in device.get_frequency_range()]))
text.append(' Allowed sample rates [MHz]:')
rates = []
for r in device.list_sample_rates():
if r[0] == r[1]:
rates.append('{:.2f}'.format(r[0] / 1e6))
else:
rates.append('{:.2f} - {:.2f}'.format(r[0] / 1e6, r[1] / 1e6))
text.append(wrap(', '.join(rates)))
text.append(' Allowed bandwidths [MHz]:')
bandwidths = []
for b in device.list_bandwidths():
if b[0] == b[1]:
bandwidths.append('{:.2f}'.format(b[0] / 1e6))
else:
bandwidths.append('{:.2f} - {:.2f}'.format(b[0] / 1e6, b[1] / 1e6))
if bandwidths:
text.append(wrap(', '.join(bandwidths)))
else:
text.append(' N/A')
except RuntimeError:
device = None
text.append('No devices found!')
return (device, '\n'.join(text)) | Returns info about selected SoapySDR device | Below is the the instruction that describes the task:
### Input:
Returns info about selected SoapySDR device
### Response:
def device_info(soapy_args=''):
"""Returns info about selected SoapySDR device"""
text = []
try:
device = simplesoapy.SoapyDevice(soapy_args)
text.append('Selected device: {}'.format(device.hardware))
text.append(' Available RX channels:')
text.append(' {}'.format(', '.join(str(x) for x in device.list_channels())))
text.append(' Available antennas:')
text.append(' {}'.format(', '.join(device.list_antennas())))
text.append(' Available tunable elements:')
text.append(' {}'.format(', '.join(device.list_frequencies())))
text.append(' Available amplification elements:')
text.append(' {}'.format(', '.join(device.list_gains())))
text.append(' Available device settings:')
for key, s in device.list_settings().items():
text.append(wrap('{} ... {} - {} (default: {})'.format(key, s['name'], s['description'], s['value'])))
text.append(' Available stream arguments:')
for key, s in device.list_stream_args().items():
text.append(wrap('{} ... {} - {} (default: {})'.format(key, s['name'], s['description'], s['value'])))
text.append(' Allowed gain range [dB]:')
text.append(' {:.2f} - {:.2f}'.format(*device.get_gain_range()))
text.append(' Allowed frequency range [MHz]:')
text.append(' {:.2f} - {:.2f}'.format(*[x / 1e6 for x in device.get_frequency_range()]))
text.append(' Allowed sample rates [MHz]:')
rates = []
for r in device.list_sample_rates():
if r[0] == r[1]:
rates.append('{:.2f}'.format(r[0] / 1e6))
else:
rates.append('{:.2f} - {:.2f}'.format(r[0] / 1e6, r[1] / 1e6))
text.append(wrap(', '.join(rates)))
text.append(' Allowed bandwidths [MHz]:')
bandwidths = []
for b in device.list_bandwidths():
if b[0] == b[1]:
bandwidths.append('{:.2f}'.format(b[0] / 1e6))
else:
bandwidths.append('{:.2f} - {:.2f}'.format(b[0] / 1e6, b[1] / 1e6))
if bandwidths:
text.append(wrap(', '.join(bandwidths)))
else:
text.append(' N/A')
except RuntimeError:
device = None
text.append('No devices found!')
return (device, '\n'.join(text)) |
def generate_command(self):
""" Generate a sample command
"""
example = []
example.append(f"{sys.argv[0]}")
for key in sorted(list(self.spec.keys())):
if self.spec[key]['type'] == list:
value = " ".join(self.spec[key].get('example', ''))
elif self.spec[key]['type'] == dict:
value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'"
else:
value = self.spec[key].get('example', '')
string = f" --{key.lower()} {value}"
example.append(string)
print(" \\\n".join(example)) | Generate a sample command | Below is the the instruction that describes the task:
### Input:
Generate a sample command
### Response:
def generate_command(self):
""" Generate a sample command
"""
example = []
example.append(f"{sys.argv[0]}")
for key in sorted(list(self.spec.keys())):
if self.spec[key]['type'] == list:
value = " ".join(self.spec[key].get('example', ''))
elif self.spec[key]['type'] == dict:
value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'"
else:
value = self.spec[key].get('example', '')
string = f" --{key.lower()} {value}"
example.append(string)
print(" \\\n".join(example)) |
def alts_columns_used(self):
"""
Columns from the alternatives table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.alts_predict_filters),
util.columns_in_filters(self.alts_fit_filters)))) | Columns from the alternatives table that are used for filtering. | Below is the the instruction that describes the task:
### Input:
Columns from the alternatives table that are used for filtering.
### Response:
def alts_columns_used(self):
"""
Columns from the alternatives table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.alts_predict_filters),
util.columns_in_filters(self.alts_fit_filters)))) |
def main(argString=None):
"""The main function of this module.
:param argString: the options.
:type argString: list
Here are the steps for this function:
1. Prints the options.
2. Uses Plink to extract markers according to LD
(:py:func:`selectSNPsAccordingToLD`).
3. Checks if there is enough markers after pruning
(:py:func:`checkNumberOfSNP`). If not, then quits.
4. Extract markers according to LD (:py:func:`extractSNPs`).
5. Runs Plink with the ``genome`` option (:py:func:`runGenome`). Quits here
if the user asker only for the ``genome`` file.
6. Finds related individuals and gets values for plotting
(:py:func:`extractRelatedIndividuals`).
7. Plots ``Z1`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`).
8. Plots ``Z2`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`).
"""
# Getting and checking the options
args = parseArgs(argString)
checkArgs(args)
logger.info("Options used:")
for key, value in vars(args).iteritems():
logger.info(" --{} {}".format(key.replace("_", "-"), value))
# Run plink
logger.info("Running Plink to extract SNPs according to LD")
snpsToExtract = selectSNPsAccordingToLD(args)
# Check there is enough SNP in the output file
logger.info("Checking if there are enough extracted SNP")
if not checkNumberOfSNP(snpsToExtract, args.min_nb_snp):
# There are not enough markers
logger.info("There are not enough SNPs: STOPPING NOW!")
else:
# Extract the SNPs
logger.info("Extracting the SNPs using Plink")
newBfile = extractSNPs(snpsToExtract, args)
# Run the genome command from plink
logger.info("Creating the genome file using Plink")
genomeFileName = runGenome(newBfile, args)
if args.genome_only:
# We just want the genome file
return newBfile
# Extract related individuals
logger.info("Finding related individuals from genome file")
related_data = extractRelatedIndividuals(genomeFileName, args.out,
args.ibs2_ratio)
# Are there related samples?
if related_data is None:
logger.info("There are no related samples in the dataset")
else:
# Plot the related data
logger.info("Plotting related individuals")
plot_related_data(related_data["IBS2_RATIO"], related_data["Z1"],
related_data["CODE"], r"$Z_1$",
args.out + ".related_individuals_z1.png", args)
plot_related_data(related_data["IBS2_RATIO"], related_data["Z2"],
related_data["CODE"], r"$Z_2$",
args.out + ".related_individuals_z2.png", args) | The main function of this module.
:param argString: the options.
:type argString: list
Here are the steps for this function:
1. Prints the options.
2. Uses Plink to extract markers according to LD
(:py:func:`selectSNPsAccordingToLD`).
3. Checks if there is enough markers after pruning
(:py:func:`checkNumberOfSNP`). If not, then quits.
4. Extract markers according to LD (:py:func:`extractSNPs`).
5. Runs Plink with the ``genome`` option (:py:func:`runGenome`). Quits here
if the user asker only for the ``genome`` file.
6. Finds related individuals and gets values for plotting
(:py:func:`extractRelatedIndividuals`).
7. Plots ``Z1`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`).
8. Plots ``Z2`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`). | Below is the the instruction that describes the task:
### Input:
The main function of this module.
:param argString: the options.
:type argString: list
Here are the steps for this function:
1. Prints the options.
2. Uses Plink to extract markers according to LD
(:py:func:`selectSNPsAccordingToLD`).
3. Checks if there is enough markers after pruning
(:py:func:`checkNumberOfSNP`). If not, then quits.
4. Extract markers according to LD (:py:func:`extractSNPs`).
5. Runs Plink with the ``genome`` option (:py:func:`runGenome`). Quits here
if the user asker only for the ``genome`` file.
6. Finds related individuals and gets values for plotting
(:py:func:`extractRelatedIndividuals`).
7. Plots ``Z1`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`).
8. Plots ``Z2`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`).
### Response:
def main(argString=None):
"""The main function of this module.
:param argString: the options.
:type argString: list
Here are the steps for this function:
1. Prints the options.
2. Uses Plink to extract markers according to LD
(:py:func:`selectSNPsAccordingToLD`).
3. Checks if there is enough markers after pruning
(:py:func:`checkNumberOfSNP`). If not, then quits.
4. Extract markers according to LD (:py:func:`extractSNPs`).
5. Runs Plink with the ``genome`` option (:py:func:`runGenome`). Quits here
if the user asker only for the ``genome`` file.
6. Finds related individuals and gets values for plotting
(:py:func:`extractRelatedIndividuals`).
7. Plots ``Z1`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`).
8. Plots ``Z2`` in function of ``IBS2 ratio`` for related individuals
(:py:func:`plot_related_data`).
"""
# Getting and checking the options
args = parseArgs(argString)
checkArgs(args)
logger.info("Options used:")
for key, value in vars(args).iteritems():
logger.info(" --{} {}".format(key.replace("_", "-"), value))
# Run plink
logger.info("Running Plink to extract SNPs according to LD")
snpsToExtract = selectSNPsAccordingToLD(args)
# Check there is enough SNP in the output file
logger.info("Checking if there are enough extracted SNP")
if not checkNumberOfSNP(snpsToExtract, args.min_nb_snp):
# There are not enough markers
logger.info("There are not enough SNPs: STOPPING NOW!")
else:
# Extract the SNPs
logger.info("Extracting the SNPs using Plink")
newBfile = extractSNPs(snpsToExtract, args)
# Run the genome command from plink
logger.info("Creating the genome file using Plink")
genomeFileName = runGenome(newBfile, args)
if args.genome_only:
# We just want the genome file
return newBfile
# Extract related individuals
logger.info("Finding related individuals from genome file")
related_data = extractRelatedIndividuals(genomeFileName, args.out,
args.ibs2_ratio)
# Are there related samples?
if related_data is None:
logger.info("There are no related samples in the dataset")
else:
# Plot the related data
logger.info("Plotting related individuals")
plot_related_data(related_data["IBS2_RATIO"], related_data["Z1"],
related_data["CODE"], r"$Z_1$",
args.out + ".related_individuals_z1.png", args)
plot_related_data(related_data["IBS2_RATIO"], related_data["Z2"],
related_data["CODE"], r"$Z_2$",
args.out + ".related_individuals_z2.png", args) |
def fetch_organization_courses(organization):
"""
Retrieves the set of courses currently linked to the specified organization
"""
organization_obj = serializers.deserialize_organization(organization)
queryset = internal.OrganizationCourse.objects.filter(
organization=organization_obj,
active=True
).select_related('organization')
return [serializers.serialize_organization_with_course(organization) for organization in queryset] | Retrieves the set of courses currently linked to the specified organization | Below is the the instruction that describes the task:
### Input:
Retrieves the set of courses currently linked to the specified organization
### Response:
def fetch_organization_courses(organization):
"""
Retrieves the set of courses currently linked to the specified organization
"""
organization_obj = serializers.deserialize_organization(organization)
queryset = internal.OrganizationCourse.objects.filter(
organization=organization_obj,
active=True
).select_related('organization')
return [serializers.serialize_organization_with_course(organization) for organization in queryset] |
def maps_get_rules_output_rules_groupname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
groupname = ET.SubElement(rules, "groupname")
groupname.text = kwargs.pop('groupname')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def maps_get_rules_output_rules_groupname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
groupname = ET.SubElement(rules, "groupname")
groupname.text = kwargs.pop('groupname')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_module_environment(env=None, function=None):
'''
Get module optional environment.
To setup an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returnes etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifially export environment only for one function,
specify it as in the example above "install".
First will be fetched configuration, where virtual name goes first,
then the physical name of the module overrides the virtual settings.
Then pillar settings will override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
'''
result = {}
if not env:
env = {}
for env_src in [env.get('__opts__', {}), env.get('__pillar__', {})]:
fname = env.get('__file__', '')
physical_name = os.path.basename(fname).split('.')[0]
section = os.path.basename(os.path.dirname(fname))
m_names = [env.get('__virtualname__')]
if physical_name not in m_names:
m_names.append(physical_name)
for m_name in m_names:
if not m_name:
continue
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get('_', {}).copy())
if function is not None:
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get(function, {}).copy())
return result | Get module optional environment.
To setup an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returnes etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifially export environment only for one function,
specify it as in the example above "install".
First will be fetched configuration, where virtual name goes first,
then the physical name of the module overrides the virtual settings.
Then pillar settings will override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict | Below is the the instruction that describes the task:
### Input:
Get module optional environment.
To setup an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returnes etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifially export environment only for one function,
specify it as in the example above "install".
First will be fetched configuration, where virtual name goes first,
then the physical name of the module overrides the virtual settings.
Then pillar settings will override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
### Response:
def get_module_environment(env=None, function=None):
'''
Get module optional environment.
To setup an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returnes etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifially export environment only for one function,
specify it as in the example above "install".
First will be fetched configuration, where virtual name goes first,
then the physical name of the module overrides the virtual settings.
Then pillar settings will override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
'''
result = {}
if not env:
env = {}
for env_src in [env.get('__opts__', {}), env.get('__pillar__', {})]:
fname = env.get('__file__', '')
physical_name = os.path.basename(fname).split('.')[0]
section = os.path.basename(os.path.dirname(fname))
m_names = [env.get('__virtualname__')]
if physical_name not in m_names:
m_names.append(physical_name)
for m_name in m_names:
if not m_name:
continue
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get('_', {}).copy())
if function is not None:
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get(function, {}).copy())
return result |
def killBatchJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
while True:
for jobID in jobIDs:
if jobID in self.runningJobs:
self.runningJobs.remove(jobID)
exitValue = self._runParasol(['remove', 'job', str(jobID)],
autoRetry=False)[0]
logger.debug("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
runningJobs = self.getIssuedBatchJobIDs()
if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
break
logger.warn( 'Tried to kill some jobs, but something happened and they are still '
'going, will try againin 5s.')
time.sleep(5)
# Update the CPU usage, because killed jobs aren't written to the results file.
for jobID in jobIDs:
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID) | Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs. | Below is the the instruction that describes the task:
### Input:
Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
### Response:
def killBatchJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
while True:
for jobID in jobIDs:
if jobID in self.runningJobs:
self.runningJobs.remove(jobID)
exitValue = self._runParasol(['remove', 'job', str(jobID)],
autoRetry=False)[0]
logger.debug("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
runningJobs = self.getIssuedBatchJobIDs()
if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
break
logger.warn( 'Tried to kill some jobs, but something happened and they are still '
'going, will try againin 5s.')
time.sleep(5)
# Update the CPU usage, because killed jobs aren't written to the results file.
for jobID in jobIDs:
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID) |
def run_filter(vrn_file, align_bam, ref_file, data, items):
"""Filter and annotate somatic VCFs with damage/bias artifacts on low frequency variants.
Moves damage estimation to INFO field, instead of leaving in FILTER.
"""
if not should_filter(items) or not vcfutils.vcf_has_variants(vrn_file):
return data
else:
raw_file = "%s-damage.vcf" % utils.splitext_plus(vrn_file)[0]
out_plot_files = ["%s%s" % (utils.splitext_plus(raw_file)[0], ext)
for ext in ["_seq_bias_simplified.pdf", "_pcr_bias_simplified.pdf"]]
if not utils.file_uptodate(raw_file, vrn_file) and not utils.file_uptodate(raw_file + ".gz", vrn_file):
with file_transaction(items[0], raw_file) as tx_out_file:
# Does not apply --qcSummary plotting due to slow runtimes
cmd = ["dkfzbiasfilter.py", "--filterCycles", "1", "--passOnly",
"--tempFolder", os.path.dirname(tx_out_file),
vrn_file, align_bam, ref_file, tx_out_file]
do.run(cmd, "Filter low frequency variants for DNA damage and strand bias")
for out_plot in out_plot_files:
tx_plot_file = os.path.join("%s_qcSummary" % utils.splitext_plus(tx_out_file)[0], "plots",
os.path.basename(out_plot))
if utils.file_exists(tx_plot_file):
shutil.move(tx_plot_file, out_plot)
raw_file = vcfutils.bgzip_and_index(raw_file, items[0]["config"])
data["vrn_file"] = _filter_to_info(raw_file, items[0])
out_plot_files = [x for x in out_plot_files if utils.file_exists(x)]
data["damage_plots"] = out_plot_files
return data | Filter and annotate somatic VCFs with damage/bias artifacts on low frequency variants.
Moves damage estimation to INFO field, instead of leaving in FILTER. | Below is the the instruction that describes the task:
### Input:
Filter and annotate somatic VCFs with damage/bias artifacts on low frequency variants.
Moves damage estimation to INFO field, instead of leaving in FILTER.
### Response:
def run_filter(vrn_file, align_bam, ref_file, data, items):
"""Filter and annotate somatic VCFs with damage/bias artifacts on low frequency variants.
Moves damage estimation to INFO field, instead of leaving in FILTER.
"""
if not should_filter(items) or not vcfutils.vcf_has_variants(vrn_file):
return data
else:
raw_file = "%s-damage.vcf" % utils.splitext_plus(vrn_file)[0]
out_plot_files = ["%s%s" % (utils.splitext_plus(raw_file)[0], ext)
for ext in ["_seq_bias_simplified.pdf", "_pcr_bias_simplified.pdf"]]
if not utils.file_uptodate(raw_file, vrn_file) and not utils.file_uptodate(raw_file + ".gz", vrn_file):
with file_transaction(items[0], raw_file) as tx_out_file:
# Does not apply --qcSummary plotting due to slow runtimes
cmd = ["dkfzbiasfilter.py", "--filterCycles", "1", "--passOnly",
"--tempFolder", os.path.dirname(tx_out_file),
vrn_file, align_bam, ref_file, tx_out_file]
do.run(cmd, "Filter low frequency variants for DNA damage and strand bias")
for out_plot in out_plot_files:
tx_plot_file = os.path.join("%s_qcSummary" % utils.splitext_plus(tx_out_file)[0], "plots",
os.path.basename(out_plot))
if utils.file_exists(tx_plot_file):
shutil.move(tx_plot_file, out_plot)
raw_file = vcfutils.bgzip_and_index(raw_file, items[0]["config"])
data["vrn_file"] = _filter_to_info(raw_file, items[0])
out_plot_files = [x for x in out_plot_files if utils.file_exists(x)]
data["damage_plots"] = out_plot_files
return data |
def FileHashIndexQuery(self, subject, target_prefix, limit=100):
"""Search the index for matches starting with target_prefix.
Args:
subject: The index to use. Should be a urn that points to the sha256
namespace.
target_prefix: The prefix to match against the index.
limit: Either a tuple of (start, limit) or a maximum number of results to
return.
Yields:
URNs of files which have the same data as this file - as read from the
index.
"""
if isinstance(limit, (tuple, list)):
start, length = limit # pylint: disable=unpacking-non-sequence
else:
start = 0
length = limit
prefix = (DataStore.FILE_HASH_TEMPLATE % target_prefix).lower()
results = self.ResolvePrefix(subject, prefix, limit=limit)
for i, (_, hit, _) in enumerate(results):
if i < start:
continue
if i >= start + length:
break
yield rdfvalue.RDFURN(hit) | Search the index for matches starting with target_prefix.
Args:
subject: The index to use. Should be a urn that points to the sha256
namespace.
target_prefix: The prefix to match against the index.
limit: Either a tuple of (start, limit) or a maximum number of results to
return.
Yields:
URNs of files which have the same data as this file - as read from the
index. | Below is the the instruction that describes the task:
### Input:
Search the index for matches starting with target_prefix.
Args:
subject: The index to use. Should be a urn that points to the sha256
namespace.
target_prefix: The prefix to match against the index.
limit: Either a tuple of (start, limit) or a maximum number of results to
return.
Yields:
URNs of files which have the same data as this file - as read from the
index.
### Response:
def FileHashIndexQuery(self, subject, target_prefix, limit=100):
    """Yield file URNs from the hash index matching target_prefix.

    Args:
      subject: The index to use; a urn pointing to the sha256 namespace.
      target_prefix: The hash prefix to match against the index.
      limit: Either an (offset, count) pair or a plain maximum number of
        results to return.

    Yields:
      rdfvalue.RDFURN objects of files which have the same data as this
      file, as read from the index.
    """
    # Normalize limit into an (offset, count) window over the results.
    if isinstance(limit, (tuple, list)):
        offset, count = limit  # pylint: disable=unpacking-non-sequence
    else:
        offset, count = 0, limit

    prefix = (DataStore.FILE_HASH_TEMPLATE % target_prefix).lower()
    matches = self.ResolvePrefix(subject, prefix, limit=limit)
    for position, (_, hit, _) in enumerate(matches):
        if position < offset:
            continue
        if position >= offset + count:
            break
        yield rdfvalue.RDFURN(hit)
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Gets the maximum padding lengths from all ``Instances`` in this batch. Each ``Instance``
has multiple ``Fields``, and each ``Field`` could have multiple things that need padding.
We look at all fields in all instances, and find the max values for each (field_name,
padding_key) pair, returning them in a dictionary.
This can then be used to convert this batch into arrays of consistent length, or to set
model parameters, etc.
"""
padding_lengths: Dict[str, Dict[str, int]] = defaultdict(dict)
all_instance_lengths: List[Dict[str, Dict[str, int]]] = [instance.get_padding_lengths()
for instance in self.instances]
if not all_instance_lengths:
return {**padding_lengths}
all_field_lengths: Dict[str, List[Dict[str, int]]] = defaultdict(list)
for instance_lengths in all_instance_lengths:
for field_name, instance_field_lengths in instance_lengths.items():
all_field_lengths[field_name].append(instance_field_lengths)
for field_name, field_lengths in all_field_lengths.items():
for padding_key in field_lengths[0].keys():
max_value = max(x[padding_key] if padding_key in x else 0 for x in field_lengths)
padding_lengths[field_name][padding_key] = max_value
return {**padding_lengths} | Gets the maximum padding lengths from all ``Instances`` in this batch. Each ``Instance``
has multiple ``Fields``, and each ``Field`` could have multiple things that need padding.
We look at all fields in all instances, and find the max values for each (field_name,
padding_key) pair, returning them in a dictionary.
This can then be used to convert this batch into arrays of consistent length, or to set
model parameters, etc. | Below is the instruction that describes the task:
### Input:
Gets the maximum padding lengths from all ``Instances`` in this batch. Each ``Instance``
has multiple ``Fields``, and each ``Field`` could have multiple things that need padding.
We look at all fields in all instances, and find the max values for each (field_name,
padding_key) pair, returning them in a dictionary.
This can then be used to convert this batch into arrays of consistent length, or to set
model parameters, etc.
### Response:
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
    """
    Compute, for every (field_name, padding_key) pair in this batch, the
    maximum padding length over all ``Instances``.

    Each ``Instance`` has multiple ``Fields`` and each ``Field`` may
    report several padding keys; the result maps field names to dicts of
    padding keys and their batch-wide maxima.  It can then be used to
    pad every instance to a consistent length, size model parameters,
    etc.
    """
    per_instance = [instance.get_padding_lengths() for instance in self.instances]
    if not per_instance:
        return {}
    # Group each instance's per-field length dicts by field name.
    by_field: Dict[str, List[Dict[str, int]]] = defaultdict(list)
    for instance_lengths in per_instance:
        for field_name, field_lengths in instance_lengths.items():
            by_field[field_name].append(field_lengths)
    # For every padding key seen on the first occurrence of a field,
    # take the maximum across all instances (missing keys count as 0).
    maxima: Dict[str, Dict[str, int]] = defaultdict(dict)
    for field_name, length_dicts in by_field.items():
        for padding_key in length_dicts[0]:
            maxima[field_name][padding_key] = max(
                lengths.get(padding_key, 0) for lengths in length_dicts)
    return dict(maxima)
def get_equivalent_qpoints(self, index):
"""
Returns the list of qpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the qpoint index
Returns:
a list of equivalent indices
TODO: now it uses the label we might want to use coordinates instead
(in case there was a mislabel)
"""
#if the qpoint has no label it can"t have a repetition along the band
#structure line object
if self.qpoints[index].label is None:
return [index]
list_index_qpoints = []
for i in range(self.nb_qpoints):
if self.qpoints[i].label == self.qpoints[index].label:
list_index_qpoints.append(i)
return list_index_qpoints | Returns the list of qpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the qpoint index
Returns:
a list of equivalent indices
TODO: now it uses the label we might want to use coordinates instead
(in case there was a mislabel) | Below is the instruction that describes the task:
### Input:
Returns the list of qpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the qpoint index
Returns:
a list of equivalent indices
TODO: now it uses the label we might want to use coordinates instead
(in case there was a mislabel)
### Response:
def get_equivalent_qpoints(self, index):
    """
    Return the indices of all qpoints equivalent to the one at ``index``
    (i.e. carrying the same label, meaning the same frac coords).

    Args:
        index: the qpoint index

    Returns:
        A list of equivalent indices (always containing ``index``).

    TODO: now it uses the label; we might want to use coordinates
    instead (in case there was a mislabel).
    """
    label = self.qpoints[index].label
    # An unlabelled qpoint cannot repeat along the band structure line.
    if label is None:
        return [index]
    return [i for i in range(self.nb_qpoints)
            if self.qpoints[i].label == label]
def readlines(self, sizehint=-1):
"""
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
lines = []
while True:
line = self.readline(sizehint)
if not line:
break
lines.append(line)
if sizehint >= 0:
sizehint -= len(line)
if sizehint <= 0:
break
return lines | readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned. | Below is the instruction that describes the task:
### Input:
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
### Response:
def readlines(self, sizehint=-1):
    """
    readlines([size]) -> list of strings, each a line from the file.

    Repeatedly calls readline() and collects the lines read.  A
    non-negative *sizehint* is an approximate bound on the total number
    of bytes returned: collection stops once at least that many bytes
    have been gathered.
    """
    if self.closed:
        raise ValueError('I/O operation on closed file')
    collected = []
    remaining = sizehint
    line = self.readline(remaining)
    while line:
        collected.append(line)
        if remaining >= 0:
            remaining -= len(line)
            if remaining <= 0:
                break
        line = self.readline(remaining)
    return collected
def _clean_listofcomponents(listofcomponents):
"""force it to be a list of tuples"""
def totuple(item):
"""return a tuple"""
if isinstance(item, (tuple, list)):
return item
else:
return (item, None)
return [totuple(item) for item in listofcomponents] | force it to be a list of tuples | Below is the the instruction that describes the task:
### Input:
force it to be a list of tuples
### Response:
def _clean_listofcomponents(listofcomponents):
    """Normalize every entry to a (component, extra) pair.

    Entries that are already tuples or lists pass through unchanged;
    anything else is wrapped as ``(item, None)``.
    """
    return [
        item if isinstance(item, (tuple, list)) else (item, None)
        for item in listofcomponents
    ]
def t_ATOM(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = PLLexer.reserved.get(t.value, 'ATOM') # Check for reserved words
return t | r'[a-zA-Z_][a-zA-Z_0-9]* | Below is the the instruction that describes the task:
### Input:
r'[a-zA-Z_][a-zA-Z_0-9]*
### Response:
def t_ATOM(self, t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # The raw string above is PLY's token regex (read via __doc__), not
    # ordinary documentation, so it must stay exactly as written.
    # Reserved words match the same pattern: look the lexeme up and
    # reclassify the token, defaulting to a plain ATOM.
    t.type = PLLexer.reserved.get(t.value, 'ATOM')
    return t
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf | Return size bytes from the stream. | Below is the the instruction that describes the task:
### Input:
Return size bytes from the stream.
### Response:
def _read(self, size):
    """Return up to ``size`` decompressed bytes from the stream."""
    if self.comptype == "tar":
        # No compression layer: read straight from the underlying stream.
        return self.__read(size)
    # Refill the decompression buffer until it can satisfy the request
    # or the underlying stream is exhausted.
    while len(self.dbuf) < size:
        raw = self.__read(self.bufsize)
        if not raw:
            break
        try:
            decompressed = self.cmp.decompress(raw)
        except IOError:
            raise ReadError("invalid compressed data")
        self.dbuf += decompressed
    data, self.dbuf = self.dbuf[:size], self.dbuf[size:]
    return data
def file_put( blockchain_id, hostname, recipient_blockchain_ids, data_name, input_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None ):
"""
Send a file to the given recipient, encrypted and signed with the
given blockchain ID.
Allow each recipient to receive the data on each of their hosts.
Return {'status': True} on success, and upload to cloud storage
Return {'error': ...} on error
"""
fd, output_path = tempfile.mkstemp( prefix="blockstack-file-" )
os.fchmod( fd, 0600 )
os.close(fd)
config_dir = os.path.dirname(config_path)
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
all_recipients = []
# make available to all other hosts for this blockchain_id
my_hosts = file_list_hosts( blockchain_id, wallet_keys=wallet_keys, config_path=config_path )
if 'error' in my_hosts:
log.error("Failed to list hosts: %s" % my_hosts['error'])
os.unlink(output_path)
return {'error': 'Failed to look up sender keys'}
if hostname in my_hosts:
my_hosts.remove(hostname)
all_recipients += [(blockchain_id, host) for host in my_hosts['hosts']]
# make available to all hosts for each recipient
for recipient_blockchain_id in recipient_blockchain_ids:
their_hosts = file_list_hosts( recipient_blockchain_id, wallet_keys=wallet_keys, config_path=config_path )
if 'error' in their_hosts:
log.error("Failed to list hosts for %s: %s" % (recipient_blockchain_id, their_hosts['error']))
os.unlink(output_path)
return {'error': 'Failed to look up recipient keys'}
all_recipients += [(recipient_blockchain_id, host) for host in their_hosts['hosts']]
# encrypt
res = file_encrypt( blockchain_id, hostname, all_recipients, input_path, output_path, passphrase=passphrase, config_path=config_path, wallet_keys=wallet_keys )
if 'error' in res:
log.error("Failed to encrypt: %s" % res['error'])
os.unlink(output_path)
return {'error': 'Failed to encrypt'}
# load up
with open(output_path, "r") as f:
ciphertext = f.read()
message = {'ciphertext': ciphertext, 'sender_key_id': res['sender_key_id']}
# put to mutable storage
fq_data_name = file_fq_data_name( data_name )
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
res = blockstack_client.data_put( blockstack_client.make_mutable_data_url( blockchain_id, fq_data_name, None ), message, wallet_keys=wallet_keys, proxy=proxy )
if 'error' in res:
log.error("Failed to put data: %s" % res['error'])
os.unlink(output_path)
return {'error': 'Failed to replicate data'}
os.unlink(output_path)
return {'status': True} | Send a file to the given recipient, encrypted and signed with the
given blockchain ID.
Allow each recipient to receive the data on each of their hosts.
Return {'status': True} on success, and upload to cloud storage
Return {'error': ...} on error | Below is the instruction that describes the task:
### Input:
Send a file to the given recipient, encrypted and signed with the
given blockchain ID.
Allow each recipient to receive the data on each of their hosts.
Return {'status': True} on success, and upload to cloud storage
Return {'error': ...} on error
### Response:
def file_put(blockchain_id, hostname, recipient_blockchain_ids, data_name, input_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None):
    """
    Send a file to the given recipients, encrypted and signed with the
    given blockchain ID.
    Allow each recipient to receive the data on each of their hosts.
    Return {'status': True} on success, and upload to cloud storage
    Return {'error': ...} on error
    """
    # Stage the ciphertext in a private temp file (owner read/write only).
    fd, output_path = tempfile.mkstemp(prefix="blockstack-file-")
    os.fchmod(fd, 0o600)  # was 0600 -- Python-2-only octal syntax
    os.close(fd)

    config_dir = os.path.dirname(config_path)
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)

    all_recipients = []

    # make available to all other hosts for this blockchain_id
    my_hosts = file_list_hosts(blockchain_id, wallet_keys=wallet_keys, config_path=config_path)
    if 'error' in my_hosts:
        log.error("Failed to list hosts: %s" % my_hosts['error'])
        os.unlink(output_path)
        return {'error': 'Failed to look up sender keys'}

    # BUG FIX: the original tested `hostname in my_hosts` (the result
    # dict, not the host list) and called .remove() on the dict, which
    # would have raised AttributeError had it ever matched.  Exclude the
    # sending host from the sender's own host list instead.
    if hostname in my_hosts['hosts']:
        my_hosts['hosts'].remove(hostname)

    all_recipients += [(blockchain_id, host) for host in my_hosts['hosts']]

    # make available to all hosts for each recipient
    for recipient_blockchain_id in recipient_blockchain_ids:
        their_hosts = file_list_hosts(recipient_blockchain_id, wallet_keys=wallet_keys, config_path=config_path)
        if 'error' in their_hosts:
            log.error("Failed to list hosts for %s: %s" % (recipient_blockchain_id, their_hosts['error']))
            os.unlink(output_path)
            return {'error': 'Failed to look up recipient keys'}

        all_recipients += [(recipient_blockchain_id, host) for host in their_hosts['hosts']]

    # encrypt and sign for every (blockchain_id, host) pair
    res = file_encrypt(blockchain_id, hostname, all_recipients, input_path, output_path, passphrase=passphrase, config_path=config_path, wallet_keys=wallet_keys)
    if 'error' in res:
        log.error("Failed to encrypt: %s" % res['error'])
        os.unlink(output_path)
        return {'error': 'Failed to encrypt'}

    # load up
    with open(output_path, "r") as f:
        ciphertext = f.read()

    message = {'ciphertext': ciphertext, 'sender_key_id': res['sender_key_id']}

    # put to mutable storage
    fq_data_name = file_fq_data_name(data_name)
    proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    res = blockstack_client.data_put(blockstack_client.make_mutable_data_url(blockchain_id, fq_data_name, None), message, wallet_keys=wallet_keys, proxy=proxy)
    if 'error' in res:
        log.error("Failed to put data: %s" % res['error'])
        os.unlink(output_path)
        return {'error': 'Failed to replicate data'}

    os.unlink(output_path)
    return {'status': True}
def replace_in_files(dirname, replace):
"""Replace current version with new version in requirements files."""
filepath = os.path.abspath(dirname / "requirements.in")
if os.path.isfile(filepath) and header_footer_exists(filepath):
replaced = re.sub(Utils.exp, replace, get_file_string(filepath))
with open(filepath, "w") as f:
f.write(replaced)
print(color(
"Written to file: {}".format(filepath),
fg='magenta', style='bold')) | Replace current version with new version in requirements files. | Below is the the instruction that describes the task:
### Input:
Replace current version with new version in requirements files.
### Response:
def replace_in_files(dirname, replace):
    """Replace the current version with the new one in requirements files.

    Only ``<dirname>/requirements.in`` is touched, and only when the
    file exists and carries the expected header/footer markers.
    """
    target = os.path.abspath(dirname / "requirements.in")
    if not (os.path.isfile(target) and header_footer_exists(target)):
        return
    updated = re.sub(Utils.exp, replace, get_file_string(target))
    with open(target, "w") as handle:
        handle.write(updated)
    print(color(
        "Written to file: {}".format(target),
        fg='magenta', style='bold'))
def _next_non_ff_byte(self, start):
"""
Return an offset, byte 2-tuple for the next byte in *stream* that is
not '\xFF', starting with the byte at offset *start*. If the byte at
offset *start* is not '\xFF', *start* and the returned *offset* will
be the same.
"""
self._stream.seek(start)
byte_ = self._read_byte()
while byte_ == b'\xFF':
byte_ = self._read_byte()
offset_of_non_ff_byte = self._stream.tell() - 1
return offset_of_non_ff_byte, byte_ | Return an offset, byte 2-tuple for the next byte in *stream* that is
not '\xFF', starting with the byte at offset *start*. If the byte at
offset *start* is not '\xFF', *start* and the returned *offset* will
be the same. | Below is the instruction that describes the task:
### Input:
Return an offset, byte 2-tuple for the next byte in *stream* that is
not '\xFF', starting with the byte at offset *start*. If the byte at
offset *start* is not '\xFF', *start* and the returned *offset* will
be the same.
### Response:
def _next_non_ff_byte(self, start):
    """
    Starting at offset *start*, scan the stream for the first byte that
    is not ``0xFF`` and return an ``(offset, byte)`` 2-tuple.  If the
    byte at *start* already differs from ``0xFF``, *start* itself is the
    returned offset.
    """
    self._stream.seek(start)
    current = self._read_byte()
    while current == b'\xFF':
        current = self._read_byte()
    # _read_byte leaves the stream positioned one past the returned byte.
    return self._stream.tell() - 1, current
def cache_subdirectory(
reference_name=None,
annotation_name=None,
annotation_version=None):
"""
Which cache subdirectory to use for a given annotation database
over a particular reference. All arguments can be omitted to just get
the base subdirectory for all pyensembl cached datasets.
"""
if reference_name is None:
reference_name = ""
if annotation_name is None:
annotation_name = ""
if annotation_version is None:
annotation_version = ""
reference_dir = join(CACHE_BASE_SUBDIR, reference_name)
annotation_dir = "%s%s" % (annotation_name, annotation_version)
return join(reference_dir, annotation_dir) | Which cache subdirectory to use for a given annotation database
over a particular reference. All arguments can be omitted to just get
the base subdirectory for all pyensembl cached datasets. | Below is the instruction that describes the task:
### Input:
Which cache subdirectory to use for a given annotation database
over a particular reference. All arguments can be omitted to just get
the base subdirectory for all pyensembl cached datasets.
### Response:
def cache_subdirectory(
        reference_name=None,
        annotation_name=None,
        annotation_version=None):
    """
    Build the cache subdirectory path for a given annotation database
    over a particular reference.  With all arguments omitted, this is
    just the base subdirectory for all pyensembl cached datasets.
    """
    reference_name = "" if reference_name is None else reference_name
    annotation_name = "" if annotation_name is None else annotation_name
    annotation_version = "" if annotation_version is None else annotation_version
    annotation_dir = "%s%s" % (annotation_name, annotation_version)
    reference_dir = join(CACHE_BASE_SUBDIR, reference_name)
    return join(reference_dir, annotation_dir)
def crypto_pwhash_str_verify(passwd_hash, passwd):
"""
Verifies the ``passwd`` against a given password hash.
Returns True on success, raises InvalidkeyError on failure
:param passwd_hash: saved password hash
:type passwd_hash: bytes
:param passwd: password to be checked
:type passwd: bytes
:return: success
:rtype: boolean
"""
ensure(isinstance(passwd_hash, bytes),
raising=TypeError)
ensure(isinstance(passwd, bytes),
raising=TypeError)
ensure(len(passwd_hash) <= 127,
"Hash must be at most 127 bytes long",
raising=exc.ValueError)
ret = lib.crypto_pwhash_str_verify(passwd_hash, passwd, len(passwd))
ensure(ret == 0,
"Wrong password",
raising=exc.InvalidkeyError)
# all went well, therefore:
return True | Verifies the ``passwd`` against a given password hash.
Returns True on success, raises InvalidkeyError on failure
:param passwd_hash: saved password hash
:type passwd_hash: bytes
:param passwd: password to be checked
:type passwd: bytes
:return: success
:rtype: boolean | Below is the instruction that describes the task:
### Input:
Verifies the ``passwd`` against a given password hash.
Returns True on success, raises InvalidkeyError on failure
:param passwd_hash: saved password hash
:type passwd_hash: bytes
:param passwd: password to be checked
:type passwd: bytes
:return: success
:rtype: boolean
### Response:
def crypto_pwhash_str_verify(passwd_hash, passwd):
    """
    Check ``passwd`` against a previously computed password hash.

    :param passwd_hash: saved password hash
    :type passwd_hash: bytes
    :param passwd: password to be checked
    :type passwd: bytes
    :return: success
    :rtype: boolean
    :raises InvalidkeyError: when the password does not match the hash
    """
    # Both inputs must be byte strings, and the stored hash has a fixed
    # maximum length.
    ensure(isinstance(passwd_hash, bytes), raising=TypeError)
    ensure(isinstance(passwd, bytes), raising=TypeError)
    ensure(len(passwd_hash) <= 127,
           "Hash must be at most 127 bytes long",
           raising=exc.ValueError)

    rc = lib.crypto_pwhash_str_verify(passwd_hash, passwd, len(passwd))
    ensure(rc == 0,
           "Wrong password",
           raising=exc.InvalidkeyError)
    # all went well, therefore:
    return True
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
) | Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation` | Below is the instruction that describes the task:
### Input:
Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
### Response:
def add_operation(self, operation_type, operation, mode=None):
    """Register an operation on this version.

    :param mode: Name of the mode in which the operation is executed
    :type mode: str
    :param operation_type: one of 'pre', 'post'
    :type operation_type: str
    :param operation: the operation to add
    :type operation: :class:`marabunta.model.Operation`
    """
    version_mode = self._get_version_mode(mode=mode)
    # Dispatch on the phase; any other value is a configuration error.
    if operation_type == 'pre':
        register = version_mode.add_pre
    elif operation_type == 'post':
        register = version_mode.add_post
    else:
        raise ConfigurationError(
            u"Type of operation must be 'pre' or 'post', got %s" %
            (operation_type,)
        )
    register(operation)
def ingest(self, sources=None, tables=None, stage=None, force=False, load_meta=False):
"""Ingest a set of sources, specified as source objects, source names, or destination tables.
If no stage is specified, execute the sources in groups by stage.
Note, however, that when this is called from run_stage, all of the sources have the same stage, so they
get grouped together. The result it that the stage in the inner loop is the same as the stage being
run buy run_stage.
"""
from itertools import groupby
from ambry.bundle.events import TAG
from fs.errors import ResourceNotFoundError
import zlib
self.log('---- Ingesting ----')
self.dstate = self.STATES.BUILDING
self.commit() # WTF? Without this, postgres blocks between table query, and update seq id in source tables.
key = lambda s: s.stage if s.stage else 1
def not_final_or_delete(s):
import zlib
if force:
return True
try:
return s.is_processable and not s.is_ingested and not s.is_built
except (IOError, zlib.error):
s.local_datafile.remove()
return True
sources = sorted(self._resolve_sources(sources, tables, stage, predicate=not_final_or_delete),
key=key)
if not sources:
self.log('No sources left to ingest')
return
self.state = self.STATES.INGESTING
count = 0
errors = 0
self._run_events(TAG.BEFORE_INGEST, 0)
# Clear out all ingested files that are malformed
for s in self.sources:
if s.is_downloadable:
df = s.datafile
try:
info = df.info
df.close()
except (ResourceNotFoundError, zlib.error, IOError):
df.remove()
for stage, g in groupby(sources, key):
sources = [s for s in g if not_final_or_delete(s)]
if not len(sources):
continue
self._run_events(TAG.BEFORE_INGEST, stage)
stage_errors = self._ingest_sources(sources, stage, force=force)
errors += stage_errors
count += len(sources) - stage_errors
self._run_events(TAG.AFTER_INGEST, stage)
self.record_stage_state(self.STATES.INGESTING, stage)
self.state = self.STATES.INGESTED
try:
pass
finally:
self._run_events(TAG.AFTER_INGEST, 0)
self.log('Ingested {} sources'.format(count))
if load_meta:
if len(sources) == 1:
iterable_source, source_pipe = self.source_pipe(sources[0])
try:
meta = iterable_source.meta
if meta:
self.metadata.about.title = meta['title']
self.metadata.about.summary = meta['summary']
self.build_source_files.bundle_meta.objects_to_record()
except AttributeError as e:
self.warn("Failed to set metadata: {}".format(e))
pass
else:
self.warn("Didn't not load meta from source. Must have exactly one soruce, got {}".format(len(sources)))
self.commit()
if errors == 0:
return True
else:
return False | Ingest a set of sources, specified as source objects, source names, or destination tables.
If no stage is specified, execute the sources in groups by stage.
Note, however, that when this is called from run_stage, all of the sources have the same stage, so they
get grouped together. The result it that the stage in the inner loop is the same as the stage being
run by run_stage. | Below is the instruction that describes the task:
### Input:
Ingest a set of sources, specified as source objects, source names, or destination tables.
If no stage is specified, execute the sources in groups by stage.
Note, however, that when this is called from run_stage, all of the sources have the same stage, so they
get grouped together. The result it that the stage in the inner loop is the same as the stage being
run buy run_stage.
### Response:
def ingest(self, sources=None, tables=None, stage=None, force=False, load_meta=False):
    """Ingest a set of sources, specified as source objects, source names, or destination tables.

    If no stage is specified, execute the sources in groups by stage.

    Note, however, that when this is called from run_stage, all of the sources
    have the same stage, so they get grouped together. The result is that the
    stage in the inner loop is the same as the stage being run by run_stage.

    Returns True when every source ingested cleanly, False when any source
    errored, and None when there was nothing left to ingest.
    """
    from itertools import groupby
    from ambry.bundle.events import TAG
    from fs.errors import ResourceNotFoundError
    import zlib

    self.log('---- Ingesting ----')
    self.dstate = self.STATES.BUILDING
    # WTF? Without this, postgres blocks between table query, and update seq id in source tables.
    self.commit()

    key = lambda s: s.stage if s.stage else 1

    def not_final_or_delete(s):
        # A source needs (re-)ingestion when forced, when not yet
        # ingested/built, or when its cached datafile is corrupt --
        # in which case the corrupt file is removed first.
        if force:
            return True
        try:
            return s.is_processable and not s.is_ingested and not s.is_built
        except (IOError, zlib.error):
            s.local_datafile.remove()
            return True

    sources = sorted(self._resolve_sources(sources, tables, stage, predicate=not_final_or_delete),
                     key=key)

    if not sources:
        self.log('No sources left to ingest')
        return

    self.state = self.STATES.INGESTING

    count = 0
    errors = 0

    self._run_events(TAG.BEFORE_INGEST, 0)

    # Clear out all ingested files that are malformed.
    for s in self.sources:
        if s.is_downloadable:
            df = s.datafile
            try:
                # Accessing .info forces a read; corrupt files raise here.
                info = df.info
                df.close()
            except (ResourceNotFoundError, zlib.error, IOError):
                df.remove()

    for stage, g in groupby(sources, key):
        # NOTE(review): this rebinds `sources` to the current stage's
        # filtered list, so the load_meta branch below only ever sees the
        # *last* stage's sources -- confirm this is intended.
        sources = [s for s in g if not_final_or_delete(s)]

        if not sources:
            continue

        self._run_events(TAG.BEFORE_INGEST, stage)
        stage_errors = self._ingest_sources(sources, stage, force=force)
        errors += stage_errors
        count += len(sources) - stage_errors
        self._run_events(TAG.AFTER_INGEST, stage)

        self.record_stage_state(self.STATES.INGESTING, stage)

    self.state = self.STATES.INGESTED

    # The original wrapped this in a no-op ``try: pass finally:`` block;
    # the event fires unconditionally either way.
    self._run_events(TAG.AFTER_INGEST, 0)

    self.log('Ingested {} sources'.format(count))

    if load_meta:
        if len(sources) == 1:
            iterable_source, source_pipe = self.source_pipe(sources[0])
            try:
                meta = iterable_source.meta
                if meta:
                    self.metadata.about.title = meta['title']
                    self.metadata.about.summary = meta['summary']
                    self.build_source_files.bundle_meta.objects_to_record()
            except AttributeError as e:
                self.warn("Failed to set metadata: {}".format(e))
        else:
            # Message fixed: was "Didn't not load ... one soruce".
            self.warn("Did not load meta from source. Must have exactly one source, got {}".format(len(sources)))

    self.commit()

    return errors == 0
def search(self,
start_predictions: torch.Tensor,
start_state: StateType,
step: StepFunctionType) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Given a starting state and a step function, apply beam search to find the
most likely target sequences.
Notes
-----
If your step function returns ``-inf`` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have ``-inf`` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from ``search``
and potentially discard sequences with non-finite log probability.
Parameters
----------
start_predictions : ``torch.Tensor``
A tensor containing the initial predictions with shape ``(batch_size,)``.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : ``StateType``
The initial state passed to the ``step`` function. Each value of the state dict
should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
number of dimensions.
step : ``StepFunctionType``
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two arguments. The first being a tensor
of shape ``(group_size,)``, representing the index of the predicted
tokens from the last time step, and the second being the current state.
The ``group_size`` will be ``batch_size * beam_size``, except in the initial
step, for which it will just be ``batch_size``.
The function is expected to return a tuple, where the first element
is a tensor of shape ``(group_size, target_vocab_size)`` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
``(group_size, *)``, where ``*`` means any other number of dimensions.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Tuple of ``(predictions, log_probabilities)``, where ``predictions``
has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
has shape ``(batch_size, beam_size)``.
"""
batch_size = start_predictions.size()[0]
# List of (batch_size, beam_size) tensors. One for each time step. Does not
# include the start symbols, which are implicit.
predictions: List[torch.Tensor] = []
# List of (batch_size, beam_size) tensors. One for each time step. None for
# the first. Stores the index n for the parent prediction, i.e.
# predictions[t-1][i][n], that it came from.
backpointers: List[torch.Tensor] = []
# Calculate the first timestep. This is done outside the main loop
# because we are going from a single decoder input (the output from the
# encoder) to the top `beam_size` decoder outputs. On the other hand,
# within the main loop we are going from the `beam_size` elements of the
# beam to `beam_size`^2 candidates from which we will select the top
# `beam_size` elements for the next iteration.
# shape: (batch_size, num_classes)
start_class_log_probabilities, state = step(start_predictions, start_state)
num_classes = start_class_log_probabilities.size()[1]
# Make sure `per_node_beam_size` is not larger than `num_classes`.
if self.per_node_beam_size > num_classes:
raise ConfigurationError(f"Target vocab size ({num_classes:d}) too small "
f"relative to per_node_beam_size ({self.per_node_beam_size:d}).\n"
f"Please decrease beam_size or per_node_beam_size.")
# shape: (batch_size, beam_size), (batch_size, beam_size)
start_top_log_probabilities, start_predicted_classes = \
start_class_log_probabilities.topk(self.beam_size)
if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():
warnings.warn("Empty sequences predicted. You may want to increase the beam size or ensure "
"your step function is working properly.",
RuntimeWarning)
return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities
# The log probabilities for the last time step.
# shape: (batch_size, beam_size)
last_log_probabilities = start_top_log_probabilities
# shape: [(batch_size, beam_size)]
predictions.append(start_predicted_classes)
# Log probability tensor that mandates that the end token is selected.
# shape: (batch_size * beam_size, num_classes)
log_probs_after_end = start_class_log_probabilities.new_full(
(batch_size * self.beam_size, num_classes),
float("-inf")
)
log_probs_after_end[:, self._end_index] = 0.
# Set the same state for each element in the beam.
for key, state_tensor in state.items():
_, *last_dims = state_tensor.size()
# shape: (batch_size * beam_size, *)
state[key] = state_tensor.\
unsqueeze(1).\
expand(batch_size, self.beam_size, *last_dims).\
reshape(batch_size * self.beam_size, *last_dims)
for timestep in range(self.max_steps - 1):
# shape: (batch_size * beam_size,)
last_predictions = predictions[-1].reshape(batch_size * self.beam_size)
# If every predicted token from the last step is `self._end_index`,
# then we can stop early.
if (last_predictions == self._end_index).all():
break
# Take a step. This get the predicted log probs of the next classes
# and updates the state.
# shape: (batch_size * beam_size, num_classes)
class_log_probabilities, state = step(last_predictions, state)
# shape: (batch_size * beam_size, num_classes)
last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
batch_size * self.beam_size,
num_classes
)
# Here we are finding any beams where we predicted the end token in
# the previous timestep and replacing the distribution with a
# one-hot distribution, forcing the beam to predict the end token
# this timestep as well.
# shape: (batch_size * beam_size, num_classes)
cleaned_log_probabilities = torch.where(
last_predictions_expanded == self._end_index,
log_probs_after_end,
class_log_probabilities
)
# shape (both): (batch_size * beam_size, per_node_beam_size)
top_log_probabilities, predicted_classes = \
cleaned_log_probabilities.topk(self.per_node_beam_size)
# Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)
# so that we can add them to the current log probs for this timestep.
# This lets us maintain the log probability of each element on the beam.
# shape: (batch_size * beam_size, per_node_beam_size)
expanded_last_log_probabilities = last_log_probabilities.\
unsqueeze(2).\
expand(batch_size, self.beam_size, self.per_node_beam_size).\
reshape(batch_size * self.beam_size, self.per_node_beam_size)
# shape: (batch_size * beam_size, per_node_beam_size)
summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_summed = summed_top_log_probabilities.\
reshape(batch_size, self.beam_size * self.per_node_beam_size)
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_predicted_classes = predicted_classes.\
reshape(batch_size, self.beam_size * self.per_node_beam_size)
# Keep only the top `beam_size` beam indices.
# shape: (batch_size, beam_size), (batch_size, beam_size)
restricted_beam_log_probs, restricted_beam_indices = reshaped_summed.topk(self.beam_size)
# Use the beam indices to extract the corresponding classes.
# shape: (batch_size, beam_size)
restricted_predicted_classes = reshaped_predicted_classes.gather(1, restricted_beam_indices)
predictions.append(restricted_predicted_classes)
# shape: (batch_size, beam_size)
last_log_probabilities = restricted_beam_log_probs
# The beam indices come from a `beam_size * per_node_beam_size` dimension where the
# indices with a common ancestor are grouped together. Hence
# dividing by per_node_beam_size gives the ancestor. (Note that this is integer
# division as the tensor is a LongTensor.)
# shape: (batch_size, beam_size)
backpointer = restricted_beam_indices / self.per_node_beam_size
backpointers.append(backpointer)
# Keep only the pieces of the state tensors corresponding to the
# ancestors created this iteration.
for key, state_tensor in state.items():
_, *last_dims = state_tensor.size()
# shape: (batch_size, beam_size, *)
expanded_backpointer = backpointer.\
view(batch_size, self.beam_size, *([1] * len(last_dims))).\
expand(batch_size, self.beam_size, *last_dims)
# shape: (batch_size * beam_size, *)
state[key] = state_tensor.\
reshape(batch_size, self.beam_size, *last_dims).\
gather(1, expanded_backpointer).\
reshape(batch_size * self.beam_size, *last_dims)
if not torch.isfinite(last_log_probabilities).all():
warnings.warn("Infinite log probabilities encountered. Some final sequences may not make sense. "
"This can happen when the beam size is larger than the number of valid (non-zero "
"probability) transitions that the step function produces.",
RuntimeWarning)
# Reconstruct the sequences.
# shape: [(batch_size, beam_size, 1)]
reconstructed_predictions = [predictions[-1].unsqueeze(2)]
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[-1]
for timestep in range(len(predictions) - 2, 0, -1):
# shape: (batch_size, beam_size, 1)
cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(cur_preds)
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)
# shape: (batch_size, beam_size, 1)
final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(final_preds)
# shape: (batch_size, beam_size, max_steps)
all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)
return all_predictions, last_log_probabilities | Given a starting state and a step function, apply beam search to find the
most likely target sequences.
Notes
-----
If your step function returns ``-inf`` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have ``-inf`` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from ``search``
and potentially discard sequences with non-finite log probability.
Parameters
----------
start_predictions : ``torch.Tensor``
A tensor containing the initial predictions with shape ``(batch_size,)``.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : ``StateType``
The initial state passed to the ``step`` function. Each value of the state dict
should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
number of dimensions.
step : ``StepFunctionType``
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two arguments. The first being a tensor
of shape ``(group_size,)``, representing the index of the predicted
tokens from the last time step, and the second being the current state.
The ``group_size`` will be ``batch_size * beam_size``, except in the initial
step, for which it will just be ``batch_size``.
The function is expected to return a tuple, where the first element
is a tensor of shape ``(group_size, target_vocab_size)`` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
``(group_size, *)``, where ``*`` means any other number of dimensions.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Tuple of ``(predictions, log_probabilities)``, where ``predictions``
has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
has shape ``(batch_size, beam_size)``. | Below is the instruction that describes the task:
### Input:
Given a starting state and a step function, apply beam search to find the
most likely target sequences.
Notes
-----
If your step function returns ``-inf`` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have ``-inf`` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from ``search``
and potentially discard sequences with non-finite log probability.
Parameters
----------
start_predictions : ``torch.Tensor``
A tensor containing the initial predictions with shape ``(batch_size,)``.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : ``StateType``
The initial state passed to the ``step`` function. Each value of the state dict
should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
number of dimensions.
step : ``StepFunctionType``
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two arguments. The first being a tensor
of shape ``(group_size,)``, representing the index of the predicted
tokens from the last time step, and the second being the current state.
The ``group_size`` will be ``batch_size * beam_size``, except in the initial
step, for which it will just be ``batch_size``.
The function is expected to return a tuple, where the first element
is a tensor of shape ``(group_size, target_vocab_size)`` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
``(group_size, *)``, where ``*`` means any other number of dimensions.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Tuple of ``(predictions, log_probabilities)``, where ``predictions``
has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
has shape ``(batch_size, beam_size)``.
### Response:
def search(self,
           start_predictions: torch.Tensor,
           start_state: StateType,
           step: StepFunctionType) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Given a starting state and a step function, apply beam search to find the
    most likely target sequences.

    Notes
    -----
    If your step function returns ``-inf`` for some log probabilities
    (like if you're using a masked log-softmax) then some of the "best"
    sequences returned may also have ``-inf`` log probability. Specifically
    this happens when the beam size is smaller than the number of actions
    with finite log probability (non-zero probability) returned by the step function.
    Therefore if you're using a mask you may want to check the results from ``search``
    and potentially discard sequences with non-finite log probability.

    Parameters
    ----------
    start_predictions : ``torch.Tensor``
        A tensor containing the initial predictions with shape ``(batch_size,)``.
        Usually the initial predictions are just the index of the "start" token
        in the target vocabulary.
    start_state : ``StateType``
        The initial state passed to the ``step`` function. Each value of the state dict
        should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
        number of dimensions.
    step : ``StepFunctionType``
        A function that is responsible for computing the next most likely tokens,
        given the current state and the predictions from the last time step.
        The function should accept two arguments. The first being a tensor
        of shape ``(group_size,)``, representing the index of the predicted
        tokens from the last time step, and the second being the current state.
        The ``group_size`` will be ``batch_size * beam_size``, except in the initial
        step, for which it will just be ``batch_size``.
        The function is expected to return a tuple, where the first element
        is a tensor of shape ``(group_size, target_vocab_size)`` containing
        the log probabilities of the tokens for the next step, and the second
        element is the updated state. The tensor in the state should have shape
        ``(group_size, *)``, where ``*`` means any other number of dimensions.

    Returns
    -------
    Tuple[torch.Tensor, torch.Tensor]
        Tuple of ``(predictions, log_probabilities)``, where ``predictions``
        has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
        has shape ``(batch_size, beam_size)``.
    """
    batch_size = start_predictions.size()[0]

    # List of (batch_size, beam_size) tensors. One for each time step. Does not
    # include the start symbols, which are implicit.
    predictions: List[torch.Tensor] = []

    # List of (batch_size, beam_size) tensors. One for each time step. None for
    # the first. Stores the index n for the parent prediction, i.e.
    # predictions[t-1][i][n], that it came from.
    backpointers: List[torch.Tensor] = []

    # Calculate the first timestep. This is done outside the main loop
    # because we are going from a single decoder input (the output from the
    # encoder) to the top `beam_size` decoder outputs. On the other hand,
    # within the main loop we are going from the `beam_size` elements of the
    # beam to `beam_size`^2 candidates from which we will select the top
    # `beam_size` elements for the next iteration.
    # shape: (batch_size, num_classes)
    start_class_log_probabilities, state = step(start_predictions, start_state)

    num_classes = start_class_log_probabilities.size()[1]

    # Make sure `per_node_beam_size` is not larger than `num_classes`.
    if self.per_node_beam_size > num_classes:
        raise ConfigurationError(f"Target vocab size ({num_classes:d}) too small "
                                 f"relative to per_node_beam_size ({self.per_node_beam_size:d}).\n"
                                 f"Please decrease beam_size or per_node_beam_size.")

    # shape: (batch_size, beam_size), (batch_size, beam_size)
    start_top_log_probabilities, start_predicted_classes = \
        start_class_log_probabilities.topk(self.beam_size)
    if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():
        warnings.warn("Empty sequences predicted. You may want to increase the beam size or ensure "
                      "your step function is working properly.",
                      RuntimeWarning)
        return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities

    # The log probabilities for the last time step.
    # shape: (batch_size, beam_size)
    last_log_probabilities = start_top_log_probabilities

    # shape: [(batch_size, beam_size)]
    predictions.append(start_predicted_classes)

    # Log probability tensor that mandates that the end token is selected.
    # shape: (batch_size * beam_size, num_classes)
    log_probs_after_end = start_class_log_probabilities.new_full(
            (batch_size * self.beam_size, num_classes),
            float("-inf")
    )
    log_probs_after_end[:, self._end_index] = 0.

    # Set the same state for each element in the beam.
    for key, state_tensor in state.items():
        _, *last_dims = state_tensor.size()
        # shape: (batch_size * beam_size, *)
        state[key] = state_tensor.\
                unsqueeze(1).\
                expand(batch_size, self.beam_size, *last_dims).\
                reshape(batch_size * self.beam_size, *last_dims)

    for timestep in range(self.max_steps - 1):
        # shape: (batch_size * beam_size,)
        last_predictions = predictions[-1].reshape(batch_size * self.beam_size)

        # If every predicted token from the last step is `self._end_index`,
        # then we can stop early.
        if (last_predictions == self._end_index).all():
            break

        # Take a step. This get the predicted log probs of the next classes
        # and updates the state.
        # shape: (batch_size * beam_size, num_classes)
        class_log_probabilities, state = step(last_predictions, state)

        # shape: (batch_size * beam_size, num_classes)
        last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
                batch_size * self.beam_size,
                num_classes
        )

        # Here we are finding any beams where we predicted the end token in
        # the previous timestep and replacing the distribution with a
        # one-hot distribution, forcing the beam to predict the end token
        # this timestep as well.
        # shape: (batch_size * beam_size, num_classes)
        cleaned_log_probabilities = torch.where(
                last_predictions_expanded == self._end_index,
                log_probs_after_end,
                class_log_probabilities
        )

        # shape (both): (batch_size * beam_size, per_node_beam_size)
        top_log_probabilities, predicted_classes = \
            cleaned_log_probabilities.topk(self.per_node_beam_size)

        # Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)
        # so that we can add them to the current log probs for this timestep.
        # This lets us maintain the log probability of each element on the beam.
        # shape: (batch_size * beam_size, per_node_beam_size)
        expanded_last_log_probabilities = last_log_probabilities.\
                unsqueeze(2).\
                expand(batch_size, self.beam_size, self.per_node_beam_size).\
                reshape(batch_size * self.beam_size, self.per_node_beam_size)

        # shape: (batch_size * beam_size, per_node_beam_size)
        summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities

        # shape: (batch_size, beam_size * per_node_beam_size)
        reshaped_summed = summed_top_log_probabilities.\
                reshape(batch_size, self.beam_size * self.per_node_beam_size)

        # shape: (batch_size, beam_size * per_node_beam_size)
        reshaped_predicted_classes = predicted_classes.\
                reshape(batch_size, self.beam_size * self.per_node_beam_size)

        # Keep only the top `beam_size` beam indices.
        # shape: (batch_size, beam_size), (batch_size, beam_size)
        restricted_beam_log_probs, restricted_beam_indices = reshaped_summed.topk(self.beam_size)

        # Use the beam indices to extract the corresponding classes.
        # shape: (batch_size, beam_size)
        restricted_predicted_classes = reshaped_predicted_classes.gather(1, restricted_beam_indices)

        predictions.append(restricted_predicted_classes)

        # shape: (batch_size, beam_size)
        last_log_probabilities = restricted_beam_log_probs

        # The beam indices come from a `beam_size * per_node_beam_size` dimension where the
        # indices with a common ancestor are grouped together. Hence
        # floor-dividing by per_node_beam_size gives the ancestor.
        # NOTE: use `//` (floor division), not `/`: true division of a
        # LongTensor produces a float tensor on modern PyTorch (after a
        # deprecation period it is an error), which would break the integer
        # gather() calls below that rely on these indices.
        # shape: (batch_size, beam_size)
        backpointer = restricted_beam_indices // self.per_node_beam_size

        backpointers.append(backpointer)

        # Keep only the pieces of the state tensors corresponding to the
        # ancestors created this iteration.
        for key, state_tensor in state.items():
            _, *last_dims = state_tensor.size()
            # shape: (batch_size, beam_size, *)
            expanded_backpointer = backpointer.\
                    view(batch_size, self.beam_size, *([1] * len(last_dims))).\
                    expand(batch_size, self.beam_size, *last_dims)

            # shape: (batch_size * beam_size, *)
            state[key] = state_tensor.\
                    reshape(batch_size, self.beam_size, *last_dims).\
                    gather(1, expanded_backpointer).\
                    reshape(batch_size * self.beam_size, *last_dims)

    if not torch.isfinite(last_log_probabilities).all():
        warnings.warn("Infinite log probabilities encountered. Some final sequences may not make sense. "
                      "This can happen when the beam size is larger than the number of valid (non-zero "
                      "probability) transitions that the step function produces.",
                      RuntimeWarning)

    # Reconstruct the sequences.
    # shape: [(batch_size, beam_size, 1)]
    reconstructed_predictions = [predictions[-1].unsqueeze(2)]

    # shape: (batch_size, beam_size)
    cur_backpointers = backpointers[-1]

    for timestep in range(len(predictions) - 2, 0, -1):
        # shape: (batch_size, beam_size, 1)
        cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)

        reconstructed_predictions.append(cur_preds)

        # shape: (batch_size, beam_size)
        cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)

    # shape: (batch_size, beam_size, 1)
    final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)

    reconstructed_predictions.append(final_preds)

    # shape: (batch_size, beam_size, max_steps)
    all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)

    return all_predictions, last_log_probabilities
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False | Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist. | Below is the instruction that describes the task:
### Input:
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
### Response:
def drop(self, **kwargs):
    """
    Remove this collection from the underlying TinyDB database.

    ``**kwargs`` exists only to absorb optional MongoDB-style fields such as
    ``"writeConcern"``; they have no effect on the TinyDB database.

    :return: ``True`` when the collection was successfully dropped,
        ``False`` when there was no collection to drop.
    """
    # Guard clause: nothing to do when the backing table was never created.
    if not self.table:
        return False
    self.parent.tinydb.purge_table(self.tablename)
    return True
def create_record(self, type, name, data, priority=None, port=None,
weight=None, **kwargs):
# pylint: disable=redefined-builtin
"""
Add a new DNS record to the domain
:param str type: the type of DNS record to add (``"A"``, ``"CNAME"``,
etc.)
:param str name: the name (hostname, alias, etc.) of the new record
:param str data: the value of the new record
:param int priority: the priority of the new record (SRV and MX records
only)
:param int port: the port that the service is accessible on (SRV
records only)
:param int weight: the weight of records with the same priority (SRV
records only)
:param kwargs: additional fields to include in the API request
:return: the new domain record
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error
"""
api = self.doapi_manager
data = {
"type": type,
"name": name,
"data": data,
"priority": priority,
"port": port,
"weight": weight,
}
data.update(kwargs)
return self._record(api.request(self.record_url, method='POST',
data=data)["domain_record"]) | Add a new DNS record to the domain
:param str type: the type of DNS record to add (``"A"``, ``"CNAME"``,
etc.)
:param str name: the name (hostname, alias, etc.) of the new record
:param str data: the value of the new record
:param int priority: the priority of the new record (SRV and MX records
only)
:param int port: the port that the service is accessible on (SRV
records only)
:param int weight: the weight of records with the same priority (SRV
records only)
:param kwargs: additional fields to include in the API request
:return: the new domain record
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error | Below is the instruction that describes the task:
### Input:
Add a new DNS record to the domain
:param str type: the type of DNS record to add (``"A"``, ``"CNAME"``,
etc.)
:param str name: the name (hostname, alias, etc.) of the new record
:param str data: the value of the new record
:param int priority: the priority of the new record (SRV and MX records
only)
:param int port: the port that the service is accessible on (SRV
records only)
:param int weight: the weight of records with the same priority (SRV
records only)
:param kwargs: additional fields to include in the API request
:return: the new domain record
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error
### Response:
def create_record(self, type, name, data, priority=None, port=None,
                  weight=None, **kwargs):
    # pylint: disable=redefined-builtin
    """
    Create a new DNS record on this domain.

    :param str type: the kind of DNS record to create (``"A"``, ``"CNAME"``,
        etc.)
    :param str name: the hostname/alias for the new record
    :param str data: the value of the new record
    :param int priority: record priority (SRV and MX records only)
    :param int port: port on which the service is reachable (SRV records
        only)
    :param int weight: relative weight among records sharing a priority (SRV
        records only)
    :param kwargs: any additional fields to include in the API request
    :return: the newly-created domain record
    :rtype: DomainRecord
    :raises DOAPIError: if the API endpoint replies with an error
    """
    # ``**kwargs`` is merged last so callers may add extra fields — or
    # override the standard ones — exactly as the API expects them.
    payload = {
        "type": type,
        "name": name,
        "data": data,
        "priority": priority,
        "port": port,
        "weight": weight,
        **kwargs,
    }
    response = self.doapi_manager.request(self.record_url, method='POST',
                                          data=payload)
    return self._record(response["domain_record"])
def build_trees_from_sentence( sentence, syntactic_relations, layer=LAYER_VISLCG3, \
sentence_id=0, **kwargs ):
''' Given a sentence ( a list of EstNLTK's word tokens ), and a list of
dependency syntactic relations ( output of normalise_alignments() ),
builds trees ( estnltk.syntax.utils.Tree objects ) from the sentence,
and returns as a list of Trees (roots of trees).
Note that there is one-to-many correspondence between EstNLTK's
sentences and dependency syntactic trees, so the resulting list can
contain more than one tree (root);
'''
trees_of_sentence = []
nodes = [ -1 ]
while( len(nodes) > 0 ):
node = nodes.pop(0)
# Find tokens in the sentence that take this node as their parent
for i, syntax_token in enumerate( syntactic_relations ):
parents = [ o[1] for o in syntax_token[PARSER_OUT] ]
# There should be only one parent node; If there is more than one, take the
# first node;
parent = parents[0]
if parent == node:
labels = [ o[0] for o in syntax_token[PARSER_OUT] ]
estnltk_token = sentence[i]
tree1 = Tree( estnltk_token, i, sentence_id, labels, parser=layer )
if INIT_PARSER_OUT in syntax_token:
tree1.parser_output = syntax_token[INIT_PARSER_OUT]
tree1.syntax_token = syntax_token
if parent == -1:
# Add the root node
trees_of_sentence.append( tree1 )
elif parent == i:
# If, for some strange reason, the node is unnormalised and is still
# linked to itself, add it as a singleton tree
trees_of_sentence.append( tree1 )
else:
# For each root node, attempt to add the child
for root_node in trees_of_sentence:
root_node.add_child_to_subtree( parent, tree1 )
if parent != i:
# Add the current node as a future parent to be examined
nodes.append( i )
return trees_of_sentence | Given a sentence ( a list of EstNLTK's word tokens ), and a list of
dependency syntactic relations ( output of normalise_alignments() ),
builds trees ( estnltk.syntax.utils.Tree objects ) from the sentence,
and returns as a list of Trees (roots of trees).
Note that there is one-to-many correspondence between EstNLTK's
sentences and dependency syntactic trees, so the resulting list can
contain more than one tree (root); | Below is the instruction that describes the task:
### Input:
Given a sentence ( a list of EstNLTK's word tokens ), and a list of
dependency syntactic relations ( output of normalise_alignments() ),
builds trees ( estnltk.syntax.utils.Tree objects ) from the sentence,
and returns as a list of Trees (roots of trees).
Note that there is one-to-many correspondence between EstNLTK's
sentences and dependency syntactic trees, so the resulting list can
contain more than one tree (root);
### Response:
def build_trees_from_sentence( sentence, syntactic_relations, layer=LAYER_VISLCG3, \
                               sentence_id=0, **kwargs ):
    ''' Builds syntactic trees ( estnltk.syntax.utils.Tree objects ) from one
        sentence ( a list of EstNLTK's word tokens ), given a list of
        dependency syntactic relations ( the output of normalise_alignments() ),
        and returns the roots of the built trees as a list.
        Because one EstNLTK sentence may correspond to several dependency
        trees, the returned list can contain more than one root;
    '''
    roots = []
    # Breadth-first frontier of parent indices whose children still need to
    # be attached; -1 stands for the artificial root above the sentence.
    frontier = [ -1 ]
    while frontier:
        current = frontier.pop(0)
        # Attach every token whose parent is the current node
        for idx, syntax_token in enumerate( syntactic_relations ):
            # When several parents are listed, only the first one is honoured
            parent = syntax_token[PARSER_OUT][0][1]
            if parent != current:
                continue
            labels = [ rel[0] for rel in syntax_token[PARSER_OUT] ]
            subtree = Tree( sentence[idx], idx, sentence_id, labels, parser=layer )
            if INIT_PARSER_OUT in syntax_token:
                subtree.parser_output = syntax_token[INIT_PARSER_OUT]
            subtree.syntax_token = syntax_token
            if parent == -1 or parent == idx:
                # A new root: either a genuine root node, or an unnormalised
                # token still linked to itself, kept as a singleton tree
                roots.append( subtree )
            else:
                # Try to attach the child under each existing root's subtree
                for root in roots:
                    root.add_child_to_subtree( parent, subtree )
            if parent != idx:
                # Schedule this node as a future parent to be examined
                frontier.append( idx )
    return roots
def setup_logging(self):
"""Setup python logging handler."""
date_format = '%Y-%m-%dT%H:%M:%S'
log_format = '%(asctime)s %(levelname)s: %(message)s'
if self.opts.verbose:
lvl = logging.DEBUG
else:
lvl = logging.INFO
# Requests is a bit chatty
logging.getLogger('requests').setLevel('WARNING')
self.logger.setLevel(lvl)
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(lvl)
formatter = logging.Formatter(log_format, date_format)
stdout.setFormatter(formatter)
self.logger.addHandler(stdout)
# Decided not to use stderr
# stderr = logging.StreamHandler(sys.stderr)
# stderr.setLevel(logging.ERROR) # Error and above go to both stdout & stderr
# formatter = logging.Formatter(log_format, date_format)
# stderr.setFormatter(formatter)
# self.logger.addHandler(stderr)
log = self.opts.log or self.config['crony'].get('log_file')
if log:
logfile = logging.FileHandler(log)
logfile.setLevel(lvl)
formatter = logging.Formatter(log_format, date_format)
logfile.setFormatter(formatter)
self.logger.addHandler(logfile)
if self.sentry_client:
sentry = SentryHandler(self.sentry_client)
sentry.setLevel(logging.ERROR)
self.logger.addHandler(sentry)
        self.logger.debug('Logging setup complete.') | Setup python logging handler. | Below is the instruction that describes the task:
### Input:
Setup python logging handler.
### Response:
def setup_logging(self):
"""Setup python logging handler."""
date_format = '%Y-%m-%dT%H:%M:%S'
log_format = '%(asctime)s %(levelname)s: %(message)s'
if self.opts.verbose:
lvl = logging.DEBUG
else:
lvl = logging.INFO
# Requests is a bit chatty
logging.getLogger('requests').setLevel('WARNING')
self.logger.setLevel(lvl)
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(lvl)
formatter = logging.Formatter(log_format, date_format)
stdout.setFormatter(formatter)
self.logger.addHandler(stdout)
# Decided not to use stderr
# stderr = logging.StreamHandler(sys.stderr)
# stderr.setLevel(logging.ERROR) # Error and above go to both stdout & stderr
# formatter = logging.Formatter(log_format, date_format)
# stderr.setFormatter(formatter)
# self.logger.addHandler(stderr)
log = self.opts.log or self.config['crony'].get('log_file')
if log:
logfile = logging.FileHandler(log)
logfile.setLevel(lvl)
formatter = logging.Formatter(log_format, date_format)
logfile.setFormatter(formatter)
self.logger.addHandler(logfile)
if self.sentry_client:
sentry = SentryHandler(self.sentry_client)
sentry.setLevel(logging.ERROR)
self.logger.addHandler(sentry)
self.logger.debug('Logging setup complete.') |
def css(app, env):
"""
Add bolditalic CSS.
:param app: Sphinx application context.
:param env: Sphinx environment context.
"""
srcdir = os.path.abspath(os.path.dirname(__file__))
cssfile = 'bolditalic.css'
csspath = os.path.join(srcdir, cssfile)
buildpath = os.path.join(app.outdir, '_static')
try:
os.makedirs(buildpath)
except OSError:
if not os.path.isdir(buildpath):
raise
copy(csspath, buildpath)
app.add_stylesheet(cssfile)
return | Add bolditalic CSS.
:param app: Sphinx application context.
    :param env: Sphinx environment context. | Below is the instruction that describes the task:
### Input:
Add bolditalic CSS.
:param app: Sphinx application context.
:param env: Sphinx environment context.
### Response:
def css(app, env):
"""
Add bolditalic CSS.
:param app: Sphinx application context.
:param env: Sphinx environment context.
"""
srcdir = os.path.abspath(os.path.dirname(__file__))
cssfile = 'bolditalic.css'
csspath = os.path.join(srcdir, cssfile)
buildpath = os.path.join(app.outdir, '_static')
try:
os.makedirs(buildpath)
except OSError:
if not os.path.isdir(buildpath):
raise
copy(csspath, buildpath)
app.add_stylesheet(cssfile)
return |
def resolve_tree_i18n_alias(self, alias):
"""Resolves internationalized tree alias.
Verifies whether a separate sitetree is available for currently active language.
If so, returns i18n alias. If not, returns the initial alias.
:param str|unicode alias:
:rtype: str|unicode
"""
if alias not in _I18N_TREES:
return alias
current_language_code = self.current_lang
i18n_tree_alias = '%s_%s' % (alias, current_language_code)
trees_count = self.cache.get_entry('tree_aliases', i18n_tree_alias)
if trees_count is False:
trees_count = MODEL_TREE_CLASS.objects.filter(alias=i18n_tree_alias).count()
self.cache.set_entry('tree_aliases', i18n_tree_alias, trees_count)
if trees_count:
alias = i18n_tree_alias
return alias | Resolves internationalized tree alias.
Verifies whether a separate sitetree is available for currently active language.
If so, returns i18n alias. If not, returns the initial alias.
:param str|unicode alias:
        :rtype: str|unicode | Below is the instruction that describes the task:
### Input:
Resolves internationalized tree alias.
Verifies whether a separate sitetree is available for currently active language.
If so, returns i18n alias. If not, returns the initial alias.
:param str|unicode alias:
:rtype: str|unicode
### Response:
def resolve_tree_i18n_alias(self, alias):
"""Resolves internationalized tree alias.
Verifies whether a separate sitetree is available for currently active language.
If so, returns i18n alias. If not, returns the initial alias.
:param str|unicode alias:
:rtype: str|unicode
"""
if alias not in _I18N_TREES:
return alias
current_language_code = self.current_lang
i18n_tree_alias = '%s_%s' % (alias, current_language_code)
trees_count = self.cache.get_entry('tree_aliases', i18n_tree_alias)
if trees_count is False:
trees_count = MODEL_TREE_CLASS.objects.filter(alias=i18n_tree_alias).count()
self.cache.set_entry('tree_aliases', i18n_tree_alias, trees_count)
if trees_count:
alias = i18n_tree_alias
return alias |
def remove_resources(self, resources):
"""
Removes resources from the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
"""
resources_to_delete = self._build_resource_dictionary(resources)
for email in resources_to_delete.keys():
if email in self._resources:
del self._resources[email]
self._dirty_attributes.add(u'resources') | Removes resources from the event.
        *resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects. | Below is the instruction that describes the task:
### Input:
Removes resources from the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
### Response:
def remove_resources(self, resources):
"""
Removes resources from the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
"""
resources_to_delete = self._build_resource_dictionary(resources)
for email in resources_to_delete.keys():
if email in self._resources:
del self._resources[email]
self._dirty_attributes.add(u'resources') |
def gradient_list(start, end, steps):
"""Compute gradient colors"""
delta = [end[i] - start[i] for i in range(4)]
return [bytearray(start[j] + (delta[j] * i) // steps for j in range(4))
            for i in range(steps + 1)] | Compute gradient colors | Below is the instruction that describes the task:
### Input:
Compute gradient colors
### Response:
def gradient_list(start, end, steps):
"""Compute gradient colors"""
delta = [end[i] - start[i] for i in range(4)]
return [bytearray(start[j] + (delta[j] * i) // steps for j in range(4))
for i in range(steps + 1)] |
def p_expr_noin(self, p):
"""expr_noin : assignment_expr_noin
| expr_noin COMMA assignment_expr_noin
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ast.Comma(left=p[1], right=p[3]) | expr_noin : assignment_expr_noin
        | expr_noin COMMA assignment_expr_noin | Below is the instruction that describes the task:
### Input:
expr_noin : assignment_expr_noin
| expr_noin COMMA assignment_expr_noin
### Response:
def p_expr_noin(self, p):
"""expr_noin : assignment_expr_noin
| expr_noin COMMA assignment_expr_noin
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ast.Comma(left=p[1], right=p[3]) |
def addItem(self, item, message=None):
"""add a new Item class object"""
if message is None:
message = 'Adding item %s' % item.path
try:
v = Version.new(repo=self)
v.addItem(item)
v.save(message)
except VersionError, e:
            raise RepoError(e) | add a new Item class object | Below is the instruction that describes the task:
### Input:
add a new Item class object
### Response:
def addItem(self, item, message=None):
"""add a new Item class object"""
if message is None:
message = 'Adding item %s' % item.path
try:
v = Version.new(repo=self)
v.addItem(item)
v.save(message)
except VersionError, e:
raise RepoError(e) |
def __convert_json_to_projects_map(self, json):
""" Convert JSON format to the projects map format
map[ds][repository] = project
If a repository is in several projects assign to leaf
Check that all JSON data is in the database
:param json: data with the projects to repositories mapping
:returns: the repositories to projects mapping per data source
"""
ds_repo_to_prj = {}
for project in json:
for ds in json[project]:
if ds == "meta":
continue # not a real data source
if ds not in ds_repo_to_prj:
if ds not in ds_repo_to_prj:
ds_repo_to_prj[ds] = {}
for repo in json[project][ds]:
if repo in ds_repo_to_prj[ds]:
if project == ds_repo_to_prj[ds][repo]:
logger.debug("Duplicated repo: %s %s %s", ds, repo, project)
else:
if len(project.split(".")) > len(ds_repo_to_prj[ds][repo].split(".")):
logger.debug("Changed repo project because we found a leaf: %s leaf vs %s (%s, %s)",
project, ds_repo_to_prj[ds][repo], repo, ds)
ds_repo_to_prj[ds][repo] = project
else:
ds_repo_to_prj[ds][repo] = project
return ds_repo_to_prj | Convert JSON format to the projects map format
map[ds][repository] = project
If a repository is in several projects assign to leaf
Check that all JSON data is in the database
:param json: data with the projects to repositories mapping
        :returns: the repositories to projects mapping per data source | Below is the instruction that describes the task:
### Input:
Convert JSON format to the projects map format
map[ds][repository] = project
If a repository is in several projects assign to leaf
Check that all JSON data is in the database
:param json: data with the projects to repositories mapping
:returns: the repositories to projects mapping per data source
### Response:
def __convert_json_to_projects_map(self, json):
""" Convert JSON format to the projects map format
map[ds][repository] = project
If a repository is in several projects assign to leaf
Check that all JSON data is in the database
:param json: data with the projects to repositories mapping
:returns: the repositories to projects mapping per data source
"""
ds_repo_to_prj = {}
for project in json:
for ds in json[project]:
if ds == "meta":
continue # not a real data source
if ds not in ds_repo_to_prj:
if ds not in ds_repo_to_prj:
ds_repo_to_prj[ds] = {}
for repo in json[project][ds]:
if repo in ds_repo_to_prj[ds]:
if project == ds_repo_to_prj[ds][repo]:
logger.debug("Duplicated repo: %s %s %s", ds, repo, project)
else:
if len(project.split(".")) > len(ds_repo_to_prj[ds][repo].split(".")):
logger.debug("Changed repo project because we found a leaf: %s leaf vs %s (%s, %s)",
project, ds_repo_to_prj[ds][repo], repo, ds)
ds_repo_to_prj[ds][repo] = project
else:
ds_repo_to_prj[ds][repo] = project
return ds_repo_to_prj |
def default_create_thread(callback):
"""
Default thread creation - used to create threads when the client doesn't want to provide their
own thread creation.
:param function callback: the callback function provided to threading.Thread
"""
thread = threading.Thread(None, callback)
thread.daemon = True # Don't let thread prevent termination
thread.start()
return thread | Default thread creation - used to create threads when the client doesn't want to provide their
own thread creation.
    :param function callback: the callback function provided to threading.Thread | Below is the instruction that describes the task:
### Input:
Default thread creation - used to create threads when the client doesn't want to provide their
own thread creation.
:param function callback: the callback function provided to threading.Thread
### Response:
def default_create_thread(callback):
"""
Default thread creation - used to create threads when the client doesn't want to provide their
own thread creation.
:param function callback: the callback function provided to threading.Thread
"""
thread = threading.Thread(None, callback)
thread.daemon = True # Don't let thread prevent termination
thread.start()
return thread |
def update(self, *args, **kwargs):
""" update() method will *recursively* update nested dict:
>>> d=Dict({'a':{'b':{'c':3,'d':4},'h':4}})
>>> d.update({'a':{'b':{'c':'888'}}})
>>> d
{'a': {'b': {'c': '888', 'd': 4}, 'h': 4}}
please use update_dict() if you do not want this behaviour
"""
for arg in args:
if not arg:
continue
elif isinstance(arg, dict):
for k, v in arg.items():
self._update_kv(k, v)
elif isinstance(arg, (list, tuple)) and (not isinstance(arg[0], (list, tuple))):
k = arg[0]
v = arg[1]
self._update_kv(k, v)
elif isinstance(arg, (list, tuple)) or isgenerator(arg):
for k, v in arg:
self._update_kv(k, v)
else:
raise TypeError("update does not understand "
"{0} types".format(type(arg)))
for k, v in kwargs.items():
self._update_kv(k, v) | update() method will *recursively* update nested dict:
>>> d=Dict({'a':{'b':{'c':3,'d':4},'h':4}})
>>> d.update({'a':{'b':{'c':'888'}}})
>>> d
{'a': {'b': {'c': '888', 'd': 4}, 'h': 4}}
        please use update_dict() if you do not want this behaviour | Below is the instruction that describes the task:
### Input:
update() method will *recursively* update nested dict:
>>> d=Dict({'a':{'b':{'c':3,'d':4},'h':4}})
>>> d.update({'a':{'b':{'c':'888'}}})
>>> d
{'a': {'b': {'c': '888', 'd': 4}, 'h': 4}}
please use update_dict() if you do not want this behaviour
### Response:
def update(self, *args, **kwargs):
""" update() method will *recursively* update nested dict:
>>> d=Dict({'a':{'b':{'c':3,'d':4},'h':4}})
>>> d.update({'a':{'b':{'c':'888'}}})
>>> d
{'a': {'b': {'c': '888', 'd': 4}, 'h': 4}}
please use update_dict() if you do not want this behaviour
"""
for arg in args:
if not arg:
continue
elif isinstance(arg, dict):
for k, v in arg.items():
self._update_kv(k, v)
elif isinstance(arg, (list, tuple)) and (not isinstance(arg[0], (list, tuple))):
k = arg[0]
v = arg[1]
self._update_kv(k, v)
elif isinstance(arg, (list, tuple)) or isgenerator(arg):
for k, v in arg:
self._update_kv(k, v)
else:
raise TypeError("update does not understand "
"{0} types".format(type(arg)))
for k, v in kwargs.items():
self._update_kv(k, v) |
def vcpu_pin(vm_, vcpu, cpus):
'''
Set which CPUs a VCPU can use.
CLI Example:
.. code-block:: bash
salt 'foo' virt.vcpu_pin domU-id 2 1
salt 'foo' virt.vcpu_pin domU-id 2 2-6
'''
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
if vm_uuid is False:
return False
# from xm's main
def cpu_make_map(cpulist):
cpus = []
for c in cpulist.split(','):
if c == '':
continue
if '-' in c:
(x, y) = c.split('-')
for i in range(int(x), int(y) + 1):
cpus.append(int(i))
else:
# remove this element from the list
if c[0] == '^':
cpus = [x for x in cpus if x != int(c[1:])]
else:
cpus.append(int(c))
cpus.sort()
return ','.join(map(str, cpus))
if cpus == 'all':
cpumap = cpu_make_map('0-63')
else:
cpumap = cpu_make_map('{0}'.format(cpus))
try:
xapi.VM.add_to_VCPUs_params_live(vm_uuid,
'cpumap{0}'.format(vcpu), cpumap)
return True
# VM.add_to_VCPUs_params_live() implementation in xend 4.1+ has
# a bug which makes the client call fail.
# That code is accurate for all others XenAPI implementations, but
# for that particular one, fallback to xm / xl instead.
except Exception:
return __salt__['cmd.run'](
'{0} vcpu-pin {1} {2} {3}'.format(_get_xtool(), vm_, vcpu, cpus),
python_shell=False) | Set which CPUs a VCPU can use.
CLI Example:
.. code-block:: bash
salt 'foo' virt.vcpu_pin domU-id 2 1
        salt 'foo' virt.vcpu_pin domU-id 2 2-6 | Below is the instruction that describes the task:
### Input:
Set which CPUs a VCPU can use.
CLI Example:
.. code-block:: bash
salt 'foo' virt.vcpu_pin domU-id 2 1
salt 'foo' virt.vcpu_pin domU-id 2 2-6
### Response:
def vcpu_pin(vm_, vcpu, cpus):
'''
Set which CPUs a VCPU can use.
CLI Example:
.. code-block:: bash
salt 'foo' virt.vcpu_pin domU-id 2 1
salt 'foo' virt.vcpu_pin domU-id 2 2-6
'''
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
if vm_uuid is False:
return False
# from xm's main
def cpu_make_map(cpulist):
cpus = []
for c in cpulist.split(','):
if c == '':
continue
if '-' in c:
(x, y) = c.split('-')
for i in range(int(x), int(y) + 1):
cpus.append(int(i))
else:
# remove this element from the list
if c[0] == '^':
cpus = [x for x in cpus if x != int(c[1:])]
else:
cpus.append(int(c))
cpus.sort()
return ','.join(map(str, cpus))
if cpus == 'all':
cpumap = cpu_make_map('0-63')
else:
cpumap = cpu_make_map('{0}'.format(cpus))
try:
xapi.VM.add_to_VCPUs_params_live(vm_uuid,
'cpumap{0}'.format(vcpu), cpumap)
return True
# VM.add_to_VCPUs_params_live() implementation in xend 4.1+ has
# a bug which makes the client call fail.
# That code is accurate for all others XenAPI implementations, but
# for that particular one, fallback to xm / xl instead.
except Exception:
return __salt__['cmd.run'](
'{0} vcpu-pin {1} {2} {3}'.format(_get_xtool(), vm_, vcpu, cpus),
python_shell=False) |
def modify(self, pk=None, create_on_missing=False, **kwargs):
"""Modify an existing notification template.
Not all required configuration-related fields (required according to
notification_type) should be provided.
Fields in the resource's `identity` tuple can be used in lieu of a
primary key for a lookup; in such a case, only other fields are
written.
To modify unique fields, you must use the primary key for the lookup.
=====API DOCS=====
Modify an already existing object.
:param pk: Primary key of the resource to be modified.
:type pk: int
:param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects
matching the appropriate unique criteria is not found.
:type create_on_missing: bool
:param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the
resource object. if ``pk`` is not set, key-value pairs of ``**kwargs`` which are
                           also in resource's identity will be used to lookup existing resource.
:returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:
"changed", a flag indicating if the resource is successfully updated; "id", an integer which
is the primary key of the updated object.
:rtype: dict
=====API DOCS=====
"""
# Create the resource if needed.
if pk is None and create_on_missing:
try:
self.get(**copy.deepcopy(kwargs))
except exc.NotFound:
return self.create(**kwargs)
# Modify everything except notification type and configuration
config_item = self._separate(kwargs)
notification_type = kwargs.pop('notification_type', None)
debug.log('Modify everything except notification type and'
' configuration', header='details')
part_result = super(Resource, self).\
modify(pk=pk, create_on_missing=create_on_missing, **kwargs)
# Modify notification type and configuration
if notification_type is None or \
notification_type == part_result['notification_type']:
for item in part_result['notification_configuration']:
if item not in config_item or not config_item[item]:
to_add = part_result['notification_configuration'][item]
if not (to_add == '$encrypted$' and
item in Resource.encrypted_fields):
config_item[item] = to_add
if notification_type is None:
kwargs['notification_type'] = part_result['notification_type']
else:
kwargs['notification_type'] = notification_type
self._configuration(kwargs, config_item)
debug.log('Modify notification type and configuration',
header='details')
result = super(Resource, self).\
modify(pk=pk, create_on_missing=create_on_missing, **kwargs)
# Update 'changed' field to give general changed info
if 'changed' in result and 'changed' in part_result:
result['changed'] = result['changed'] or part_result['changed']
return result | Modify an existing notification template.
Not all required configuration-related fields (required according to
notification_type) should be provided.
Fields in the resource's `identity` tuple can be used in lieu of a
primary key for a lookup; in such a case, only other fields are
written.
To modify unique fields, you must use the primary key for the lookup.
=====API DOCS=====
Modify an already existing object.
:param pk: Primary key of the resource to be modified.
:type pk: int
:param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects
matching the appropriate unique criteria is not found.
:type create_on_missing: bool
:param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the
resource object. if ``pk`` is not set, key-value pairs of ``**kwargs`` which are
                           also in resource's identity will be used to lookup existing resource.
:returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:
"changed", a flag indicating if the resource is successfully updated; "id", an integer which
is the primary key of the updated object.
:rtype: dict
        =====API DOCS===== | Below is the instruction that describes the task:
### Input:
Modify an existing notification template.
Not all required configuration-related fields (required according to
notification_type) should be provided.
Fields in the resource's `identity` tuple can be used in lieu of a
primary key for a lookup; in such a case, only other fields are
written.
To modify unique fields, you must use the primary key for the lookup.
=====API DOCS=====
Modify an already existing object.
:param pk: Primary key of the resource to be modified.
:type pk: int
:param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects
matching the appropriate unique criteria is not found.
:type create_on_missing: bool
:param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the
resource object. if ``pk`` is not set, key-value pairs of ``**kwargs`` which are
                           also in resource's identity will be used to lookup existing resource.
:returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:
"changed", a flag indicating if the resource is successfully updated; "id", an integer which
is the primary key of the updated object.
:rtype: dict
=====API DOCS=====
### Response:
def modify(self, pk=None, create_on_missing=False, **kwargs):
"""Modify an existing notification template.
Not all required configuration-related fields (required according to
notification_type) should be provided.
Fields in the resource's `identity` tuple can be used in lieu of a
primary key for a lookup; in such a case, only other fields are
written.
To modify unique fields, you must use the primary key for the lookup.
=====API DOCS=====
Modify an already existing object.
:param pk: Primary key of the resource to be modified.
:type pk: int
:param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects
matching the appropriate unique criteria is not found.
:type create_on_missing: bool
:param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the
resource object. if ``pk`` is not set, key-value pairs of ``**kwargs`` which are
                           also in resource's identity will be used to lookup existing resource.
:returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:
"changed", a flag indicating if the resource is successfully updated; "id", an integer which
is the primary key of the updated object.
:rtype: dict
=====API DOCS=====
"""
# Create the resource if needed.
if pk is None and create_on_missing:
try:
self.get(**copy.deepcopy(kwargs))
except exc.NotFound:
return self.create(**kwargs)
# Modify everything except notification type and configuration
config_item = self._separate(kwargs)
notification_type = kwargs.pop('notification_type', None)
debug.log('Modify everything except notification type and'
' configuration', header='details')
part_result = super(Resource, self).\
modify(pk=pk, create_on_missing=create_on_missing, **kwargs)
# Modify notification type and configuration
if notification_type is None or \
notification_type == part_result['notification_type']:
for item in part_result['notification_configuration']:
if item not in config_item or not config_item[item]:
to_add = part_result['notification_configuration'][item]
if not (to_add == '$encrypted$' and
item in Resource.encrypted_fields):
config_item[item] = to_add
if notification_type is None:
kwargs['notification_type'] = part_result['notification_type']
else:
kwargs['notification_type'] = notification_type
self._configuration(kwargs, config_item)
debug.log('Modify notification type and configuration',
header='details')
result = super(Resource, self).\
modify(pk=pk, create_on_missing=create_on_missing, **kwargs)
# Update 'changed' field to give general changed info
if 'changed' in result and 'changed' in part_result:
result['changed'] = result['changed'] or part_result['changed']
return result |
def sign_metadata(metadata, key, cert, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1, digest_algorithm=OneLogin_Saml2_Constants.SHA1):
"""
Signs the metadata with the key/cert provided
:param metadata: SAML Metadata XML
:type metadata: string
:param key: x509 key
:type key: string
:param cert: x509 cert
:type cert: string
:returns: Signed Metadata
:rtype: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
:type digest_algorithm: string
"""
return OneLogin_Saml2_Utils.add_sign(metadata, key, cert, False, sign_algorithm, digest_algorithm) | Signs the metadata with the key/cert provided
:param metadata: SAML Metadata XML
:type metadata: string
:param key: x509 key
:type key: string
:param cert: x509 cert
:type cert: string
:returns: Signed Metadata
:rtype: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
        :type digest_algorithm: string | Below is the instruction that describes the task:
### Input:
Signs the metadata with the key/cert provided
:param metadata: SAML Metadata XML
:type metadata: string
:param key: x509 key
:type key: string
:param cert: x509 cert
:type cert: string
:returns: Signed Metadata
:rtype: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
:type digest_algorithm: string
### Response:
def sign_metadata(metadata, key, cert, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1, digest_algorithm=OneLogin_Saml2_Constants.SHA1):
"""
Signs the metadata with the key/cert provided
:param metadata: SAML Metadata XML
:type metadata: string
:param key: x509 key
:type key: string
:param cert: x509 cert
:type cert: string
:returns: Signed Metadata
:rtype: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
:type digest_algorithm: string
"""
return OneLogin_Saml2_Utils.add_sign(metadata, key, cert, False, sign_algorithm, digest_algorithm) |
def build_policy(self, name, statements, roles, is_managed_policy=False):
"""
Generate policy for IAM cloudformation template
:param name: Name of the policy
:param statements: The "rules" the policy should have
:param roles: The roles associated with this policy
:param is_managed_policy: True if managed policy
:return: Ref to new policy
"""
if is_managed_policy:
policy = ManagedPolicy(
self.name_strip(name, True),
PolicyDocument={
"Version": self.VERSION_IAM,
"Statement": statements,
},
Roles=roles,
Path=self.__role_path,
)
else:
policy = PolicyType(
self.name_strip(name, True),
PolicyName=self.name_strip(name, True),
PolicyDocument={
"Version": self.VERSION_IAM,
"Statement": statements,
},
Roles=roles,
)
self.__template.add_resource(policy)
return policy | Generate policy for IAM cloudformation template
:param name: Name of the policy
:param statements: The "rules" the policy should have
:param roles: The roles associated with this policy
:param is_managed_policy: True if managed policy
        :return: Ref to new policy | Below is the instruction that describes the task:
### Input:
Generate policy for IAM cloudformation template
:param name: Name of the policy
:param statements: The "rules" the policy should have
:param roles: The roles associated with this policy
:param is_managed_policy: True if managed policy
:return: Ref to new policy
### Response:
def build_policy(self, name, statements, roles, is_managed_policy=False):
"""
Generate policy for IAM cloudformation template
:param name: Name of the policy
:param statements: The "rules" the policy should have
:param roles: The roles associated with this policy
:param is_managed_policy: True if managed policy
:return: Ref to new policy
"""
if is_managed_policy:
policy = ManagedPolicy(
self.name_strip(name, True),
PolicyDocument={
"Version": self.VERSION_IAM,
"Statement": statements,
},
Roles=roles,
Path=self.__role_path,
)
else:
policy = PolicyType(
self.name_strip(name, True),
PolicyName=self.name_strip(name, True),
PolicyDocument={
"Version": self.VERSION_IAM,
"Statement": statements,
},
Roles=roles,
)
self.__template.add_resource(policy)
return policy |
def set_fd_value(tag, value):
"""
Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('d', value)
tag.value = value | Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
    :param tag: the tag to read | Below is the instruction that describes the task:
### Input:
Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
### Response:
def set_fd_value(tag, value):
"""
Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('d', value)
tag.value = value |
def services(self):
"""
Gets the services object which will provide the ArcGIS Server's
admin information about services and folders.
"""
if self._resources is None:
self.__init()
if "services" in self._resources:
url = self._url + "/services"
return _services.Services(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
else:
return None | Gets the services object which will provide the ArcGIS Server's
        admin information about services and folders. | Below is the instruction that describes the task:
### Input:
Gets the services object which will provide the ArcGIS Server's
admin information about services and folders.
### Response:
def services(self):
"""
Gets the services object which will provide the ArcGIS Server's
admin information about services and folders.
"""
if self._resources is None:
self.__init()
if "services" in self._resources:
url = self._url + "/services"
return _services.Services(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
else:
return None |
def disconnect_account(self):
"""
Disconnect current account from the application
:return:
"""
url = self.disconnect_url
result = self.get(url)
return result | Disconnect current account from the application
        :return: | Below is the instruction that describes the task:
### Input:
Disconnect current account from the application
:return:
### Response:
def disconnect_account(self):
"""
Disconnect current account from the application
:return:
"""
url = self.disconnect_url
result = self.get(url)
return result |
def copy(self):
"""
Get a copy of this item but with a new id
:return: copy of this object with a new id
:rtype: object
"""
# New dummy item with it's own running properties
copied_item = self.__class__({})
# Now, copy the properties
for prop in self.__class__.properties:
if prop in ['uuid']:
continue
val = getattr(self, prop, None)
if val is not None:
setattr(copied_item, prop, val)
# Also copy some running properties
# The custom variables
if hasattr(self, "customs"):
copied_item.customs = copy(self.customs)
# And tags/templates
if hasattr(self, "tags"):
copied_item.tags = copy(self.tags)
if hasattr(self, "templates"):
copied_item.templates = copy(self.templates)
return copied_item | Get a copy of this item but with a new id
:return: copy of this object with a new id
        :rtype: object | Below is the instruction that describes the task:
### Input:
Get a copy of this item but with a new id
:return: copy of this object with a new id
:rtype: object
### Response:
def copy(self):
"""
Get a copy of this item but with a new id
:return: copy of this object with a new id
:rtype: object
"""
# New dummy item with it's own running properties
copied_item = self.__class__({})
# Now, copy the properties
for prop in self.__class__.properties:
if prop in ['uuid']:
continue
val = getattr(self, prop, None)
if val is not None:
setattr(copied_item, prop, val)
# Also copy some running properties
# The custom variables
if hasattr(self, "customs"):
copied_item.customs = copy(self.customs)
# And tags/templates
if hasattr(self, "tags"):
copied_item.tags = copy(self.tags)
if hasattr(self, "templates"):
copied_item.templates = copy(self.templates)
return copied_item |
def read_resolv_conf(self, f):
"""Process f as a file in the /etc/resolv.conf format. If f is
a string, it is used as the name of the file to open; otherwise it
is treated as the file itself."""
if isinstance(f, basestring):
try:
f = open(f, 'r')
except IOError:
# /etc/resolv.conf doesn't exist, can't be read, etc.
# We'll just use the default resolver configuration.
self.nameservers = ['127.0.0.1']
return
want_close = True
else:
want_close = False
try:
for l in f:
l = l.strip()
if len(l) == 0 or l[0] == '#' or l[0] == ';':
continue
tokens = l.split()
if len(tokens) < 2:
continue
if tokens[0] == 'nameserver':
self.nameservers.append(tokens[1])
elif tokens[0] == 'domain':
self.domain = dns.name.from_text(tokens[1])
elif tokens[0] == 'search':
for suffix in tokens[1:]:
self.search.add(dns.name.from_text(suffix))
finally:
if want_close:
f.close()
if len(self.nameservers) == 0:
self.nameservers.append('127.0.0.1') | Process f as a file in the /etc/resolv.conf format. If f is
a string, it is used as the name of the file to open; otherwise it
    is treated as the file itself. | Below is the instruction that describes the task:
### Input:
Process f as a file in the /etc/resolv.conf format. If f is
a string, it is used as the name of the file to open; otherwise it
is treated as the file itself.
### Response:
def read_resolv_conf(self, f):
"""Process f as a file in the /etc/resolv.conf format. If f is
a string, it is used as the name of the file to open; otherwise it
is treated as the file itself."""
if isinstance(f, basestring):
try:
f = open(f, 'r')
except IOError:
# /etc/resolv.conf doesn't exist, can't be read, etc.
# We'll just use the default resolver configuration.
self.nameservers = ['127.0.0.1']
return
want_close = True
else:
want_close = False
try:
for l in f:
l = l.strip()
if len(l) == 0 or l[0] == '#' or l[0] == ';':
continue
tokens = l.split()
if len(tokens) < 2:
continue
if tokens[0] == 'nameserver':
self.nameservers.append(tokens[1])
elif tokens[0] == 'domain':
self.domain = dns.name.from_text(tokens[1])
elif tokens[0] == 'search':
for suffix in tokens[1:]:
self.search.add(dns.name.from_text(suffix))
finally:
if want_close:
f.close()
if len(self.nameservers) == 0:
self.nameservers.append('127.0.0.1') |
def allows_url (self, url_data):
"""Ask robots.txt allowance."""
roboturl = url_data.get_robots_txt_url()
with self.get_lock(roboturl):
            return self._allows_url(url_data, roboturl) | Ask robots.txt allowance. | Below is the instruction that describes the task:
### Input:
Ask robots.txt allowance.
### Response:
def allows_url (self, url_data):
"""Ask robots.txt allowance."""
roboturl = url_data.get_robots_txt_url()
with self.get_lock(roboturl):
return self._allows_url(url_data, roboturl) |
def buffer(self,
geometries,
inSR,
distances,
units,
outSR=None,
bufferSR=None,
unionResults=True,
geodesic=True
):
"""
The buffer operation is performed on a geometry service resource
The result of this operation is buffered polygons at the
specified distances for the input geometry array. Options are
available to union buffers and to use geodesic distance.
Inputs:
geometries - array of geometries (structured as JSON geometry
objects returned by the ArcGIS REST API).
inSR - spatial reference of the input geometries WKID.
outSR - spatial reference for the returned geometries.
bufferSR - WKID or a spatial reference JSON object in
which the geometries are buffered.
distances - distances that each of the input geometries is buffered
unit - units for calculating each buffer distance.
unionResults - if true, all geometries buffered at a given distance
are unioned into a single (possibly multipart) polygon,
and the unioned geometry is placed in the output array.
geodesic - set geodesic to true to buffer the using geodesic distance.
"""
url = self._url + "/buffer"
params = {
"f" : "json",
"inSR" : inSR,
"geodesic" : geodesic,
"unionResults" : unionResults
}
if isinstance(geometries, list) and len(geometries) > 0:
g = geometries[0]
if isinstance(g, Polygon):
params['geometries'] = {"geometryType": "esriGeometryPolygon",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Point):
params['geometries'] = {"geometryType": "esriGeometryPoint",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Polyline):
params['geometries'] = {"geometryType": "esriGeometryPolyline",
"geometries" : self.__geomToStringArray(geometries, "list")}
else:
return None
if isinstance(distances, list):
distances = [str(d) for d in distances]
params['distances'] = ",".join(distances)
else:
params['distances'] = str(distances)
params['units'] = units
if bufferSR is not None:
params['bufferSR'] = bufferSR
if outSR is not None:
params['outSR'] = outSR
return self._get(url, param_dict=params,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url) | The buffer operation is performed on a geometry service resource
The result of this operation is buffered polygons at the
specified distances for the input geometry array. Options are
available to union buffers and to use geodesic distance.
Inputs:
geometries - array of geometries (structured as JSON geometry
objects returned by the ArcGIS REST API).
inSR - spatial reference of the input geometries WKID.
outSR - spatial reference for the returned geometries.
bufferSR - WKID or a spatial reference JSON object in
which the geometries are buffered.
distances - distances that each of the input geometries is buffered
unit - units for calculating each buffer distance.
unionResults - if true, all geometries buffered at a given distance
are unioned into a single (possibly multipart) polygon,
and the unioned geometry is placed in the output array.
           geodesic - set geodesic to true to buffer the using geodesic distance. | Below is the instruction that describes the task:
### Input:
The buffer operation is performed on a geometry service resource
The result of this operation is buffered polygons at the
specified distances for the input geometry array. Options are
available to union buffers and to use geodesic distance.
Inputs:
geometries - array of geometries (structured as JSON geometry
objects returned by the ArcGIS REST API).
inSR - spatial reference of the input geometries WKID.
outSR - spatial reference for the returned geometries.
bufferSR - WKID or a spatial reference JSON object in
which the geometries are buffered.
distances - distances that each of the input geometries is buffered
unit - units for calculating each buffer distance.
unionResults - if true, all geometries buffered at a given distance
are unioned into a single (possibly multipart) polygon,
and the unioned geometry is placed in the output array.
geodesic - set geodesic to true to buffer the using geodesic distance.
### Response:
def buffer(self,
geometries,
inSR,
distances,
units,
outSR=None,
bufferSR=None,
unionResults=True,
geodesic=True
):
"""
The buffer operation is performed on a geometry service resource
The result of this operation is buffered polygons at the
specified distances for the input geometry array. Options are
available to union buffers and to use geodesic distance.
Inputs:
geometries - array of geometries (structured as JSON geometry
objects returned by the ArcGIS REST API).
inSR - spatial reference of the input geometries WKID.
outSR - spatial reference for the returned geometries.
bufferSR - WKID or a spatial reference JSON object in
which the geometries are buffered.
distances - distances that each of the input geometries is buffered
unit - units for calculating each buffer distance.
unionResults - if true, all geometries buffered at a given distance
are unioned into a single (possibly multipart) polygon,
and the unioned geometry is placed in the output array.
geodesic - set geodesic to true to buffer the using geodesic distance.
"""
url = self._url + "/buffer"
params = {
"f" : "json",
"inSR" : inSR,
"geodesic" : geodesic,
"unionResults" : unionResults
}
if isinstance(geometries, list) and len(geometries) > 0:
g = geometries[0]
if isinstance(g, Polygon):
params['geometries'] = {"geometryType": "esriGeometryPolygon",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Point):
params['geometries'] = {"geometryType": "esriGeometryPoint",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Polyline):
params['geometries'] = {"geometryType": "esriGeometryPolyline",
"geometries" : self.__geomToStringArray(geometries, "list")}
else:
return None
if isinstance(distances, list):
distances = [str(d) for d in distances]
params['distances'] = ",".join(distances)
else:
params['distances'] = str(distances)
params['units'] = units
if bufferSR is not None:
params['bufferSR'] = bufferSR
if outSR is not None:
params['outSR'] = outSR
return self._get(url, param_dict=params,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url) |
def _parse_path_table(self, ptr_size, extent):
# type: (int, int) -> Tuple[List[path_table_record.PathTableRecord], Dict[int, path_table_record.PathTableRecord]]
'''
An internal method to parse a path table on an ISO. For each path
table entry found, a Path Table Record object is created, and the
callback is called.
Parameters:
vd - The volume descriptor that these path table records correspond to.
extent - The extent at which this path table record starts.
callback - The callback to call for each path table record.
Returns:
A tuple consisting of the list of path table record entries and a
dictionary of the extent locations to the path table record entries.
'''
self._seek_to_extent(extent)
data = self._cdfp.read(ptr_size)
offset = 0
out = []
extent_to_ptr = {}
while offset < ptr_size:
ptr = path_table_record.PathTableRecord()
len_di_byte = bytearray([data[offset]])[0]
read_len = path_table_record.PathTableRecord.record_length(len_di_byte)
ptr.parse(data[offset:offset + read_len])
out.append(ptr)
extent_to_ptr[ptr.extent_location] = ptr
offset += read_len
return out, extent_to_ptr | An internal method to parse a path table on an ISO. For each path
table entry found, a Path Table Record object is created, and the
callback is called.
Parameters:
vd - The volume descriptor that these path table records correspond to.
extent - The extent at which this path table record starts.
callback - The callback to call for each path table record.
Returns:
A tuple consisting of the list of path table record entries and a
            dictionary of the extent locations to the path table record entries. | Below is the instruction that describes the task:
### Input:
An internal method to parse a path table on an ISO. For each path
table entry found, a Path Table Record object is created, and the
callback is called.
Parameters:
vd - The volume descriptor that these path table records correspond to.
extent - The extent at which this path table record starts.
callback - The callback to call for each path table record.
Returns:
A tuple consisting of the list of path table record entries and a
dictionary of the extent locations to the path table record entries.
### Response:
def _parse_path_table(self, ptr_size, extent):
# type: (int, int) -> Tuple[List[path_table_record.PathTableRecord], Dict[int, path_table_record.PathTableRecord]]
'''
An internal method to parse a path table on an ISO. For each path
table entry found, a Path Table Record object is created, and the
callback is called.
Parameters:
vd - The volume descriptor that these path table records correspond to.
extent - The extent at which this path table record starts.
callback - The callback to call for each path table record.
Returns:
A tuple consisting of the list of path table record entries and a
dictionary of the extent locations to the path table record entries.
'''
self._seek_to_extent(extent)
data = self._cdfp.read(ptr_size)
offset = 0
out = []
extent_to_ptr = {}
while offset < ptr_size:
ptr = path_table_record.PathTableRecord()
len_di_byte = bytearray([data[offset]])[0]
read_len = path_table_record.PathTableRecord.record_length(len_di_byte)
ptr.parse(data[offset:offset + read_len])
out.append(ptr)
extent_to_ptr[ptr.extent_location] = ptr
offset += read_len
return out, extent_to_ptr |
def merge_fastqs(job, job_vars):
"""
Unzips input sample and concats the Read1 and Read2 groups together.
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
single_end_reads = input_args['single_end_reads']
# I/O
sample = return_input_paths(job, work_dir, ids, 'sample.tar')
# Untar File
# subprocess.check_call(['unzip', sample, '-d', work_dir])
subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])
# Remove large files before creating concat versions.
os.remove(os.path.join(work_dir, 'sample.tar'))
# Zcat files in parallel
if single_end_reads:
files = sorted(glob.glob(os.path.join(work_dir, '*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
subprocess.check_call(['zcat'] + files, stdout=f1)
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
else:
r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)
with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)
p1.wait()
p2.wait()
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
job.fileStore.deleteGlobalFile(ids['sample.tar'])
# Spawn child job
return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv() | Unzips input sample and concats the Read1 and Read2 groups together.
    job_vars: tuple   Tuple of dictionaries: input_args and ids | Below is the instruction that describes the task:
### Input:
Unzips input sample and concats the Read1 and Read2 groups together.
job_vars: tuple Tuple of dictionaries: input_args and ids
### Response:
def merge_fastqs(job, job_vars):
"""
Unzips input sample and concats the Read1 and Read2 groups together.
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
single_end_reads = input_args['single_end_reads']
# I/O
sample = return_input_paths(job, work_dir, ids, 'sample.tar')
# Untar File
# subprocess.check_call(['unzip', sample, '-d', work_dir])
subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])
# Remove large files before creating concat versions.
os.remove(os.path.join(work_dir, 'sample.tar'))
# Zcat files in parallel
if single_end_reads:
files = sorted(glob.glob(os.path.join(work_dir, '*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
subprocess.check_call(['zcat'] + files, stdout=f1)
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
else:
r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)
with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)
p1.wait()
p2.wait()
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
job.fileStore.deleteGlobalFile(ids['sample.tar'])
# Spawn child job
return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv() |
def chat_react(self, msg_id, emoji='smile', **kwargs):
"""Updates the text of the chat message."""
        return self.__call_api_post('chat.react', messageId=msg_id, emoji=emoji, kwargs=kwargs) | Updates the text of the chat message. | Below is the instruction that describes the task:
### Input:
Updates the text of the chat message.
### Response:
def chat_react(self, msg_id, emoji='smile', **kwargs):
"""Updates the text of the chat message."""
return self.__call_api_post('chat.react', messageId=msg_id, emoji=emoji, kwargs=kwargs) |
def sort(filename, key, outputFile, fields=None, watermark=1024 * 1024 * 100):
"""Sort a potentially big file
filename - the input file (standard File format)
key - a list of field names to sort by
outputFile - the name of the output file
fields - a list of fields that should be included (all fields if None)
watermark - when available memory goes bellow the watermark create a new chunk
sort() works by reading as records from the file into memory
and calling _sortChunk() on each chunk. In the process it gets
rid of unneeded fields if any. Once all the chunks have been sorted and
written to chunk files it calls _merge() to merge all the chunks into a
single sorted file.
Note, that sort() gets a key that contains field names, which it converts
into field indices for _sortChunk() becuase _sortChunk() doesn't need to know
the field name.
sort() figures out by itself how many chunk files to use by reading records
from the file until the low watermark value of availabel memory is hit and
then it sorts the current records, generates a chunk file, clears the sorted
records and starts on a new chunk.
The key field names are turned into indices
"""
if fields is not None:
assert set(key).issubset(set([f[0] for f in fields]))
with FileRecordStream(filename) as f:
# Find the indices of the requested fields
if fields:
fieldNames = [ff[0] for ff in fields]
indices = [f.getFieldNames().index(name) for name in fieldNames]
assert len(indices) == len(fields)
else:
fileds = f.getFields()
fieldNames = f.getFieldNames()
indices = None
# turn key fields to key indices
key = [fieldNames.index(name) for name in key]
chunk = 0
records = []
for i, r in enumerate(f):
# Select requested fields only
if indices:
temp = []
for i in indices:
temp.append(r[i])
r = temp
# Store processed record
records.append(r)
# Check memory
available_memory = psutil.avail_phymem()
# If bellow the watermark create a new chunk, reset and keep going
if available_memory < watermark:
_sortChunk(records, key, chunk, fields)
records = []
chunk += 1
# Sort and write the remainder
if len(records) > 0:
_sortChunk(records, key, chunk, fields)
chunk += 1
# Marge all the files
_mergeFiles(key, chunk, outputFile, fields) | Sort a potentially big file
filename - the input file (standard File format)
key - a list of field names to sort by
outputFile - the name of the output file
fields - a list of fields that should be included (all fields if None)
watermark - when available memory goes bellow the watermark create a new chunk
sort() works by reading as records from the file into memory
and calling _sortChunk() on each chunk. In the process it gets
rid of unneeded fields if any. Once all the chunks have been sorted and
written to chunk files it calls _merge() to merge all the chunks into a
single sorted file.
Note, that sort() gets a key that contains field names, which it converts
into field indices for _sortChunk() becuase _sortChunk() doesn't need to know
the field name.
sort() figures out by itself how many chunk files to use by reading records
from the file until the low watermark value of availabel memory is hit and
then it sorts the current records, generates a chunk file, clears the sorted
records and starts on a new chunk.
  The key field names are turned into indices | Below is the instruction that describes the task:
### Input:
Sort a potentially big file
filename - the input file (standard File format)
key - a list of field names to sort by
outputFile - the name of the output file
fields - a list of fields that should be included (all fields if None)
watermark - when available memory goes bellow the watermark create a new chunk
sort() works by reading as records from the file into memory
and calling _sortChunk() on each chunk. In the process it gets
rid of unneeded fields if any. Once all the chunks have been sorted and
written to chunk files it calls _merge() to merge all the chunks into a
single sorted file.
Note, that sort() gets a key that contains field names, which it converts
into field indices for _sortChunk() becuase _sortChunk() doesn't need to know
the field name.
sort() figures out by itself how many chunk files to use by reading records
from the file until the low watermark value of availabel memory is hit and
then it sorts the current records, generates a chunk file, clears the sorted
records and starts on a new chunk.
The key field names are turned into indices
### Response:
def sort(filename, key, outputFile, fields=None, watermark=1024 * 1024 * 100):
"""Sort a potentially big file
filename - the input file (standard File format)
key - a list of field names to sort by
outputFile - the name of the output file
fields - a list of fields that should be included (all fields if None)
watermark - when available memory goes bellow the watermark create a new chunk
sort() works by reading as records from the file into memory
and calling _sortChunk() on each chunk. In the process it gets
rid of unneeded fields if any. Once all the chunks have been sorted and
written to chunk files it calls _merge() to merge all the chunks into a
single sorted file.
Note, that sort() gets a key that contains field names, which it converts
into field indices for _sortChunk() becuase _sortChunk() doesn't need to know
the field name.
sort() figures out by itself how many chunk files to use by reading records
from the file until the low watermark value of availabel memory is hit and
then it sorts the current records, generates a chunk file, clears the sorted
records and starts on a new chunk.
The key field names are turned into indices
"""
if fields is not None:
assert set(key).issubset(set([f[0] for f in fields]))
with FileRecordStream(filename) as f:
# Find the indices of the requested fields
if fields:
fieldNames = [ff[0] for ff in fields]
indices = [f.getFieldNames().index(name) for name in fieldNames]
assert len(indices) == len(fields)
else:
fileds = f.getFields()
fieldNames = f.getFieldNames()
indices = None
# turn key fields to key indices
key = [fieldNames.index(name) for name in key]
chunk = 0
records = []
for i, r in enumerate(f):
# Select requested fields only
if indices:
temp = []
for i in indices:
temp.append(r[i])
r = temp
# Store processed record
records.append(r)
# Check memory
available_memory = psutil.avail_phymem()
# If bellow the watermark create a new chunk, reset and keep going
if available_memory < watermark:
_sortChunk(records, key, chunk, fields)
records = []
chunk += 1
# Sort and write the remainder
if len(records) > 0:
_sortChunk(records, key, chunk, fields)
chunk += 1
# Marge all the files
_mergeFiles(key, chunk, outputFile, fields) |
def _system_path(self, subdir, basename=''):
'''
Gets the full path to the 'subdir/basename' file in the system binwalk directory.
@subdir - Subdirectory inside the system binwalk directory.
@basename - File name inside the subdirectory.
Returns the full path to the 'subdir/basename' file.
'''
try:
return self._file_path(os.path.join(self.system_dir, subdir), basename)
except KeyboardInterrupt as e:
raise e
except Exception:
return None | Gets the full path to the 'subdir/basename' file in the system binwalk directory.
@subdir - Subdirectory inside the system binwalk directory.
@basename - File name inside the subdirectory.
        Returns the full path to the 'subdir/basename' file. | Below is the instruction that describes the task:
### Input:
Gets the full path to the 'subdir/basename' file in the system binwalk directory.
@subdir - Subdirectory inside the system binwalk directory.
@basename - File name inside the subdirectory.
Returns the full path to the 'subdir/basename' file.
### Response:
def _system_path(self, subdir, basename=''):
'''
Gets the full path to the 'subdir/basename' file in the system binwalk directory.
@subdir - Subdirectory inside the system binwalk directory.
@basename - File name inside the subdirectory.
Returns the full path to the 'subdir/basename' file.
'''
try:
return self._file_path(os.path.join(self.system_dir, subdir), basename)
except KeyboardInterrupt as e:
raise e
except Exception:
return None |
async def _find_trigger(self,
request: Request,
origin: Optional[Text]=None,
internal: bool=False) \
-> Tuple[
Optional[BaseTrigger],
Optional[Type[BaseState]],
Optional[bool],
]:
"""
Find the best trigger for this request, or go away.
"""
reg = request.register
if not origin:
origin = reg.get(Register.STATE)
logger.debug('From state: %s', origin)
results = await asyncio.gather(*(
x.rank(request, origin)
for x
in self.transitions
if x.internal == internal
))
if len(results):
score, trigger, state, dnr = max(results, key=lambda x: x[0])
if score >= settings.MINIMAL_TRIGGER_SCORE:
return trigger, state, dnr
return None, None, None | Find the best trigger for this request, or go away. | Below is the the instruction that describes the task:
### Input:
Find the best trigger for this request, or go away.
### Response:
async def _find_trigger(self,
request: Request,
origin: Optional[Text]=None,
internal: bool=False) \
-> Tuple[
Optional[BaseTrigger],
Optional[Type[BaseState]],
Optional[bool],
]:
"""
Find the best trigger for this request, or go away.
"""
reg = request.register
if not origin:
origin = reg.get(Register.STATE)
logger.debug('From state: %s', origin)
results = await asyncio.gather(*(
x.rank(request, origin)
for x
in self.transitions
if x.internal == internal
))
if len(results):
score, trigger, state, dnr = max(results, key=lambda x: x[0])
if score >= settings.MINIMAL_TRIGGER_SCORE:
return trigger, state, dnr
return None, None, None |
def _first(self, tag):
''' Returns the first element with required POS-tag. '''
self.getelements()
for element in self.elements:
if tag in self.pos(element):
return element
        return None | Returns the first element with required POS-tag. | Below is the instruction that describes the task:
### Input:
Returns the first element with required POS-tag.
### Response:
def _first(self, tag):
''' Returns the first element with required POS-tag. '''
self.getelements()
for element in self.elements:
if tag in self.pos(element):
return element
return None |
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return '' | Helper method to return the deployment id matching the desired deployment label for
        this Swagger object based on the given api_name, swagger_file | Below is the instruction that describes the task:
### Input:
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
### Response:
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return '' |
def _clone(self, deepcopy=True):
"""Internal clone helper."""
clone = self._clone_base()
values_to_clone = ("spec", "projection", "skip", "limit",
"max_time_ms", "max_await_time_ms", "comment",
"max", "min", "ordering", "explain", "hint",
"batch_size", "max_scan", "manipulate",
"query_flags", "modifiers", "collation")
data = dict((k, v) for k, v in iteritems(self.__dict__)
if k.startswith('_Cursor__') and k[9:] in values_to_clone)
if deepcopy:
data = self._deepcopy(data)
clone.__dict__.update(data)
        return clone | Internal clone helper. | Below is the instruction that describes the task:
### Input:
Internal clone helper.
### Response:
def _clone(self, deepcopy=True):
"""Internal clone helper."""
clone = self._clone_base()
values_to_clone = ("spec", "projection", "skip", "limit",
"max_time_ms", "max_await_time_ms", "comment",
"max", "min", "ordering", "explain", "hint",
"batch_size", "max_scan", "manipulate",
"query_flags", "modifiers", "collation")
data = dict((k, v) for k, v in iteritems(self.__dict__)
if k.startswith('_Cursor__') and k[9:] in values_to_clone)
if deepcopy:
data = self._deepcopy(data)
clone.__dict__.update(data)
return clone |
def XML(content, source=None):
"""Parses the XML text using the ET.XML function, but handling the ParseError in
a user-friendly way.
"""
try:
tree = ET.XML(content)
except ET.ParseError as err:
x_parse_error(err, content, source)
return tree | Parses the XML text using the ET.XML function, but handling the ParseError in
    a user-friendly way. | Below is the instruction that describes the task:
### Input:
Parses the XML text using the ET.XML function, but handling the ParseError in
a user-friendly way.
### Response:
def XML(content, source=None):
"""Parses the XML text using the ET.XML function, but handling the ParseError in
a user-friendly way.
"""
try:
tree = ET.XML(content)
except ET.ParseError as err:
x_parse_error(err, content, source)
return tree |
def readBimFile(basefilename):
"""
Helper fuinction that reads bim files
"""
# read bim file
bim_fn = basefilename+'.bim'
rv = SP.loadtxt(bim_fn,delimiter='\t',usecols = (0,3),dtype=int)
    return rv | Helper fuinction that reads bim files | Below is the instruction that describes the task:
### Input:
Helper fuinction that reads bim files
### Response:
def readBimFile(basefilename):
"""
Helper fuinction that reads bim files
"""
# read bim file
bim_fn = basefilename+'.bim'
rv = SP.loadtxt(bim_fn,delimiter='\t',usecols = (0,3),dtype=int)
return rv |
def update_consumer_group(self, project, logstore, consumer_group, timeout=None, in_order=None):
""" Update consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:type timeout: int
:param timeout: timeout
:type in_order: bool
:param in_order: order
:return: None
"""
if in_order is None and timeout is None:
raise ValueError('in_order and timeout can\'t all be None')
elif in_order is not None and timeout is not None:
body_dict = {
'order': in_order,
'timeout': timeout
}
elif in_order is not None:
body_dict = {
'order': in_order
}
else:
body_dict = {
'timeout': timeout
}
body_str = six.b(json.dumps(body_dict))
headers = {
"x-log-bodyrawsize": str(len(body_str)),
"Content-Type": "application/json"
}
params = {}
resource = "/logstores/" + logstore + "/consumergroups/" + consumer_group
(resp, header) = self._send("PUT", project, body_str, resource, params, headers)
return UpdateConsumerGroupResponse(header, resp) | Update consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:type timeout: int
:param timeout: timeout
:type in_order: bool
:param in_order: order
:return: None | Below is the the instruction that describes the task:
### Input:
Update consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:type timeout: int
:param timeout: timeout
:type in_order: bool
:param in_order: order
:return: None
### Response:
def update_consumer_group(self, project, logstore, consumer_group, timeout=None, in_order=None):
    """Update an existing consumer group's settings.

    At least one of *timeout* and *in_order* must be given; only the
    supplied settings are sent to the service.

    :type project: string
    :param project: project name
    :type logstore: string
    :param logstore: logstore name
    :type consumer_group: string
    :param consumer_group: consumer group name
    :type timeout: int
    :param timeout: consumer heartbeat timeout
    :type in_order: bool
    :param in_order: whether consumption must be in order
    :return: UpdateConsumerGroupResponse
    :raises ValueError: if both *timeout* and *in_order* are None
    """
    if in_order is None and timeout is None:
        raise ValueError("in_order and timeout can't all be None")
    # Send only the fields that were actually provided.
    body_dict = {}
    if in_order is not None:
        body_dict['order'] = in_order
    if timeout is not None:
        body_dict['timeout'] = timeout
    body_str = six.b(json.dumps(body_dict))
    headers = {
        "x-log-bodyrawsize": str(len(body_str)),
        "Content-Type": "application/json"
    }
    resource = "/logstores/" + logstore + "/consumergroups/" + consumer_group
    (resp, header) = self._send("PUT", project, body_str, resource, {}, headers)
    return UpdateConsumerGroupResponse(header, resp)
def sync(self):
"""Retrieve panel information from ElkM1"""
self._elk.add_handler('VN', self._vn_handler)
self._elk.add_handler('XK', self._xk_handler)
self._elk.add_handler('RP', self._rp_handler)
self._elk.add_handler('IE', self._elk.call_sync_handlers)
self._elk.add_handler('SS', self._ss_handler)
self._elk.send(vn_encode())
self._elk.send(lw_encode())
self._elk.send(ss_encode()) | Retrieve panel information from ElkM1 | Below is the the instruction that describes the task:
### Input:
Retrieve panel information from ElkM1
### Response:
def sync(self):
    """Retrieve panel information from ElkM1.

    Registers handlers for the panel's reply message types, then sends
    the requests that trigger those replies.
    """
    # Register reply handlers first so no response is missed.
    self._elk.add_handler('VN', self._vn_handler)
    self._elk.add_handler('XK', self._xk_handler)
    self._elk.add_handler('RP', self._rp_handler)
    # 'IE' triggers a full re-sync of all registered sync handlers.
    self._elk.add_handler('IE', self._elk.call_sync_handlers)
    self._elk.add_handler('SS', self._ss_handler)
    # Request version ('vn'), 'lw' and status ('ss') reports -- message
    # semantics per the corresponding *_encode helpers; confirm in their docs.
    self._elk.send(vn_encode())
    self._elk.send(lw_encode())
    self._elk.send(ss_encode())
def generate_sample_module(module_dir):
"""Generate skeleton sample module."""
if os.path.isdir(module_dir):
LOGGER.error("Error generating sample module -- directory %s "
"already exists!",
module_dir)
sys.exit(1)
os.mkdir(module_dir) | Generate skeleton sample module. | Below is the the instruction that describes the task:
### Input:
Generate skeleton sample module.
### Response:
def generate_sample_module(module_dir):
    """Generate skeleton sample module.

    Creates *module_dir*; if the directory already exists, logs an error
    and exits the process with status 1.
    """
    if not os.path.isdir(module_dir):
        os.mkdir(module_dir)
        return
    LOGGER.error("Error generating sample module -- directory %s "
                 "already exists!",
                 module_dir)
    sys.exit(1)
def _parse_from_file(self, file_path):
"""
см. описание _parse_from_text.
Только на вход подаётся не текст, а путь к файлу.
"""
file_path = abspath(file_path)
if not isfile(file_path):
raise MarkovTextExcept("Передан не файл.")
with open(file_path, "rb") as txt_file:
for line in txt_file:
text = line.decode("utf-8", "ignore").strip()
if not text:
continue
yield from self._parse_from_text(text) | см. описание _parse_from_text.
Только на вход подаётся не текст, а путь к файлу. | Below is the the instruction that describes the task:
### Input:
см. описание _parse_from_text.
Только на вход подаётся не текст, а путь к файлу.
### Response:
def _parse_from_file(self, file_path):
    """
    Same as _parse_from_text, except the input is a path to a text file
    rather than the text itself.  (Translated from the original Russian
    docstring.)
    """
    file_path = abspath(file_path)
    if not isfile(file_path):
        # Runtime message intentionally kept in Russian ("Not a file.").
        raise MarkovTextExcept("Передан не файл.")
    with open(file_path, "rb") as txt_file:
        for line in txt_file:
            # Decode permissively and skip blank lines.
            text = line.decode("utf-8", "ignore").strip()
            if not text:
                continue
            yield from self._parse_from_text(text)
def panel_capacity(panel):
"""
Returns the panel capacity in MW.
Parameters
----------
panel : string
Panel name, e.g. "Sunpower"
Returns
-------
capacity : float
In MW
"""
c = vresutils.reatlas.solarpanelconf_to_solar_panel_config_object(panel)
return c['A'] + c['B'] * 1000 + c['C'] * np.log(1000) | Returns the panel capacity in MW.
Parameters
----------
panel : string
Panel name, e.g. "Sunpower"
Returns
-------
capacity : float
In MW | Below is the the instruction that describes the task:
### Input:
Returns the panel capacity in MW.
Parameters
----------
panel : string
Panel name, e.g. "Sunpower"
Returns
-------
capacity : float
In MW
### Response:
def panel_capacity(panel):
    """
    Return the capacity of a solar panel in MW.

    Parameters
    ----------
    panel : string
        Panel name, e.g. "Sunpower"

    Returns
    -------
    capacity : float
        In MW
    """
    config = vresutils.reatlas.solarpanelconf_to_solar_panel_config_object(panel)
    # Capacity is evaluated at a reference irradiance of 1000 W/m^2.
    reference_irradiance = 1000
    return (config['A']
            + config['B'] * reference_irradiance
            + config['C'] * np.log(reference_irradiance))
def window_lanczos(N):
r"""Lanczos window also known as sinc window.
:param N: window length
.. math:: w(n) = sinc \left( \frac{2n}{N-1} - 1 \right)
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'lanczos')
.. seealso:: :func:`create_window`, :class:`Window`
"""
if N ==1:
return ones(1)
n = linspace(-N/2., N/2., N)
win = sinc(2*n/(N-1.))
return win | r"""Lanczos window also known as sinc window.
:param N: window length
.. math:: w(n) = sinc \left( \frac{2n}{N-1} - 1 \right)
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'lanczos')
.. seealso:: :func:`create_window`, :class:`Window` | Below is the the instruction that describes the task:
### Input:
r"""Lanczos window also known as sinc window.
:param N: window length
.. math:: w(n) = sinc \left( \frac{2n}{N-1} - 1 \right)
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'lanczos')
.. seealso:: :func:`create_window`, :class:`Window`
### Response:
def window_lanczos(N):
    r"""Lanczos window (also known as the sinc window) of length *N*.

    .. math:: w(n) = sinc \left( \frac{2n}{N-1} - 1 \right)

    :param N: window length

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    if N == 1:
        # Degenerate single-sample window.
        return ones(1)
    # Sample positions centred on zero, then evaluate the normalized sinc.
    samples = linspace(-N/2., N/2., N)
    return sinc(2*samples/(N-1.))
def create_sdist(self, tag):
"""Create an sdist and return the full file path of the .tar.gz."""
logger.info("Making tempdir for %s with tag %s...",
self.package, tag)
self.wrapper.vcs.checkout_from_tag(tag)
# checkout_from_tag() chdirs to a temp directory that we need to clean up
# later.
self.temp_tagdir = os.path.realpath(os.getcwd())
logger.debug("Tag checkout placed in %s", self.temp_tagdir)
python = sys.executable
logger.debug(command("%s setup.py sdist" % python))
tarball = find_tarball(self.temp_tagdir, self.package, tag)
return tarball | Create an sdist and return the full file path of the .tar.gz. | Below is the the instruction that describes the task:
### Input:
Create an sdist and return the full file path of the .tar.gz.
### Response:
def create_sdist(self, tag):
    """Create an sdist and return the full file path of the .tar.gz."""
    logger.info("Making tempdir for %s with tag %s...",
                self.package, tag)
    self.wrapper.vcs.checkout_from_tag(tag)
    # checkout_from_tag() chdirs to a temp directory that we need to clean up
    # later.
    self.temp_tagdir = os.path.realpath(os.getcwd())
    logger.debug("Tag checkout placed in %s", self.temp_tagdir)
    # Build the sdist with the same interpreter that is running us.
    python = sys.executable
    # command() presumably runs the shell line and returns its output,
    # which is then logged -- confirm against its implementation.
    logger.debug(command("%s setup.py sdist" % python))
    # Locate the tarball the build just produced under the checkout.
    tarball = find_tarball(self.temp_tagdir, self.package, tag)
    return tarball
def poll_output(self):
"""
Append lines from stdout to self.output.
Returns:
list: The lines added since last call
"""
if self.block:
return self.output
new_list = self.output[self.old_output_size:]
self.old_output_size += len(new_list)
return new_list | Append lines from stdout to self.output.
Returns:
list: The lines added since last call | Below is the the instruction that describes the task:
### Input:
Append lines from stdout to self.output.
Returns:
list: The lines added since last call
### Response:
def poll_output(self):
    """
    Return the stdout lines accumulated since the previous call.

    Returns:
        list: The lines added since last call (all of ``self.output``
        when running in blocking mode).
    """
    if self.block:
        # Blocking mode: output is already complete; hand it all back.
        return self.output
    fresh = self.output[self.old_output_size:]
    self.old_output_size += len(fresh)
    return fresh
def remove_comments(tex_source):
"""Delete latex comments from TeX source.
Parameters
----------
tex_source : str
TeX source content.
Returns
-------
tex_source : str
TeX source without comments.
"""
# Expression via http://stackoverflow.com/a/13365453
return re.sub(r'(?<!\\)%.*$', r'', tex_source, flags=re.M) | Delete latex comments from TeX source.
Parameters
----------
tex_source : str
TeX source content.
Returns
-------
tex_source : str
TeX source without comments. | Below is the the instruction that describes the task:
### Input:
Delete latex comments from TeX source.
Parameters
----------
tex_source : str
TeX source content.
Returns
-------
tex_source : str
TeX source without comments.
### Response:
def remove_comments(tex_source):
    """Strip LaTeX comments from TeX source.

    Parameters
    ----------
    tex_source : str
        TeX source content.

    Returns
    -------
    str
        The source with every unescaped ``%``-to-end-of-line comment
        removed; escaped ``\\%`` sequences are preserved.
    """
    # Negative lookbehind keeps escaped percent signs (\%) intact.
    # Expression via http://stackoverflow.com/a/13365453
    comment_pattern = re.compile(r'(?<!\\)%.*$', flags=re.M)
    return comment_pattern.sub('', tex_source)
def _setup_adc(self, flags):
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', flags))) | Initialize ADC | Below is the the instruction that describes the task:
### Input:
Initialize ADC
### Response:
def _setup_adc(self, flags):
    '''Initialize ADC.

    Writes the single configuration byte *flags* to the device register
    at MAX_1239_ADD, relative to this block's base address -- presumably
    the setup byte of a MAX1239 ADC, per the constant's name.
    '''
    self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', flags)))
def _adjust_cwd(self, path):
"""
Return an adjusted path if we're emulating a "current working
directory" for the server.
"""
path = b(path)
if self._cwd is None:
return path
if len(path) and path[0:1] == b_slash:
# absolute path
return path
if self._cwd == b_slash:
return self._cwd + path
return self._cwd + b_slash + path | Return an adjusted path if we're emulating a "current working
directory" for the server. | Below is the the instruction that describes the task:
### Input:
Return an adjusted path if we're emulating a "current working
directory" for the server.
### Response:
def _adjust_cwd(self, path):
    """
    Return *path* adjusted for the emulated "current working directory"
    of the server: absolute paths (and any path when no cwd is set) pass
    through unchanged; relative paths are joined onto the cwd.
    """
    path = b(path)
    # No emulated cwd, or an already-absolute path: nothing to adjust.
    if self._cwd is None or (len(path) and path[0:1] == b_slash):
        return path
    # Avoid doubling the separator when the cwd is the root itself.
    prefix = self._cwd if self._cwd == b_slash else self._cwd + b_slash
    return prefix + path
def rename(self, old_table, new_table):
"""
Rename a table.
You must have ALTER and DROP privileges for the original table,
and CREATE and INSERT privileges for the new table.
"""
try:
command = 'RENAME TABLE {0} TO {1}'.format(wrap(old_table), wrap(new_table))
except:
command = 'ALTER TABLE {0} RENAME {1}'.format(wrap(old_table), wrap(new_table))
self.execute(command)
self._printer('Renamed {0} to {1}'.format(wrap(old_table), wrap(new_table)))
return old_table, new_table | Rename a table.
You must have ALTER and DROP privileges for the original table,
and CREATE and INSERT privileges for the new table. | Below is the the instruction that describes the task:
### Input:
Rename a table.
You must have ALTER and DROP privileges for the original table,
and CREATE and INSERT privileges for the new table.
### Response:
def rename(self, old_table, new_table):
    """
    Rename a table.

    You must have ALTER and DROP privileges for the original table,
    and CREATE and INSERT privileges for the new table.

    :param old_table: current table name
    :param new_table: desired table name
    :returns: ``(old_table, new_table)``
    """
    try:
        command = 'RENAME TABLE {0} TO {1}'.format(wrap(old_table), wrap(new_table))
    except Exception:
        # Fall back to the ALTER TABLE spelling.  NOTE(review): the format
        # call above should never raise, so this branch looks dead; it is
        # kept for backward compatibility, but the original bare `except:`
        # is narrowed so KeyboardInterrupt/SystemExit are no longer swallowed.
        command = 'ALTER TABLE {0} RENAME {1}'.format(wrap(old_table), wrap(new_table))
    self.execute(command)
    self._printer('Renamed {0} to {1}'.format(wrap(old_table), wrap(new_table)))
    return old_table, new_table
def list_folders(location='\\'):
r'''
List all folders located in a specific location in the task scheduler.
:param str location: A string value representing the folder from which you
want to list tasks. Default is '\\' which is the root for the task
scheduler (C:\Windows\System32\tasks).
:return: Returns a list of folders.
:rtype: list
CLI Example:
.. code-block:: bash
salt 'minion-id' task.list_folders
'''
# Create the task service object
with salt.utils.winapi.Com():
task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect()
# Get the folder to list folders from
task_folder = task_service.GetFolder(location)
folders = task_folder.GetFolders(0)
ret = []
for folder in folders:
ret.append(folder.Name)
return ret | r'''
List all folders located in a specific location in the task scheduler.
:param str location: A string value representing the folder from which you
want to list tasks. Default is '\\' which is the root for the task
scheduler (C:\Windows\System32\tasks).
:return: Returns a list of folders.
:rtype: list
CLI Example:
.. code-block:: bash
salt 'minion-id' task.list_folders | Below is the the instruction that describes the task:
### Input:
r'''
List all folders located in a specific location in the task scheduler.
:param str location: A string value representing the folder from which you
want to list tasks. Default is '\\' which is the root for the task
scheduler (C:\Windows\System32\tasks).
:return: Returns a list of folders.
:rtype: list
CLI Example:
.. code-block:: bash
salt 'minion-id' task.list_folders
### Response:
def list_folders(location='\\'):
    r'''
    List all folders located in a specific location in the task scheduler.

    :param str location: A string value representing the folder from which you
        want to list tasks. Default is '\\' which is the root for the task
        scheduler (C:\Windows\System32\tasks).

    :return: Returns a list of folders.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' task.list_folders
    '''
    # Create the task service object
    with salt.utils.winapi.Com():
        task_service = win32com.client.Dispatch("Schedule.Service")
        task_service.Connect()
        # Get the folder to list folders from
        task_folder = task_service.GetFolder(location)
        # 0: default enumeration flags -- presumably "no hidden items";
        # confirm against the Task Scheduler COM documentation.
        folders = task_folder.GetFolders(0)
        ret = []
        for folder in folders:
            ret.append(folder.Name)
    return ret
def disable_reporters(self):
"""disable all reporters"""
for _reporters in self._reports.values():
for report_id, _, _ in _reporters:
self.disable_report(report_id) | disable all reporters | Below is the the instruction that describes the task:
### Input:
disable all reporters
### Response:
def disable_reporters(self):
    """Disable every registered reporter."""
    for reporter_group in self._reports.values():
        for reporter_id, _, _ in reporter_group:
            self.disable_report(reporter_id)
def _set_network(self, v, load=False):
"""
Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_network is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network() directly.
YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """network must be of a type compatible with enumeration""",
'defined-type': "brocade-ospfv3:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)""",
})
self.__network = t
if hasattr(self, '_set'):
self._set() | Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_network is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network() directly.
YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type. | Below is the the instruction that describes the task:
### Input:
Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_network is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network() directly.
YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type.
### Response:
def _set_network(self, v, load=False):
    """
    Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_network is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_network() directly.

    YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type.
    """
    # Let union-typed wrappers coerce the value to their member type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Constrain the value to the YANG enumeration
        # {broadcast: 1, point-to-point: 2}; invalid values raise below.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the structured error payload expected by callers.
        raise ValueError({
            'error-string': """network must be of a type compatible with enumeration""",
            'defined-type': "brocade-ospfv3:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)""",
        })
    self.__network = t
    # Invoke the change hook, if one is installed on this object.
    if hasattr(self, '_set'):
        self._set()
def code_binary(item):
"""
Return a binary 'code' suitable for hashing.
"""
code_str = code(item)
if isinstance(code_str, six.string_types):
return code_str.encode('utf-8')
return code_str | Return a binary 'code' suitable for hashing. | Below is the the instruction that describes the task:
### Input:
Return a binary 'code' suitable for hashing.
### Response:
def code_binary(item):
    """
    Return a binary 'code' suitable for hashing.

    Text results from ``code()`` are UTF-8 encoded; anything already
    binary passes through unchanged.
    """
    encoded = code(item)
    if not isinstance(encoded, six.string_types):
        return encoded
    return encoded.encode('utf-8')
def git_clone(target_dir, repo_location, branch_or_tag=None, verbose=True):
"""Clone repo at repo_location to target_dir and checkout branch_or_tag.
If branch_or_tag is not specified, the HEAD of the primary
branch of the cloned repo is checked out.
"""
target_dir = pipes.quote(target_dir)
command = ['git', 'clone']
if verbose:
command.append('--verbose')
if os.path.isdir(repo_location):
command.append('--no-hardlinks')
command.extend([pipes.quote(repo_location), target_dir])
if branch_or_tag:
command.extend(['--branch', branch_or_tag])
return execute_git_command(command) | Clone repo at repo_location to target_dir and checkout branch_or_tag.
If branch_or_tag is not specified, the HEAD of the primary
branch of the cloned repo is checked out. | Below is the the instruction that describes the task:
### Input:
Clone repo at repo_location to target_dir and checkout branch_or_tag.
If branch_or_tag is not specified, the HEAD of the primary
branch of the cloned repo is checked out.
### Response:
def git_clone(target_dir, repo_location, branch_or_tag=None, verbose=True):
    """Clone repo at repo_location to target_dir and checkout branch_or_tag.

    If branch_or_tag is not specified, the HEAD of the primary
    branch of the cloned repo is checked out.

    :param target_dir: destination directory for the clone.
    :param repo_location: URL or local path of the source repository.
    :param branch_or_tag: optional ref passed to ``--branch``.
    :param verbose: pass ``--verbose`` to git when True.
    :returns: whatever ``execute_git_command`` returns.
    """
    # NOTE(review): pipes.quote is deprecated in favour of shlex.quote, and
    # quoting individual list elements only makes sense if
    # execute_git_command joins them into a shell string -- confirm against
    # its implementation.
    target_dir = pipes.quote(target_dir)
    command = ['git', 'clone']
    if verbose:
        command.append('--verbose')
    if os.path.isdir(repo_location):
        # Local source: skip hardlinking so the clone is fully independent.
        command.append('--no-hardlinks')
    command.extend([pipes.quote(repo_location), target_dir])
    if branch_or_tag:
        command.extend(['--branch', branch_or_tag])
    return execute_git_command(command)
def create_new_layer(layer, n_dim):
''' create new layer for the graph
'''
input_shape = layer.output.shape
dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim), StubReLU]
if is_layer(layer, "ReLU"):
conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim)]
dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
elif is_layer(layer, "Dropout"):
dense_deeper_classes = [StubDense, StubReLU]
elif is_layer(layer, "BatchNormalization"):
conv_deeper_classes = [get_conv_class(n_dim), StubReLU]
layer_class = None
if len(input_shape) == 1:
# It is in the dense layer part.
layer_class = sample(dense_deeper_classes, 1)[0]
else:
# It is in the conv layer part.
layer_class = sample(conv_deeper_classes, 1)[0]
if layer_class == StubDense:
new_layer = StubDense(input_shape[0], input_shape[0])
elif layer_class == get_dropout_class(n_dim):
new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)
elif layer_class == get_conv_class(n_dim):
new_layer = layer_class(
input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1
)
elif layer_class == get_batch_norm_class(n_dim):
new_layer = layer_class(input_shape[-1])
elif layer_class == get_pooling_class(n_dim):
new_layer = layer_class(sample((1, 3, 5), 1)[0])
else:
new_layer = layer_class()
return new_layer | create new layer for the graph | Below is the the instruction that describes the task:
### Input:
create new layer for the graph
### Response:
def create_new_layer(layer, n_dim):
    ''' create new layer for the graph

    Randomly picks a layer type compatible with where *layer* sits in the
    network (dense vs. convolutional part) and returns a freshly
    constructed stub layer whose width matches the incoming shape.
    '''
    input_shape = layer.output.shape
    # Candidate layer types used to deepen each region of the network.
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim), StubReLU]
    # Avoid stacking a layer of the same kind immediately after itself.
    if is_layer(layer, "ReLU"):
        conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim)]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, "Dropout"):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, "BatchNormalization"):
        conv_deeper_classes = [get_conv_class(n_dim), StubReLU]
    layer_class = None
    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        # It is in the conv layer part.
        layer_class = sample(conv_deeper_classes, 1)[0]
    # Instantiate the chosen class with shape-preserving arguments.
    if layer_class == StubDense:
        new_layer = StubDense(input_shape[0], input_shape[0])
    elif layer_class == get_dropout_class(n_dim):
        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)
    elif layer_class == get_conv_class(n_dim):
        # Random kernel size in {1, 3, 5}; stride 1 keeps the spatial size
        # convention used elsewhere, channel count stays unchanged.
        new_layer = layer_class(
            input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1
        )
    elif layer_class == get_batch_norm_class(n_dim):
        new_layer = layer_class(input_shape[-1])
    elif layer_class == get_pooling_class(n_dim):
        new_layer = layer_class(sample((1, 3, 5), 1)[0])
    else:
        new_layer = layer_class()
    return new_layer
def play_Note(self, note, channel=1, velocity=100):
"""Play a Note object on a channel with a velocity[0-127].
You can either specify the velocity and channel here as arguments or
you can set the Note.velocity and Note.channel attributes, which
will take presedence over the function arguments.
"""
if hasattr(note, 'velocity'):
velocity = note.velocity
if hasattr(note, 'channel'):
channel = note.channel
self.play_event(int(note) + 12, int(channel), int(velocity))
self.notify_listeners(self.MSG_PLAY_INT, {'channel': int(channel),
'note': int(note) + 12, 'velocity': int(velocity)})
self.notify_listeners(self.MSG_PLAY_NOTE, {'channel': int(channel),
'note': note, 'velocity': int(velocity)})
return True | Play a Note object on a channel with a velocity[0-127].
You can either specify the velocity and channel here as arguments or
you can set the Note.velocity and Note.channel attributes, which
will take presedence over the function arguments. | Below is the the instruction that describes the task:
### Input:
Play a Note object on a channel with a velocity[0-127].
You can either specify the velocity and channel here as arguments or
you can set the Note.velocity and Note.channel attributes, which
will take precedence over the function arguments.
### Response:
def play_Note(self, note, channel=1, velocity=100):
    """Play a Note object on a channel with a velocity [0-127].

    Attributes set on the note itself (``note.velocity``,
    ``note.channel``) take precedence over the corresponding arguments.
    """
    velocity = getattr(note, 'velocity', velocity)
    channel = getattr(note, 'channel', channel)
    # int(note) + 12 shifts to the MIDI note-number convention used by
    # play_event -- presumably a one-octave offset.
    self.play_event(int(note) + 12, int(channel), int(velocity))
    self.notify_listeners(self.MSG_PLAY_INT, {'channel': int(channel),
        'note': int(note) + 12, 'velocity': int(velocity)})
    self.notify_listeners(self.MSG_PLAY_NOTE, {'channel': int(channel),
        'note': note, 'velocity': int(velocity)})
    return True
def obsolete_rename(oldname, newfunc):
"""
Simple obsolete/removed method decorator
Parameters
----------
oldname : str
The name of the old obsolete name
newfunc : FunctionType
Replacement unbound member function.
"""
newname = newfunc.__name__
def __obsolete(*args, **kwargs):
warnings.warn(
"{oldname} is obsolete and is removed in PyQt5. "
"Use {newname} instead.".format(oldname=oldname, newname=newname),
DeprecationWarning,
stacklevel=2
)
return newfunc(*args, **kwargs)
__obsolete.__name__ = oldname
return __obsolete | Simple obsolete/removed method decorator
Parameters
----------
oldname : str
The name of the old obsolete name
newfunc : FunctionType
Replacement unbound member function. | Below is the the instruction that describes the task:
### Input:
Simple obsolete/removed method decorator
Parameters
----------
oldname : str
The name of the old obsolete name
newfunc : FunctionType
Replacement unbound member function.
### Response:
def obsolete_rename(oldname, newfunc):
    """
    Simple obsolete/removed method decorator.

    Parameters
    ----------
    oldname : str
        The obsolete name being shimmed.
    newfunc : FunctionType
        Replacement unbound member function.

    Returns
    -------
    A wrapper named *oldname* that emits a DeprecationWarning and then
    delegates to *newfunc*.
    """
    newname = newfunc.__name__

    def __obsolete(*args, **kwargs):
        message = (
            "{oldname} is obsolete and is removed in PyQt5. "
            "Use {newname} instead.".format(oldname=oldname, newname=newname)
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return newfunc(*args, **kwargs)

    # Present the shim under the obsolete name for introspection/tracebacks.
    __obsolete.__name__ = oldname
    return __obsolete
def setLocation(self, location):
""" Change the upper left-hand corner to a new ``Location``
Doesn't change width or height
"""
if not location or not isinstance(location, Location):
raise ValueError("setLocation expected a Location object")
self.x = location.x
self.y = location.y
return self | Change the upper left-hand corner to a new ``Location``
Doesn't change width or height | Below is the the instruction that describes the task:
### Input:
Change the upper left-hand corner to a new ``Location``
Doesn't change width or height
### Response:
def setLocation(self, location):
    """ Move the upper left-hand corner to the given ``Location``.

    Width and height are unchanged; returns ``self`` for chaining.
    """
    if not (location and isinstance(location, Location)):
        raise ValueError("setLocation expected a Location object")
    self.x = location.x
    self.y = location.y
    return self
def get_param_value_from_func_call(param_name, func, call_args, call_kwargs):
"""
Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
>>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
33
"""
signature = inspect.signature(func)
params_list = signature.parameters.keys()
if param_name not in params_list:
raise TypeError(f"'{param_name}' not found in {func.__name__}"
f"parameters list ([{params_list}])")
call = signature.bind(*call_args, **call_kwargs)
call.apply_defaults()
return call.arguments[param_name] | Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
>>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
33 | Below is the the instruction that describes the task:
### Input:
Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
>>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
33
### Response:
def get_param_value_from_func_call(param_name, func, call_args, call_kwargs):
    """
    Get the value of a function's parameter based on its signature
    and the call's args and kwargs.

    Example:
        >>> def foo(a, b, c=3, d=4):
        ...     pass
        ...
        >>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
        >>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
        33

    :raises TypeError: if *param_name* is not a parameter of *func*.
    """
    signature = inspect.signature(func)
    params_list = signature.parameters.keys()
    if param_name not in params_list:
        # Bug fix: the original message was missing the space between the
        # function name and "parameters list" ("fooparameters list").
        raise TypeError(f"'{param_name}' not found in {func.__name__} "
                        f"parameters list ([{params_list}])")
    # Bind the call and fill in defaults so every parameter has a value.
    call = signature.bind(*call_args, **call_kwargs)
    call.apply_defaults()
    return call.arguments[param_name]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.