code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def start_process(self, name):
"""Start a new consumer process for the given consumer name
:param str name: The consumer name
"""
process_name, proc = self.new_process(name)
LOGGER.info('Spawning %s process for %s', process_name, name)
# Append the process to the consumer process list
self.consumers[name].processes[process_name] = proc
# Start the process
try:
proc.start()
except IOError as error:
LOGGER.critical('Failed to start %s for %s: %r',
process_name, name, error)
del self.consumers[name].process[process_name] | Start a new consumer process for the given consumer name
    :param str name: The consumer name | Below is the instruction that describes the task:
### Input:
Start a new consumer process for the given consumer name
:param str name: The consumer name
### Response:
def start_process(self, name):
"""Start a new consumer process for the given consumer name
:param str name: The consumer name
"""
process_name, proc = self.new_process(name)
LOGGER.info('Spawning %s process for %s', process_name, name)
# Append the process to the consumer process list
self.consumers[name].processes[process_name] = proc
# Start the process
try:
proc.start()
except IOError as error:
LOGGER.critical('Failed to start %s for %s: %r',
process_name, name, error)
del self.consumers[name].process[process_name] |
def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None):
    """
    List uncollated submission versions.

    Gives a paginated, uncollated list of submission versions for all matching
    submissions in the context. The returned SubmissionVersion objects will not
    include the +new_grade+ or +previous_grade+ keys, only the +grade+; same
    for +graded_at+ and +grader+.

    :param course_id: The id of the contextual course for this API call.
    :param ascending: If set, return versions in ascending date order
        (oldest first); by default they are returned newest first.
    :param assignment_id: Restrict results to one assignment, if given.
    :param user_id: Restrict results to one user, if given.
    """
    path = {}
    data = {}
    params = {}
    # REQUIRED - PATH - course_id
    """The id of the contextual course for this API call"""
    path["course_id"] = course_id
    # OPTIONAL - assignment_id
    """The ID of the assignment for which you want to see submissions. If
    absent, versions of submissions from any assignment in the course are
    included."""
    if assignment_id is not None:
        params["assignment_id"] = assignment_id
    # OPTIONAL - user_id
    """The ID of the user for which you want to see submissions. If absent,
    versions of submissions from any user in the course are included."""
    if user_id is not None:
        params["user_id"] = user_id
    # OPTIONAL - ascending
    """Returns submission versions in ascending date order (oldest first). If
    absent, returns submission versions in descending date order (newest
    first)."""
    if ascending is not None:
        params["ascending"] = ascending
    self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/feed".format(**path), data=data, params=params, all_pages=True) | List uncollated submission versions.
Gives a paginated, uncollated list of submission versions for all matching
submissions in the context. This SubmissionVersion objects will not include
the +new_grade+ or +previous_grade+ keys, only the +grade+; same for
+graded_at+ and +grader+. | Below is the instruction that describes the task:
### Input:
List uncollated submission versions.
Gives a paginated, uncollated list of submission versions for all matching
submissions in the context. This SubmissionVersion objects will not include
the +new_grade+ or +previous_grade+ keys, only the +grade+; same for
+graded_at+ and +grader+.
### Response:
def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None):
"""
List uncollated submission versions.
Gives a paginated, uncollated list of submission versions for all matching
submissions in the context. This SubmissionVersion objects will not include
the +new_grade+ or +previous_grade+ keys, only the +grade+; same for
+graded_at+ and +grader+.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""The id of the contextual course for this API call"""
path["course_id"] = course_id
# OPTIONAL - assignment_id
"""The ID of the assignment for which you want to see submissions. If
absent, versions of submissions from any assignment in the course are
included."""
if assignment_id is not None:
params["assignment_id"] = assignment_id
# OPTIONAL - user_id
"""The ID of the user for which you want to see submissions. If absent,
versions of submissions from any user in the course are included."""
if user_id is not None:
params["user_id"] = user_id
# OPTIONAL - ascending
"""Returns submission versions in ascending date order (oldest first). If
absent, returns submission versions in descending date order (newest
first)."""
if ascending is not None:
params["ascending"] = ascending
self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/feed".format(**path), data=data, params=params, all_pages=True) |
def tpot_driver(args):
    """Perform a TPOT run.

    Loads the data file named in *args*, splits it into train/holdout sets,
    fits a TPOT estimator configured entirely from *args*, prints scores
    according to the verbosity level, and optionally exports the best
    pipeline found.

    :param args: parsed command-line options (attributes in UPPER_CASE)
    """
    if args.VERBOSITY >= 2:
        _print_args(args)
    input_data = _read_data_file(args)
    # Features are every column except the prediction target.
    features = input_data.drop(args.TARGET_NAME, axis=1)
    training_features, testing_features, training_target, testing_target = \
        train_test_split(features, input_data[args.TARGET_NAME], random_state=args.RANDOM_STATE)
    # Choose the estimator class by task type; anything other than
    # 'classification' is treated as regression.
    tpot_type = TPOTClassifier if args.TPOT_MODE == 'classification' else TPOTRegressor
    scoring_func = load_scoring_function(args.SCORING_FN)
    tpot_obj = tpot_type(
        generations=args.GENERATIONS,
        population_size=args.POPULATION_SIZE,
        offspring_size=args.OFFSPRING_SIZE,
        mutation_rate=args.MUTATION_RATE,
        crossover_rate=args.CROSSOVER_RATE,
        cv=args.NUM_CV_FOLDS,
        subsample=args.SUBSAMPLE,
        n_jobs=args.NUM_JOBS,
        scoring=scoring_func,
        max_time_mins=args.MAX_TIME_MINS,
        max_eval_time_mins=args.MAX_EVAL_MINS,
        random_state=args.RANDOM_STATE,
        config_dict=args.CONFIG_FILE,
        template=args.TEMPLATE,
        memory=args.MEMORY,
        periodic_checkpoint_folder=args.CHECKPOINT_FOLDER,
        early_stop=args.EARLY_STOP,
        verbosity=args.VERBOSITY,
        disable_update_check=args.DISABLE_UPDATE_CHECK
    )
    tpot_obj.fit(training_features, training_target)
    # NOTE(review): _optimized_pipeline and _pareto_front are private TPOT
    # attributes -- this relies on TPOT internals, not its public API.
    if args.VERBOSITY in [1, 2] and tpot_obj._optimized_pipeline:
        training_score = max([x.wvalues[1] for x in tpot_obj._pareto_front.keys])
        print('\nTraining score: {}'.format(training_score))
        print('Holdout score: {}'.format(tpot_obj.score(testing_features, testing_target)))
    elif args.VERBOSITY >= 3 and tpot_obj._pareto_front:
        # At high verbosity, report the holdout score of every pipeline on
        # the Pareto front.
        print('Final Pareto front testing scores:')
        pipelines = zip(tpot_obj._pareto_front.items, reversed(tpot_obj._pareto_front.keys))
        for pipeline, pipeline_scores in pipelines:
            tpot_obj._fitted_pipeline = tpot_obj.pareto_front_fitted_pipelines_[str(pipeline)]
            print('{TRAIN_SCORE}\t{TEST_SCORE}\t{PIPELINE}'.format(
                TRAIN_SCORE=int(pipeline_scores.wvalues[0]),
                TEST_SCORE=tpot_obj.score(testing_features, testing_target),
                PIPELINE=pipeline
            )
            )
    if args.OUTPUT_FILE:
tpot_obj.export(args.OUTPUT_FILE) | Perform a TPOT run. | Below is the the instruction that describes the task:
### Input:
Perform a TPOT run.
### Response:
def tpot_driver(args):
"""Perform a TPOT run."""
if args.VERBOSITY >= 2:
_print_args(args)
input_data = _read_data_file(args)
features = input_data.drop(args.TARGET_NAME, axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, input_data[args.TARGET_NAME], random_state=args.RANDOM_STATE)
tpot_type = TPOTClassifier if args.TPOT_MODE == 'classification' else TPOTRegressor
scoring_func = load_scoring_function(args.SCORING_FN)
tpot_obj = tpot_type(
generations=args.GENERATIONS,
population_size=args.POPULATION_SIZE,
offspring_size=args.OFFSPRING_SIZE,
mutation_rate=args.MUTATION_RATE,
crossover_rate=args.CROSSOVER_RATE,
cv=args.NUM_CV_FOLDS,
subsample=args.SUBSAMPLE,
n_jobs=args.NUM_JOBS,
scoring=scoring_func,
max_time_mins=args.MAX_TIME_MINS,
max_eval_time_mins=args.MAX_EVAL_MINS,
random_state=args.RANDOM_STATE,
config_dict=args.CONFIG_FILE,
template=args.TEMPLATE,
memory=args.MEMORY,
periodic_checkpoint_folder=args.CHECKPOINT_FOLDER,
early_stop=args.EARLY_STOP,
verbosity=args.VERBOSITY,
disable_update_check=args.DISABLE_UPDATE_CHECK
)
tpot_obj.fit(training_features, training_target)
if args.VERBOSITY in [1, 2] and tpot_obj._optimized_pipeline:
training_score = max([x.wvalues[1] for x in tpot_obj._pareto_front.keys])
print('\nTraining score: {}'.format(training_score))
print('Holdout score: {}'.format(tpot_obj.score(testing_features, testing_target)))
elif args.VERBOSITY >= 3 and tpot_obj._pareto_front:
print('Final Pareto front testing scores:')
pipelines = zip(tpot_obj._pareto_front.items, reversed(tpot_obj._pareto_front.keys))
for pipeline, pipeline_scores in pipelines:
tpot_obj._fitted_pipeline = tpot_obj.pareto_front_fitted_pipelines_[str(pipeline)]
print('{TRAIN_SCORE}\t{TEST_SCORE}\t{PIPELINE}'.format(
TRAIN_SCORE=int(pipeline_scores.wvalues[0]),
TEST_SCORE=tpot_obj.score(testing_features, testing_target),
PIPELINE=pipeline
)
)
if args.OUTPUT_FILE:
tpot_obj.export(args.OUTPUT_FILE) |
def Log_startViolationsReport(self, config):
	"""
	Function path: Log.startViolationsReport
		Domain: Log
		Method name: startViolationsReport
		Parameters:
			Required arguments:
				'config' (type: array) -> Configuration for violations.
		No return value.
		Description: start violation reporting.
	"""
	# 'config' must be array-like per the protocol schema above.
	# NOTE(review): assert is stripped under 'python -O'; consider raising
	# TypeError for real input validation.
	assert isinstance(config, (list, tuple)
		), "Argument 'config' must be of type '['list', 'tuple']'. Received type: '%s'" % type(
		config)
	subdom_funcs = self.synchronous_command('Log.startViolationsReport',
		config=config)
return subdom_funcs | Function path: Log.startViolationsReport
Domain: Log
Method name: startViolationsReport
Parameters:
Required arguments:
'config' (type: array) -> Configuration for violations.
No return value.
Description: start violation reporting. | Below is the instruction that describes the task:
### Input:
Function path: Log.startViolationsReport
Domain: Log
Method name: startViolationsReport
Parameters:
Required arguments:
'config' (type: array) -> Configuration for violations.
No return value.
Description: start violation reporting.
### Response:
def Log_startViolationsReport(self, config):
"""
Function path: Log.startViolationsReport
Domain: Log
Method name: startViolationsReport
Parameters:
Required arguments:
'config' (type: array) -> Configuration for violations.
No return value.
Description: start violation reporting.
"""
assert isinstance(config, (list, tuple)
), "Argument 'config' must be of type '['list', 'tuple']'. Received type: '%s'" % type(
config)
subdom_funcs = self.synchronous_command('Log.startViolationsReport',
config=config)
return subdom_funcs |
def serialize(self):
    """Serialize this Acknowledge into a plain dict.

    Used when transferring data to other daemons over the network (HTTP);
    all attributes are returned directly.

    :return: json-serializable representation of an Acknowledge
    :rtype: dict
    """
    return {'uuid': self.uuid, 'ref': self.ref, 'sticky': self.sticky, 'notify': self.notify,
            'end_time': self.end_time, 'author': self.author, 'comment': self.comment} | This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Acknowledge
:rtype: dict | Below is the the instruction that describes the task:
### Input:
This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Acknowledge
:rtype: dict
### Response:
def serialize(self):
"""This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Acknowledge
:rtype: dict
"""
return {'uuid': self.uuid, 'ref': self.ref, 'sticky': self.sticky, 'notify': self.notify,
'end_time': self.end_time, 'author': self.author, 'comment': self.comment} |
def to_csv(self, output_file=None, *, fields=None, fields_to_explode=None, append=False, header=True, header_prefix='', sep=',', newline='\n'):
    """
    Parameters
    ----------
    output_file: str or file object or None
        The file to which output will be written. By default, any existing content is
        overwritten. Use `append=True` to open the file in append mode instead.
        If `output_file` is None, the generated CSV output is returned as a string
        instead of written to a file.
    fields: list or dict
        List of field names to export, or dictionary mapping output column names
        to attribute names of the generators.
        Examples:
            fields=['field_name_1', 'field_name_2']
            fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
    fields_to_explode: list
        Optional list of field names where each entry (which must itself be a sequence)
        is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.)
    append: bool
        If `True`, open the file in 'append' mode to avoid overwriting existing content.
        Default is `False`, i.e. any existing content will be overwritten.
        This argument only has an effect if `output_file` is given (i.e. if output happens
        to a file instead of returning a CSV string).
    header: bool or str or None
        If `header=False` or `header=None` then no header line will be written.
        If `header` is a string then this string will be used as the header line.
        If `header=True` then a header line will be automatically generated from
        the field names of the custom generator.
    header_prefix: str
        If `header=True` then the auto-generated header line will be prefixed
        with `header_prefix` (otherwise this argument has no effect). For example,
        set `header_prefix='#'` to make the header line start with '#'. Default: ''
    sep: str
        Field separator to use in the output. Default: ','
    newline: str
        Line terminator to use in the output. Default: '\n'
    Returns
    -------
    The return value depends on the value of `output_file`.
    If `output_file` is given, writes the output to the file and returns `None`.
    If `output_file` is `None`, returns a string containing the CSV output.
    """
    assert isinstance(append, bool)
    if fields is None:
        raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list")
    if fields_to_explode is not None:
        raise NotImplementedError("TODO: the 'fields_to_explode' argument is not supported for CSV export yet.")
    if isinstance(fields, (list, tuple)):
        # Normalize the list form to the dict form: column name == attribute name.
        fields = {name: name for name in fields}
    header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline)
    if output_file is None:
        file_or_string = io.StringIO()
    elif isinstance(output_file, str):
        mode = 'a' if append else 'w'
        # BUG FIX: ensure the parent directory exists *before* opening the
        # file -- open() fails immediately if the directory is missing, so
        # the makedirs() call previously never had a chance to help.
        dirname = os.path.dirname(os.path.abspath(output_file))
        if not os.path.exists(dirname):
            logger.debug(f"Creating parent directory of output file '{output_file}'")
            os.makedirs(dirname)
        file_or_string = open(output_file, mode)
    elif isinstance(output_file, io.IOBase):
        # NOTE(review): caller-supplied file objects are closed in the
        # `finally` block below -- confirm callers expect this.
        file_or_string = output_file
    else:
        raise TypeError(f"Invalid output file: {output_file} (type: {type(output_file)})")
    retval = None
    attr_getters = [attrgetter(attr_name) for attr_name in fields.values()]
    try:
        # TODO: quick-and-dirty solution to enable writing to gzip files; tidy this up!
        # (Note that for regular file output we don't want to encode each line to a bytes
        # object because this seems to be ca. 2x slower).
        if isinstance(file_or_string, gzip.GzipFile):
            file_or_string.write(header_line.encode())
            for x in self.items:
                line = sep.join([format(func(x)) for func in attr_getters]) + newline
                file_or_string.write(line.encode())
        else:
            file_or_string.write(header_line)
            for x in self.items:
                line = sep.join([format(func(x)) for func in attr_getters]) + newline
                file_or_string.write(line)
        if output_file is None:
            retval = file_or_string.getvalue()
    finally:
        file_or_string.close()
return retval | Parameters
----------
output_file: str or file object or None
The file to which output will be written. By default, any existing content is
overwritten. Use `append=True` to open the file in append mode instead.
If `output_file` is None, the generated CSV output is returned as a string
instead of written to a file.
fields: list or dict
List of field names to export, or dictionary mapping output column names
to attribute names of the generators.
Examples:
fields=['field_name_1', 'field_name_2']
fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
fields_to_explode: list
Optional list of field names where each entry (which must itself be a sequence)
is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.)
append: bool
If `True`, open the file in 'append' mode to avoid overwriting existing content.
Default is `False`, i.e. any existing content will be overwritten.
This argument only has an effect if `output_file` is given (i.e. if output happens
to a file instead of returning a CSV string).
header: bool or str or None
If `header=False` or `header=None` then no header line will be written.
If `header` is a string then this string will be used as the header line.
If `header=True` then a header line will be automatically generated from
the field names of the custom generator.
header_prefix: str
If `header=True` then the auto-generated header line will be prefixed
with `header_prefix` (otherwise this argument has no effect). For example,
set `header_prefix='#'` to make the header line start with '#'. Default: ''
sep: str
Field separator to use in the output. Default: ','
newline: str
Line terminator to use in the output. Default: '\n'
Returns
-------
The return value depends on the value of `output_file`.
If `output_file` is given, writes the output to the file and returns `None`.
If `output_file` is `None`, returns a string containing the CSV output. | Below is the instruction that describes the task:
### Input:
Parameters
----------
output_file: str or file object or None
The file to which output will be written. By default, any existing content is
overwritten. Use `append=True` to open the file in append mode instead.
If `output_file` is None, the generated CSV output is returned as a string
instead of written to a file.
fields: list or dict
List of field names to export, or dictionary mapping output column names
to attribute names of the generators.
Examples:
fields=['field_name_1', 'field_name_2']
fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
fields_to_explode: list
Optional list of field names where each entry (which must itself be a sequence)
is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.)
append: bool
If `True`, open the file in 'append' mode to avoid overwriting existing content.
Default is `False`, i.e. any existing content will be overwritten.
This argument only has an effect if `output_file` is given (i.e. if output happens
to a file instead of returning a CSV string).
header: bool or str or None
If `header=False` or `header=None` then no header line will be written.
If `header` is a string then this string will be used as the header line.
If `header=True` then a header line will be automatically generated from
the field names of the custom generator.
header_prefix: str
If `header=True` then the auto-generated header line will be prefixed
with `header_prefix` (otherwise this argument has no effect). For example,
set `header_prefix='#'` to make the header line start with '#'. Default: ''
sep: str
Field separator to use in the output. Default: ','
newline: str
Line terminator to use in the output. Default: '\n'
Returns
-------
The return value depends on the value of `output_file`.
If `output_file` is given, writes the output to the file and returns `None`.
If `output_file` is `None`, returns a string containing the CSV output.
### Response:
def to_csv(self, output_file=None, *, fields=None, fields_to_explode=None, append=False, header=True, header_prefix='', sep=',', newline='\n'):
"""
Parameters
----------
output_file: str or file object or None
The file to which output will be written. By default, any existing content is
overwritten. Use `append=True` to open the file in append mode instead.
If `output_file` is None, the generated CSV output is returned as a string
instead of written to a file.
fields: list or dict
List of field names to export, or dictionary mapping output column names
to attribute names of the generators.
Examples:
fields=['field_name_1', 'field_name_2']
fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'}
fields_to_explode: list
Optional list of field names where each entry (which must itself be a sequence)
is to be "exploded" into separate rows. (*Note:* this is not supported yet for CSV export.)
append: bool
If `True`, open the file in 'append' mode to avoid overwriting existing content.
Default is `False`, i.e. any existing content will be overwritten.
This argument only has an effect if `output_file` is given (i.e. if output happens
to a file instead of returning a CSV string).
header: bool or str or None
If `header=False` or `header=None` then no header line will be written.
If `header` is a string then this string will be used as the header line.
If `header=True` then a header line will be automatically generated from
the field names of the custom generator.
header_prefix: str
If `header=True` then the auto-generated header line will be prefixed
with `header_prefix` (otherwise this argument has no effect). For example,
set `header_prefix='#'` to make the header line start with '#'. Default: ''
sep: str
Field separator to use in the output. Default: ','
newline: str
Line terminator to use in the output. Default: '\n'
Returns
-------
The return value depends on the value of `output_file`.
If `output_file` is given, writes the output to the file and returns `None`.
If `output_file` is `None`, returns a string containing the CSV output.
"""
assert isinstance(append, bool)
if fields is None:
raise NotImplementedError("TODO: derive field names automatically from the generator which produced this item list")
if fields_to_explode is not None:
raise NotImplementedError("TODO: the 'fields_to_explode' argument is not supported for CSV export yet.")
if isinstance(fields, (list, tuple)):
fields = {name: name for name in fields}
header_line = _generate_csv_header_line(header=header, header_prefix=header_prefix, header_names=fields.keys(), sep=sep, newline=newline)
if output_file is None:
file_or_string = io.StringIO()
elif isinstance(output_file, str):
mode = 'a' if append else 'w'
file_or_string = open(output_file, mode)
# ensure parent directory of output file exits
dirname = os.path.dirname(os.path.abspath(output_file))
if not os.path.exists(dirname):
logger.debug(f"Creating parent directory of output file '{output_file}'")
os.makedirs(dirname)
elif isinstance(output_file, io.IOBase):
file_or_string = output_file
else:
raise TypeError(f"Invalid output file: {output_file} (type: {type(output_file)})")
retval = None
attr_getters = [attrgetter(attr_name) for attr_name in fields.values()]
try:
# TODO: quick-and-dirty solution to enable writing to gzip files; tidy this up!
# (Note that for regular file output we don't want to encode each line to a bytes
# object because this seems to be ca. 2x slower).
if isinstance(file_or_string, gzip.GzipFile):
file_or_string.write(header_line.encode())
for x in self.items:
line = sep.join([format(func(x)) for func in attr_getters]) + newline
file_or_string.write(line.encode())
else:
file_or_string.write(header_line)
for x in self.items:
line = sep.join([format(func(x)) for func in attr_getters]) + newline
file_or_string.write(line)
if output_file is None:
retval = file_or_string.getvalue()
finally:
file_or_string.close()
return retval |
def list_to_string(l = range(200), width = 40, indent = " "):
    """
    Converts a list-like to string with given line width.

    Each value is rendered with ``str`` and joined by commas; a new line
    (prefixed by *indent*) is started whenever adding the next piece would
    exceed *width* characters on the current line.
    """
    l = [str(v) + "," for v in l]
    counter = 0
    out = "" + indent
    for w in l:
        s = len(w)
        # Wrap before this piece would overflow the current line.
        if counter + s > width:
            out += "\n" + indent
            counter = 0
        out += w
        counter += s
    return out.strip(",") | Converts a list-like to string with given line width. | Below is the instruction that describes the task:
### Input:
Converts a list-like to string with given line width.
### Response:
def list_to_string(l = range(200), width = 40, indent = " "):
"""
Converts a list-like to string with given line width.
"""
l = [str(v) + "," for v in l]
counter = 0
out = "" + indent
for w in l:
s = len(w)
if counter + s > width:
out += "\n" + indent
counter = 0
out += w
counter += s
return out.strip(",") |
def filter_pastdate(string, default=None):
"""Coerce to a date not beyond the current date
If only a day is given, assumes the current month if that day has
passed or is the current day, otherwise assumes the previous month.
If a day and month are given, but no year, assumes the current year
if the given date has passed (or is today), otherwise the previous
year.
"""
if not string and default is not None:
return default
today = datetime.date.today()
# split the string
try:
parts = map(int, re.split('\D+', string)) # split the string
except ValueError:
raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
if len(parts) < 1 or len(parts) > 3:
raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
if len(parts) == 1:
# no month or year given; append month
parts.append(today.month - 1 if parts[0] > today.day else today.month)
if parts[1] < 1:
parts[1] = 12
if len(parts) == 2:
# no year given; append year
if parts[1] > today.month \
or parts[1] == today.month and parts[0] > today.day:
parts.append(today.year - 1)
else:
parts.append(today.year)
parts.reverse()
try:
date = datetime.date(*parts)
if date > today:
raise InvalidInputError("cannot choose a date in the future")
return date
except ValueError:
print parts
raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]") | Coerce to a date not beyond the current date
If only a day is given, assumes the current month if that day has
passed or is the current day, otherwise assumes the previous month.
If a day and month are given, but no year, assumes the current year
if the given date has passed (or is today), otherwise the previous
year. | Below is the instruction that describes the task:
### Input:
Coerce to a date not beyond the current date
If only a day is given, assumes the current month if that day has
passed or is the current day, otherwise assumes the previous month.
If a day and month are given, but no year, assumes the current year
if the given date has passed (or is today), otherwise the previous
year.
### Response:
def filter_pastdate(string, default=None):
"""Coerce to a date not beyond the current date
If only a day is given, assumes the current month if that day has
passed or is the current day, otherwise assumes the previous month.
If a day and month are given, but no year, assumes the current year
if the given date has passed (or is today), otherwise the previous
year.
"""
if not string and default is not None:
return default
today = datetime.date.today()
# split the string
try:
parts = map(int, re.split('\D+', string)) # split the string
except ValueError:
raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
if len(parts) < 1 or len(parts) > 3:
raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
if len(parts) == 1:
# no month or year given; append month
parts.append(today.month - 1 if parts[0] > today.day else today.month)
if parts[1] < 1:
parts[1] = 12
if len(parts) == 2:
# no year given; append year
if parts[1] > today.month \
or parts[1] == today.month and parts[0] > today.day:
parts.append(today.year - 1)
else:
parts.append(today.year)
parts.reverse()
try:
date = datetime.date(*parts)
if date > today:
raise InvalidInputError("cannot choose a date in the future")
return date
except ValueError:
print parts
raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]") |
def fw_rule_update(self, data, fw_name=None):
    """Top level rule update routine.

    Delegates to the internal updater after logging.

    :param data: rule data passed through to the internal updater
    :param fw_name: optional firewall name, forwarded as-is
    """
    LOG.debug("FW Update Debug")
    self._fw_rule_update(fw_name, data) | Top level rule update routine. | Below is the the instruction that describes the task:
### Input:
Top level rule update routine.
### Response:
def fw_rule_update(self, data, fw_name=None):
"""Top level rule update routine. """
LOG.debug("FW Update Debug")
self._fw_rule_update(fw_name, data) |
def check(text):
    """Suggest the preferred forms.

    Flags excessive metadiscourse (phrases that talk about the text
    itself rather than its subject).
    """
    err = "pinker.metadiscourse"
    msg = "Excessive metadiscourse."
    # Phrases that signal metadiscourse. NOTE(review): "preceeding" in the
    # first entry looks like a typo for "preceding" -- confirm whether the
    # misspelling is intentional before changing the matched phrase.
    metadiscourse = [
        "The preceeding discussion",
        "The rest of this article",
        "This chapter discusses",
        "The preceding paragraph demonstrated",
        "The previous section analyzed",
    ]
    return existence_check(text, metadiscourse, err, msg) | Suggest the preferred forms. | Below is the instruction that describes the task:
### Input:
Suggest the preferred forms.
### Response:
def check(text):
"""Suggest the preferred forms."""
err = "pinker.metadiscourse"
msg = "Excessive metadiscourse."
metadiscourse = [
"The preceeding discussion",
"The rest of this article",
"This chapter discusses",
"The preceding paragraph demonstrated",
"The previous section analyzed",
]
return existence_check(text, metadiscourse, err, msg) |
def get_card(cards, term, limit=0, sort=False, ranks=None):
"""
Get the specified card from the stack.
:arg cards:
The cards to get from. Can be a ``Stack``, ``Deck`` or ``list``.
:arg str term:
The card's full name, value, suit, abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
If ``sort=True``, the rank dict to refer to for sorting.
:returns:
A copy of the given cards, with the found cards removed, and a list
of the specified cards, if found.
"""
got_cards = []
try:
indices = find_card(cards, term, limit=limit)
got_cards = [cards[i] for i in indices]
cards = [v for i, v in enumerate(cards) if i not in indices]
except:
got_cards = [cards[term]]
cards = [v for i, v in enumerate(cards) if i is not term]
if sort:
got_cards = sort_cards(got_cards)
return cards, got_cards | Get the specified card from the stack.
:arg cards:
The cards to get from. Can be a ``Stack``, ``Deck`` or ``list``.
:arg str term:
The card's full name, value, suit, abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
If ``sort=True``, the rank dict to refer to for sorting.
:returns:
A copy of the given cards, with the found cards removed, and a list
of the specified cards, if found. | Below is the instruction that describes the task:
### Input:
Get the specified card from the stack.
:arg cards:
The cards to get from. Can be a ``Stack``, ``Deck`` or ``list``.
:arg str term:
The card's full name, value, suit, abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
If ``sort=True``, the rank dict to refer to for sorting.
:returns:
A copy of the given cards, with the found cards removed, and a list
of the specified cards, if found.
### Response:
def get_card(cards, term, limit=0, sort=False, ranks=None):
"""
Get the specified card from the stack.
:arg cards:
The cards to get from. Can be a ``Stack``, ``Deck`` or ``list``.
:arg str term:
The card's full name, value, suit, abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
If ``sort=True``, the rank dict to refer to for sorting.
:returns:
A copy of the given cards, with the found cards removed, and a list
of the specified cards, if found.
"""
got_cards = []
try:
indices = find_card(cards, term, limit=limit)
got_cards = [cards[i] for i in indices]
cards = [v for i, v in enumerate(cards) if i not in indices]
except:
got_cards = [cards[term]]
cards = [v for i, v in enumerate(cards) if i is not term]
if sort:
got_cards = sort_cards(got_cards)
return cards, got_cards |
def fulfill(self, agreement_id, message, account_address, signature, from_account):
"""
Fulfill the sign conditon.
:param agreement_id: id of the agreement, hex str
:param message:
:param account_address: ethereum account address, hex str
:param signature: signed agreement hash, hex str
:param from_account: Account doing the transaction
:return:
"""
return self._fulfill(
agreement_id,
message,
account_address,
signature,
transact={'from': from_account.address,
'passphrase': from_account.password}
) | Fulfill the sign conditon.
:param agreement_id: id of the agreement, hex str
:param message:
:param account_address: ethereum account address, hex str
:param signature: signed agreement hash, hex str
:param from_account: Account doing the transaction
:return: | Below is the the instruction that describes the task:
### Input:
Fulfill the sign conditon.
:param agreement_id: id of the agreement, hex str
:param message:
:param account_address: ethereum account address, hex str
:param signature: signed agreement hash, hex str
:param from_account: Account doing the transaction
:return:
### Response:
def fulfill(self, agreement_id, message, account_address, signature, from_account):
"""
Fulfill the sign conditon.
:param agreement_id: id of the agreement, hex str
:param message:
:param account_address: ethereum account address, hex str
:param signature: signed agreement hash, hex str
:param from_account: Account doing the transaction
:return:
"""
return self._fulfill(
agreement_id,
message,
account_address,
signature,
transact={'from': from_account.address,
'passphrase': from_account.password}
) |
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
# ApiCli.get_arguments(self)
if self.args.file_name is not None:
self.file_name = self.args.file_name | Extracts the specific arguments of this CLI | Below is the the instruction that describes the task:
### Input:
Extracts the specific arguments of this CLI
### Response:
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
# ApiCli.get_arguments(self)
if self.args.file_name is not None:
self.file_name = self.args.file_name |
def get_mode(path):
'''
Return the mode of a file
Right now we're just returning None because Windows' doesn't have a mode
like Linux
Args:
path (str): The path to the file or directory
Returns:
None
CLI Example:
.. code-block:: bash
salt '*' file.get_mode /etc/passwd
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
func_name = '{0}.get_mode'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The function %s should not be used on Windows systems; '
'see function docs for details. The value returned is '
'always None.', func_name)
return None | Return the mode of a file
Right now we're just returning None because Windows' doesn't have a mode
like Linux
Args:
path (str): The path to the file or directory
Returns:
None
CLI Example:
.. code-block:: bash
salt '*' file.get_mode /etc/passwd | Below is the the instruction that describes the task:
### Input:
Return the mode of a file
Right now we're just returning None because Windows' doesn't have a mode
like Linux
Args:
path (str): The path to the file or directory
Returns:
None
CLI Example:
.. code-block:: bash
salt '*' file.get_mode /etc/passwd
### Response:
def get_mode(path):
'''
Return the mode of a file
Right now we're just returning None because Windows' doesn't have a mode
like Linux
Args:
path (str): The path to the file or directory
Returns:
None
CLI Example:
.. code-block:: bash
salt '*' file.get_mode /etc/passwd
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
func_name = '{0}.get_mode'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The function %s should not be used on Windows systems; '
'see function docs for details. The value returned is '
'always None.', func_name)
return None |
def infer_declared(ms, namespace=None):
'''Retrieves any declared information from the given macaroons and returns
it as a key-value map.
Information is declared with a first party caveat as created by
declared_caveat.
If there are two caveats that declare the same key with different values,
the information is omitted from the map. When the caveats are later
checked, this will cause the check to fail.
namespace is the Namespace used to retrieve the prefix associated to the
uri, if None it will use the STD_NAMESPACE only.
'''
conditions = []
for m in ms:
for cav in m.caveats:
if cav.location is None or cav.location == '':
conditions.append(cav.caveat_id_bytes.decode('utf-8'))
return infer_declared_from_conditions(conditions, namespace) | Retrieves any declared information from the given macaroons and returns
it as a key-value map.
Information is declared with a first party caveat as created by
declared_caveat.
If there are two caveats that declare the same key with different values,
the information is omitted from the map. When the caveats are later
checked, this will cause the check to fail.
namespace is the Namespace used to retrieve the prefix associated to the
uri, if None it will use the STD_NAMESPACE only. | Below is the the instruction that describes the task:
### Input:
Retrieves any declared information from the given macaroons and returns
it as a key-value map.
Information is declared with a first party caveat as created by
declared_caveat.
If there are two caveats that declare the same key with different values,
the information is omitted from the map. When the caveats are later
checked, this will cause the check to fail.
namespace is the Namespace used to retrieve the prefix associated to the
uri, if None it will use the STD_NAMESPACE only.
### Response:
def infer_declared(ms, namespace=None):
'''Retrieves any declared information from the given macaroons and returns
it as a key-value map.
Information is declared with a first party caveat as created by
declared_caveat.
If there are two caveats that declare the same key with different values,
the information is omitted from the map. When the caveats are later
checked, this will cause the check to fail.
namespace is the Namespace used to retrieve the prefix associated to the
uri, if None it will use the STD_NAMESPACE only.
'''
conditions = []
for m in ms:
for cav in m.caveats:
if cav.location is None or cav.location == '':
conditions.append(cav.caveat_id_bytes.decode('utf-8'))
return infer_declared_from_conditions(conditions, namespace) |
def shutdown(self):
"""Executed on shutdown of application"""
for wsock in self.wsocks:
wsock.shutdown()
for api in self.apis:
api.shutdown() | Executed on shutdown of application | Below is the the instruction that describes the task:
### Input:
Executed on shutdown of application
### Response:
def shutdown(self):
"""Executed on shutdown of application"""
for wsock in self.wsocks:
wsock.shutdown()
for api in self.apis:
api.shutdown() |
def delete_instance(self, instance, using_transactions=True, dry_run=False):
"""
Calls :meth:`instance.delete` as long as ``dry_run`` is not set.
"""
self.before_delete_instance(instance, dry_run)
if not using_transactions and dry_run:
# we don't have transactions and we want to do a dry_run
pass
else:
instance.delete()
self.after_delete_instance(instance, dry_run) | Calls :meth:`instance.delete` as long as ``dry_run`` is not set. | Below is the the instruction that describes the task:
### Input:
Calls :meth:`instance.delete` as long as ``dry_run`` is not set.
### Response:
def delete_instance(self, instance, using_transactions=True, dry_run=False):
"""
Calls :meth:`instance.delete` as long as ``dry_run`` is not set.
"""
self.before_delete_instance(instance, dry_run)
if not using_transactions and dry_run:
# we don't have transactions and we want to do a dry_run
pass
else:
instance.delete()
self.after_delete_instance(instance, dry_run) |
def get_session_cookie(self):
"""
Create a session cookie object for use by aiohttp
"""
if self._login is not None and self._password is not None:
session_key = self.encode_user(self._login, self._password)
return {'sessionkey': session_key}
else:
return None | Create a session cookie object for use by aiohttp | Below is the the instruction that describes the task:
### Input:
Create a session cookie object for use by aiohttp
### Response:
def get_session_cookie(self):
"""
Create a session cookie object for use by aiohttp
"""
if self._login is not None and self._password is not None:
session_key = self.encode_user(self._login, self._password)
return {'sessionkey': session_key}
else:
return None |
def _abort_workflow(pb: ProcessingBlock, workflow_stage_dict: dict,
docker: DockerSwarmClient):
"""Abort the workflow.
TODO(BMo): This function currently does nothing as the abort flag
is hardcoded to False!
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing block object.
workflow_stage_dict (dict): Workflow stage metadata dictionary.
docker (DockerClient): Docker Swarm Client object.
Returns:
bool, True if the stage is aborted, otherwise False.
"""
# TODO(BMo) Ask the database if the abort flag on the PB is set.
_abort_flag = False
if _abort_flag:
for workflow_stage in pb.workflow_stages:
for service_id, _ in \
workflow_stage_dict[workflow_stage.id]['services'].items():
docker.delete_service(service_id)
LOG.info("Deleted Service Id %s", service_id)
return True
return False | Abort the workflow.
TODO(BMo): This function currently does nothing as the abort flag
is hardcoded to False!
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing block object.
workflow_stage_dict (dict): Workflow stage metadata dictionary.
docker (DockerClient): Docker Swarm Client object.
Returns:
bool, True if the stage is aborted, otherwise False. | Below is the the instruction that describes the task:
### Input:
Abort the workflow.
TODO(BMo): This function currently does nothing as the abort flag
is hardcoded to False!
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing block object.
workflow_stage_dict (dict): Workflow stage metadata dictionary.
docker (DockerClient): Docker Swarm Client object.
Returns:
bool, True if the stage is aborted, otherwise False.
### Response:
def _abort_workflow(pb: ProcessingBlock, workflow_stage_dict: dict,
docker: DockerSwarmClient):
"""Abort the workflow.
TODO(BMo): This function currently does nothing as the abort flag
is hardcoded to False!
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing block object.
workflow_stage_dict (dict): Workflow stage metadata dictionary.
docker (DockerClient): Docker Swarm Client object.
Returns:
bool, True if the stage is aborted, otherwise False.
"""
# TODO(BMo) Ask the database if the abort flag on the PB is set.
_abort_flag = False
if _abort_flag:
for workflow_stage in pb.workflow_stages:
for service_id, _ in \
workflow_stage_dict[workflow_stage.id]['services'].items():
docker.delete_service(service_id)
LOG.info("Deleted Service Id %s", service_id)
return True
return False |
def classify_host(host):
'''Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
'''
if isinstance(host, (IPv4Address, IPv6Address)):
return host
if is_valid_hostname(host):
return host
return ip_address(host) | Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname. | Below is the the instruction that describes the task:
### Input:
Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
### Response:
def classify_host(host):
'''Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
'''
if isinstance(host, (IPv4Address, IPv6Address)):
return host
if is_valid_hostname(host):
return host
return ip_address(host) |
def named_series(self, ordering=None):
'''Generator of tuples with name and serie data.'''
series = self.series()
if ordering:
series = list(series)
todo = dict(((n, idx) for idx, n in enumerate(self.names())))
for name in ordering:
if name in todo:
idx = todo.pop(name)
yield name, series[idx]
for name in todo:
idx = todo[name]
yield name, series[idx]
else:
for name_serie in zip(self.names(), series):
yield name_serie | Generator of tuples with name and serie data. | Below is the the instruction that describes the task:
### Input:
Generator of tuples with name and serie data.
### Response:
def named_series(self, ordering=None):
'''Generator of tuples with name and serie data.'''
series = self.series()
if ordering:
series = list(series)
todo = dict(((n, idx) for idx, n in enumerate(self.names())))
for name in ordering:
if name in todo:
idx = todo.pop(name)
yield name, series[idx]
for name in todo:
idx = todo[name]
yield name, series[idx]
else:
for name_serie in zip(self.names(), series):
yield name_serie |
def started_plan_summary_for(self, year, month):
"""Return started_during Subscriptions with plan counts annotated."""
return (
self.started_during(year, month)
.values("plan")
.order_by()
.annotate(count=models.Count("plan"))
) | Return started_during Subscriptions with plan counts annotated. | Below is the the instruction that describes the task:
### Input:
Return started_during Subscriptions with plan counts annotated.
### Response:
def started_plan_summary_for(self, year, month):
"""Return started_during Subscriptions with plan counts annotated."""
return (
self.started_during(year, month)
.values("plan")
.order_by()
.annotate(count=models.Count("plan"))
) |
def add_vrf(self, args):
""" Add a new VRF.
Valid keys in the `args`-struct:
* `auth` [struct]
Authentication options passed to the :class:`AuthFactory`.
* `attr` [struct]
VRF attributes.
Returns the internal database ID for the VRF.
"""
try:
res = self.nip.add_vrf(args.get('auth'), args.get('attr'))
# fugly cast from large numbers to string to deal with XML-RPC
for val in ( 'num_prefixes_v4', 'num_prefixes_v6',
'total_addresses_v4', 'total_addresses_v6',
'used_addresses_v4', 'used_addresses_v6', 'free_addresses_v4',
'free_addresses_v6'):
res[val] = unicode(res[val])
return res
except (AuthError, NipapError) as exc:
self.logger.debug(unicode(exc))
raise Fault(exc.error_code, unicode(exc)) | Add a new VRF.
Valid keys in the `args`-struct:
* `auth` [struct]
Authentication options passed to the :class:`AuthFactory`.
* `attr` [struct]
VRF attributes.
Returns the internal database ID for the VRF. | Below is the the instruction that describes the task:
### Input:
Add a new VRF.
Valid keys in the `args`-struct:
* `auth` [struct]
Authentication options passed to the :class:`AuthFactory`.
* `attr` [struct]
VRF attributes.
Returns the internal database ID for the VRF.
### Response:
def add_vrf(self, args):
""" Add a new VRF.
Valid keys in the `args`-struct:
* `auth` [struct]
Authentication options passed to the :class:`AuthFactory`.
* `attr` [struct]
VRF attributes.
Returns the internal database ID for the VRF.
"""
try:
res = self.nip.add_vrf(args.get('auth'), args.get('attr'))
# fugly cast from large numbers to string to deal with XML-RPC
for val in ( 'num_prefixes_v4', 'num_prefixes_v6',
'total_addresses_v4', 'total_addresses_v6',
'used_addresses_v4', 'used_addresses_v6', 'free_addresses_v4',
'free_addresses_v6'):
res[val] = unicode(res[val])
return res
except (AuthError, NipapError) as exc:
self.logger.debug(unicode(exc))
raise Fault(exc.error_code, unicode(exc)) |
def shannon(self, data):
'''
Performs a Shannon entropy analysis on a given block of data.
'''
entropy = 0
if data:
length = len(data)
seen = dict(((chr(x), 0) for x in range(0, 256)))
for byte in data:
seen[byte] += 1
for x in range(0, 256):
p_x = float(seen[chr(x)]) / length
if p_x > 0:
entropy -= p_x * math.log(p_x, 2)
return (entropy / 8) | Performs a Shannon entropy analysis on a given block of data. | Below is the the instruction that describes the task:
### Input:
Performs a Shannon entropy analysis on a given block of data.
### Response:
def shannon(self, data):
'''
Performs a Shannon entropy analysis on a given block of data.
'''
entropy = 0
if data:
length = len(data)
seen = dict(((chr(x), 0) for x in range(0, 256)))
for byte in data:
seen[byte] += 1
for x in range(0, 256):
p_x = float(seen[chr(x)]) / length
if p_x > 0:
entropy -= p_x * math.log(p_x, 2)
return (entropy / 8) |
def _std(self):
"""Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of current variance.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
lambda: tf.ones_like(self._var_sum) * float('nan'))
# The epsilon corrects for small negative variance values caused by
# the algorithm. It was empirically chosen to work with all environments
# tested.
return tf.sqrt(variance + 1e-4) | Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of current variance. | Below is the the instruction that describes the task:
### Input:
Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of current variance.
### Response:
def _std(self):
"""Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of current variance.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
lambda: tf.ones_like(self._var_sum) * float('nan'))
# The epsilon corrects for small negative variance values caused by
# the algorithm. It was empirically chosen to work with all environments
# tested.
return tf.sqrt(variance + 1e-4) |
def include_class_in_title(result):
"""Makes sure that test class is included in "title".
e.g. "TestServiceRESTAPI.test_power_parent_service"
>>> result = {"title": "test_foo", "classname": "foo.bar.baz.TestFoo",
... "file": "foo/bar/baz.py"}
>>> include_class_in_title(result)
>>> str(result.get("title"))
'TestFoo.test_foo'
>>> result.get("classname")
"""
classname = result.get("classname", "")
if classname:
filepath = result.get("file", "")
title = result.get("title")
if title and "/" in filepath and "." in classname:
fname = filepath.split("/")[-1].replace(".py", "")
last_classname = classname.split(".")[-1]
# last part of classname is not file name
if fname != last_classname and last_classname not in title:
result["title"] = "{}.{}".format(last_classname, title)
# we don't need to pass classnames?
del result["classname"] | Makes sure that test class is included in "title".
e.g. "TestServiceRESTAPI.test_power_parent_service"
>>> result = {"title": "test_foo", "classname": "foo.bar.baz.TestFoo",
... "file": "foo/bar/baz.py"}
>>> include_class_in_title(result)
>>> str(result.get("title"))
'TestFoo.test_foo'
>>> result.get("classname") | Below is the the instruction that describes the task:
### Input:
Makes sure that test class is included in "title".
e.g. "TestServiceRESTAPI.test_power_parent_service"
>>> result = {"title": "test_foo", "classname": "foo.bar.baz.TestFoo",
... "file": "foo/bar/baz.py"}
>>> include_class_in_title(result)
>>> str(result.get("title"))
'TestFoo.test_foo'
>>> result.get("classname")
### Response:
def include_class_in_title(result):
"""Makes sure that test class is included in "title".
e.g. "TestServiceRESTAPI.test_power_parent_service"
>>> result = {"title": "test_foo", "classname": "foo.bar.baz.TestFoo",
... "file": "foo/bar/baz.py"}
>>> include_class_in_title(result)
>>> str(result.get("title"))
'TestFoo.test_foo'
>>> result.get("classname")
"""
classname = result.get("classname", "")
if classname:
filepath = result.get("file", "")
title = result.get("title")
if title and "/" in filepath and "." in classname:
fname = filepath.split("/")[-1].replace(".py", "")
last_classname = classname.split(".")[-1]
# last part of classname is not file name
if fname != last_classname and last_classname not in title:
result["title"] = "{}.{}".format(last_classname, title)
# we don't need to pass classnames?
del result["classname"] |
def crypto_kx_keypair():
"""
Generate a keypair.
This is a duplicate crypto_box_keypair, but
is included for api consistency.
:return: (public_key, secret_key)
:rtype: (bytes, bytes)
"""
public_key = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
secret_key = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)
res = lib.crypto_kx_keypair(public_key, secret_key)
ensure(res == 0, "Key generation failed.", raising=exc.CryptoError)
return (ffi.buffer(public_key, crypto_kx_PUBLIC_KEY_BYTES)[:],
ffi.buffer(secret_key, crypto_kx_SECRET_KEY_BYTES)[:]) | Generate a keypair.
This is a duplicate crypto_box_keypair, but
is included for api consistency.
:return: (public_key, secret_key)
:rtype: (bytes, bytes) | Below is the the instruction that describes the task:
### Input:
Generate a keypair.
This is a duplicate crypto_box_keypair, but
is included for api consistency.
:return: (public_key, secret_key)
:rtype: (bytes, bytes)
### Response:
def crypto_kx_keypair():
"""
Generate a keypair.
This is a duplicate crypto_box_keypair, but
is included for api consistency.
:return: (public_key, secret_key)
:rtype: (bytes, bytes)
"""
public_key = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
secret_key = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)
res = lib.crypto_kx_keypair(public_key, secret_key)
ensure(res == 0, "Key generation failed.", raising=exc.CryptoError)
return (ffi.buffer(public_key, crypto_kx_PUBLIC_KEY_BYTES)[:],
ffi.buffer(secret_key, crypto_kx_SECRET_KEY_BYTES)[:]) |
def whoarewe(self,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
"""Returns the current group.
@param shutit_pexpect_child: See send()
@param note: See send()
@return: the first group found
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.whoarewe(note=note,
loglevel=loglevel) | Returns the current group.
@param shutit_pexpect_child: See send()
@param note: See send()
@return: the first group found
@rtype: string | Below is the the instruction that describes the task:
### Input:
Returns the current group.
@param shutit_pexpect_child: See send()
@param note: See send()
@return: the first group found
@rtype: string
### Response:
def whoarewe(self,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
"""Returns the current group.
@param shutit_pexpect_child: See send()
@param note: See send()
@return: the first group found
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.whoarewe(note=note,
loglevel=loglevel) |
def _resolve_fn_sub(uri_data):
"""
Tries to resolve an Integration URI which contains Fn::Sub intrinsic function. This method tries to resolve
and produce a string output.
Example:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Fn::Sub Processing:
~~~~~~~~~~~~~~~~~~
If this is a Fn::Sub, resolve as following:
1. Get the ARN String:
- If Sub is using the array syntax, then use element which is a string.
- If Sub is using string syntax, then just use the string.
2. If there is a ${XXX.Arn} then replace it with a dummy ARN
3. Otherwise skip it
.. code:
Input:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Output: "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:LambdaFunction/invocations" # NOQA
Note
~~~~
This method supports only a very restricted subset of intrinsic functions with Swagger document. This is the
best we can do without implementing a full blown intrinsic function resolution module.
Parameters
----------
uri_data : string or dict
Value of Integration URI. It can either be a string or an intrinsic function that resolves to a string
Returns
-------
string
Integration URI as a string, if we were able to resolve the Sub intrinsic
dict
Input data is returned unmodified if we are unable to resolve the intrinsic
"""
# Try the short form of Fn::Sub syntax where the value is the ARN
arn = uri_data[LambdaUri._FN_SUB]
if isinstance(arn, list):
# This is the long form of Fn::Sub syntax
#
# {
# "Fn::Sub":[ "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${MyARn}/invocations",
# {"MyARn": {"Ref": MyFunction"}
# ]
# }
#
# Get the ARN out of the list
arn = arn[0]
if not isinstance(arn, string_types):
# Even after all the processing, ARN is still not a string. Probably customer provided wrong syntax
# for Fn::Sub. Let's skip this altogether
LOG.debug("Unable to resolve Fn::Sub value for integration URI: %s", uri_data)
return uri_data
# Now finally we got the ARN string. Let us try to resolve it.
# We only support value of type ${XXX.Arn} or ${YYY.Alias}. The `.Alias` syntax is a SAM specific intrinsic
# to get ARN of Lambda Alias when using DeploymentPreference
lambda_function_arn_template = r'arn:aws:lambda:${AWS::Region}:123456789012:function:\1'
return re.sub(LambdaUri._REGEX_SUB_FUNCTION_ARN, # Find all ${blah} patterns
# Replace with Lambda Function ARN, where function name is from pattern
lambda_function_arn_template,
arn) | Tries to resolve an Integration URI which contains Fn::Sub intrinsic function. This method tries to resolve
and produce a string output.
Example:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Fn::Sub Processing:
~~~~~~~~~~~~~~~~~~
If this is a Fn::Sub, resolve as following:
1. Get the ARN String:
- If Sub is using the array syntax, then use element which is a string.
- If Sub is using string syntax, then just use the string.
2. If there is a ${XXX.Arn} then replace it with a dummy ARN
3. Otherwise skip it
.. code:
Input:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Output: "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:LambdaFunction/invocations" # NOQA
Note
~~~~
This method supports only a very restricted subset of intrinsic functions with Swagger document. This is the
best we can do without implementing a full blown intrinsic function resolution module.
Parameters
----------
uri_data : string or dict
Value of Integration URI. It can either be a string or an intrinsic function that resolves to a string
Returns
-------
string
Integration URI as a string, if we were able to resolve the Sub intrinsic
dict
Input data is returned unmodified if we are unable to resolve the intrinsic | Below is the the instruction that describes the task:
### Input:
Tries to resolve an Integration URI which contains Fn::Sub intrinsic function. This method tries to resolve
and produce a string output.
Example:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Fn::Sub Processing:
~~~~~~~~~~~~~~~~~~
If this is a Fn::Sub, resolve as following:
1. Get the ARN String:
- If Sub is using the array syntax, then use element which is a string.
- If Sub is using string syntax, then just use the string.
2. If there is a ${XXX.Arn} then replace it with a dummy ARN
3. Otherwise skip it
.. code:
Input:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Output: "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:LambdaFunction/invocations" # NOQA
Note
~~~~
This method supports only a very restricted subset of intrinsic functions with Swagger document. This is the
best we can do without implementing a full blown intrinsic function resolution module.
Parameters
----------
uri_data : string or dict
Value of Integration URI. It can either be a string or an intrinsic function that resolves to a string
Returns
-------
string
Integration URI as a string, if we were able to resolve the Sub intrinsic
dict
Input data is returned unmodified if we are unable to resolve the intrinsic
### Response:
def _resolve_fn_sub(uri_data):
"""
Tries to resolve an Integration URI which contains Fn::Sub intrinsic function. This method tries to resolve
and produce a string output.
Example:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Fn::Sub Processing:
~~~~~~~~~~~~~~~~~~
If this is a Fn::Sub, resolve as following:
1. Get the ARN String:
- If Sub is using the array syntax, then use element which is a string.
- If Sub is using string syntax, then just use the string.
2. If there is a ${XXX.Arn} then replace it with a dummy ARN
3. Otherwise skip it
.. code:
Input:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Output: "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:LambdaFunction/invocations" # NOQA
Note
~~~~
This method supports only a very restricted subset of intrinsic functions with Swagger document. This is the
best we can do without implementing a full blown intrinsic function resolution module.
Parameters
----------
uri_data : string or dict
Value of Integration URI. It can either be a string or an intrinsic function that resolves to a string
Returns
-------
string
Integration URI as a string, if we were able to resolve the Sub intrinsic
dict
Input data is returned unmodified if we are unable to resolve the intrinsic
"""
# Try the short form of Fn::Sub syntax where the value is the ARN
arn = uri_data[LambdaUri._FN_SUB]
if isinstance(arn, list):
# This is the long form of Fn::Sub syntax
#
# {
# "Fn::Sub":[ "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${MyARn}/invocations",
# {"MyARn": {"Ref": MyFunction"}
# ]
# }
#
# Get the ARN out of the list
arn = arn[0]
if not isinstance(arn, string_types):
# Even after all the processing, ARN is still not a string. Probably customer provided wrong syntax
# for Fn::Sub. Let's skip this altogether
LOG.debug("Unable to resolve Fn::Sub value for integration URI: %s", uri_data)
return uri_data
# Now finally we got the ARN string. Let us try to resolve it.
# We only support value of type ${XXX.Arn} or ${YYY.Alias}. The `.Alias` syntax is a SAM specific intrinsic
# to get ARN of Lambda Alias when using DeploymentPreference
lambda_function_arn_template = r'arn:aws:lambda:${AWS::Region}:123456789012:function:\1'
return re.sub(LambdaUri._REGEX_SUB_FUNCTION_ARN, # Find all ${blah} patterns
# Replace with Lambda Function ARN, where function name is from pattern
lambda_function_arn_template,
arn) |
def load_model(self, fname):
    """
    Load the model from a file.

    Parameters
    ----------
    fname : string or a memory buffer
        Input file name or memory buffer(see also save_raw)
    """
    if isinstance(fname, STRING_TYPES):  # assume file name
        if os.path.exists(fname):
            # Load directly from the file path via the XGBoost C API.
            # NOTE(review): unlike the buffer branch below, this return code
            # is not routed through _check_call — confirm whether C-API
            # failures should raise here as well.
            _LIB.XGBoosterLoadModel(self.handle, c_str(fname))
        else:
            raise ValueError("No such file: {0}".format(fname))
    else:
        # Treat fname as an in-memory buffer (the output of save_raw);
        # hand the C API a raw char pointer plus its length.
        buf = fname
        length = ctypes.c_ulong(len(buf))
        ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
        _check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length)) | Load the model from a file.
Parameters
----------
fname : string or a memory buffer
Input file name or memory buffer(see also save_raw) | Below is the instruction that describes the task:
### Input:
Load the model from a file.
Parameters
----------
fname : string or a memory buffer
Input file name or memory buffer(see also save_raw)
### Response:
def load_model(self, fname):
    """
    Load the model from a file.

    Parameters
    ----------
    fname : string or a memory buffer
        Input file name or memory buffer(see also save_raw)
    """
    if isinstance(fname, STRING_TYPES):  # assume file name
        if os.path.exists(fname):
            # NOTE(review): this call's return code is not checked via
            # _check_call, unlike the buffer branch — confirm intent.
            _LIB.XGBoosterLoadModel(self.handle, c_str(fname))
        else:
            raise ValueError("No such file: {0}".format(fname))
    else:
        # fname is an in-memory model buffer (see save_raw).
        buf = fname
        length = ctypes.c_ulong(len(buf))
        ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
        _check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length)) |
def validate_context(
    self, context: Mapping[str, Any]
) -> Tuple[bool, List[Tuple[str, str]]]:
    """ Validate context

    Args:
        context (Mapping[str, Any]): context dictionary of type, id and label

    Returns:
        Tuple[bool, List[Tuple[str, str]]]:
            bool: Is valid? Yes = True, No = False
            List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
                e.g. [('WARNING', "Context ID not found")]
    """
    # Look the context id up against the terminology service.
    url = f'{self.endpoint}/terms/{context["id"]}'
    res = requests.get(url)
    if res.status_code == 200:
        # Term exists -> context is valid, no issues to report.
        return (True, [])
    else:
        # Any non-200 response is reported as a non-fatal warning.
        return (False, [("WARNING", f'Context {context["id"]} not found at {url}')]) | Validate context
Args:
context (Mapping[str, Any]): context dictionary of type, id and label
Returns:
Tuple[bool, List[Tuple[str, str]]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
e.g. [('WARNING', "Context ID not found")] | Below is the instruction that describes the task:
### Input:
Validate context
Args:
context (Mapping[str, Any]): context dictionary of type, id and label
Returns:
Tuple[bool, List[Tuple[str, str]]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
e.g. [('WARNING', "Context ID not found")]
### Response:
def validate_context(
    self, context: Mapping[str, Any]
) -> Tuple[bool, List[Tuple[str, str]]]:
    """ Validate context

    Args:
        context (Mapping[str, Any]): context dictionary of type, id and label

    Returns:
        Tuple[bool, List[Tuple[str, str]]]:
            bool: Is valid? Yes = True, No = False
            List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
                e.g. [('WARNING', "Context ID not found")]
    """
    # HTTP 200 from the terms endpoint means the context id is known;
    # anything else is surfaced as a warning rather than an error.
    url = f'{self.endpoint}/terms/{context["id"]}'
    res = requests.get(url)
    if res.status_code == 200:
        return (True, [])
    else:
        return (False, [("WARNING", f'Context {context["id"]} not found at {url}')]) |
def p_types(self, p):
    '''types : type
             | type COMMA types'''
    # PLY grammar action. p[1] is an iterable of alternatives for one type.
    if len(p) == 2:
        # Single type: each alternative becomes a one-element tuple.
        p[0] = tuple((t,) for t in p[1])
    else:
        # type COMMA types: prepend each head alternative (p[1]) to every
        # tail tuple already built for the rest of the list (p[3]).
        p[0] = tuple((t,) + ts for t in p[1] for ts in p[3]) | types : type
| type COMMA types | Below is the instruction that describes the task:
### Input:
types : type
| type COMMA types
### Response:
def p_types(self, p):
    '''types : type
             | type COMMA types'''
    # Build the cross-product of type alternatives as tuples (PLY action).
    if len(p) == 2:
        p[0] = tuple((t,) for t in p[1])
    else:
        p[0] = tuple((t,) + ts for t in p[1] for ts in p[3]) |
def _call(self, x):
    """Sum all values if indices are given multiple times."""
    # Scatter-add: accumulate the weights x into flat bins given by the
    # precomputed flat index array; minlength pads out to the full range.
    y = np.bincount(self._indices_flat, weights=x,
                    minlength=self.range.size)
    out = y.reshape(self.range.shape)
    if self.variant == 'dirac':
        # Dirac variant normalizes by the cell volume; falls back to 1.0
        # when the range does not define one.
        weights = getattr(self.range, 'cell_volume', 1.0)
    elif self.variant == 'char_fun':
        weights = 1.0
    else:
        raise RuntimeError('The variant "{!r}" is not yet supported'
                           ''.format(self.variant))
    if weights != 1.0:
        # In-place divide to avoid an extra allocation.
        out /= weights
    return out | Sum all values if indices are given multiple times. | Below is the the instruction that describes the task:
### Input:
Sum all values if indices are given multiple times.
### Response:
def _call(self, x):
    """Sum all values if indices are given multiple times."""
    # np.bincount sums duplicate indices, implementing the scatter-add.
    y = np.bincount(self._indices_flat, weights=x,
                    minlength=self.range.size)
    out = y.reshape(self.range.shape)
    if self.variant == 'dirac':
        weights = getattr(self.range, 'cell_volume', 1.0)
    elif self.variant == 'char_fun':
        weights = 1.0
    else:
        raise RuntimeError('The variant "{!r}" is not yet supported'
                           ''.format(self.variant))
    if weights != 1.0:
        out /= weights
    return out |
def main():
    """ SimpleScheduler

    redis parameters will be read from environment variables:
    REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_KEY (password)
    """
    # NOTE: Python 2 code (print statement, `long` literal).
    args = parser.parse_args()
    scheduler = Scheduler()
    print 'Start %s' % scheduler.scheduler_id
    scheduler.interval = args.interval
    if args.keepalive:
        # Run one scheduling pass, then register a keepalive job that
        # watches this scheduler with a grace period of 2 * interval.
        scheduler.run(once=True)
        keepalive = Job('simplescheduler.keepalive',
                        args=[0,
                              scheduler.get_running_scheduler_id(),
                              args.interval * 2])
        # Timestamps are in microseconds; schedule immediately.
        scheduler.schedule(keepalive, long(time.time() * 1000000))
    # NOTE(review): original indentation was lost in this dump; _run() is
    # assumed to be unconditional (outside the keepalive branch) — confirm.
    scheduler._run() | SimpleScheduler
redis parameters will be read from environment variables:
REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_KEY (password) | Below is the instruction that describes the task:
### Input:
SimpleScheduler
redis parameters will be read from environment variables:
REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_KEY (password)
### Response:
def main():
    """ SimpleScheduler

    redis parameters will be read from environment variables:
    REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_KEY (password)
    """
    # Python 2 entry point: parse args, announce the scheduler id, and
    # optionally register a self-monitoring keepalive job.
    args = parser.parse_args()
    scheduler = Scheduler()
    print 'Start %s' % scheduler.scheduler_id
    scheduler.interval = args.interval
    if args.keepalive:
        scheduler.run(once=True)
        keepalive = Job('simplescheduler.keepalive',
                        args=[0,
                              scheduler.get_running_scheduler_id(),
                              args.interval * 2])
        # Schedule time is in microseconds since the epoch.
        scheduler.schedule(keepalive, long(time.time() * 1000000))
    # NOTE(review): indentation lost in this dump; _run() placement assumed
    # unconditional — confirm against the original source.
    scheduler._run() |
def describe(**kwargs):
    """ describe is a decorator to customize the rest API
    that transmute generates, such as choosing
    certain arguments to be query parameters or
    body parameters, or a different method.

    :param list(str) paths: the path(s) for the handler to represent (using swagger's syntax for a path)

    :param list(str) methods: the methods this function should respond to. if non is set, transmute defaults to a GET.

    :param list(str) query_parameters: the names of arguments that
        should be query parameters. By default, all arguments are query_or path parameters for a GET request.

    :param body_parameters: the names of arguments that should be body parameters.
        By default, all arguments are either body or path parameters for a non-GET request.
        in the case of a single string, the whole body is validated against a single object.
    :type body_parameters: List[str] or str

    :param list(str) header_parameters: the arguments that should be passed into the header.

    :param list(str) path_parameters: the arguments that are specified by the path. By default, arguments
        that are found in the path are used first before the query_parameters and body_parameters.

    :param list(str) parameter_descriptions: descriptions for each parameter, keyed by attribute name.
        this will appear in the swagger documentation.
    """
    # if we have a single method, make it a list.
    if isinstance(kwargs.get("paths"), string_type):
        kwargs["paths"] = [kwargs["paths"]]
    if isinstance(kwargs.get("methods"), string_type):
        kwargs["methods"] = [kwargs["methods"]]
    attrs = TransmuteAttributes(**kwargs)

    def decorator(f):
        # Merge with attributes from any earlier @describe applied to f,
        # so stacked decorations accumulate rather than overwrite.
        if hasattr(f, "transmute"):
            f.transmute = f.transmute | attrs
        else:
            f.transmute = attrs
        return f
    return decorator | describe is a decorator to customize the rest API
that transmute generates, such as choosing
certain arguments to be query parameters or
body parameters, or a different method.
:param list(str) paths: the path(s) for the handler to represent (using swagger's syntax for a path)
:param list(str) methods: the methods this function should respond to. if non is set, transmute defaults to a GET.
:param list(str) query_parameters: the names of arguments that
should be query parameters. By default, all arguments are query_or path parameters for a GET request.
:param body_parameters: the names of arguments that should be body parameters.
By default, all arguments are either body or path parameters for a non-GET request.
in the case of a single string, the whole body is validated against a single object.
:type body_parameters: List[str] or str
:param list(str) header_parameters: the arguments that should be passed into the header.
:param list(str) path_parameters: the arguments that are specified by the path. By default, arguments
that are found in the path are used first before the query_parameters and body_parameters.
:param list(str) parameter_descriptions: descriptions for each parameter, keyed by attribute name.
this will appear in the swagger documentation. | Below is the instruction that describes the task:
### Input:
describe is a decorator to customize the rest API
that transmute generates, such as choosing
certain arguments to be query parameters or
body parameters, or a different method.
:param list(str) paths: the path(s) for the handler to represent (using swagger's syntax for a path)
:param list(str) methods: the methods this function should respond to. if non is set, transmute defaults to a GET.
:param list(str) query_parameters: the names of arguments that
should be query parameters. By default, all arguments are query_or path parameters for a GET request.
:param body_parameters: the names of arguments that should be body parameters.
By default, all arguments are either body or path parameters for a non-GET request.
in the case of a single string, the whole body is validated against a single object.
:type body_parameters: List[str] or str
:param list(str) header_parameters: the arguments that should be passed into the header.
:param list(str) path_parameters: the arguments that are specified by the path. By default, arguments
that are found in the path are used first before the query_parameters and body_parameters.
:param list(str) parameter_descriptions: descriptions for each parameter, keyed by attribute name.
this will appear in the swagger documentation.
### Response:
def describe(**kwargs):
    """ describe is a decorator to customize the rest API
    that transmute generates, such as choosing
    certain arguments to be query parameters or
    body parameters, or a different method.

    :param list(str) paths: the path(s) for the handler to represent (using swagger's syntax for a path)
    :param list(str) methods: the methods this function should respond to. if non is set, transmute defaults to a GET.
    :param list(str) query_parameters: the names of arguments that
        should be query parameters. By default, all arguments are query_or path parameters for a GET request.
    :param body_parameters: the names of arguments that should be body parameters.
        By default, all arguments are either body or path parameters for a non-GET request.
        in the case of a single string, the whole body is validated against a single object.
    :type body_parameters: List[str] or str
    :param list(str) header_parameters: the arguments that should be passed into the header.
    :param list(str) path_parameters: the arguments that are specified by the path. By default, arguments
        that are found in the path are used first before the query_parameters and body_parameters.
    :param list(str) parameter_descriptions: descriptions for each parameter, keyed by attribute name.
        this will appear in the swagger documentation.
    """
    # if we have a single method, make it a list.
    if isinstance(kwargs.get("paths"), string_type):
        kwargs["paths"] = [kwargs["paths"]]
    if isinstance(kwargs.get("methods"), string_type):
        kwargs["methods"] = [kwargs["methods"]]
    attrs = TransmuteAttributes(**kwargs)

    def decorator(f):
        # Accumulate attributes across stacked @describe decorations.
        if hasattr(f, "transmute"):
            f.transmute = f.transmute | attrs
        else:
            f.transmute = attrs
        return f
    return decorator |
def save_to_wav(self, file_name):
    """ Save this time series to a wav format audio file.

    Parameters
    ----------
    file_name : string
        The output file name
    """
    # Normalize to [-1, 1] by the peak amplitude, then scale to the
    # signed 16-bit range expected by WAV files.
    # NOTE(review): an all-zero series would divide by zero here — confirm
    # whether callers guarantee a nonzero peak.
    scaled = _numpy.int16(self.numpy()/max(abs(self)) * 32767)
    write_wav(file_name, self.sample_rate, scaled) | Save this time series to a wav format audio file.
Parameters
----------
file_name : string
The output file name | Below is the instruction that describes the task:
### Input:
Save this time series to a wav format audio file.
Parameters
----------
file_name : string
The output file name
### Response:
def save_to_wav(self, file_name):
    """ Save this time series to a wav format audio file.

    Parameters
    ----------
    file_name : string
        The output file name
    """
    # Peak-normalize, convert to int16 PCM, and write at this series'
    # native sample rate.
    scaled = _numpy.int16(self.numpy()/max(abs(self)) * 32767)
    write_wav(file_name, self.sample_rate, scaled) |
def compare(self, vertex0, vertex1, subject_graph):
    """Returns true when the two vertices are of the same kind"""
    # Two vertices match when every component of their fingerprint
    # vectors is equal (element-wise comparison reduced with .all()).
    return (
        self.pattern_graph.vertex_fingerprints[vertex0] ==
        subject_graph.vertex_fingerprints[vertex1]
    ).all() | Returns true when the two vertices are of the same kind | Below is the the instruction that describes the task:
### Input:
Returns true when the two vertices are of the same kind
### Response:
def compare(self, vertex0, vertex1, subject_graph):
    """Returns true when the two vertices are of the same kind"""
    # Element-wise fingerprint equality, reduced to a single boolean.
    return (
        self.pattern_graph.vertex_fingerprints[vertex0] ==
        subject_graph.vertex_fingerprints[vertex1]
    ).all() |
def _ParsePlistKeyValue(self, knowledge_base, name, value):
    """Parses a plist key value.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing information.
        name (str): name of the plist key.
        value (str): value of the plist key.
    """
    # Only set the version once: the first key matching _PLIST_KEYS wins.
    if not knowledge_base.GetValue('operating_system_version'):
        if name in self._PLIST_KEYS:
            knowledge_base.SetValue('operating_system_version', value) | Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key. | Below is the instruction that describes the task:
### Input:
Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
### Response:
def _ParsePlistKeyValue(self, knowledge_base, name, value):
    """Parses a plist key value.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing information.
        name (str): name of the plist key.
        value (str): value of the plist key.
    """
    # Do not overwrite a version that was already detected.
    if not knowledge_base.GetValue('operating_system_version'):
        if name in self._PLIST_KEYS:
            knowledge_base.SetValue('operating_system_version', value) |
def list_tags():
    '''
    Returns a list of tagged images

    CLI Example:

    .. code-block:: bash

        salt myminion docker.list_tags
    '''
    ret = set()
    for item in six.itervalues(images()):
        # Images without tags (e.g. dangling layers) are skipped.
        if not item.get('RepoTags'):
            continue
        ret.update(set(item['RepoTags']))
    # Sorted list gives stable CLI output.
    return sorted(ret) | Returns a list of tagged images
CLI Example:
.. code-block:: bash
salt myminion docker.list_tags | Below is the instruction that describes the task:
### Input:
Returns a list of tagged images
CLI Example:
.. code-block:: bash
salt myminion docker.list_tags
### Response:
def list_tags():
    '''
    Returns a list of tagged images

    CLI Example:

    .. code-block:: bash

        salt myminion docker.list_tags
    '''
    # Collect RepoTags across all images, deduplicated via a set.
    ret = set()
    for item in six.itervalues(images()):
        if not item.get('RepoTags'):
            continue
        ret.update(set(item['RepoTags']))
    return sorted(ret) |
def plot_cumulative_gain(y_true, y_probas, title='Cumulative Gains Curve',
                         ax=None, figsize=None, title_fontsize="large",
                         text_fontsize="medium"):
    """Generates the Cumulative Gains Plot from labels and scores/probabilities

    The cumulative gains chart is used to determine the effectiveness of a
    binary classifier. A detailed explanation can be found at
    http://mlwiki.org/index.php/Cumulative_Gain_Chart. The implementation
    here works only for binary classification.

    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.

        y_probas (array-like, shape (n_samples, n_classes)):
            Prediction probabilities for each class returned by a classifier.

        title (string, optional): Title of the generated plot. Defaults to
            "Cumulative Gains Curve".

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the learning curve. If None, the plot is drawn on a new set of
            axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> lr = LogisticRegression()
        >>> lr = lr.fit(X_train, y_train)
        >>> y_probas = lr.predict_proba(X_test)
        >>> skplt.metrics.plot_cumulative_gain(y_test, y_probas)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_cumulative_gain.png
           :align: center
           :alt: Cumulative Gains Plot
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)

    # Gains charts are only defined for binary problems.
    classes = np.unique(y_true)
    if len(classes) != 2:
        raise ValueError('Cannot calculate Cumulative Gains for data with '
                         '{} category/ies'.format(len(classes)))

    # Compute Cumulative Gain Curves
    percentages, gains1 = cumulative_gain_curve(y_true, y_probas[:, 0],
                                                classes[0])
    percentages, gains2 = cumulative_gain_curve(y_true, y_probas[:, 1],
                                                classes[1])

    # Draw on a fresh figure unless the caller supplied axes.
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    # One gain curve per class, plus the diagonal random baseline.
    ax.plot(percentages, gains1, lw=3, label='Class {}'.format(classes[0]))
    ax.plot(percentages, gains2, lw=3, label='Class {}'.format(classes[1]))

    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.0])

    ax.plot([0, 1], [0, 1], 'k--', lw=2, label='Baseline')

    ax.set_xlabel('Percentage of sample', fontsize=text_fontsize)
    ax.set_ylabel('Gain', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)
    # NOTE(review): grid('on') is legacy matplotlib usage — newer versions
    # expect grid(True); confirm the supported matplotlib range.
    ax.grid('on')
    ax.legend(loc='lower right', fontsize=text_fontsize)

    return ax | Generates the Cumulative Gains Plot from labels and scores/probabilities
The cumulative gains chart is used to determine the effectiveness of a
binary classifier. A detailed explanation can be found at
http://mlwiki.org/index.php/Cumulative_Gain_Chart. The implementation
here works only for binary classification.
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"Cumulative Gains Curve".
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the learning curve. If None, the plot is drawn on a new set of
axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> lr = LogisticRegression()
>>> lr = lr.fit(X_train, y_train)
>>> y_probas = lr.predict_proba(X_test)
>>> skplt.metrics.plot_cumulative_gain(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_cumulative_gain.png
:align: center
:alt: Cumulative Gains Plot | Below is the instruction that describes the task:
### Input:
Generates the Cumulative Gains Plot from labels and scores/probabilities
The cumulative gains chart is used to determine the effectiveness of a
binary classifier. A detailed explanation can be found at
http://mlwiki.org/index.php/Cumulative_Gain_Chart. The implementation
here works only for binary classification.
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"Cumulative Gains Curve".
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the learning curve. If None, the plot is drawn on a new set of
axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> lr = LogisticRegression()
>>> lr = lr.fit(X_train, y_train)
>>> y_probas = lr.predict_proba(X_test)
>>> skplt.metrics.plot_cumulative_gain(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_cumulative_gain.png
:align: center
:alt: Cumulative Gains Plot
### Response:
def plot_cumulative_gain(y_true, y_probas, title='Cumulative Gains Curve',
                         ax=None, figsize=None, title_fontsize="large",
                         text_fontsize="medium"):
    """Generates the Cumulative Gains Plot from labels and scores/probabilities

    The cumulative gains chart is used to determine the effectiveness of a
    binary classifier. A detailed explanation can be found at
    http://mlwiki.org/index.php/Cumulative_Gain_Chart. The implementation
    here works only for binary classification.

    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.

        y_probas (array-like, shape (n_samples, n_classes)):
            Prediction probabilities for each class returned by a classifier.

        title (string, optional): Title of the generated plot. Defaults to
            "Cumulative Gains Curve".

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the learning curve. If None, the plot is drawn on a new set of
            axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> lr = LogisticRegression()
        >>> lr = lr.fit(X_train, y_train)
        >>> y_probas = lr.predict_proba(X_test)
        >>> skplt.metrics.plot_cumulative_gain(y_test, y_probas)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_cumulative_gain.png
           :align: center
           :alt: Cumulative Gains Plot
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)

    # Binary classification only.
    classes = np.unique(y_true)
    if len(classes) != 2:
        raise ValueError('Cannot calculate Cumulative Gains for data with '
                         '{} category/ies'.format(len(classes)))

    # Compute Cumulative Gain Curves
    percentages, gains1 = cumulative_gain_curve(y_true, y_probas[:, 0],
                                                classes[0])
    percentages, gains2 = cumulative_gain_curve(y_true, y_probas[:, 1],
                                                classes[1])

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    ax.plot(percentages, gains1, lw=3, label='Class {}'.format(classes[0]))
    ax.plot(percentages, gains2, lw=3, label='Class {}'.format(classes[1]))

    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.0])

    # Diagonal baseline corresponding to a random ranking.
    ax.plot([0, 1], [0, 1], 'k--', lw=2, label='Baseline')

    ax.set_xlabel('Percentage of sample', fontsize=text_fontsize)
    ax.set_ylabel('Gain', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)
    ax.grid('on')
    ax.legend(loc='lower right', fontsize=text_fontsize)

    return ax |
def save_mfdataset(datasets, paths, mode='w', format=None, groups=None,
                   engine=None, compute=True):
    """Write multiple datasets to disk as netCDF files simultaneously.

    This function is intended for use with datasets consisting of dask.array
    objects, in which case it can write the multiple datasets to disk
    simultaneously using a shared thread pool.

    When not using dask, it is no different than calling ``to_netcdf``
    repeatedly.

    Parameters
    ----------
    datasets : list of xarray.Dataset
        List of datasets to save.
    paths : list of str or list of Paths
        List of paths to which to save each corresponding dataset.
    mode : {'w', 'a'}, optional
        Write ('w') or append ('a') mode. If mode='w', any existing file at
        these locations will be overwritten.
    format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
              'NETCDF3_CLASSIC'}, optional
        File format for the resulting netCDF file:

        * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
          features.
        * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
          netCDF 3 compatible API features.
        * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
          which fully supports 2+ GB files, but is only compatible with
          clients linked against netCDF version 3.6.0 or later.
        * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
          handle 2+ GB files very well.

        All formats are supported by the netCDF4-python library.
        scipy.io.netcdf only supports the last two formats.

        The default format is NETCDF4 if you are saving a file to disk and
        have the netCDF4-python library available. Otherwise, xarray falls
        back to using scipy to write netCDF files and defaults to the
        NETCDF3_64BIT format (scipy does not support netCDF4).
    groups : list of str, optional
        Paths to the netCDF4 group in each corresponding file to which to save
        datasets (only works for format='NETCDF4'). The groups will be created
        if necessary.
    engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
        Engine to use when writing netCDF files. If not provided, the
        default engine is chosen based on available dependencies, with a
        preference for 'netcdf4' if writing to a file on disk.
        See `Dataset.to_netcdf` for additional information.
    compute: boolean
        If true compute immediately, otherwise return a
        ``dask.delayed.Delayed`` object that can be computed later.

    Examples
    --------
    Save a dataset into one netCDF per year of data:

    >>> years, datasets = zip(*ds.groupby('time.year'))
    >>> paths = ['%s.nc' % y for y in years]
    >>> xr.save_mfdataset(datasets, paths)
    """
    # Writing two datasets to the same file in 'w' mode would clobber data.
    if mode == 'w' and len(set(paths)) < len(paths):
        raise ValueError("cannot use mode='w' when writing multiple "
                         'datasets to the same path')

    for obj in datasets:
        if not isinstance(obj, Dataset):
            raise TypeError('save_mfdataset only supports writing Dataset '
                            'objects, received type %s' % type(obj))

    if groups is None:
        groups = [None] * len(datasets)

    # All three lists must line up element-wise.
    if len(set([len(datasets), len(paths), len(groups)])) > 1:
        raise ValueError('must supply lists of the same length for the '
                         'datasets, paths and groups arguments to '
                         'save_mfdataset')

    # Open all targets first (multifile=True defers the actual writes),
    # collecting one (writer, store) pair per dataset.
    writers, stores = zip(*[
        to_netcdf(ds, path, mode, format, group, engine, compute=compute,
                  multifile=True)
        for ds, path, group in zip(datasets, paths, groups)])

    try:
        writes = [w.sync(compute=compute) for w in writers]
    finally:
        # Only close eagerly when computing now; delayed writes need the
        # stores to stay open until the graph is executed.
        if compute:
            for store in stores:
                store.close()

    if not compute:
        import dask
        # Bundle per-file finalization (flush + close) into one delayed.
        return dask.delayed([dask.delayed(_finalize_store)(w, s)
                             for w, s in zip(writes, stores)]) | Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of xarray.Dataset
List of datasets to save.
paths : list of str or list of Paths
List of paths to which to save each corresponding dataset.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
these locations will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format='NETCDF4'). The groups will be created
if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> years, datasets = zip(*ds.groupby('time.year'))
>>> paths = ['%s.nc' % y for y in years]
>>> xr.save_mfdataset(datasets, paths) | Below is the instruction that describes the task:
### Input:
Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of xarray.Dataset
List of datasets to save.
paths : list of str or list of Paths
List of paths to which to save each corresponding dataset.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
these locations will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format='NETCDF4'). The groups will be created
if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> years, datasets = zip(*ds.groupby('time.year'))
>>> paths = ['%s.nc' % y for y in years]
>>> xr.save_mfdataset(datasets, paths)
### Response:
def save_mfdataset(datasets, paths, mode='w', format=None, groups=None,
                   engine=None, compute=True):
    """Write multiple datasets to disk as netCDF files simultaneously.
    This function is intended for use with datasets consisting of dask.array
    objects, in which case it can write the multiple datasets to disk
    simultaneously using a shared thread pool.
    When not using dask, it is no different than calling ``to_netcdf``
    repeatedly.
    Parameters
    ----------
    datasets : list of xarray.Dataset
        List of datasets to save.
    paths : list of str or list of Paths
        List of paths to which to save each corresponding dataset.
    mode : {'w', 'a'}, optional
        Write ('w') or append ('a') mode. If mode='w', any existing file at
        these locations will be overwritten.
    format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
              'NETCDF3_CLASSIC'}, optional
        File format for the resulting netCDF file:
        * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
          features.
        * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
          netCDF 3 compatible API features.
        * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
          which fully supports 2+ GB files, but is only compatible with
          clients linked against netCDF version 3.6.0 or later.
        * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
          handle 2+ GB files very well.
        All formats are supported by the netCDF4-python library.
        scipy.io.netcdf only supports the last two formats.
        The default format is NETCDF4 if you are saving a file to disk and
        have the netCDF4-python library available. Otherwise, xarray falls
        back to using scipy to write netCDF files and defaults to the
        NETCDF3_64BIT format (scipy does not support netCDF4).
    groups : list of str, optional
        Paths to the netCDF4 group in each corresponding file to which to save
        datasets (only works for format='NETCDF4'). The groups will be created
        if necessary.
    engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
        Engine to use when writing netCDF files. If not provided, the
        default engine is chosen based on available dependencies, with a
        preference for 'netcdf4' if writing to a file on disk.
        See `Dataset.to_netcdf` for additional information.
    compute: boolean
        If true compute immediately, otherwise return a
        ``dask.delayed.Delayed`` object that can be computed later.
    Examples
    --------
    Save a dataset into one netCDF per year of data:
    >>> years, datasets = zip(*ds.groupby('time.year'))
    >>> paths = ['%s.nc' % y for y in years]
    >>> xr.save_mfdataset(datasets, paths)
    """
    # Two datasets written to the same file in 'w' mode would silently
    # clobber each other, so reject duplicate paths up front.
    if mode == 'w' and len(set(paths)) < len(paths):
        raise ValueError("cannot use mode='w' when writing multiple "
                         'datasets to the same path')
    for obj in datasets:
        if not isinstance(obj, Dataset):
            raise TypeError('save_mfdataset only supports writing Dataset '
                            'objects, received type %s' % type(obj))
    if groups is None:
        groups = [None] * len(datasets)
    # All three lists must line up element-for-element; a single set of
    # their lengths means they are all equal.
    if len(set([len(datasets), len(paths), len(groups)])) > 1:
        raise ValueError('must supply lists of the same length for the '
                         'datasets, paths and groups arguments to '
                         'save_mfdataset')
    # multifile=True makes to_netcdf return (writer, store) pairs instead
    # of finalizing each file immediately, so all writes can share one pool.
    writers, stores = zip(*[
        to_netcdf(ds, path, mode, format, group, engine, compute=compute,
                  multifile=True)
        for ds, path, group in zip(datasets, paths, groups)])
    try:
        writes = [w.sync(compute=compute) for w in writers]
    finally:
        # When computing eagerly the stores can be closed right away; in
        # the lazy case they must stay open until the Delayed is computed.
        if compute:
            for store in stores:
                store.close()
    if not compute:
        import dask
        # Defer store finalization (and closing) to graph execution time.
        return dask.delayed([dask.delayed(_finalize_store)(w, s)
                             for w, s in zip(writes, stores)])
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warn('Ending without manager process.')
return
this_process = psutil.Process(os.getpid())
try:
manager_process = psutil.Process(self._process.pid)
except psutil.NoSuchProcess:
self.log.info("Manager process not running.")
return
# First try SIGTERM
if manager_process.is_running() \
and manager_process.pid in [x.pid for x in this_process.children()]:
self.log.info("Terminating manager process: %s", manager_process.pid)
manager_process.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %ss for manager process to exit...", timeout)
try:
psutil.wait_procs({manager_process}, timeout)
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
if manager_process.is_running() \
and manager_process.pid in [x.pid for x in this_process.children()]:
self.log.info("Killing manager process: %s", manager_process.pid)
manager_process.kill()
manager_process.wait() | Terminate (and then kill) the manager process launched.
:return: | Below is the the instruction that describes the task:
### Input:
Terminate (and then kill) the manager process launched.
:return:
### Response:
def end(self):
    """
    Terminate (and then kill) the manager process launched.

    Sends SIGTERM first, waits up to a short timeout for a clean exit,
    then falls back to SIGKILL if the process is still alive.
    :return: None
    """
    def _is_running_child(proc, parent):
        # Guard against PID reuse: only signal a process that is both
        # still alive and still a child of the current process.
        return (proc.is_running()
                and proc.pid in [c.pid for c in parent.children()])

    if not self._process:
        # logging's warn() is a deprecated alias; use warning().
        self.log.warning('Ending without manager process.')
        return
    this_process = psutil.Process(os.getpid())
    try:
        manager_process = psutil.Process(self._process.pid)
    except psutil.NoSuchProcess:
        self.log.info("Manager process not running.")
        return
    # First try SIGTERM for a graceful shutdown.
    if _is_running_child(manager_process, this_process):
        self.log.info("Terminating manager process: %s", manager_process.pid)
        manager_process.terminate()
    # TODO: Remove magic number
    timeout = 5
    self.log.info("Waiting up to %ss for manager process to exit...", timeout)
    try:
        psutil.wait_procs({manager_process}, timeout)
    except psutil.TimeoutExpired:
        self.log.debug("Ran out of time while waiting for "
                       "processes to exit")
    # Then SIGKILL anything that survived the grace period.
    if _is_running_child(manager_process, this_process):
        self.log.info("Killing manager process: %s", manager_process.pid)
        manager_process.kill()
        manager_process.wait()
def measurement_time_typical(self):
"""Typical time in milliseconds required to complete a measurement in normal mode"""
meas_time_ms = 1.0
if self.overscan_temperature != OVERSCAN_DISABLE:
meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_temperature))
if self.overscan_pressure != OVERSCAN_DISABLE:
meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_pressure) + 0.5)
if self.overscan_humidity != OVERSCAN_DISABLE:
meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_humidity) + 0.5)
return meas_time_ms | Typical time in milliseconds required to complete a measurement in normal mode | Below is the the instruction that describes the task:
### Input:
Typical time in milliseconds required to complete a measurement in normal mode
### Response:
def measurement_time_typical(self):
    """Typical time in milliseconds required to complete a measurement in normal mode"""
    # Base conversion overhead, plus a per-channel cost proportional to
    # the configured oversampling factor.  Pressure and humidity carry an
    # extra fixed 0.5 ms each; temperature does not.
    total_ms = 1.0
    for overscan, extra_ms in (
            (self.overscan_temperature, 0.0),
            (self.overscan_pressure, 0.5),
            (self.overscan_humidity, 0.5)):
        if overscan != OVERSCAN_DISABLE:
            total_ms += 2 * _BME280_OVERSCANS.get(overscan) + extra_ms
    return total_ms
def formataddr(pair, charset='utf-8'):
"""The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
Optional charset if given is the character set that is used to encode
realname in case realname is not ASCII safe. Can be an instance of str or
a Charset-like object which has a header_encode method. Default is
'utf-8'.
"""
name, address = pair
# The address MUST (per RFC) be ascii, so raise an UnicodeError if it isn't.
address.encode('ascii')
if name:
try:
name.encode('ascii')
except UnicodeEncodeError:
if isinstance(charset, str):
charset = Charset(charset)
encoded_name = charset.header_encode(name)
return "%s <%s>" % (encoded_name, address)
else:
quotes = ''
if specialsre.search(name):
quotes = '"'
name = escapesre.sub(r'\\\g<0>', name)
return '%s%s%s <%s>' % (quotes, name, quotes, address)
return address | The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
Optional charset if given is the character set that is used to encode
realname in case realname is not ASCII safe. Can be an instance of str or
a Charset-like object which has a header_encode method. Default is
'utf-8'. | Below is the the instruction that describes the task:
### Input:
The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
Optional charset if given is the character set that is used to encode
realname in case realname is not ASCII safe. Can be an instance of str or
a Charset-like object which has a header_encode method. Default is
'utf-8'.
### Response:
def formataddr(pair, charset='utf-8'):
    """The inverse of parseaddr(), this takes a 2-tuple of the form
    (realname, email_address) and returns the string value suitable
    for an RFC 2822 From, To or Cc header.
    If the first element of pair is false, then the second element is
    returned unmodified.
    Optional charset if given is the character set that is used to encode
    realname in case realname is not ASCII safe. Can be an instance of str or
    a Charset-like object which has a header_encode method. Default is
    'utf-8'.
    """
    realname, addr = pair
    # The address MUST (per RFC) be ascii, so raise an UnicodeError if it isn't.
    addr.encode('ascii')
    if not realname:
        return addr
    try:
        realname.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII display name: RFC 2047-encode it via the charset.
        cs = Charset(charset) if isinstance(charset, str) else charset
        return "%s <%s>" % (cs.header_encode(realname), addr)
    # ASCII display name: quote it when it contains specials, and
    # backslash-escape embedded quotes/backslashes either way.
    quote = '"' if specialsre.search(realname) else ''
    escaped = escapesre.sub(r'\\\g<0>', realname)
    return '%s%s%s <%s>' % (quote, escaped, quote, addr)
def path(self, which=None):
"""Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
add_subscriptions
/hosts/<id>/add_subscriptions
remove_subscriptions
/hosts/<id>/remove_subscriptions
``super`` is called otherwise.
"""
if which in (
'add_subscriptions',
'remove_subscriptions'):
return '{0}/{1}'.format(
super(HostSubscription, self).path(which='base'),
which
)
return super(HostSubscription, self).path(which) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
add_subscriptions
/hosts/<id>/add_subscriptions
remove_subscriptions
/hosts/<id>/remove_subscriptions
``super`` is called otherwise. | Below is the the instruction that describes the task:
### Input:
Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
add_subscriptions
/hosts/<id>/add_subscriptions
remove_subscriptions
/hosts/<id>/remove_subscriptions
``super`` is called otherwise.
### Response:
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.
    The format of the returned path depends on the value of ``which``:
    add_subscriptions
        /hosts/<id>/add_subscriptions
    remove_subscriptions
        /hosts/<id>/remove_subscriptions
    ``super`` is called otherwise.
    """
    # These two sub-resources hang directly off the entity's base path.
    if which in ('add_subscriptions', 'remove_subscriptions'):
        base = super(HostSubscription, self).path(which='base')
        return '{0}/{1}'.format(base, which)
    return super(HostSubscription, self).path(which)
def generate_changeset(old, new, comment=None):
"""Diff two XML configs and return an object with changes to be written.
Args: old, new: lxml.etree.Element (<ResourceRecordSets>).
Returns: lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None"""
rrsets_tag = '{%s}ResourceRecordSets' % R53_XMLNS
if rrsets_tag not in (old.tag, new.tag):
log.error('both configs must be ResourceRecordSets tags. old: %s, new: %s' % (old.tag, new.tag))
raise InvalidArgumentException()
if comment is None:
comment = 'Generated by %s for %s@%s at %s.' % (
__file__,
os.environ['USER'],
socket.gethostname(),
time.strftime('%Y-%m-%d %H:%M:%S'))
root = lxml.etree.XML("""<ChangeResourceRecordSetsRequest xmlns="%s">
<ChangeBatch>
<Comment>%s</Comment>
<Changes/>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>""" % (
R53_XMLNS, comment), parser=XML_PARSER)
changesroot = root.find('.//{%s}Changes' % R53_XMLNS)
old = normalize_rrs(old)
new = normalize_rrs(new)
oldset = set([lxml.etree.tostring(x).rstrip() for x in old])
newset = set([lxml.etree.tostring(x).rstrip() for x in new])
if oldset == newset:
return None
# look for removed elements
for rrs in old:
rrsst = lxml.etree.tostring(rrs).rstrip()
if rrsst not in newset:
log.debug("REMOVED:")
log.debug(rrsst)
change = lxml.etree.XML('<Change xmlns="%s"><Action>DELETE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
change.append(rrs)
changesroot.append(change)
# look for added elements
for rrs in new:
rrsst = lxml.etree.tostring(rrs).rstrip()
if rrsst not in oldset:
log.debug("ADDED:")
log.debug(rrsst)
change = lxml.etree.XML('<Change xmlns="%s"><Action>CREATE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
change.append(rrs)
changesroot.append(change)
return root | Diff two XML configs and return an object with changes to be written.
Args: old, new: lxml.etree.Element (<ResourceRecordSets>).
Returns: lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None | Below is the the instruction that describes the task:
### Input:
Diff two XML configs and return an object with changes to be written.
Args: old, new: lxml.etree.Element (<ResourceRecordSets>).
Returns: lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None
### Response:
def generate_changeset(old, new, comment=None):
    """Diff two XML configs and return an object with changes to be written.
    Args: old, new: lxml.etree.Element (<ResourceRecordSets>).
          comment: optional ChangeBatch comment; autogenerated when None.
    Returns: lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None"""
    rrsets_tag = '{%s}ResourceRecordSets' % R53_XMLNS
    # Both inputs must be ResourceRecordSets elements.  (The previous check
    # only required ONE of them to match, contradicting the error message.)
    if old.tag != rrsets_tag or new.tag != rrsets_tag:
        log.error('both configs must be ResourceRecordSets tags. old: %s, new: %s' % (old.tag, new.tag))
        raise InvalidArgumentException()
    if comment is None:
        # USER may be unset (cron jobs, Windows); fall back rather than
        # raising KeyError while building a purely informational comment.
        comment = 'Generated by %s for %s@%s at %s.' % (
            __file__,
            os.environ.get('USER', 'unknown'),
            socket.gethostname(),
            time.strftime('%Y-%m-%d %H:%M:%S'))
    root = lxml.etree.XML("""<ChangeResourceRecordSetsRequest xmlns="%s">
 <ChangeBatch>
   <Comment>%s</Comment>
   <Changes/>
 </ChangeBatch>
</ChangeResourceRecordSetsRequest>""" % (
        R53_XMLNS, comment), parser=XML_PARSER)
    changesroot = root.find('.//{%s}Changes' % R53_XMLNS)
    old = normalize_rrs(old)
    new = normalize_rrs(new)
    # Compare record sets by their serialized form; identical configs need
    # no changeset at all.
    oldset = set([lxml.etree.tostring(x).rstrip() for x in old])
    newset = set([lxml.etree.tostring(x).rstrip() for x in new])
    if oldset == newset:
        return None
    # look for removed elements
    for rrs in old:
        rrsst = lxml.etree.tostring(rrs).rstrip()
        if rrsst not in newset:
            log.debug("REMOVED:")
            log.debug(rrsst)
            change = lxml.etree.XML('<Change xmlns="%s"><Action>DELETE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
            change.append(rrs)
            changesroot.append(change)
    # look for added elements
    for rrs in new:
        rrsst = lxml.etree.tostring(rrs).rstrip()
        if rrsst not in oldset:
            log.debug("ADDED:")
            log.debug(rrsst)
            change = lxml.etree.XML('<Change xmlns="%s"><Action>CREATE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
            change.append(rrs)
            changesroot.append(change)
    return root
def undo(self):
"""
Raises IndexError if more than one group is open, otherwise closes it
and invokes undo_nested_group.
"""
if self.grouping_level() == 1:
self.end_grouping()
if self._open:
raise IndexError
self.undo_nested_group()
self.notify() | Raises IndexError if more than one group is open, otherwise closes it
and invokes undo_nested_group. | Below is the the instruction that describes the task:
### Input:
Raises IndexError if more than one group is open, otherwise closes it
and invokes undo_nested_group.
### Response:
def undo(self):
    """
    Raises IndexError if more than one group is open, otherwise closes it
    and invokes undo_nested_group.
    """
    # A single still-open group is implicitly closed before undoing.
    if self.grouping_level() == 1:
        self.end_grouping()
    # If anything is still open here, nesting was deeper than one level.
    if self._open:
        raise IndexError
    self.undo_nested_group()
    self.notify()
def command_template(self):
"""Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtainted by using
`self.command_template().format(**self.args)`
"""
com_out = self.appname
for key, val in self.args.items():
if key in self._options:
com_out += ' %s={%s}' % (key, key)
else:
com_out += ' %s=%s' % (key, val)
return com_out | Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtainted by using
`self.command_template().format(**self.args)` | Below is the the instruction that describes the task:
### Input:
Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtainted by using
`self.command_template().format(**self.args)`
### Response:
def command_template(self):
    """Build and return a string that can be used as a template invoking
    this chain from the command line.
    The actual command can be obtained by using
    `self.command_template().format(**self.args)`
    """
    # Options become `{placeholders}` to be filled in later; everything
    # else is baked in with its current value.
    parts = [self.appname]
    for arg_name, arg_val in self.args.items():
        if arg_name in self._options:
            parts.append('%s={%s}' % (arg_name, arg_name))
        else:
            parts.append('%s=%s' % (arg_name, arg_val))
    return ' '.join(parts)
def all_nodes_that_run_in_env(service, env, service_configuration=None):
""" Returns all nodes that run in an environment. This needs
to be specified in field named 'env_runs_on' one level under services
in the configuration, and needs to contain an object which maps strings
to lists (environments to nodes).
:param service: A string specifying which service to look up nodes for
:param env: A string specifying which environment's nodes should be returned
:param service_configuration: A service_configuration dict to look in or None to
use the default dict.
:returns: list of all nodes running in a certain environment
"""
if service_configuration is None:
service_configuration = read_services_configuration()
env_runs_on = service_configuration[service]['env_runs_on']
if env in env_runs_on:
return list(sorted(env_runs_on[env]))
else:
return [] | Returns all nodes that run in an environment. This needs
to be specified in field named 'env_runs_on' one level under services
in the configuration, and needs to contain an object which maps strings
to lists (environments to nodes).
:param service: A string specifying which service to look up nodes for
:param env: A string specifying which environment's nodes should be returned
:param service_configuration: A service_configuration dict to look in or None to
use the default dict.
:returns: list of all nodes running in a certain environment | Below is the the instruction that describes the task:
### Input:
Returns all nodes that run in an environment. This needs
to be specified in field named 'env_runs_on' one level under services
in the configuration, and needs to contain an object which maps strings
to lists (environments to nodes).
:param service: A string specifying which service to look up nodes for
:param env: A string specifying which environment's nodes should be returned
:param service_configuration: A service_configuration dict to look in or None to
use the default dict.
:returns: list of all nodes running in a certain environment
### Response:
def all_nodes_that_run_in_env(service, env, service_configuration=None):
    """ Returns all nodes that run in an environment. This needs
    to be specified in field named 'env_runs_on' one level under services
    in the configuration, and needs to contain an object which maps strings
    to lists (environments to nodes).
    :param service: A string specifying which service to look up nodes for
    :param env: A string specifying which environment's nodes should be returned
    :param service_configuration: A service_configuration dict to look in or None to
                                  use the default dict.
    :returns: list of all nodes running in a certain environment
    """
    if service_configuration is None:
        service_configuration = read_services_configuration()
    env_runs_on = service_configuration[service]['env_runs_on']
    # Unknown environments simply yield an empty node list.
    return sorted(env_runs_on.get(env, []))
def parseNoise(rawArray):
'''
Function returns indices that contain non-noisy genes as an integer array
:param rawArray: numpy ndarray of data set
:return nnGenes : numpy ndarray of non-noise gene indices
'''
nnGenes=[]
for i in range(0,(rawArray.shape[1])): #Checks all genes
count0=np.asarray(np.where(rawArray[:,i]==0))
count1=np.asarray(np.where(rawArray[:,i]==1))
if ((count1.shape[1]+count0.shape[1])<rawArray.shape[0]):
nnGenes=np.append(nnGenes,i)
return nnGenes.astype(int) | Function returns indices that contain non-noisy genes as an integer array
:param rawArray: numpy ndarray of data set
:return nnGenes : numpy ndarray of non-noise gene indices | Below is the the instruction that describes the task:
### Input:
Function returns indices that contain non-noisy genes as an integer array
:param rawArray: numpy ndarray of data set
:return nnGenes : numpy ndarray of non-noise gene indices
### Response:
def parseNoise(rawArray):
    '''
    Function returns indices that contain non-noisy genes as an integer array

    A gene (column) is considered noise when every one of its samples is
    exactly 0 or 1; all other columns are kept.

    :param rawArray: numpy ndarray of data set (samples x genes)
    :return nnGenes : numpy ndarray of non-noise gene (column) indices
    '''
    # Vectorized replacement for the original per-column np.append loop:
    # one boolean pass instead of O(genes) array reallocations.  This also
    # fixes a crash (AttributeError on list.astype) that occurred when
    # every gene was noisy and the accumulator was still a plain list.
    zero_or_one = (rawArray == 0) | (rawArray == 1)
    nnGenes = np.where(~zero_or_one.all(axis=0))[0]
    return nnGenes.astype(int)
def clean_html5lib(input):
"""
Takes an HTML fragment and processes it using html5lib to ensure that the HTML is well-formed.
>>> clean_html5lib("<p>Foo<b>bar</b></p>")
u'<p>Foo<b>bar</b></p>'
>>> clean_html5lib("<p>Foo<b>bar</b><i>Ooops!</p>")
u'<p>Foo<b>bar</b><i>Ooops!</i></p>'
>>> clean_html5lib('<p>Foo<b>bar</b>& oops<a href="#foo&bar">This is a <>link</a></p>')
u'<p>Foo<b>bar</b>& oops<a href=#foo&bar>This is a <>link</a></p>'
"""
from html5lib import treebuilders, treewalkers, serializer, sanitizer
p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
dom_tree = p.parseFragment(input)
walker = treewalkers.getTreeWalker("dom")
stream = walker(dom_tree)
s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False)
return "".join(s.serialize(stream)) | Takes an HTML fragment and processes it using html5lib to ensure that the HTML is well-formed.
>>> clean_html5lib("<p>Foo<b>bar</b></p>")
u'<p>Foo<b>bar</b></p>'
>>> clean_html5lib("<p>Foo<b>bar</b><i>Ooops!</p>")
u'<p>Foo<b>bar</b><i>Ooops!</i></p>'
>>> clean_html5lib('<p>Foo<b>bar</b>& oops<a href="#foo&bar">This is a <>link</a></p>')
u'<p>Foo<b>bar</b>& oops<a href=#foo&bar>This is a <>link</a></p>' | Below is the the instruction that describes the task:
### Input:
Takes an HTML fragment and processes it using html5lib to ensure that the HTML is well-formed.
>>> clean_html5lib("<p>Foo<b>bar</b></p>")
u'<p>Foo<b>bar</b></p>'
>>> clean_html5lib("<p>Foo<b>bar</b><i>Ooops!</p>")
u'<p>Foo<b>bar</b><i>Ooops!</i></p>'
>>> clean_html5lib('<p>Foo<b>bar</b>& oops<a href="#foo&bar">This is a <>link</a></p>')
u'<p>Foo<b>bar</b>& oops<a href=#foo&bar>This is a <>link</a></p>'
### Response:
def clean_html5lib(input):
    """
    Takes an HTML fragment and processes it using html5lib to ensure that the HTML is well-formed.
    >>> clean_html5lib("<p>Foo<b>bar</b></p>")
    u'<p>Foo<b>bar</b></p>'
    >>> clean_html5lib("<p>Foo<b>bar</b><i>Ooops!</p>")
    u'<p>Foo<b>bar</b><i>Ooops!</i></p>'
    >>> clean_html5lib('<p>Foo<b>bar</b>& oops<a href="#foo&bar">This is a <>link</a></p>')
    u'<p>Foo<b>bar</b>& oops<a href=#foo&bar>This is a <>link</a></p>'
    """
    # `sanitizer` was imported but never used; dropped.
    from html5lib import treebuilders, treewalkers, serializer
    # Parse leniently into a DOM tree (html5lib auto-closes broken tags) ...
    p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
    dom_tree = p.parseFragment(input)
    # ... then re-serialize the tree back to a well-formed HTML string.
    walker = treewalkers.getTreeWalker("dom")
    stream = walker(dom_tree)
    s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False)
    return "".join(s.serialize(stream))
def get_place_tags(index_page, domain): #: TODO geoip to docstring
"""
Return list of `place` tags parsed from `meta` and `whois`.
Args:
index_page (str): HTML content of the page you wisht to analyze.
domain (str): Domain of the web, without ``http://`` or other parts.
Returns:
list: List of :class:`.SourceString` objects.
"""
ip_address = get_ip_address(domain)
dom = dhtmlparser.parseString(index_page)
place_tags = [
get_html_geo_place_tags(dom),
get_whois_tags(ip_address),
# [_get_geo_place_tag(ip_address)], # TODO: implement geoip
]
return sum(place_tags, []) | Return list of `place` tags parsed from `meta` and `whois`.
Args:
index_page (str): HTML content of the page you wisht to analyze.
domain (str): Domain of the web, without ``http://`` or other parts.
Returns:
list: List of :class:`.SourceString` objects. | Below is the the instruction that describes the task:
### Input:
Return list of `place` tags parsed from `meta` and `whois`.
Args:
index_page (str): HTML content of the page you wisht to analyze.
domain (str): Domain of the web, without ``http://`` or other parts.
Returns:
list: List of :class:`.SourceString` objects.
### Response:
def get_place_tags(index_page, domain):  #: TODO geoip to docstring
    """
    Return list of `place` tags parsed from `meta` and `whois`.
    Args:
        index_page (str): HTML content of the page you wish to analyze.
        domain (str): Domain of the web, without ``http://`` or other parts.
    Returns:
        list: List of :class:`.SourceString` objects.
    """
    ip_address = get_ip_address(domain)
    dom = dhtmlparser.parseString(index_page)
    # Concatenate tags from the HTML <meta> geo info and the whois record.
    # TODO: implement geoip (_get_geo_place_tag(ip_address)) and add it here
    return get_html_geo_place_tags(dom) + get_whois_tags(ip_address)
def _filter_filecommands(self, filecmd_iter):
"""Return the filecommands filtered by includes & excludes.
:return: a list of FileCommand objects
"""
if self.includes is None and self.excludes is None:
return list(filecmd_iter())
# Do the filtering, adjusting for the new_root
result = []
for fc in filecmd_iter():
if (isinstance(fc, commands.FileModifyCommand) or
isinstance(fc, commands.FileDeleteCommand)):
if self._path_to_be_kept(fc.path):
fc.path = self._adjust_for_new_root(fc.path)
else:
continue
elif isinstance(fc, commands.FileDeleteAllCommand):
pass
elif isinstance(fc, commands.FileRenameCommand):
fc = self._convert_rename(fc)
elif isinstance(fc, commands.FileCopyCommand):
fc = self._convert_copy(fc)
else:
self.warning("cannot handle FileCommands of class %s - ignoring",
fc.__class__)
continue
if fc is not None:
result.append(fc)
return result | Return the filecommands filtered by includes & excludes.
:return: a list of FileCommand objects | Below is the the instruction that describes the task:
### Input:
Return the filecommands filtered by includes & excludes.
:return: a list of FileCommand objects
### Response:
def _filter_filecommands(self, filecmd_iter):
    """Return the filecommands filtered by includes & excludes.
    :param filecmd_iter: callable returning an iterable of FileCommand
        objects (note: it is *called*, not iterated directly).
    :return: a list of FileCommand objects
    """
    # No filters configured: pass every command through untouched.
    if self.includes is None and self.excludes is None:
        return list(filecmd_iter())
    # Do the filtering, adjusting for the new_root
    result = []
    for fc in filecmd_iter():
        if (isinstance(fc, commands.FileModifyCommand) or
                isinstance(fc, commands.FileDeleteCommand)):
            # Keep only paths that pass the include/exclude rules, and
            # rewrite the kept paths relative to the new root (mutates fc).
            if self._path_to_be_kept(fc.path):
                fc.path = self._adjust_for_new_root(fc.path)
            else:
                continue
        elif isinstance(fc, commands.FileDeleteAllCommand):
            # deleteall carries no path, so it is always kept as-is.
            pass
        elif isinstance(fc, commands.FileRenameCommand):
            fc = self._convert_rename(fc)
        elif isinstance(fc, commands.FileCopyCommand):
            fc = self._convert_copy(fc)
        else:
            self.warning("cannot handle FileCommands of class %s - ignoring",
                fc.__class__)
            continue
        # _convert_rename/_convert_copy return None when the command is
        # filtered out entirely.
        if fc is not None:
            result.append(fc)
    return result
def claim_keys(self, key_request, timeout=None):
"""Claims one-time keys for use in pre-key messages.
Args:
key_request (dict): The keys to be claimed. Format should be
<user_id>: { <device_id>: <algorithm> }.
timeout (int): Optional. The time (in milliseconds) to wait when
downloading keys from remote servers.
"""
content = {"one_time_keys": key_request}
if timeout:
content["timeout"] = timeout
return self._send("POST", "/keys/claim", content=content) | Claims one-time keys for use in pre-key messages.
Args:
key_request (dict): The keys to be claimed. Format should be
<user_id>: { <device_id>: <algorithm> }.
timeout (int): Optional. The time (in milliseconds) to wait when
downloading keys from remote servers. | Below is the the instruction that describes the task:
### Input:
Claims one-time keys for use in pre-key messages.
Args:
key_request (dict): The keys to be claimed. Format should be
<user_id>: { <device_id>: <algorithm> }.
timeout (int): Optional. The time (in milliseconds) to wait when
downloading keys from remote servers.
### Response:
def claim_keys(self, key_request, timeout=None):
    """Claims one-time keys for use in pre-key messages.
    Args:
        key_request (dict): The keys to be claimed. Format should be
            <user_id>: { <device_id>: <algorithm> }.
        timeout (int): Optional. The time (in milliseconds) to wait when
            downloading keys from remote servers.
    """
    payload = {"one_time_keys": key_request}
    # Only include the timeout field when a (truthy) value was given.
    if timeout:
        payload["timeout"] = timeout
    return self._send("POST", "/keys/claim", content=payload)
def check_login(cookie, tokens, username):
'''进行登录验证, 主要是在服务器上验证这个帐户的状态.
如果帐户不存在, 或者帐户异常, 就不需要再进行最后一步的登录操作了.
这一步有可能需要输入验证码.
返回的信息如下:
{"errInfo":{ "no": "0" }, "data": { "codeString" : "", "vcodetype" : "" }}
'''
url = ''.join([
const.PASSPORT_URL,
'?logincheck',
'&token=', tokens['token'],
'&tpl=mm&apiver=v3',
'&tt=', util.timestamp(),
'&username=', encoder.encode_uri_component(username),
'&isphone=false',
])
headers={
'Cookie': cookie.header_output(),
'Referer': const.REFERER,
}
req = net.urlopen(url, headers=headers)
if req:
ubi = req.headers.get_all('Set-Cookie')
return ubi, json.loads(req.data.decode())
else:
return None | 进行登录验证, 主要是在服务器上验证这个帐户的状态.
如果帐户不存在, 或者帐户异常, 就不需要再进行最后一步的登录操作了.
这一步有可能需要输入验证码.
返回的信息如下:
{"errInfo":{ "no": "0" }, "data": { "codeString" : "", "vcodetype" : "" }} | Below is the the instruction that describes the task:
### Input:
进行登录验证, 主要是在服务器上验证这个帐户的状态.
如果帐户不存在, 或者帐户异常, 就不需要再进行最后一步的登录操作了.
这一步有可能需要输入验证码.
返回的信息如下:
{"errInfo":{ "no": "0" }, "data": { "codeString" : "", "vcodetype" : "" }}
### Response:
def check_login(cookie, tokens, username):
    '''Validate the account status on the server before logging in.

    If the account does not exist or is in an abnormal state, the final
    login step can be skipped entirely.  This step may require entering
    a captcha.

    The returned info looks like:
    {"errInfo":{ "no": "0" }, "data": { "codeString" : "", "vcodetype" : "" }}

    :param cookie: cookie object with a ``header_output()`` method
    :param tokens: dict holding at least the 'token' key
    :param username: account name to check (URI-encoded before sending)
    :return: (set_cookie_headers, parsed_json) tuple, or None on failure
    '''
    url = ''.join([
        const.PASSPORT_URL,
        '?logincheck',
        '&token=', tokens['token'],
        '&tpl=mm&apiver=v3',
        '&tt=', util.timestamp(),
        '&username=', encoder.encode_uri_component(username),
        '&isphone=false',
    ])
    headers={
        'Cookie': cookie.header_output(),
        'Referer': const.REFERER,
    }
    req = net.urlopen(url, headers=headers)
    if req:
        # Collect every Set-Cookie header issued during the check;
        # presumably needed by the subsequent login request — TODO confirm.
        ubi = req.headers.get_all('Set-Cookie')
        return ubi, json.loads(req.data.decode())
    else:
        return None
def temporal_derivatives(order, variables, data):
"""
Compute temporal derivative terms by the method of backwards differences.
Parameters
----------
order: range or list(int)
A list of temporal derivative terms to include. For instance, [1, 2]
indicates that the first and second derivative terms should be added.
To retain the original terms, 0 *must* be included in the list.
variables: list(str)
List of variables for which temporal derivative terms should be
computed.
data: pandas DataFrame object
Table of values of all observations of all variables.
Returns
-------
variables_deriv: list
A list of variables to include in the final data frame after adding
the specified derivative terms.
data_deriv: pandas DataFrame object
Table of values of all observations of all variables, including any
specified derivative terms.
"""
variables_deriv = OrderedDict()
data_deriv = OrderedDict()
if 0 in order:
data_deriv[0] = data[variables]
variables_deriv[0] = variables
order = set(order) - set([0])
for o in order:
variables_deriv[o] = ['{}_derivative{}'.format(v, o)
for v in variables]
data_deriv[o] = np.tile(np.nan, data[variables].shape)
data_deriv[o][o:, :] = np.diff(data[variables], n=o, axis=0)
variables_deriv = reduce((lambda x, y: x + y), variables_deriv.values())
data_deriv = pd.DataFrame(columns=variables_deriv,
data=np.concatenate([*data_deriv.values()],
axis=1))
return (variables_deriv, data_deriv) | Compute temporal derivative terms by the method of backwards differences.
Parameters
----------
order: range or list(int)
A list of temporal derivative terms to include. For instance, [1, 2]
indicates that the first and second derivative terms should be added.
To retain the original terms, 0 *must* be included in the list.
variables: list(str)
List of variables for which temporal derivative terms should be
computed.
data: pandas DataFrame object
Table of values of all observations of all variables.
Returns
-------
variables_deriv: list
A list of variables to include in the final data frame after adding
the specified derivative terms.
data_deriv: pandas DataFrame object
Table of values of all observations of all variables, including any
specified derivative terms. | Below is the the instruction that describes the task:
### Input:
Compute temporal derivative terms by the method of backwards differences.
Parameters
----------
order: range or list(int)
A list of temporal derivative terms to include. For instance, [1, 2]
indicates that the first and second derivative terms should be added.
To retain the original terms, 0 *must* be included in the list.
variables: list(str)
List of variables for which temporal derivative terms should be
computed.
data: pandas DataFrame object
Table of values of all observations of all variables.
Returns
-------
variables_deriv: list
A list of variables to include in the final data frame after adding
the specified derivative terms.
data_deriv: pandas DataFrame object
Table of values of all observations of all variables, including any
specified derivative terms.
### Response:
def temporal_derivatives(order, variables, data):
"""
Compute temporal derivative terms by the method of backwards differences.
Parameters
----------
order: range or list(int)
A list of temporal derivative terms to include. For instance, [1, 2]
indicates that the first and second derivative terms should be added.
To retain the original terms, 0 *must* be included in the list.
variables: list(str)
List of variables for which temporal derivative terms should be
computed.
data: pandas DataFrame object
Table of values of all observations of all variables.
Returns
-------
variables_deriv: list
A list of variables to include in the final data frame after adding
the specified derivative terms.
data_deriv: pandas DataFrame object
Table of values of all observations of all variables, including any
specified derivative terms.
"""
variables_deriv = OrderedDict()
data_deriv = OrderedDict()
if 0 in order:
data_deriv[0] = data[variables]
variables_deriv[0] = variables
order = set(order) - set([0])
for o in order:
variables_deriv[o] = ['{}_derivative{}'.format(v, o)
for v in variables]
data_deriv[o] = np.tile(np.nan, data[variables].shape)
data_deriv[o][o:, :] = np.diff(data[variables], n=o, axis=0)
variables_deriv = reduce((lambda x, y: x + y), variables_deriv.values())
data_deriv = pd.DataFrame(columns=variables_deriv,
data=np.concatenate([*data_deriv.values()],
axis=1))
return (variables_deriv, data_deriv) |
def create_templates_static_files(app_path):
"""
create templates and static
"""
templates_path = os.path.join(app_path, 'templates')
static_path = os.path.join(app_path, 'static')
_mkdir_p(templates_path)
_mkdir_p(static_path)
# create {img, css, js}
os.chdir(static_path)
img_path = os.path.join(static_path, 'img')
css_path = os.path.join(static_path, 'css')
js_path = os.path.join(static_path, 'js')
_mkdir_p(img_path)
_mkdir_p(css_path)
_mkdir_p(js_path)
return css_path, templates_path | create templates and static | Below is the the instruction that describes the task:
### Input:
create templates and static
### Response:
def create_templates_static_files(app_path):
"""
create templates and static
"""
templates_path = os.path.join(app_path, 'templates')
static_path = os.path.join(app_path, 'static')
_mkdir_p(templates_path)
_mkdir_p(static_path)
# create {img, css, js}
os.chdir(static_path)
img_path = os.path.join(static_path, 'img')
css_path = os.path.join(static_path, 'css')
js_path = os.path.join(static_path, 'js')
_mkdir_p(img_path)
_mkdir_p(css_path)
_mkdir_p(js_path)
return css_path, templates_path |
def ParseAction(self, action):
"""Extract log configuration data from rsyslog actions.
Actions have the format:
<facility>/<severity> <type_def><destination>;<template>
e.g. *.* @@loghost.example.com.:514;RSYSLOG_ForwardFormat
Actions are selected by a type definition. These include:
"@@": TCP syslog
"@": UDP syslog
"|": Named pipe
"~": Drop to /dev/null
"^": Shell script
":om<string>:": An output module
Or a file path.
Args:
action: The action string from rsyslog.
Returns:
a rdfvalue.LogTarget message.
"""
rslt = rdf_config_file.LogTarget()
for dst_str, dst_re in iteritems(self.destinations):
dst = dst_re.match(action)
if dst:
rslt.transport = dst_str
rslt.destination = dst.group(1)
break
return rslt | Extract log configuration data from rsyslog actions.
Actions have the format:
<facility>/<severity> <type_def><destination>;<template>
e.g. *.* @@loghost.example.com.:514;RSYSLOG_ForwardFormat
Actions are selected by a type definition. These include:
"@@": TCP syslog
"@": UDP syslog
"|": Named pipe
"~": Drop to /dev/null
"^": Shell script
":om<string>:": An output module
Or a file path.
Args:
action: The action string from rsyslog.
Returns:
a rdfvalue.LogTarget message. | Below is the the instruction that describes the task:
### Input:
Extract log configuration data from rsyslog actions.
Actions have the format:
<facility>/<severity> <type_def><destination>;<template>
e.g. *.* @@loghost.example.com.:514;RSYSLOG_ForwardFormat
Actions are selected by a type definition. These include:
"@@": TCP syslog
"@": UDP syslog
"|": Named pipe
"~": Drop to /dev/null
"^": Shell script
":om<string>:": An output module
Or a file path.
Args:
action: The action string from rsyslog.
Returns:
a rdfvalue.LogTarget message.
### Response:
def ParseAction(self, action):
"""Extract log configuration data from rsyslog actions.
Actions have the format:
<facility>/<severity> <type_def><destination>;<template>
e.g. *.* @@loghost.example.com.:514;RSYSLOG_ForwardFormat
Actions are selected by a type definition. These include:
"@@": TCP syslog
"@": UDP syslog
"|": Named pipe
"~": Drop to /dev/null
"^": Shell script
":om<string>:": An output module
Or a file path.
Args:
action: The action string from rsyslog.
Returns:
a rdfvalue.LogTarget message.
"""
rslt = rdf_config_file.LogTarget()
for dst_str, dst_re in iteritems(self.destinations):
dst = dst_re.match(action)
if dst:
rslt.transport = dst_str
rslt.destination = dst.group(1)
break
return rslt |
def debugger():
"""Return the current debugger instance, or create if none."""
sdb = _current[0]
if sdb is None or not sdb.active:
sdb = _current[0] = Sdb()
return sdb | Return the current debugger instance, or create if none. | Below is the the instruction that describes the task:
### Input:
Return the current debugger instance, or create if none.
### Response:
def debugger():
"""Return the current debugger instance, or create if none."""
sdb = _current[0]
if sdb is None or not sdb.active:
sdb = _current[0] = Sdb()
return sdb |
def balance_of_contacts(records, weighted=True):
"""
The balance of interactions per contact. For every contact,
the balance is the number of outgoing interactions divided by the total
number of interactions (in+out).
.. math::
\\forall \\,\\text{contact}\\,c,\\;\\text{balance}\,(c) = \\frac{\\bigl|\\text{outgoing}\,(c)\\bigr|}{\\bigl|\\text{outgoing}\,(c)\\bigr|+\\bigl|\\text{incoming}\,(c)\\bigr|}
Parameters
----------
weighted : str, optional
If ``True``, the balance for each contact is weighted by
the number of interactions the user had with this contact.
"""
counter_out = defaultdict(int)
counter = defaultdict(int)
for r in records:
if r.direction == 'out':
counter_out[r.correspondent_id] += 1
counter[r.correspondent_id] += 1
if not weighted:
balance = [counter_out[c] / counter[c] for c in counter]
else:
balance = [counter_out[c] / sum(counter.values()) for c in counter]
return summary_stats(balance) | The balance of interactions per contact. For every contact,
the balance is the number of outgoing interactions divided by the total
number of interactions (in+out).
.. math::
\\forall \\,\\text{contact}\\,c,\\;\\text{balance}\,(c) = \\frac{\\bigl|\\text{outgoing}\,(c)\\bigr|}{\\bigl|\\text{outgoing}\,(c)\\bigr|+\\bigl|\\text{incoming}\,(c)\\bigr|}
Parameters
----------
weighted : str, optional
If ``True``, the balance for each contact is weighted by
the number of interactions the user had with this contact. | Below is the the instruction that describes the task:
### Input:
The balance of interactions per contact. For every contact,
the balance is the number of outgoing interactions divided by the total
number of interactions (in+out).
.. math::
\\forall \\,\\text{contact}\\,c,\\;\\text{balance}\,(c) = \\frac{\\bigl|\\text{outgoing}\,(c)\\bigr|}{\\bigl|\\text{outgoing}\,(c)\\bigr|+\\bigl|\\text{incoming}\,(c)\\bigr|}
Parameters
----------
weighted : str, optional
If ``True``, the balance for each contact is weighted by
the number of interactions the user had with this contact.
### Response:
def balance_of_contacts(records, weighted=True):
"""
The balance of interactions per contact. For every contact,
the balance is the number of outgoing interactions divided by the total
number of interactions (in+out).
.. math::
\\forall \\,\\text{contact}\\,c,\\;\\text{balance}\,(c) = \\frac{\\bigl|\\text{outgoing}\,(c)\\bigr|}{\\bigl|\\text{outgoing}\,(c)\\bigr|+\\bigl|\\text{incoming}\,(c)\\bigr|}
Parameters
----------
weighted : str, optional
If ``True``, the balance for each contact is weighted by
the number of interactions the user had with this contact.
"""
counter_out = defaultdict(int)
counter = defaultdict(int)
for r in records:
if r.direction == 'out':
counter_out[r.correspondent_id] += 1
counter[r.correspondent_id] += 1
if not weighted:
balance = [counter_out[c] / counter[c] for c in counter]
else:
balance = [counter_out[c] / sum(counter.values()) for c in counter]
return summary_stats(balance) |
def step4(self, encrypted_data):
"""Last pairing step."""
chacha = chacha20.Chacha20Cipher(self._session_key, self._session_key)
decrypted_tlv_bytes = chacha.decrypt(
encrypted_data, nounce='PS-Msg06'.encode())
if not decrypted_tlv_bytes:
raise Exception('data decrypt failed') # TODO: new exception
decrypted_tlv = tlv8.read_tlv(decrypted_tlv_bytes)
_LOGGER.debug('PS-Msg06: %s', decrypted_tlv)
atv_identifier = decrypted_tlv[tlv8.TLV_IDENTIFIER]
atv_signature = decrypted_tlv[tlv8.TLV_SIGNATURE]
atv_pub_key = decrypted_tlv[tlv8.TLV_PUBLIC_KEY]
log_binary(_LOGGER,
'Device',
Identifier=atv_identifier,
Signature=atv_signature,
Public=atv_pub_key)
# TODO: verify signature here
return Credentials(atv_pub_key, self._signing_key.to_seed(),
atv_identifier, self.pairing_id) | Last pairing step. | Below is the the instruction that describes the task:
### Input:
Last pairing step.
### Response:
def step4(self, encrypted_data):
"""Last pairing step."""
chacha = chacha20.Chacha20Cipher(self._session_key, self._session_key)
decrypted_tlv_bytes = chacha.decrypt(
encrypted_data, nounce='PS-Msg06'.encode())
if not decrypted_tlv_bytes:
raise Exception('data decrypt failed') # TODO: new exception
decrypted_tlv = tlv8.read_tlv(decrypted_tlv_bytes)
_LOGGER.debug('PS-Msg06: %s', decrypted_tlv)
atv_identifier = decrypted_tlv[tlv8.TLV_IDENTIFIER]
atv_signature = decrypted_tlv[tlv8.TLV_SIGNATURE]
atv_pub_key = decrypted_tlv[tlv8.TLV_PUBLIC_KEY]
log_binary(_LOGGER,
'Device',
Identifier=atv_identifier,
Signature=atv_signature,
Public=atv_pub_key)
# TODO: verify signature here
return Credentials(atv_pub_key, self._signing_key.to_seed(),
atv_identifier, self.pairing_id) |
def __is_file_to_be_busted(self, filepath):
"""
:param filepath:
:return: True or False
"""
if not self.extensions:
return True
return Path(filepath).suffix in self.extensions if filepath else False | :param filepath:
:return: True or False | Below is the the instruction that describes the task:
### Input:
:param filepath:
:return: True or False
### Response:
def __is_file_to_be_busted(self, filepath):
"""
:param filepath:
:return: True or False
"""
if not self.extensions:
return True
return Path(filepath).suffix in self.extensions if filepath else False |
def _binary_enable_zero_disable_one_reverse_conversion(cls, val, **kwargs):
'''
converts Enabled/Disabled to unicode char to write to a REG_BINARY value
'''
if val is not None:
if val.upper() == 'DISABLED':
return chr(0)
elif val.upper() == 'ENABLED':
return chr(1)
else:
return None
else:
return None | converts Enabled/Disabled to unicode char to write to a REG_BINARY value | Below is the the instruction that describes the task:
### Input:
converts Enabled/Disabled to unicode char to write to a REG_BINARY value
### Response:
def _binary_enable_zero_disable_one_reverse_conversion(cls, val, **kwargs):
'''
converts Enabled/Disabled to unicode char to write to a REG_BINARY value
'''
if val is not None:
if val.upper() == 'DISABLED':
return chr(0)
elif val.upper() == 'ENABLED':
return chr(1)
else:
return None
else:
return None |
def _make_options(x):
"""Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated
"""
# Check if x is a mapping of labels to values
if isinstance(x, Mapping):
import warnings
warnings.warn("Support for mapping types has been deprecated and will be dropped in a future release.", DeprecationWarning)
return tuple((unicode_type(k), v) for k, v in x.items())
# only iterate once through the options.
xlist = tuple(x)
# Check if x is an iterable of (label, value) pairs
if all((isinstance(i, (list, tuple)) and len(i) == 2) for i in xlist):
return tuple((unicode_type(k), v) for k, v in xlist)
# Otherwise, assume x is an iterable of values
return tuple((unicode_type(i), i) for i in xlist) | Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated | Below is the the instruction that describes the task:
### Input:
Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated
### Response:
def _make_options(x):
"""Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated
"""
# Check if x is a mapping of labels to values
if isinstance(x, Mapping):
import warnings
warnings.warn("Support for mapping types has been deprecated and will be dropped in a future release.", DeprecationWarning)
return tuple((unicode_type(k), v) for k, v in x.items())
# only iterate once through the options.
xlist = tuple(x)
# Check if x is an iterable of (label, value) pairs
if all((isinstance(i, (list, tuple)) and len(i) == 2) for i in xlist):
return tuple((unicode_type(k), v) for k, v in xlist)
# Otherwise, assume x is an iterable of values
return tuple((unicode_type(i), i) for i in xlist) |
def _handle_repos(self, root):
"""Handle repo configuration."""
ElementTree.SubElement(root, "key").text = "repos"
repos_array = ElementTree.SubElement(root, "array")
# Make a temporary jss object to try to pull repo information.
jss_server = JSS(url=self.url, user=self.user, password=self.password,
ssl_verify=self.verify, suppress_warnings=True)
print "Fetching distribution point info..."
try:
dpts = jss_server.DistributionPoint()
except JSSGetError:
print ("Fetching distribution point info failed. If you want to "
"configure distribution points, ensure that your API user "
"has read permissions for distribution points, and that "
"the URL, username, and password are correct.")
dpts = None
if dpts:
print ("There are file share distribution points configured on "
"your JSS. Most of the configuration can be automated "
"from the information on the JSS, with the exception of "
"the password for the R/W user.\n")
for dpt in dpts:
repo_dict = ElementTree.SubElement(repos_array, "dict")
repo_name_key = ElementTree.SubElement(repo_dict, "key")
repo_name_key.text = "name"
repo_name_string = ElementTree.SubElement(repo_dict, "string")
repo_name_string.text = dpt.get("name")
repo_pass_key = ElementTree.SubElement(repo_dict, "key")
repo_pass_key.text = "password"
repo_pass_string = ElementTree.SubElement(repo_dict, "string")
repo_pass_string.text = getpass.getpass(
"Please enter the R/W user's password for distribution "
"point: %s: " % dpt.get("name", "<NO NAME CONFIGURED>"))
_handle_dist_server("JDS", repos_array)
_handle_dist_server("CDP", repos_array) | Handle repo configuration. | Below is the the instruction that describes the task:
### Input:
Handle repo configuration.
### Response:
def _handle_repos(self, root):
"""Handle repo configuration."""
ElementTree.SubElement(root, "key").text = "repos"
repos_array = ElementTree.SubElement(root, "array")
# Make a temporary jss object to try to pull repo information.
jss_server = JSS(url=self.url, user=self.user, password=self.password,
ssl_verify=self.verify, suppress_warnings=True)
print "Fetching distribution point info..."
try:
dpts = jss_server.DistributionPoint()
except JSSGetError:
print ("Fetching distribution point info failed. If you want to "
"configure distribution points, ensure that your API user "
"has read permissions for distribution points, and that "
"the URL, username, and password are correct.")
dpts = None
if dpts:
print ("There are file share distribution points configured on "
"your JSS. Most of the configuration can be automated "
"from the information on the JSS, with the exception of "
"the password for the R/W user.\n")
for dpt in dpts:
repo_dict = ElementTree.SubElement(repos_array, "dict")
repo_name_key = ElementTree.SubElement(repo_dict, "key")
repo_name_key.text = "name"
repo_name_string = ElementTree.SubElement(repo_dict, "string")
repo_name_string.text = dpt.get("name")
repo_pass_key = ElementTree.SubElement(repo_dict, "key")
repo_pass_key.text = "password"
repo_pass_string = ElementTree.SubElement(repo_dict, "string")
repo_pass_string.text = getpass.getpass(
"Please enter the R/W user's password for distribution "
"point: %s: " % dpt.get("name", "<NO NAME CONFIGURED>"))
_handle_dist_server("JDS", repos_array)
_handle_dist_server("CDP", repos_array) |
async def export_wallet(handle: int,
export_config_json: str) -> None:
"""
Exports opened wallet to the file.
:param handle: wallet handle returned by indy_open_wallet.
:param export_config_json: JSON containing settings for input operation.
{
"path": path of the file that contains exported wallet content
"key": string, Key or passphrase used for wallet export key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"key_derivation_method": optional<string> algorithm to use for export key derivation:
ARGON2I_MOD - derive secured wallet export key (used by default)
ARGON2I_INT - derive secured wallet export key (less secured but faster)
RAW - raw wallet export key provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return:
"""
logger = logging.getLogger(__name__)
logger.debug("export_wallet: >>> handle: %r, export_config_json: %r",
handle,
export_config_json)
if not hasattr(export_wallet, "cb"):
logger.debug("export_wallet: Creating callback")
export_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_export_config_json = c_char_p(export_config_json.encode('utf-8'))
await do_call('indy_export_wallet',
handle,
c_export_config_json,
export_wallet.cb)
logger.debug("export_wallet: <<<") | Exports opened wallet to the file.
:param handle: wallet handle returned by indy_open_wallet.
:param export_config_json: JSON containing settings for input operation.
{
"path": path of the file that contains exported wallet content
"key": string, Key or passphrase used for wallet export key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"key_derivation_method": optional<string> algorithm to use for export key derivation:
ARGON2I_MOD - derive secured wallet export key (used by default)
ARGON2I_INT - derive secured wallet export key (less secured but faster)
RAW - raw wallet export key provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return: | Below is the the instruction that describes the task:
### Input:
Exports opened wallet to the file.
:param handle: wallet handle returned by indy_open_wallet.
:param export_config_json: JSON containing settings for input operation.
{
"path": path of the file that contains exported wallet content
"key": string, Key or passphrase used for wallet export key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"key_derivation_method": optional<string> algorithm to use for export key derivation:
ARGON2I_MOD - derive secured wallet export key (used by default)
ARGON2I_INT - derive secured wallet export key (less secured but faster)
RAW - raw wallet export key provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return:
### Response:
async def export_wallet(handle: int,
export_config_json: str) -> None:
"""
Exports opened wallet to the file.
:param handle: wallet handle returned by indy_open_wallet.
:param export_config_json: JSON containing settings for input operation.
{
"path": path of the file that contains exported wallet content
"key": string, Key or passphrase used for wallet export key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"key_derivation_method": optional<string> algorithm to use for export key derivation:
ARGON2I_MOD - derive secured wallet export key (used by default)
ARGON2I_INT - derive secured wallet export key (less secured but faster)
RAW - raw wallet export key provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return:
"""
logger = logging.getLogger(__name__)
logger.debug("export_wallet: >>> handle: %r, export_config_json: %r",
handle,
export_config_json)
if not hasattr(export_wallet, "cb"):
logger.debug("export_wallet: Creating callback")
export_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_export_config_json = c_char_p(export_config_json.encode('utf-8'))
await do_call('indy_export_wallet',
handle,
c_export_config_json,
export_wallet.cb)
logger.debug("export_wallet: <<<") |
def _set_property(self, val, *args):
"""Private method that sets the value currently of the property"""
if self._prop_write:
val = self._prop_write(val)
return self._setter(Adapter._get_property(self), val, *args) | Private method that sets the value currently of the property | Below is the the instruction that describes the task:
### Input:
Private method that sets the value currently of the property
### Response:
def _set_property(self, val, *args):
"""Private method that sets the value currently of the property"""
if self._prop_write:
val = self._prop_write(val)
return self._setter(Adapter._get_property(self), val, *args) |
def explain_feature(featurename):
'''print the location of single feature and its version
if the feature is located inside a git repository,
this will also print the git-rev and modified files
'''
import os
import featuremonkey
import importlib
import subprocess
def guess_version(feature_module):
if hasattr(feature_module, '__version__'):
return feature_module.__version__
if hasattr(feature_module, 'get_version'):
return feature_module.get_version()
return ('unable to determine version:'
' please add __version__ or get_version()'
' to this feature module!')
def git_rev(module):
stdout, stderr = subprocess.Popen(
["git", "rev-parse", "HEAD"],
cwd=os.path.dirname(module.__file__),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
if 'Not a git repo' in stderr:
return '-'
else:
return stdout.strip()
def git_changes(module):
stdout = subprocess.Popen(
["git", "diff", "--name-only"],
cwd=os.path.dirname(module.__file__),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()[0]
return stdout.strip() or '-'
if featurename in featuremonkey.get_features_from_equation_file(os.environ['PRODUCT_EQUATION_FILENAME']):
print()
print(featurename)
print('-' * 60)
print()
is_subfeature = '.features.' in featurename
try:
feature_module = importlib.import_module(featurename)
except ImportError:
print('Error: unable to import feature "%s"' % featurename)
print('Location: %s' % os.path.dirname(feature_module.__file__))
print()
if is_subfeature:
print('Version: see parent feature')
print()
else:
print('Version: %s' % str(guess_version(feature_module)))
print()
print('git: %s' % git_rev(feature_module))
print()
print('git changed: %s' % '\n\t\t'.join(git_changes(feature_module).split('\n')))
else:
print('No feature named ' + featurename) | print the location of single feature and its version
if the feature is located inside a git repository,
this will also print the git-rev and modified files | Below is the the instruction that describes the task:
### Input:
print the location of single feature and its version
if the feature is located inside a git repository,
this will also print the git-rev and modified files
### Response:
def explain_feature(featurename):
'''print the location of single feature and its version
if the feature is located inside a git repository,
this will also print the git-rev and modified files
'''
import os
import featuremonkey
import importlib
import subprocess
def guess_version(feature_module):
if hasattr(feature_module, '__version__'):
return feature_module.__version__
if hasattr(feature_module, 'get_version'):
return feature_module.get_version()
return ('unable to determine version:'
' please add __version__ or get_version()'
' to this feature module!')
def git_rev(module):
stdout, stderr = subprocess.Popen(
["git", "rev-parse", "HEAD"],
cwd=os.path.dirname(module.__file__),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
if 'Not a git repo' in stderr:
return '-'
else:
return stdout.strip()
def git_changes(module):
stdout = subprocess.Popen(
["git", "diff", "--name-only"],
cwd=os.path.dirname(module.__file__),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()[0]
return stdout.strip() or '-'
if featurename in featuremonkey.get_features_from_equation_file(os.environ['PRODUCT_EQUATION_FILENAME']):
print()
print(featurename)
print('-' * 60)
print()
is_subfeature = '.features.' in featurename
try:
feature_module = importlib.import_module(featurename)
except ImportError:
print('Error: unable to import feature "%s"' % featurename)
print('Location: %s' % os.path.dirname(feature_module.__file__))
print()
if is_subfeature:
print('Version: see parent feature')
print()
else:
print('Version: %s' % str(guess_version(feature_module)))
print()
print('git: %s' % git_rev(feature_module))
print()
print('git changed: %s' % '\n\t\t'.join(git_changes(feature_module).split('\n')))
else:
print('No feature named ' + featurename) |
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._build_mapreduce_func(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).min(axis=1).to_pandas().squeeze()
return self.index[first_result] | Returns index of first non-NaN/NULL value.
Return:
Scalar of index name. | Below is the the instruction that describes the task:
### Input:
Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
### Response:
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._build_mapreduce_func(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).min(axis=1).to_pandas().squeeze()
return self.index[first_result] |
def append(self, item):
"""Adds an item to the list and checks it's a string."""
if not isinstance(item, str):
raise TypeError(
'Members of this object must be strings. '
'You supplied \"%s\"' % type(item))
list.append(self, item) | Adds an item to the list and checks it's a string. | Below is the the instruction that describes the task:
### Input:
Adds an item to the list and checks it's a string.
### Response:
def append(self, item):
"""Adds an item to the list and checks it's a string."""
if not isinstance(item, str):
raise TypeError(
'Members of this object must be strings. '
'You supplied \"%s\"' % type(item))
list.append(self, item) |
def kogge_stone(a, b, cin=0):
"""
Creates a Kogge-Stone adder given two inputs
:param WireVector a, b: The two WireVectors to add up (bitwidths don't need to match)
:param cin: An optimal carry in WireVector or value
:return: a Wirevector representing the output of the adder
The Kogge-Stone adder is a fast tree-based adder with O(log(n))
propagation delay, useful for performance critical designs. However,
it has O(n log(n)) area usage, and large fan out.
"""
a, b = libutils.match_bitwidth(a, b)
prop_orig = a ^ b
prop_bits = [i for i in prop_orig]
gen_bits = [i for i in a & b]
prop_dist = 1
# creation of the carry calculation
while prop_dist < len(a):
for i in reversed(range(prop_dist, len(a))):
prop_old = prop_bits[i]
gen_bits[i] = gen_bits[i] | (prop_old & gen_bits[i - prop_dist])
if i >= prop_dist * 2: # to prevent creating unnecessary nets and wires
prop_bits[i] = prop_old & prop_bits[i - prop_dist]
prop_dist *= 2
# assembling the result of the addition
# preparing the cin (and conveniently shifting the gen bits)
gen_bits.insert(0, pyrtl.as_wires(cin))
return pyrtl.concat_list(gen_bits) ^ prop_orig | Creates a Kogge-Stone adder given two inputs
:param WireVector a, b: The two WireVectors to add up (bitwidths don't need to match)
:param cin: An optimal carry in WireVector or value
:return: a Wirevector representing the output of the adder
The Kogge-Stone adder is a fast tree-based adder with O(log(n))
propagation delay, useful for performance critical designs. However,
it has O(n log(n)) area usage, and large fan out. | Below is the instruction that describes the task:
### Input:
Creates a Kogge-Stone adder given two inputs
:param WireVector a, b: The two WireVectors to add up (bitwidths don't need to match)
:param cin: An optimal carry in WireVector or value
:return: a Wirevector representing the output of the adder
The Kogge-Stone adder is a fast tree-based adder with O(log(n))
propagation delay, useful for performance critical designs. However,
it has O(n log(n)) area usage, and large fan out.
### Response:
def kogge_stone(a, b, cin=0):
"""
Creates a Kogge-Stone adder given two inputs
:param WireVector a, b: The two WireVectors to add up (bitwidths don't need to match)
:param cin: An optimal carry in WireVector or value
:return: a Wirevector representing the output of the adder
The Kogge-Stone adder is a fast tree-based adder with O(log(n))
propagation delay, useful for performance critical designs. However,
it has O(n log(n)) area usage, and large fan out.
"""
a, b = libutils.match_bitwidth(a, b)
prop_orig = a ^ b
prop_bits = [i for i in prop_orig]
gen_bits = [i for i in a & b]
prop_dist = 1
# creation of the carry calculation
while prop_dist < len(a):
for i in reversed(range(prop_dist, len(a))):
prop_old = prop_bits[i]
gen_bits[i] = gen_bits[i] | (prop_old & gen_bits[i - prop_dist])
if i >= prop_dist * 2: # to prevent creating unnecessary nets and wires
prop_bits[i] = prop_old & prop_bits[i - prop_dist]
prop_dist *= 2
# assembling the result of the addition
# preparing the cin (and conveniently shifting the gen bits)
gen_bits.insert(0, pyrtl.as_wires(cin))
return pyrtl.concat_list(gen_bits) ^ prop_orig |
def _table_attrs(table):
'''
Helper function to find valid table attributes
'''
cmd = ['osqueryi'] + ['--json'] + ['pragma table_info({0})'.format(table)]
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] == 0:
attrs = []
text = salt.utils.json.loads(res['stdout'])
for item in text:
attrs.append(item['name'])
return attrs
return False | Helper function to find valid table attributes | Below is the instruction that describes the task:
### Input:
Helper function to find valid table attributes
### Response:
def _table_attrs(table):
'''
Helper function to find valid table attributes
'''
cmd = ['osqueryi'] + ['--json'] + ['pragma table_info({0})'.format(table)]
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] == 0:
attrs = []
text = salt.utils.json.loads(res['stdout'])
for item in text:
attrs.append(item['name'])
return attrs
return False |
def _parse_title(line_iter, cur_line, conf):
"""
Parse "title" in grub v1 config
"""
title = []
conf['title'].append(title)
title.append(('title_name', cur_line.split('title', 1)[1].strip()))
while (True):
line = next(line_iter)
if line.startswith("title "):
return line
cmd, opt = _parse_cmd(line)
title.append((cmd, opt)) | Parse "title" in grub v1 config | Below is the instruction that describes the task:
### Input:
Parse "title" in grub v1 config
### Response:
def _parse_title(line_iter, cur_line, conf):
"""
Parse "title" in grub v1 config
"""
title = []
conf['title'].append(title)
title.append(('title_name', cur_line.split('title', 1)[1].strip()))
while (True):
line = next(line_iter)
if line.startswith("title "):
return line
cmd, opt = _parse_cmd(line)
title.append((cmd, opt)) |
def run(configobj, wcsmap=None):
"""
Initial example by Nadia ran MD with configobj EPAR using:
It can be run in one of two ways:
from stsci.tools import teal
1. Passing a config object to teal
teal.teal('drizzlepac/pars/astrodrizzle.cfg')
2. Passing a task name:
teal.teal('astrodrizzle')
The example config files are in drizzlepac/pars
"""
# turn on logging, redirecting stdout/stderr messages to a log file
# while also printing them out to stdout as well
# also, initialize timing of processing steps
#
# We need to define a default logfile name from the user's parameters
input_list, output, ivmlist, odict = \
processInput.processFilenames(configobj['input'])
if output is not None:
def_logname = output
elif len(input_list) > 0:
def_logname = input_list[0]
else:
print(textutil.textbox(
"ERROR:\nNo valid input files found! Please restart the task "
"and check the value for the 'input' parameter."), file=sys.stderr)
def_logname = None
return
clean = configobj['STATE OF INPUT FILES']['clean']
procSteps = util.ProcSteps()
print("AstroDrizzle Version {:s} ({:s}) started at: {:s}\n"
.format(__version__, __version_date__, util._ptime()[0]))
util.print_pkg_versions(log=log)
log.debug('')
log.debug(
"==== AstroDrizzle was invoked with the following parameters: ===="
)
log.debug('')
util.print_cfg(configobj, log.debug)
try:
# Define list of imageObject instances and output WCSObject instance
# based on input paramters
imgObjList = None
procSteps.addStep('Initialization')
imgObjList, outwcs = processInput.setCommonInput(configobj)
procSteps.endStep('Initialization')
if imgObjList is None or not imgObjList:
errmsg = "No valid images found for processing!\n"
errmsg += "Check log file for full details.\n"
errmsg += "Exiting AstroDrizzle now..."
print(textutil.textbox(errmsg, width=65))
print(textutil.textbox(
'ERROR:\nAstroDrizzle Version {:s} encountered a problem! '
'Processing terminated at {:s}.'
.format(__version__, util._ptime()[0])), file=sys.stderr)
return
log.info("USER INPUT PARAMETERS common to all Processing Steps:")
util.printParams(configobj, log=log)
# Call rest of MD steps...
#create static masks for each image
staticMask.createStaticMask(imgObjList, configobj,
procSteps=procSteps)
#subtract the sky
sky.subtractSky(imgObjList, configobj, procSteps=procSteps)
# _dbg_dump_virtual_outputs(imgObjList)
#drizzle to separate images
adrizzle.drizSeparate(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
# _dbg_dump_virtual_outputs(imgObjList)
#create the median images from the driz sep images
createMedian.createMedian(imgObjList, configobj, procSteps=procSteps)
#blot the images back to the original reference frame
ablot.runBlot(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
#look for cosmic rays
drizCR.rundrizCR(imgObjList, configobj, procSteps=procSteps)
#Make your final drizzled image
adrizzle.drizFinal(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
print()
print("AstroDrizzle Version {:s} is finished processing at {:s}.\n"
.format(__version__, util._ptime()[0]))
except:
clean = False
print(textutil.textbox(
"ERROR:\nAstroDrizzle Version {:s} encountered a problem! "
"Processing terminated at {:s}."
.format(__version__, util._ptime()[0])), file=sys.stderr)
raise
finally:
procSteps.reportTimes()
if imgObjList:
for image in imgObjList:
if clean:
image.clean()
image.close()
del imgObjList
del outwcs | Initial example by Nadia ran MD with configobj EPAR using:
It can be run in one of two ways:
from stsci.tools import teal
1. Passing a config object to teal
teal.teal('drizzlepac/pars/astrodrizzle.cfg')
2. Passing a task name:
teal.teal('astrodrizzle')
The example config files are in drizzlepac/pars | Below is the instruction that describes the task:
### Input:
Initial example by Nadia ran MD with configobj EPAR using:
It can be run in one of two ways:
from stsci.tools import teal
1. Passing a config object to teal
teal.teal('drizzlepac/pars/astrodrizzle.cfg')
2. Passing a task name:
teal.teal('astrodrizzle')
The example config files are in drizzlepac/pars
### Response:
def run(configobj, wcsmap=None):
"""
Initial example by Nadia ran MD with configobj EPAR using:
It can be run in one of two ways:
from stsci.tools import teal
1. Passing a config object to teal
teal.teal('drizzlepac/pars/astrodrizzle.cfg')
2. Passing a task name:
teal.teal('astrodrizzle')
The example config files are in drizzlepac/pars
"""
# turn on logging, redirecting stdout/stderr messages to a log file
# while also printing them out to stdout as well
# also, initialize timing of processing steps
#
# We need to define a default logfile name from the user's parameters
input_list, output, ivmlist, odict = \
processInput.processFilenames(configobj['input'])
if output is not None:
def_logname = output
elif len(input_list) > 0:
def_logname = input_list[0]
else:
print(textutil.textbox(
"ERROR:\nNo valid input files found! Please restart the task "
"and check the value for the 'input' parameter."), file=sys.stderr)
def_logname = None
return
clean = configobj['STATE OF INPUT FILES']['clean']
procSteps = util.ProcSteps()
print("AstroDrizzle Version {:s} ({:s}) started at: {:s}\n"
.format(__version__, __version_date__, util._ptime()[0]))
util.print_pkg_versions(log=log)
log.debug('')
log.debug(
"==== AstroDrizzle was invoked with the following parameters: ===="
)
log.debug('')
util.print_cfg(configobj, log.debug)
try:
# Define list of imageObject instances and output WCSObject instance
# based on input paramters
imgObjList = None
procSteps.addStep('Initialization')
imgObjList, outwcs = processInput.setCommonInput(configobj)
procSteps.endStep('Initialization')
if imgObjList is None or not imgObjList:
errmsg = "No valid images found for processing!\n"
errmsg += "Check log file for full details.\n"
errmsg += "Exiting AstroDrizzle now..."
print(textutil.textbox(errmsg, width=65))
print(textutil.textbox(
'ERROR:\nAstroDrizzle Version {:s} encountered a problem! '
'Processing terminated at {:s}.'
.format(__version__, util._ptime()[0])), file=sys.stderr)
return
log.info("USER INPUT PARAMETERS common to all Processing Steps:")
util.printParams(configobj, log=log)
# Call rest of MD steps...
#create static masks for each image
staticMask.createStaticMask(imgObjList, configobj,
procSteps=procSteps)
#subtract the sky
sky.subtractSky(imgObjList, configobj, procSteps=procSteps)
# _dbg_dump_virtual_outputs(imgObjList)
#drizzle to separate images
adrizzle.drizSeparate(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
# _dbg_dump_virtual_outputs(imgObjList)
#create the median images from the driz sep images
createMedian.createMedian(imgObjList, configobj, procSteps=procSteps)
#blot the images back to the original reference frame
ablot.runBlot(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
#look for cosmic rays
drizCR.rundrizCR(imgObjList, configobj, procSteps=procSteps)
#Make your final drizzled image
adrizzle.drizFinal(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
print()
print("AstroDrizzle Version {:s} is finished processing at {:s}.\n"
.format(__version__, util._ptime()[0]))
except:
clean = False
print(textutil.textbox(
"ERROR:\nAstroDrizzle Version {:s} encountered a problem! "
"Processing terminated at {:s}."
.format(__version__, util._ptime()[0])), file=sys.stderr)
raise
finally:
procSteps.reportTimes()
if imgObjList:
for image in imgObjList:
if clean:
image.clean()
image.close()
del imgObjList
del outwcs |
def load_formatter_from_file(filename, formattername="CustomFormatter",
**options):
"""Load a formatter from a file.
This method expects a file located relative to the current working
directory, which contains a class named CustomFormatter. By default,
it expects the Formatter to be named CustomFormatter; you can specify
your own class name as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Formatter.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
exec(open(filename, 'rb').read(), custom_namespace)
# Retrieve the class `formattername` from that namespace
if formattername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(formattername, filename))
formatter_class = custom_namespace[formattername]
# And finally instantiate it with the options
return formatter_class(**options)
except IOError as err:
raise ClassNotFound('cannot read %s' % filename)
except ClassNotFound as err:
raise
except Exception as err:
raise ClassNotFound('error when loading custom formatter: %s' % err) | Load a formatter from a file.
This method expects a file located relative to the current working
directory, which contains a class named CustomFormatter. By default,
it expects the Formatter to be named CustomFormatter; you can specify
your own class name as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Formatter.
.. versionadded:: 2.2 | Below is the instruction that describes the task:
### Input:
Load a formatter from a file.
This method expects a file located relative to the current working
directory, which contains a class named CustomFormatter. By default,
it expects the Formatter to be named CustomFormatter; you can specify
your own class name as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Formatter.
.. versionadded:: 2.2
### Response:
def load_formatter_from_file(filename, formattername="CustomFormatter",
**options):
"""Load a formatter from a file.
This method expects a file located relative to the current working
directory, which contains a class named CustomFormatter. By default,
it expects the Formatter to be named CustomFormatter; you can specify
your own class name as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Formatter.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
exec(open(filename, 'rb').read(), custom_namespace)
# Retrieve the class `formattername` from that namespace
if formattername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(formattername, filename))
formatter_class = custom_namespace[formattername]
# And finally instantiate it with the options
return formatter_class(**options)
except IOError as err:
raise ClassNotFound('cannot read %s' % filename)
except ClassNotFound as err:
raise
except Exception as err:
raise ClassNotFound('error when loading custom formatter: %s' % err) |
def volumes(val, **kwargs): # pylint: disable=unused-argument
'''
Should be a list of absolute paths
'''
val = helpers.translate_stringlist(val)
for item in val:
if not os.path.isabs(item):
raise SaltInvocationError(
'\'{0}\' is not an absolute path'.format(item)
)
return val | Should be a list of absolute paths | Below is the the instruction that describes the task:
### Input:
Should be a list of absolute paths
### Response:
def volumes(val, **kwargs): # pylint: disable=unused-argument
'''
Should be a list of absolute paths
'''
val = helpers.translate_stringlist(val)
for item in val:
if not os.path.isabs(item):
raise SaltInvocationError(
'\'{0}\' is not an absolute path'.format(item)
)
return val |
def convert(self, request, response, data):
"""
Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion.
"""
if self.modifier.param in (None, 'canonical', 'local'):
return str(request.environ['SERVER_PORT'])
elif self.modifier.param == 'remote':
return str(request.environ.get('REMOTE_PORT', '-'))
return "-" | Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion. | Below is the instruction that describes the task:
### Input:
Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion.
### Response:
def convert(self, request, response, data):
"""
Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion.
"""
if self.modifier.param in (None, 'canonical', 'local'):
return str(request.environ['SERVER_PORT'])
elif self.modifier.param == 'remote':
return str(request.environ.get('REMOTE_PORT', '-'))
return "-" |
def on_transparency_value_changed(self, hscale):
"""Changes the value of background_transparency in dconf
"""
value = hscale.get_value()
self.prefDlg.set_colors_from_settings()
self.settings.styleBackground.set_int('transparency', MAX_TRANSPARENCY - int(value)) | Changes the value of background_transparency in dconf | Below is the the instruction that describes the task:
### Input:
Changes the value of background_transparency in dconf
### Response:
def on_transparency_value_changed(self, hscale):
"""Changes the value of background_transparency in dconf
"""
value = hscale.get_value()
self.prefDlg.set_colors_from_settings()
self.settings.styleBackground.set_int('transparency', MAX_TRANSPARENCY - int(value)) |
def findall(self, title=None):
"""Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty)
"""
if title is None:
return list(self._sheets)
if title not in self._titles:
return []
return list(self._titles[title]) | Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty) | Below is the instruction that describes the task:
### Input:
Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty)
### Response:
def findall(self, title=None):
"""Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty)
"""
if title is None:
return list(self._sheets)
if title not in self._titles:
return []
return list(self._titles[title]) |
def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds) | Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name | Below is the instruction that describes the task:
### Input:
Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
### Response:
def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds) |
def filter_incomplete_spectra(self, flimit=1000, percAccept=85):
"""Remove all data points that belong to spectra that did not retain at
least **percAccept** percent of the number of data points.
..warning::
This function does not honor additional dimensions (e.g.,
timesteps) yet!
"""
assert percAccept > 0 and percAccept < 100
def _retain_only_complete_spectra(item, fmax, acceptN):
"""Function called using pd.filter, applied to all spectra in the
data set. Return true if the number of data points <= **fmax** in
item is equal, or larger, than **acceptN**.
Parameters
----------
item : :py:class:`pandas.DataFrame`
dataframe containing one spectrum
fmax : float
maximum frequency up to which data points are counted
acceptN : int
the number of data points required to pass this test
Returns
-------
true : bool
if enough data points are present
false : bool
if not enough data points are present
"""
frequencies = item['frequency'].loc[item['frequency'] < fmax]
fN = frequencies.size
if fN >= acceptN:
return True
return False
group_abmn = self.data.groupby(['a', 'b', 'm', 'n'])
frequencies = np.array(
list(sorted(self.data.groupby('frequency').groups.keys()))
)
assert flimit >= frequencies.min() and flimit <= frequencies.max()
Nlimit = len(np.where(frequencies <= flimit)[0])
Naccept = np.ceil(Nlimit * percAccept / 100.0)
self.data = group_abmn.filter(
_retain_only_complete_spectra, fmax=flimit, acceptN=Naccept
).copy() | Remove all data points that belong to spectra that did not retain at
least **percAccept** percent of the number of data points.
..warning::
This function does not honor additional dimensions (e.g.,
timesteps) yet! | Below is the instruction that describes the task:
### Input:
Remove all data points that belong to spectra that did not retain at
least **percAccept** percent of the number of data points.
..warning::
This function does not honor additional dimensions (e.g.,
timesteps) yet!
### Response:
def filter_incomplete_spectra(self, flimit=1000, percAccept=85):
"""Remove all data points that belong to spectra that did not retain at
least **percAccept** percent of the number of data points.
..warning::
This function does not honor additional dimensions (e.g.,
timesteps) yet!
"""
assert percAccept > 0 and percAccept < 100
def _retain_only_complete_spectra(item, fmax, acceptN):
"""Function called using pd.filter, applied to all spectra in the
data set. Return true if the number of data points <= **fmax** in
item is equal, or larger, than **acceptN**.
Parameters
----------
item : :py:class:`pandas.DataFrame`
dataframe containing one spectrum
fmax : float
maximum frequency up to which data points are counted
acceptN : int
the number of data points required to pass this test
Returns
-------
true : bool
if enough data points are present
false : bool
if not enough data points are present
"""
frequencies = item['frequency'].loc[item['frequency'] < fmax]
fN = frequencies.size
if fN >= acceptN:
return True
return False
group_abmn = self.data.groupby(['a', 'b', 'm', 'n'])
frequencies = np.array(
list(sorted(self.data.groupby('frequency').groups.keys()))
)
assert flimit >= frequencies.min() and flimit <= frequencies.max()
Nlimit = len(np.where(frequencies <= flimit)[0])
Naccept = np.ceil(Nlimit * percAccept / 100.0)
self.data = group_abmn.filter(
_retain_only_complete_spectra, fmax=flimit, acceptN=Naccept
).copy() |
def gsum_(self, col: str, index_col: bool=True) -> "Ds":
"""
Group by and sum column
:param col: column to group
:type col: str
:param index_col:
:type index_col: bool
:return: a dataswim instance
:rtype: Ds
:example: ``ds2 = ds.gsum("Col 1")``
"""
try:
df = self.df.copy()
df = df.groupby([col]).sum()
if index_col is True:
df[col] = df.index.values
return self._duplicate_(df)
except Exception as e:
self.err(e, self.gsum_, "Can not groupsum column") | Group by and sum column
:param col: column to group
:type col: str
:param index_col:
:type index_col: bool
:return: a dataswim instance
:rtype: Ds
:example: ``ds2 = ds.gsum("Col 1")`` | Below is the instruction that describes the task:
### Input:
Group by and sum column
:param col: column to group
:type col: str
:param index_col:
:type index_col: bool
:return: a dataswim instance
:rtype: Ds
:example: ``ds2 = ds.gsum("Col 1")``
### Response:
def gsum_(self, col: str, index_col: bool=True) -> "Ds":
"""
Group by and sum column
:param col: column to group
:type col: str
:param index_col:
:type index_col: bool
:return: a dataswim instance
:rtype: Ds
:example: ``ds2 = ds.gsum("Col 1")``
"""
try:
df = self.df.copy()
df = df.groupby([col]).sum()
if index_col is True:
df[col] = df.index.values
return self._duplicate_(df)
except Exception as e:
self.err(e, self.gsum_, "Can not groupsum column") |
def append(self, item):
"""Append one item to the list.
Args:
item: Item to be appended.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
if isinstance(item, list):
self.extend(item)
elif not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.append(self, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self[0].__class__.__name__) | Append one item to the list.
Args:
item: Item to be appended.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored. | Below is the instruction that describes the task:
### Input:
Append one item to the list.
Args:
item: Item to be appended.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
### Response:
def append(self, item):
"""Append one item to the list.
Args:
item: Item to be appended.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
if isinstance(item, list):
self.extend(item)
elif not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.append(self, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self[0].__class__.__name__) |
def parse_bookmark_data (data):
"""Parse data string.
Return iterator for bookmarks of the form (url, name).
Bookmarks are not sorted.
"""
for url, name in parse_bookmark_json(json.loads(data)):
yield url, name | Parse data string.
Return iterator for bookmarks of the form (url, name).
Bookmarks are not sorted. | Below is the instruction that describes the task:
### Input:
Parse data string.
Return iterator for bookmarks of the form (url, name).
Bookmarks are not sorted.
### Response:
def parse_bookmark_data (data):
"""Parse data string.
Return iterator for bookmarks of the form (url, name).
Bookmarks are not sorted.
"""
for url, name in parse_bookmark_json(json.loads(data)):
yield url, name |
def update_resolver_nameservers(resolver, nameservers, nameserver_filename):
"""
Update a resolver's nameservers. The following priority is taken:
1. Nameservers list provided as an argument
2. A filename containing a list of nameservers
3. The original nameservers associated with the resolver
"""
if nameservers:
resolver.nameservers = nameservers
elif nameserver_filename:
nameservers = get_stripped_file_lines(nameserver_filename)
resolver.nameservers = nameservers
else:
# Use original nameservers
pass
return resolver | Update a resolver's nameservers. The following priority is taken:
1. Nameservers list provided as an argument
2. A filename containing a list of nameservers
3. The original nameservers associated with the resolver | Below is the instruction that describes the task:
### Input:
Update a resolver's nameservers. The following priority is taken:
1. Nameservers list provided as an argument
2. A filename containing a list of nameservers
3. The original nameservers associated with the resolver
### Response:
def update_resolver_nameservers(resolver, nameservers, nameserver_filename):
"""
Update a resolver's nameservers. The following priority is taken:
1. Nameservers list provided as an argument
2. A filename containing a list of nameservers
3. The original nameservers associated with the resolver
"""
if nameservers:
resolver.nameservers = nameservers
elif nameserver_filename:
nameservers = get_stripped_file_lines(nameserver_filename)
resolver.nameservers = nameservers
else:
# Use original nameservers
pass
return resolver |
def Queue(hub, size):
"""
::
+----------+
send --> | Queue |
| (buffer) | --> recv
+----------+
A Queue may also only have exactly one sender and recver. A Queue however
has a fifo buffer of a custom size. Sends to the Queue won't block until
the buffer becomes full::
h = vanilla.Hub()
q = h.queue(1)
q.send(1) # safe from deadlock
# q.send(1) # this would deadlock however as the queue only has a
# buffer size of 1
q.recv() # returns 1
"""
assert size > 0
def main(upstream, downstream, size):
queue = collections.deque()
while True:
if downstream.halted:
# no one is downstream, so shutdown
upstream.close()
return
watch = []
if queue:
watch.append(downstream)
else:
# if the buffer is empty, and no one is upstream, shutdown
if upstream.halted:
downstream.close()
return
# if are upstream is still available, and there is spare room in
# the buffer, watch upstream as well
if not upstream.halted and len(queue) < size:
watch.append(upstream)
try:
ch, item = hub.select(watch)
except vanilla.exception.Halt:
continue
if ch == upstream:
queue.append(item)
elif ch == downstream:
item = queue.popleft()
downstream.send(item)
upstream = hub.pipe()
downstream = hub.pipe()
# TODO: rethink this
old_connect = upstream.sender.connect
def connect(recver):
old_connect(recver)
return downstream.recver
upstream.sender.connect = connect
hub.spawn(main, upstream.recver, downstream.sender, size)
return Pair(upstream.sender, downstream.recver) | ::
+----------+
send --> | Queue |
| (buffer) | --> recv
+----------+
A Queue may also only have exactly one sender and recver. A Queue however
has a fifo buffer of a custom size. Sends to the Queue won't block until
the buffer becomes full::
h = vanilla.Hub()
q = h.queue(1)
q.send(1) # safe from deadlock
# q.send(1) # this would deadlock however as the queue only has a
# buffer size of 1
q.recv() # returns 1 | Below is the instruction that describes the task:
### Input:
::
+----------+
send --> | Queue |
| (buffer) | --> recv
+----------+
A Queue may also only have exactly one sender and recver. A Queue however
has a fifo buffer of a custom size. Sends to the Queue won't block until
the buffer becomes full::
h = vanilla.Hub()
q = h.queue(1)
q.send(1) # safe from deadlock
# q.send(1) # this would deadlock however as the queue only has a
# buffer size of 1
q.recv() # returns 1
### Response:
def Queue(hub, size):
    """
    ::

                   +----------+
        send -->   |  Queue   |
                   | (buffer) | --> recv
                   +----------+

    A Queue may also only have exactly one sender and recver. A Queue however
    has a fifo buffer of a custom size. Sends to the Queue won't block until
    the buffer becomes full::

        h = vanilla.Hub()
        q = h.queue(1)
        q.send(1)      # safe from deadlock
        # q.send(1)    # this would deadlock however as the queue only has a
                       # buffer size of 1
        q.recv()       # returns 1
    """
    assert size > 0

    def main(upstream, downstream, size):
        # Shuttle items from upstream into the fifo and from the fifo to
        # downstream until both sides have gone away.
        queue = collections.deque()
        while True:
            if downstream.halted:
                # no one is downstream, so shutdown
                upstream.close()
                return
            watch = []
            if queue:
                # buffered items available: offer one downstream
                watch.append(downstream)
            else:
                # if the buffer is empty, and no one is upstream, shutdown
                if upstream.halted:
                    downstream.close()
                    return
            # if upstream is still available, and there is spare room in
            # the buffer, watch upstream as well
            if not upstream.halted and len(queue) < size:
                watch.append(upstream)
            try:
                ch, item = hub.select(watch)
            except vanilla.exception.Halt:
                # a peer hung up mid-select; re-evaluate the loop conditions
                continue
            if ch == upstream:
                queue.append(item)
            elif ch == downstream:
                item = queue.popleft()
                downstream.send(item)

    upstream = hub.pipe()
    downstream = hub.pipe()

    # TODO: rethink this
    # Patch connect so that wiring a recver onto our sender hands back the
    # queue's downstream recver, keeping pipeline chaining working.
    old_connect = upstream.sender.connect

    def connect(recver):
        old_connect(recver)
        return downstream.recver

    upstream.sender.connect = connect

    hub.spawn(main, upstream.recver, downstream.sender, size)
    return Pair(upstream.sender, downstream.recver)
def put_calendar_job(self, calendar_id, job_id, params=None):
"""
`<>`_
:arg calendar_id: The ID of the calendar to modify
:arg job_id: The ID of the job to add to the calendar
"""
for param in (calendar_id, job_id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path("_ml", "calendars", calendar_id, "jobs", job_id),
params=params,
) | `<>`_
:arg calendar_id: The ID of the calendar to modify
:arg job_id: The ID of the job to add to the calendar | Below is the instruction that describes the task:
### Input:
`<>`_
:arg calendar_id: The ID of the calendar to modify
:arg job_id: The ID of the job to add to the calendar
### Response:
def put_calendar_job(self, calendar_id, job_id, params=None):
    """
    `<>`_

    :arg calendar_id: The ID of the calendar to modify
    :arg job_id: The ID of the job to add to the calendar
    """
    # Both path components are mandatory; reject empty/None values up front.
    if calendar_id in SKIP_IN_PATH or job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    endpoint = _make_path("_ml", "calendars", calendar_id, "jobs", job_id)
    return self.transport.perform_request("PUT", endpoint, params=params)
def dip_pval_tabinterpol(dip, N):
'''
dip - dip value computed from dip_from_cdf
N - number of observations
'''
# if qDiptab_df is None:
# raise DataError("Tabulated p-values not available. See installation instructions.")
if np.isnan(N) or N < 10:
return np.nan
qDiptab_dict = {'0': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0714285714285714,
8: 0.0625,
9: 0.0555555555555556,
10: 0.05,
15: 0.0341378172277919,
20: 0.033718563622065004,
30: 0.0262674485075642,
50: 0.0218544781364545,
100: 0.0164852597438403,
200: 0.0111236388849688,
500: 0.007554885975761959,
1000: 0.00541658127872122,
2000: 0.0039043999745055702,
5000: 0.00245657785440433,
10000: 0.00174954269199566,
20000: 0.00119458814106091,
40000: 0.000852415648011777,
72000: 0.000644400053256997},
'0.01': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0714285714285714,
8: 0.0625,
9: 0.0613018090298924,
10: 0.0610132555623269,
15: 0.0546284208048975,
20: 0.0474333740698401,
30: 0.0395871890405749,
50: 0.0314400501999916,
100: 0.022831985803043,
200: 0.0165017735429825,
500: 0.0106403461127515,
1000: 0.0076028674530018705,
2000: 0.0054166418179658294,
5000: 0.0034480928223332603,
10000: 0.00244595133885302,
20000: 0.00173435346896287,
40000: 0.00122883479310665,
72000: 0.000916872204484283},
'0.02': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0714285714285714,
8: 0.0656911994503283,
9: 0.0658615858179315,
10: 0.0651627333214016,
15: 0.0572191260231815,
20: 0.0490891387627092,
30: 0.0414574606741673,
50: 0.0329008160470834,
100: 0.0238917486442849,
200: 0.0172594157992489,
500: 0.0111255573208294,
1000: 0.00794987834644799,
2000: 0.0056617138625232296,
5000: 0.00360473943713036,
10000: 0.00255710802275612,
20000: 0.0018119443458468102,
40000: 0.0012846930445701802,
72000: 0.0009579329467655321},
'0.05': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0725717816250742,
8: 0.0738651136071762,
9: 0.0732651142535317,
10: 0.0718321619656165,
15: 0.0610087367689692,
20: 0.052719998201553,
30: 0.0444462614069956,
50: 0.0353023819040016,
100: 0.0256559537977579,
200: 0.0185259426032926,
500: 0.0119353655328931,
1000: 0.0085216518343594,
2000: 0.00607120971135229,
5000: 0.0038632654801084897,
10000: 0.00273990955227265,
20000: 0.00194259470485893,
40000: 0.0013761765052555301,
72000: 0.00102641863872347},
'0.1': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0817315478539489,
8: 0.0820045917762512,
9: 0.0803941629593475,
10: 0.077966212182459,
15: 0.0642657137330444,
20: 0.0567795509056742,
30: 0.0473998525042686,
50: 0.0377279973102482,
100: 0.0273987414570948,
200: 0.0197917612637521,
500: 0.0127411306411808,
1000: 0.00909775605533253,
2000: 0.0064762535755248,
5000: 0.00412089506752692,
10000: 0.0029225480567908,
20000: 0.00207173719623868,
40000: 0.0014675150200632301,
72000: 0.0010949515421800199},
'0.2': {4: 0.125,
5: 0.1,
6: 0.0924514470941933,
7: 0.0940590181922527,
8: 0.0922700601131892,
9: 0.0890432420913848,
10: 0.0852835359834564,
15: 0.0692234107989591,
20: 0.0620134674468181,
30: 0.0516677370374349,
50: 0.0410699984399582,
100: 0.0298109370830153,
200: 0.0215233745778454,
500: 0.0138524542751814,
1000: 0.00988924521014078,
2000: 0.00703573098590029,
5000: 0.00447640050137479,
10000: 0.00317374638422465,
20000: 0.00224993202086955,
40000: 0.00159376453672466,
72000: 0.00118904090369415},
'0.3': {4: 0.125,
5: 0.1,
6: 0.103913431059949,
7: 0.10324449080087102,
8: 0.0996737189599363,
9: 0.0950811420297928,
10: 0.0903204173707099,
15: 0.0745462114365167,
20: 0.0660163872069048,
30: 0.0551037519001622,
50: 0.0437704598622665,
100: 0.0317771496530253,
200: 0.0229259769870428,
500: 0.0147536004288476,
1000: 0.0105309297090482,
2000: 0.007494212545892991,
5000: 0.00476555693102276,
10000: 0.00338072258533527,
20000: 0.00239520831473419,
40000: 0.00169668445506151,
72000: 0.00126575197699874},
'0.4': {4: 0.125,
5: 0.10872059357632902,
6: 0.113885220640212,
7: 0.110964599995697,
8: 0.10573353180273701,
9: 0.0999380897811046,
10: 0.0943334983745117,
15: 0.0792030878981762,
20: 0.0696506075066401,
30: 0.058265005347492994,
50: 0.0462925642671299,
100: 0.0336073821590387,
200: 0.024243848341112,
500: 0.0155963185751048,
1000: 0.0111322726797384,
2000: 0.007920878896017329,
5000: 0.005037040297500721,
10000: 0.0035724387653598205,
20000: 0.00253036792824665,
40000: 0.0017925341833790601,
72000: 0.00133750966361506},
'0.5': {4: 0.125,
5: 0.12156379802641401,
6: 0.123071187137781,
7: 0.11780784650433501,
8: 0.11103512984770501,
9: 0.10415356007586801,
10: 0.0977817630384725,
15: 0.083621033469191,
20: 0.0733437740592714,
30: 0.0614510857304343,
50: 0.048851155289608,
100: 0.0354621760592113,
200: 0.025584358256487003,
500: 0.0164519238025286,
1000: 0.0117439009052552,
2000: 0.008355737247680059,
5000: 0.0053123924740821294,
10000: 0.00376734715752209,
20000: 0.00266863168718114,
40000: 0.00189061261635977,
72000: 0.00141049709228472},
'0.6': {4: 0.125,
5: 0.134318918697053,
6: 0.13186973390253,
7: 0.124216086833531,
8: 0.11592005574998801,
9: 0.10800780236193198,
10: 0.102180866696628,
15: 0.0881198482202905,
20: 0.0776460662880254,
30: 0.0649164408053978,
50: 0.0516145897865757,
100: 0.0374805844550272,
200: 0.0270252129816288,
500: 0.017383057902553,
1000: 0.012405033293814,
2000: 0.00882439333812351,
5000: 0.00560929919359959,
10000: 0.00397885007249132,
20000: 0.0028181999035216,
40000: 0.00199645471886179,
72000: 0.00148936709298802},
'0.7': {4: 0.13255954878268902,
5: 0.14729879897625198,
6: 0.140564796497941,
7: 0.130409013968317,
8: 0.120561479262465,
9: 0.112512617124951,
10: 0.10996094814295099,
15: 0.093124666680253,
20: 0.0824558407118372,
30: 0.0689178762425442,
50: 0.0548121932066019,
100: 0.0398046179116599,
200: 0.0286920262150517,
500: 0.0184503949887735,
1000: 0.0131684179320803,
2000: 0.009367858207170609,
5000: 0.00595352728377949,
10000: 0.00422430013176233,
20000: 0.00299137548142077,
40000: 0.00211929748381704,
72000: 0.00158027541945626},
'0.8': {4: 0.15749736904023498,
5: 0.161085025702604,
6: 0.14941924112913002,
7: 0.136639642123068,
8: 0.125558759034845,
9: 0.12291503348081699,
10: 0.11884476721158699,
15: 0.0996694393390689,
20: 0.08834462700173701,
30: 0.0739249074078291,
50: 0.0588230482851366,
100: 0.0427283846799166,
200: 0.0308006766341406,
500: 0.0198162679782071,
1000: 0.0141377942603047,
2000: 0.01005604603884,
5000: 0.00639092280563517,
10000: 0.00453437508148542,
20000: 0.00321024899920135,
40000: 0.0022745769870358102,
72000: 0.00169651643860074},
'0.9': {4: 0.18740187880755899,
5: 0.176811998476076,
6: 0.159137064572627,
7: 0.144240669035124,
8: 0.141841067033899,
9: 0.136412639387084,
10: 0.130462149644819,
15: 0.11008749690090598,
20: 0.0972346018122903,
30: 0.0814791379390127,
50: 0.0649136324046767,
100: 0.047152783315718,
200: 0.0339967814293504,
500: 0.0218781313182203,
1000: 0.0156148055023058,
2000: 0.0111019116837591,
5000: 0.00705566126234625,
10000: 0.00500178808402368,
20000: 0.00354362220314155,
40000: 0.00250999080890397,
72000: 0.0018730618472582602},
'0.95': {4: 0.20726978858735998,
5: 0.18639179602794398,
6: 0.164769608513302,
7: 0.159903395678336,
8: 0.153978303998561,
9: 0.14660378495401902,
10: 0.139611395137099,
15: 0.118760769203664,
20: 0.105130218270636,
30: 0.0881689143126666,
50: 0.0702737877191269,
100: 0.0511279442868827,
200: 0.0368418413878307,
500: 0.0237294742633411,
1000: 0.0169343970067564,
2000: 0.0120380990328341,
5000: 0.0076506368153935,
10000: 0.00542372242836395,
20000: 0.00384330190244679,
40000: 0.00272375073486223,
72000: 0.00203178401610555},
'0.98': {4: 0.22375580462922195,
5: 0.19361253363045,
6: 0.17917654739278197,
7: 0.17519655327122302,
8: 0.16597856724751,
9: 0.157084065653166,
10: 0.150961728882481,
15: 0.128890475210055,
20: 0.11430970428125302,
30: 0.0960564383013644,
50: 0.0767095886079179,
100: 0.0558022052195208,
200: 0.0402729850316397,
500: 0.025919578977657003,
1000: 0.018513067368104,
2000: 0.0131721010552576,
5000: 0.00836821687047215,
10000: 0.00592656681022859,
20000: 0.00420258799378253,
40000: 0.00298072958568387,
72000: 0.00222356097506054},
'0.99': {4: 0.231796258864192,
5: 0.19650913979884502,
6: 0.191862827995563,
7: 0.184118659121501,
8: 0.172988528276759,
9: 0.164164643657217,
10: 0.159684158858235,
15: 0.13598356863636,
20: 0.120624043335821,
30: 0.101478558893837,
50: 0.0811998415355918,
100: 0.059024132304226,
200: 0.0426864799777448,
500: 0.0274518022761997,
1000: 0.0196080260483234,
2000: 0.0139655122281969,
5000: 0.00886357892854914,
10000: 0.00628034732880374,
20000: 0.00445774902155711,
40000: 0.00315942194040388,
72000: 0.00235782814777627},
'0.995': {4: 0.23726374382677898,
5: 0.198159967287576,
6: 0.20210197104296804,
7: 0.19101439617430602,
8: 0.179010413496374,
9: 0.172821674582338,
10: 0.16719524735674,
15: 0.14245248368127697,
20: 0.126552378036739,
30: 0.10650487144103,
50: 0.0852854646662134,
100: 0.0620425065165146,
200: 0.044958959158761,
500: 0.0288986369564301,
1000: 0.0206489568587364,
2000: 0.0146889122204488,
5000: 0.00934162787186159,
10000: 0.00661030641550873,
20000: 0.00469461513212743,
40000: 0.0033273652798148,
72000: 0.00248343580127067},
'0.998': {4: 0.241992892688593,
5: 0.19924427936243302,
6: 0.213015781111186,
7: 0.198216795232182,
8: 0.186504388711178,
9: 0.182555283567818,
10: 0.175419540856082,
15: 0.15017281653074202,
20: 0.13360135382395,
30: 0.112724636524262,
50: 0.0904847827490294,
100: 0.0658016011466099,
200: 0.0477643873749449,
500: 0.0306813505050163,
1000: 0.0219285176765082,
2000: 0.0156076779647454,
5000: 0.009932186363240291,
10000: 0.00702254699967648,
20000: 0.004994160691291679,
40000: 0.00353988965698579,
72000: 0.00264210826339498},
'0.999': {4: 0.244369839049632,
5: 0.199617527406166,
6: 0.219518627282415,
7: 0.20234101074826102,
8: 0.19448404115794,
9: 0.188658833121906,
10: 0.180611195797351,
15: 0.15545613369632802,
20: 0.138569903791767,
30: 0.117164140184417,
50: 0.0940930106666244,
100: 0.0684479731118028,
200: 0.0497198001867437,
500: 0.0320170996823189,
1000: 0.0228689168972669,
2000: 0.0162685615996248,
5000: 0.0103498795291629,
10000: 0.0073182262815645795,
20000: 0.00520917757743218,
40000: 0.00369400045486625,
72000: 0.0027524322157581},
'0.9995': {4: 0.245966625504691,
5: 0.19980094149902802,
6: 0.22433904739444602,
7: 0.205377566346832,
8: 0.200864297005026,
9: 0.19408912076824603,
10: 0.18528641605039603,
15: 0.160896499106958,
20: 0.14336916123968,
30: 0.12142585990898701,
50: 0.0974904344916743,
100: 0.0709169443994193,
200: 0.0516114611801451,
500: 0.0332452747332959,
1000: 0.023738710122235003,
2000: 0.0168874937789415,
5000: 0.0107780907076862,
10000: 0.0076065423418208,
20000: 0.005403962359243721,
40000: 0.00383345715372182,
72000: 0.0028608570740143},
'0.9998': {4: 0.24743959723326198,
5: 0.19991708183427104,
6: 0.22944933215424101,
7: 0.208306562526874,
8: 0.20884999705022897,
9: 0.19915700809389003,
10: 0.19120308390504398,
15: 0.16697940794624802,
20: 0.148940116394883,
30: 0.126733051889401,
50: 0.10228420428399698,
100: 0.0741183486081263,
200: 0.0540543978864652,
500: 0.0348335698576168,
1000: 0.0248334158891432,
2000: 0.0176505093388153,
5000: 0.0113184316868283,
10000: 0.00795640367207482,
20000: 0.00564540201704594,
40000: 0.0040079346963469605,
72000: 0.00298695044508003},
'0.9999': {4: 0.24823065965663801,
5: 0.19995902909307503,
6: 0.232714530449602,
7: 0.209866047852379,
8: 0.212556040406219,
9: 0.20288159843655804,
10: 0.19580515933918397,
15: 0.17111793515551002,
20: 0.152832538183622,
30: 0.131198578897542,
50: 0.104680624334611,
100: 0.0762579402903838,
200: 0.0558704526182638,
500: 0.0359832389317461,
1000: 0.0256126573433596,
2000: 0.0181944265400504,
5000: 0.0117329446468571,
10000: 0.0082270524584354,
20000: 0.00580460792299214,
40000: 0.00414892737222885,
72000: 0.00309340092038059},
'0.99995': {4: 0.248754269146416,
5: 0.19997839537608197,
6: 0.236548128358969,
7: 0.21096757693345103,
8: 0.21714917413729898,
9: 0.205979795735129,
10: 0.20029398089673,
15: 0.17590050570443203,
20: 0.15601016361897102,
30: 0.133691739483444,
50: 0.107496694235039,
100: 0.0785735967934979,
200: 0.0573877056330228,
500: 0.0369051995840645,
1000: 0.0265491336936829,
2000: 0.0186226037818523,
5000: 0.0119995948968375,
10000: 0.00852240989786251,
20000: 0.00599774739593151,
40000: 0.0042839159079761,
72000: 0.00319932767198801},
'0.99998': {4: 0.24930203997425898,
5: 0.199993151405815,
6: 0.2390887911995,
7: 0.212233348558702,
8: 0.22170007640450304,
9: 0.21054115498898,
10: 0.20565108964621898,
15: 0.18185667601316602,
20: 0.16131922583934502,
30: 0.137831637950694,
50: 0.11140887547015,
100: 0.0813458356889133,
200: 0.0593365901653878,
500: 0.0387221159256424,
1000: 0.027578430100535997,
2000: 0.0193001796565433,
5000: 0.0124410052027886,
10000: 0.00892863905540303,
20000: 0.00633099254378114,
40000: 0.0044187010443287895,
72000: 0.00332688234611187},
'0.99999': {4: 0.24945965232322498,
5: 0.199995525025673,
6: 0.24010356643629502,
7: 0.21266103831250602,
8: 0.225000835357532,
9: 0.21180033095039003,
10: 0.209682048785853,
15: 0.185743454151004,
20: 0.165568255916749,
30: 0.14155750962435099,
50: 0.113536607717411,
100: 0.0832963013755522,
200: 0.0607646310473911,
500: 0.039930259057650005,
1000: 0.0284430733108,
2000: 0.0196241518040617,
5000: 0.0129467396733128,
10000: 0.009138539330002129,
20000: 0.00656987109386762,
40000: 0.00450818604569179,
72000: 0.00339316094477355},
'1': {4: 0.24974836247845,
5: 0.199999835639211,
6: 0.24467288361776798,
7: 0.21353618608817,
8: 0.23377291968768302,
9: 0.21537991431762502,
10: 0.221530282182963,
15: 0.19224056333056197,
20: 0.175834459522789,
30: 0.163833046059817,
50: 0.11788671686531199,
100: 0.0926780423096737,
200: 0.0705309107882395,
500: 0.0431448163617178,
1000: 0.0313640941982108,
2000: 0.0213081254074584,
5000: 0.014396063834027,
10000: 0.00952234579566773,
20000: 0.006858294480462271,
40000: 0.00513477467565583,
72000: 0.00376331697005859}}
qDiptab_df = pd.DataFrame(qDiptab_dict)
diptable = np.array(qDiptab_df)
ps = np.array(qDiptab_df.columns).astype(float)
Ns = np.array(qDiptab_df.index)
if N >= Ns[-1]:
dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1]-0.1)
N = Ns[-1]-0.1
iNlow = np.nonzero(Ns < N)[0][-1]
qN = (N-Ns[iNlow])/(Ns[iNlow+1]-Ns[iNlow])
dip_sqrtN = np.sqrt(N)*dip
dip_interpol_sqrtN = (
np.sqrt(Ns[iNlow])*diptable[iNlow, :] + qN*(
np.sqrt(Ns[iNlow+1])*diptable[iNlow+1, :]-np.sqrt(Ns[iNlow])*diptable[iNlow, :]))
if not (dip_interpol_sqrtN < dip_sqrtN).any():
return 1
iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1]
if iplow == len(dip_interpol_sqrtN) - 1:
return 0
qp = (dip_sqrtN-dip_interpol_sqrtN[iplow])/(dip_interpol_sqrtN[iplow+1]-dip_interpol_sqrtN[iplow])
p_interpol = ps[iplow] + qp*(ps[iplow+1]-ps[iplow])
return 1 - p_interpol | dip - dip value computed from dip_from_cdf
N - number of observations | Below is the instruction that describes the task:
### Input:
dip - dip value computed from dip_from_cdf
N - number of observations
### Response:
# Tabulated quantiles of sqrt(N)*dip for the dip test of unimodality.
# Outer keys are cumulative probabilities (the DataFrame columns), inner
# keys are sample sizes N (the DataFrame index). Built ONCE at import time:
# the original rebuilt this ~460-entry dict and a pandas DataFrame on every
# single call, which was pure overhead.
_QDIPTAB_DICT = {
    '0': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714,
          8: 0.0625, 9: 0.0555555555555556, 10: 0.05,
          15: 0.0341378172277919, 20: 0.033718563622065004,
          30: 0.0262674485075642, 50: 0.0218544781364545,
          100: 0.0164852597438403, 200: 0.0111236388849688,
          500: 0.007554885975761959, 1000: 0.00541658127872122,
          2000: 0.0039043999745055702, 5000: 0.00245657785440433,
          10000: 0.00174954269199566, 20000: 0.00119458814106091,
          40000: 0.000852415648011777, 72000: 0.000644400053256997},
    '0.01': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714,
             8: 0.0625, 9: 0.0613018090298924, 10: 0.0610132555623269,
             15: 0.0546284208048975, 20: 0.0474333740698401,
             30: 0.0395871890405749, 50: 0.0314400501999916,
             100: 0.022831985803043, 200: 0.0165017735429825,
             500: 0.0106403461127515, 1000: 0.0076028674530018705,
             2000: 0.0054166418179658294, 5000: 0.0034480928223332603,
             10000: 0.00244595133885302, 20000: 0.00173435346896287,
             40000: 0.00122883479310665, 72000: 0.000916872204484283},
    '0.02': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714,
             8: 0.0656911994503283, 9: 0.0658615858179315,
             10: 0.0651627333214016, 15: 0.0572191260231815,
             20: 0.0490891387627092, 30: 0.0414574606741673,
             50: 0.0329008160470834, 100: 0.0238917486442849,
             200: 0.0172594157992489, 500: 0.0111255573208294,
             1000: 0.00794987834644799, 2000: 0.0056617138625232296,
             5000: 0.00360473943713036, 10000: 0.00255710802275612,
             20000: 0.0018119443458468102, 40000: 0.0012846930445701802,
             72000: 0.0009579329467655321},
    '0.05': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0725717816250742,
             8: 0.0738651136071762, 9: 0.0732651142535317,
             10: 0.0718321619656165, 15: 0.0610087367689692,
             20: 0.052719998201553, 30: 0.0444462614069956,
             50: 0.0353023819040016, 100: 0.0256559537977579,
             200: 0.0185259426032926, 500: 0.0119353655328931,
             1000: 0.0085216518343594, 2000: 0.00607120971135229,
             5000: 0.0038632654801084897, 10000: 0.00273990955227265,
             20000: 0.00194259470485893, 40000: 0.0013761765052555301,
             72000: 0.00102641863872347},
    '0.1': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0817315478539489,
            8: 0.0820045917762512, 9: 0.0803941629593475,
            10: 0.077966212182459, 15: 0.0642657137330444,
            20: 0.0567795509056742, 30: 0.0473998525042686,
            50: 0.0377279973102482, 100: 0.0273987414570948,
            200: 0.0197917612637521, 500: 0.0127411306411808,
            1000: 0.00909775605533253, 2000: 0.0064762535755248,
            5000: 0.00412089506752692, 10000: 0.0029225480567908,
            20000: 0.00207173719623868, 40000: 0.0014675150200632301,
            72000: 0.0010949515421800199},
    '0.2': {4: 0.125, 5: 0.1, 6: 0.0924514470941933, 7: 0.0940590181922527,
            8: 0.0922700601131892, 9: 0.0890432420913848,
            10: 0.0852835359834564, 15: 0.0692234107989591,
            20: 0.0620134674468181, 30: 0.0516677370374349,
            50: 0.0410699984399582, 100: 0.0298109370830153,
            200: 0.0215233745778454, 500: 0.0138524542751814,
            1000: 0.00988924521014078, 2000: 0.00703573098590029,
            5000: 0.00447640050137479, 10000: 0.00317374638422465,
            20000: 0.00224993202086955, 40000: 0.00159376453672466,
            72000: 0.00118904090369415},
    '0.3': {4: 0.125, 5: 0.1, 6: 0.103913431059949, 7: 0.10324449080087102,
            8: 0.0996737189599363, 9: 0.0950811420297928,
            10: 0.0903204173707099, 15: 0.0745462114365167,
            20: 0.0660163872069048, 30: 0.0551037519001622,
            50: 0.0437704598622665, 100: 0.0317771496530253,
            200: 0.0229259769870428, 500: 0.0147536004288476,
            1000: 0.0105309297090482, 2000: 0.007494212545892991,
            5000: 0.00476555693102276, 10000: 0.00338072258533527,
            20000: 0.00239520831473419, 40000: 0.00169668445506151,
            72000: 0.00126575197699874},
    '0.4': {4: 0.125, 5: 0.10872059357632902, 6: 0.113885220640212,
            7: 0.110964599995697, 8: 0.10573353180273701,
            9: 0.0999380897811046, 10: 0.0943334983745117,
            15: 0.0792030878981762, 20: 0.0696506075066401,
            30: 0.058265005347492994, 50: 0.0462925642671299,
            100: 0.0336073821590387, 200: 0.024243848341112,
            500: 0.0155963185751048, 1000: 0.0111322726797384,
            2000: 0.007920878896017329, 5000: 0.005037040297500721,
            10000: 0.0035724387653598205, 20000: 0.00253036792824665,
            40000: 0.0017925341833790601, 72000: 0.00133750966361506},
    '0.5': {4: 0.125, 5: 0.12156379802641401, 6: 0.123071187137781,
            7: 0.11780784650433501, 8: 0.11103512984770501,
            9: 0.10415356007586801, 10: 0.0977817630384725,
            15: 0.083621033469191, 20: 0.0733437740592714,
            30: 0.0614510857304343, 50: 0.048851155289608,
            100: 0.0354621760592113, 200: 0.025584358256487003,
            500: 0.0164519238025286, 1000: 0.0117439009052552,
            2000: 0.008355737247680059, 5000: 0.0053123924740821294,
            10000: 0.00376734715752209, 20000: 0.00266863168718114,
            40000: 0.00189061261635977, 72000: 0.00141049709228472},
    '0.6': {4: 0.125, 5: 0.134318918697053, 6: 0.13186973390253,
            7: 0.124216086833531, 8: 0.11592005574998801,
            9: 0.10800780236193198, 10: 0.102180866696628,
            15: 0.0881198482202905, 20: 0.0776460662880254,
            30: 0.0649164408053978, 50: 0.0516145897865757,
            100: 0.0374805844550272, 200: 0.0270252129816288,
            500: 0.017383057902553, 1000: 0.012405033293814,
            2000: 0.00882439333812351, 5000: 0.00560929919359959,
            10000: 0.00397885007249132, 20000: 0.0028181999035216,
            40000: 0.00199645471886179, 72000: 0.00148936709298802},
    '0.7': {4: 0.13255954878268902, 5: 0.14729879897625198,
            6: 0.140564796497941, 7: 0.130409013968317,
            8: 0.120561479262465, 9: 0.112512617124951,
            10: 0.10996094814295099, 15: 0.093124666680253,
            20: 0.0824558407118372, 30: 0.0689178762425442,
            50: 0.0548121932066019, 100: 0.0398046179116599,
            200: 0.0286920262150517, 500: 0.0184503949887735,
            1000: 0.0131684179320803, 2000: 0.009367858207170609,
            5000: 0.00595352728377949, 10000: 0.00422430013176233,
            20000: 0.00299137548142077, 40000: 0.00211929748381704,
            72000: 0.00158027541945626},
    '0.8': {4: 0.15749736904023498, 5: 0.161085025702604,
            6: 0.14941924112913002, 7: 0.136639642123068,
            8: 0.125558759034845, 9: 0.12291503348081699,
            10: 0.11884476721158699, 15: 0.0996694393390689,
            20: 0.08834462700173701, 30: 0.0739249074078291,
            50: 0.0588230482851366, 100: 0.0427283846799166,
            200: 0.0308006766341406, 500: 0.0198162679782071,
            1000: 0.0141377942603047, 2000: 0.01005604603884,
            5000: 0.00639092280563517, 10000: 0.00453437508148542,
            20000: 0.00321024899920135, 40000: 0.0022745769870358102,
            72000: 0.00169651643860074},
    '0.9': {4: 0.18740187880755899, 5: 0.176811998476076,
            6: 0.159137064572627, 7: 0.144240669035124,
            8: 0.141841067033899, 9: 0.136412639387084,
            10: 0.130462149644819, 15: 0.11008749690090598,
            20: 0.0972346018122903, 30: 0.0814791379390127,
            50: 0.0649136324046767, 100: 0.047152783315718,
            200: 0.0339967814293504, 500: 0.0218781313182203,
            1000: 0.0156148055023058, 2000: 0.0111019116837591,
            5000: 0.00705566126234625, 10000: 0.00500178808402368,
            20000: 0.00354362220314155, 40000: 0.00250999080890397,
            72000: 0.0018730618472582602},
    '0.95': {4: 0.20726978858735998, 5: 0.18639179602794398,
             6: 0.164769608513302, 7: 0.159903395678336,
             8: 0.153978303998561, 9: 0.14660378495401902,
             10: 0.139611395137099, 15: 0.118760769203664,
             20: 0.105130218270636, 30: 0.0881689143126666,
             50: 0.0702737877191269, 100: 0.0511279442868827,
             200: 0.0368418413878307, 500: 0.0237294742633411,
             1000: 0.0169343970067564, 2000: 0.0120380990328341,
             5000: 0.0076506368153935, 10000: 0.00542372242836395,
             20000: 0.00384330190244679, 40000: 0.00272375073486223,
             72000: 0.00203178401610555},
    '0.98': {4: 0.22375580462922195, 5: 0.19361253363045,
             6: 0.17917654739278197, 7: 0.17519655327122302,
             8: 0.16597856724751, 9: 0.157084065653166,
             10: 0.150961728882481, 15: 0.128890475210055,
             20: 0.11430970428125302, 30: 0.0960564383013644,
             50: 0.0767095886079179, 100: 0.0558022052195208,
             200: 0.0402729850316397, 500: 0.025919578977657003,
             1000: 0.018513067368104, 2000: 0.0131721010552576,
             5000: 0.00836821687047215, 10000: 0.00592656681022859,
             20000: 0.00420258799378253, 40000: 0.00298072958568387,
             72000: 0.00222356097506054},
    '0.99': {4: 0.231796258864192, 5: 0.19650913979884502,
             6: 0.191862827995563, 7: 0.184118659121501,
             8: 0.172988528276759, 9: 0.164164643657217,
             10: 0.159684158858235, 15: 0.13598356863636,
             20: 0.120624043335821, 30: 0.101478558893837,
             50: 0.0811998415355918, 100: 0.059024132304226,
             200: 0.0426864799777448, 500: 0.0274518022761997,
             1000: 0.0196080260483234, 2000: 0.0139655122281969,
             5000: 0.00886357892854914, 10000: 0.00628034732880374,
             20000: 0.00445774902155711, 40000: 0.00315942194040388,
             72000: 0.00235782814777627},
    '0.995': {4: 0.23726374382677898, 5: 0.198159967287576,
              6: 0.20210197104296804, 7: 0.19101439617430602,
              8: 0.179010413496374, 9: 0.172821674582338,
              10: 0.16719524735674, 15: 0.14245248368127697,
              20: 0.126552378036739, 30: 0.10650487144103,
              50: 0.0852854646662134, 100: 0.0620425065165146,
              200: 0.044958959158761, 500: 0.0288986369564301,
              1000: 0.0206489568587364, 2000: 0.0146889122204488,
              5000: 0.00934162787186159, 10000: 0.00661030641550873,
              20000: 0.00469461513212743, 40000: 0.0033273652798148,
              72000: 0.00248343580127067},
    '0.998': {4: 0.241992892688593, 5: 0.19924427936243302,
              6: 0.213015781111186, 7: 0.198216795232182,
              8: 0.186504388711178, 9: 0.182555283567818,
              10: 0.175419540856082, 15: 0.15017281653074202,
              20: 0.13360135382395, 30: 0.112724636524262,
              50: 0.0904847827490294, 100: 0.0658016011466099,
              200: 0.0477643873749449, 500: 0.0306813505050163,
              1000: 0.0219285176765082, 2000: 0.0156076779647454,
              5000: 0.009932186363240291, 10000: 0.00702254699967648,
              20000: 0.004994160691291679, 40000: 0.00353988965698579,
              72000: 0.00264210826339498},
    '0.999': {4: 0.244369839049632, 5: 0.199617527406166,
              6: 0.219518627282415, 7: 0.20234101074826102,
              8: 0.19448404115794, 9: 0.188658833121906,
              10: 0.180611195797351, 15: 0.15545613369632802,
              20: 0.138569903791767, 30: 0.117164140184417,
              50: 0.0940930106666244, 100: 0.0684479731118028,
              200: 0.0497198001867437, 500: 0.0320170996823189,
              1000: 0.0228689168972669, 2000: 0.0162685615996248,
              5000: 0.0103498795291629, 10000: 0.0073182262815645795,
              20000: 0.00520917757743218, 40000: 0.00369400045486625,
              72000: 0.0027524322157581},
    '0.9995': {4: 0.245966625504691, 5: 0.19980094149902802,
               6: 0.22433904739444602, 7: 0.205377566346832,
               8: 0.200864297005026, 9: 0.19408912076824603,
               10: 0.18528641605039603, 15: 0.160896499106958,
               20: 0.14336916123968, 30: 0.12142585990898701,
               50: 0.0974904344916743, 100: 0.0709169443994193,
               200: 0.0516114611801451, 500: 0.0332452747332959,
               1000: 0.023738710122235003, 2000: 0.0168874937789415,
               5000: 0.0107780907076862, 10000: 0.0076065423418208,
               20000: 0.005403962359243721, 40000: 0.00383345715372182,
               72000: 0.0028608570740143},
    '0.9998': {4: 0.24743959723326198, 5: 0.19991708183427104,
               6: 0.22944933215424101, 7: 0.208306562526874,
               8: 0.20884999705022897, 9: 0.19915700809389003,
               10: 0.19120308390504398, 15: 0.16697940794624802,
               20: 0.148940116394883, 30: 0.126733051889401,
               50: 0.10228420428399698, 100: 0.0741183486081263,
               200: 0.0540543978864652, 500: 0.0348335698576168,
               1000: 0.0248334158891432, 2000: 0.0176505093388153,
               5000: 0.0113184316868283, 10000: 0.00795640367207482,
               20000: 0.00564540201704594, 40000: 0.0040079346963469605,
               72000: 0.00298695044508003},
    '0.9999': {4: 0.24823065965663801, 5: 0.19995902909307503,
               6: 0.232714530449602, 7: 0.209866047852379,
               8: 0.212556040406219, 9: 0.20288159843655804,
               10: 0.19580515933918397, 15: 0.17111793515551002,
               20: 0.152832538183622, 30: 0.131198578897542,
               50: 0.104680624334611, 100: 0.0762579402903838,
               200: 0.0558704526182638, 500: 0.0359832389317461,
               1000: 0.0256126573433596, 2000: 0.0181944265400504,
               5000: 0.0117329446468571, 10000: 0.0082270524584354,
               20000: 0.00580460792299214, 40000: 0.00414892737222885,
               72000: 0.00309340092038059},
    '0.99995': {4: 0.248754269146416, 5: 0.19997839537608197,
                6: 0.236548128358969, 7: 0.21096757693345103,
                8: 0.21714917413729898, 9: 0.205979795735129,
                10: 0.20029398089673, 15: 0.17590050570443203,
                20: 0.15601016361897102, 30: 0.133691739483444,
                50: 0.107496694235039, 100: 0.0785735967934979,
                200: 0.0573877056330228, 500: 0.0369051995840645,
                1000: 0.0265491336936829, 2000: 0.0186226037818523,
                5000: 0.0119995948968375, 10000: 0.00852240989786251,
                20000: 0.00599774739593151, 40000: 0.0042839159079761,
                72000: 0.00319932767198801},
    '0.99998': {4: 0.24930203997425898, 5: 0.199993151405815,
                6: 0.2390887911995, 7: 0.212233348558702,
                8: 0.22170007640450304, 9: 0.21054115498898,
                10: 0.20565108964621898, 15: 0.18185667601316602,
                20: 0.16131922583934502, 30: 0.137831637950694,
                50: 0.11140887547015, 100: 0.0813458356889133,
                200: 0.0593365901653878, 500: 0.0387221159256424,
                1000: 0.027578430100535997, 2000: 0.0193001796565433,
                5000: 0.0124410052027886, 10000: 0.00892863905540303,
                20000: 0.00633099254378114, 40000: 0.0044187010443287895,
                72000: 0.00332688234611187},
    '0.99999': {4: 0.24945965232322498, 5: 0.199995525025673,
                6: 0.24010356643629502, 7: 0.21266103831250602,
                8: 0.225000835357532, 9: 0.21180033095039003,
                10: 0.209682048785853, 15: 0.185743454151004,
                20: 0.165568255916749, 30: 0.14155750962435099,
                50: 0.113536607717411, 100: 0.0832963013755522,
                200: 0.0607646310473911, 500: 0.039930259057650005,
                1000: 0.0284430733108, 2000: 0.0196241518040617,
                5000: 0.0129467396733128, 10000: 0.009138539330002129,
                20000: 0.00656987109386762, 40000: 0.00450818604569179,
                72000: 0.00339316094477355},
    '1': {4: 0.24974836247845, 5: 0.199999835639211,
          6: 0.24467288361776798, 7: 0.21353618608817,
          8: 0.23377291968768302, 9: 0.21537991431762502,
          10: 0.221530282182963, 15: 0.19224056333056197,
          20: 0.175834459522789, 30: 0.163833046059817,
          50: 0.11788671686531199, 100: 0.0926780423096737,
          200: 0.0705309107882395, 500: 0.0431448163617178,
          1000: 0.0313640941982108, 2000: 0.0213081254074584,
          5000: 0.014396063834027, 10000: 0.00952234579566773,
          20000: 0.006858294480462271, 40000: 0.00513477467565583,
          72000: 0.00376331697005859}}

# Derived lookup structures, also built once.
_QDIPTAB_DF = pd.DataFrame(_QDIPTAB_DICT)
_DIPTABLE = np.array(_QDIPTAB_DF)                    # rows: N, cols: probability
_PS = np.array(_QDIPTAB_DF.columns).astype(float)    # tabulated probabilities
_NS = np.array(_QDIPTAB_DF.index)                    # tabulated sample sizes


def dip_pval_tabinterpol(dip, N):
    '''
    dip - dip value computed from dip_from_cdf
    N - number of observations

    Returns the p-value for the dip statistic, obtained by bilinear
    interpolation in the table of tabulated sqrt(N)*dip quantiles.
    Returns np.nan when N is nan or N < 10 (table starts at N=10 for
    practical purposes); clamps to 1 / 0 outside the tabulated range.
    '''
    if np.isnan(N) or N < 10:
        return np.nan

    # Beyond the largest tabulated N, rescale the dip to the table edge.
    if N >= _NS[-1]:
        dip = transform_dip_to_other_nbr_pts(dip, N, _NS[-1] - 0.1)
        N = _NS[-1] - 0.1

    # Linear interpolation weight between the two bracketing sample sizes.
    iNlow = np.nonzero(_NS < N)[0][-1]
    qN = (N - _NS[iNlow]) / (_NS[iNlow + 1] - _NS[iNlow])
    dip_sqrtN = np.sqrt(N) * dip
    # Interpolated row of sqrt(N)*dip quantiles for this N.
    dip_interpol_sqrtN = (
        np.sqrt(_NS[iNlow]) * _DIPTABLE[iNlow, :] + qN * (
            np.sqrt(_NS[iNlow + 1]) * _DIPTABLE[iNlow + 1, :]
            - np.sqrt(_NS[iNlow]) * _DIPTABLE[iNlow, :]))

    # dip below every tabulated quantile: p-value saturates at 1.
    if not (dip_interpol_sqrtN < dip_sqrtN).any():
        return 1

    iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1]
    # dip above every tabulated quantile: p-value saturates at 0.
    if iplow == len(dip_interpol_sqrtN) - 1:
        return 0

    # Interpolate between the two bracketing probability columns.
    qp = (dip_sqrtN - dip_interpol_sqrtN[iplow]) / (
        dip_interpol_sqrtN[iplow + 1] - dip_interpol_sqrtN[iplow])
    p_interpol = _PS[iplow] + qp * (_PS[iplow + 1] - _PS[iplow])
    return 1 - p_interpol
def unpacked_dtype(self):
""" Returns the numpy.dtype used to store the point records in a numpy array
.. note::
The dtype corresponds to the dtype with sub_fields *unpacked*
"""
dtype = self._access_dict(dims.UNPACKED_POINT_FORMATS_DTYPES, self.id)
dtype = self._dtype_add_extra_dims(dtype)
return dtype | Returns the numpy.dtype used to store the point records in a numpy array
.. note::
The dtype corresponds to the dtype with sub_fields *unpacked* | Below is the instruction that describes the task:
### Input:
Returns the numpy.dtype used to store the point records in a numpy array
.. note::
The dtype corresponds to the dtype with sub_fields *unpacked*
### Response:
def unpacked_dtype(self):
""" Returns the numpy.dtype used to store the point records in a numpy array
.. note::
The dtype corresponds to the dtype with sub_fields *unpacked*
"""
dtype = self._access_dict(dims.UNPACKED_POINT_FORMATS_DTYPES, self.id)
dtype = self._dtype_add_extra_dims(dtype)
return dtype |
def visit_keyword(self, node, parent):
"""visit a Keyword node by returning a fresh instance of it"""
newnode = nodes.Keyword(node.arg, parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode | visit a Keyword node by returning a fresh instance of it | Below is the the instruction that describes the task:
### Input:
visit a Keyword node by returning a fresh instance of it
### Response:
def visit_keyword(self, node, parent):
"""visit a Keyword node by returning a fresh instance of it"""
newnode = nodes.Keyword(node.arg, parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode |
def Platform(name = platform_default()):
"""Select a canned Platform specification.
"""
module = platform_module(name)
spec = PlatformSpec(name, module.generate)
return spec | Select a canned Platform specification. | Below is the the instruction that describes the task:
### Input:
Select a canned Platform specification.
### Response:
def Platform(name = platform_default()):
"""Select a canned Platform specification.
"""
module = platform_module(name)
spec = PlatformSpec(name, module.generate)
return spec |
def get_behave_args(self, argv=sys.argv):
"""
Get a list of those command line arguments specified with the
management command that are meant as arguments for running behave.
"""
parser = BehaveArgsHelper().create_parser('manage.py', 'behave')
args, unknown = parser.parse_known_args(argv[2:])
behave_args = []
for option in unknown:
# Remove behave prefix
if option.startswith('--behave-'):
option = option.replace('--behave-', '', 1)
prefix = '-' if len(option) == 1 else '--'
option = prefix + option
behave_args.append(option)
return behave_args | Get a list of those command line arguments specified with the
management command that are meant as arguments for running behave. | Below is the the instruction that describes the task:
### Input:
Get a list of those command line arguments specified with the
management command that are meant as arguments for running behave.
### Response:
def get_behave_args(self, argv=sys.argv):
"""
Get a list of those command line arguments specified with the
management command that are meant as arguments for running behave.
"""
parser = BehaveArgsHelper().create_parser('manage.py', 'behave')
args, unknown = parser.parse_known_args(argv[2:])
behave_args = []
for option in unknown:
# Remove behave prefix
if option.startswith('--behave-'):
option = option.replace('--behave-', '', 1)
prefix = '-' if len(option) == 1 else '--'
option = prefix + option
behave_args.append(option)
return behave_args |
def console_get_char_foreground(
con: tcod.console.Console, x: int, y: int
) -> Color:
"""Return the foreground color at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.fg`.
"""
return Color._new_from_cdata(
lib.TCOD_console_get_char_foreground(_console(con), x, y)
) | Return the foreground color at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.fg`. | Below is the instruction that describes the task:
### Input:
Return the foreground color at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.fg`.
### Response:
def console_get_char_foreground(
con: tcod.console.Console, x: int, y: int
) -> Color:
"""Return the foreground color at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.fg`.
"""
return Color._new_from_cdata(
lib.TCOD_console_get_char_foreground(_console(con), x, y)
) |
def size_as_drawn(lines, screen_width):
"""Get the bottom-right corner of some text as would be drawn by draw_lines"""
y = 0
x = 0
for line in lines:
wrapped = list(wc_wrap(line, screen_width))
if len(wrapped) > 0:
for wrapped_line in wrapped:
x = len(wrapped_line)
y += 1
else:
x = 0
y += 1
return y - 1, x - 1 if x != 0 else 0 | Get the bottom-right corner of some text as would be drawn by draw_lines | Below is the the instruction that describes the task:
### Input:
Get the bottom-right corner of some text as would be drawn by draw_lines
### Response:
def size_as_drawn(lines, screen_width):
"""Get the bottom-right corner of some text as would be drawn by draw_lines"""
y = 0
x = 0
for line in lines:
wrapped = list(wc_wrap(line, screen_width))
if len(wrapped) > 0:
for wrapped_line in wrapped:
x = len(wrapped_line)
y += 1
else:
x = 0
y += 1
return y - 1, x - 1 if x != 0 else 0 |
def snapshot_get(repository, snapshot, ignore_unavailable=False, hosts=None, profile=None):
'''
.. versionadded:: 2017.7.0
Obtain snapshot residing in specified repository.
repository
Repository name
snapshot
Snapshot name, use _all to obtain all snapshots in specified repository
ignore_unavailable
Ignore unavailable snapshots
CLI example::
salt myminion elasticsearch.snapshot_get testrepo testsnapshot
'''
es = _get_instance(hosts, profile)
try:
return es.snapshot.get(repository=repository, snapshot=snapshot, ignore_unavailable=ignore_unavailable)
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot obtain details of snapshot {0} in repository {1}, server returned code {2} with message {3}".format(snapshot, repository, e.status_code, e.error)) | .. versionadded:: 2017.7.0
Obtain snapshot residing in specified repository.
repository
Repository name
snapshot
Snapshot name, use _all to obtain all snapshots in specified repository
ignore_unavailable
Ignore unavailable snapshots
CLI example::
salt myminion elasticsearch.snapshot_get testrepo testsnapshot | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2017.7.0
Obtain snapshot residing in specified repository.
repository
Repository name
snapshot
Snapshot name, use _all to obtain all snapshots in specified repository
ignore_unavailable
Ignore unavailable snapshots
CLI example::
salt myminion elasticsearch.snapshot_get testrepo testsnapshot
### Response:
def snapshot_get(repository, snapshot, ignore_unavailable=False, hosts=None, profile=None):
'''
.. versionadded:: 2017.7.0
Obtain snapshot residing in specified repository.
repository
Repository name
snapshot
Snapshot name, use _all to obtain all snapshots in specified repository
ignore_unavailable
Ignore unavailable snapshots
CLI example::
salt myminion elasticsearch.snapshot_get testrepo testsnapshot
'''
es = _get_instance(hosts, profile)
try:
return es.snapshot.get(repository=repository, snapshot=snapshot, ignore_unavailable=ignore_unavailable)
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot obtain details of snapshot {0} in repository {1}, server returned code {2} with message {3}".format(snapshot, repository, e.status_code, e.error)) |
def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
set: leaf items which are reachable from the given set of items
"""
return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values()))) | Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
set: leaf items which are reachable from the given set of items | Below is the instruction that describes the task:
### Input:
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
set: leaf items which are reachable from the given set of items
### Response:
def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
set: leaf items which are reachable from the given set of items
"""
return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values()))) |
def TYPE(type_):
""" Converts a backend type (from api.constants)
to a SymbolTYPE object (taken from the SYMBOL_TABLE).
If type_ is already a SymbolTYPE object, nothing
is done.
"""
if isinstance(type_, symbols.TYPE):
return type_
assert TYPE.is_valid(type_)
return gl.SYMBOL_TABLE.basic_types[type_] | Converts a backend type (from api.constants)
to a SymbolTYPE object (taken from the SYMBOL_TABLE).
If type_ is already a SymbolTYPE object, nothing
is done. | Below is the instruction that describes the task:
### Input:
Converts a backend type (from api.constants)
to a SymbolTYPE object (taken from the SYMBOL_TABLE).
If type_ is already a SymbolTYPE object, nothing
is done.
### Response:
def TYPE(type_):
""" Converts a backend type (from api.constants)
to a SymbolTYPE object (taken from the SYMBOL_TABLE).
If type_ is already a SymbolTYPE object, nothing
is done.
"""
if isinstance(type_, symbols.TYPE):
return type_
assert TYPE.is_valid(type_)
return gl.SYMBOL_TABLE.basic_types[type_] |
def execute(self, write_concern, session):
"""Execute operations.
"""
if not self.ops:
raise InvalidOperation('No operations to execute')
if self.executed:
raise InvalidOperation('Bulk operations can '
'only be executed once.')
self.executed = True
write_concern = write_concern or self.collection.write_concern
session = _validate_session_write_concern(session, write_concern)
if self.ordered:
generator = self.gen_ordered()
else:
generator = self.gen_unordered()
client = self.collection.database.client
if not write_concern.acknowledged:
with client._socket_for_writes(session) as sock_info:
self.execute_no_results(sock_info, generator)
else:
return self.execute_command(generator, write_concern, session) | Execute operations. | Below is the the instruction that describes the task:
### Input:
Execute operations.
### Response:
def execute(self, write_concern, session):
"""Execute operations.
"""
if not self.ops:
raise InvalidOperation('No operations to execute')
if self.executed:
raise InvalidOperation('Bulk operations can '
'only be executed once.')
self.executed = True
write_concern = write_concern or self.collection.write_concern
session = _validate_session_write_concern(session, write_concern)
if self.ordered:
generator = self.gen_ordered()
else:
generator = self.gen_unordered()
client = self.collection.database.client
if not write_concern.acknowledged:
with client._socket_for_writes(session) as sock_info:
self.execute_no_results(sock_info, generator)
else:
return self.execute_command(generator, write_concern, session) |
def go_previous_thumbnail(self):
"""Select the thumbnail previous to the currently selected one."""
if self.current_thumbnail is not None:
index = self._thumbnails.index(self.current_thumbnail) - 1
index = index if index >= 0 else len(self._thumbnails) - 1
self.set_current_index(index)
self.scroll_to_item(index) | Select the thumbnail previous to the currently selected one. | Below is the the instruction that describes the task:
### Input:
Select the thumbnail previous to the currently selected one.
### Response:
def go_previous_thumbnail(self):
"""Select the thumbnail previous to the currently selected one."""
if self.current_thumbnail is not None:
index = self._thumbnails.index(self.current_thumbnail) - 1
index = index if index >= 0 else len(self._thumbnails) - 1
self.set_current_index(index)
self.scroll_to_item(index) |
def get_user_affinity(self, test):
"""Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users.
"""
test.createOrReplaceTempView(self.f("{prefix}df_test"))
query = self.f(
"SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}"
)
df_test_users = self.spark.sql(query)
df_test_users.write.mode("overwrite").saveAsTable(
self.f("{prefix}df_test_users")
)
query = self.f(
"""
SELECT a.{col_user}, a.{col_item}, CAST(a.{col_rating} AS double) {col_rating}
FROM {prefix}df_train a INNER JOIN {prefix}df_test_users b ON a.{col_user} = b.{col_user}
DISTRIBUTE BY {col_user}
SORT BY {col_user}, {col_item}
"""
)
return self.spark.sql(query) | Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users. | Below is the the instruction that describes the task:
### Input:
Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users.
### Response:
def get_user_affinity(self, test):
"""Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users.
"""
test.createOrReplaceTempView(self.f("{prefix}df_test"))
query = self.f(
"SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}"
)
df_test_users = self.spark.sql(query)
df_test_users.write.mode("overwrite").saveAsTable(
self.f("{prefix}df_test_users")
)
query = self.f(
"""
SELECT a.{col_user}, a.{col_item}, CAST(a.{col_rating} AS double) {col_rating}
FROM {prefix}df_train a INNER JOIN {prefix}df_test_users b ON a.{col_user} = b.{col_user}
DISTRIBUTE BY {col_user}
SORT BY {col_user}, {col_item}
"""
)
return self.spark.sql(query) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.