code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def request_data(self):
"""
(0,'cpu_percent'),
(1,'virtual_memory_total'),
(2,'virtual_memory_available'),
(3,'virtual_memory_percent'),
(4,'virtual_memory_used'),
(5,'virtual_memory_free'),
(6,'virtual_memory_active'),
(7,'virtual_memory_inactive'),
(8,'virtual_memory_buffers'),
(9,'virtual_memory_cached'),
(10,'swap_memory_total'),
(11,'swap_memory_used'),
(12,'swap_memory_free'),
(13,'swap_memory_percent'),
(14,'swap_memory_sin'),
(15,'swap_memory_sout'),
(17,'disk_usage_systemdisk_percent'),
(18,'disk_usage_disk_percent'),
### APCUPSD Status
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), #
"""
if not driver_ok:
return None
output = []
apcupsd_status_is_queried = False
for item in self.variables:
timestamp = time()
value = None
if item.systemstatvariable.information == 0:
# cpu_percent
if hasattr(psutil, 'cpu_percent'):
value = psutil.cpu_percent()
timestamp = time()
elif item.systemstatvariable.information == 1:
# virtual_memory_total
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().total
timestamp = time()
elif item.systemstatvariable.information == 2:
# virtual_memory_available
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().available
timestamp = time()
elif item.systemstatvariable.information == 3:
# virtual_memory_percent
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().percent
timestamp = time()
elif item.systemstatvariable.information == 4:
# virtual_memory_used
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().used
timestamp = time()
elif item.systemstatvariable.information == 5:
# virtual_memory_free
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().free
timestamp = time()
elif item.systemstatvariable.information == 6:
# virtual_memory_active
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().active
timestamp = time()
elif item.systemstatvariable.information == 7:
# virtual_memory_inactive
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().inactive
timestamp = time()
elif item.systemstatvariable.information == 8:
# virtual_memory_buffers
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().buffers
timestamp = time()
elif item.systemstatvariable.information == 9:
# virtual_memory_cached
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().cached
timestamp = time()
elif item.systemstatvariable.information == 10:
# swap_memory_total
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().total
timestamp = time()
elif item.systemstatvariable.information == 11:
# swap_memory_used
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().used
timestamp = time()
elif item.systemstatvariable.information == 12:
# swap_memory_free
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().free
timestamp = time()
elif item.systemstatvariable.information == 13:
# swap_memory_percent
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().percent
timestamp = time()
elif item.systemstatvariable.information == 14:
# swap_memory_sin
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sin
timestamp = time()
elif item.systemstatvariable.information == 15:
# swap_memory_sout
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sout
timestamp = time()
elif item.systemstatvariable.information == 17:
# disk_usage_systemdisk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage('/').percent
timestamp = time()
elif item.systemstatvariable.information == 18:
# disk_usage_disk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage(item.systemstatvariable.parameter).percent
timestamp = time()
elif 100 <= item.systemstatvariable.information <= 105:
# APCUPSD Status
apcupsd_status = None
if not apcupsd_status_is_queried:
apcupsd_status = query_apsupsd_status()
apcupsd_status_is_queried = True
if apcupsd_status is not None:
if item.systemstatvariable.information == 100:
if 'STATUS' in apcupsd_status:
value = apcupsd_status['STATUS']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 101:
if 'LINEV' in apcupsd_status:
value = apcupsd_status['LINEV']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 102:
if 'BATTV' in apcupsd_status:
value = apcupsd_status['BATTV']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 103:
if 'BCHARGE' in apcupsd_status:
value = apcupsd_status['BCHARGE']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 104:
if 'TIMELEFT' in apcupsd_status:
value = apcupsd_status['TIMELEFT']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 105:
if 'LOADPCT' in apcupsd_status:
value = apcupsd_status['LOADPCT']
timestamp = apcupsd_status['timestamp']
else:
value = None
# update variable
if value is not None and item.update_value(value, timestamp):
output.append(item.create_recorded_data_element())
return output | (0,'cpu_percent'),
(1,'virtual_memory_total'),
(2,'virtual_memory_available'),
(3,'virtual_memory_percent'),
(4,'virtual_memory_used'),
(5,'virtual_memory_free'),
(6,'virtual_memory_active'),
(7,'virtual_memory_inactive'),
(8,'virtual_memory_buffers'),
(9,'virtual_memory_cached'),
(10,'swap_memory_total'),
(11,'swap_memory_used'),
(12,'swap_memory_free'),
(13,'swap_memory_percent'),
(14,'swap_memory_sin'),
(15,'swap_memory_sout'),
(17,'disk_usage_systemdisk_percent'),
(18,'disk_usage_disk_percent'),
### APCUPSD Status
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), # | Below is the the instruction that describes the task:
### Input:
(0,'cpu_percent'),
(1,'virtual_memory_total'),
(2,'virtual_memory_available'),
(3,'virtual_memory_percent'),
(4,'virtual_memory_used'),
(5,'virtual_memory_free'),
(6,'virtual_memory_active'),
(7,'virtual_memory_inactive'),
(8,'virtual_memory_buffers'),
(9,'virtual_memory_cached'),
(10,'swap_memory_total'),
(11,'swap_memory_used'),
(12,'swap_memory_free'),
(13,'swap_memory_percent'),
(14,'swap_memory_sin'),
(15,'swap_memory_sout'),
(17,'disk_usage_systemdisk_percent'),
(18,'disk_usage_disk_percent'),
### APCUPSD Status
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), #
### Response:
def request_data(self):
"""
(0,'cpu_percent'),
(1,'virtual_memory_total'),
(2,'virtual_memory_available'),
(3,'virtual_memory_percent'),
(4,'virtual_memory_used'),
(5,'virtual_memory_free'),
(6,'virtual_memory_active'),
(7,'virtual_memory_inactive'),
(8,'virtual_memory_buffers'),
(9,'virtual_memory_cached'),
(10,'swap_memory_total'),
(11,'swap_memory_used'),
(12,'swap_memory_free'),
(13,'swap_memory_percent'),
(14,'swap_memory_sin'),
(15,'swap_memory_sout'),
(17,'disk_usage_systemdisk_percent'),
(18,'disk_usage_disk_percent'),
### APCUPSD Status
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), #
"""
if not driver_ok:
return None
output = []
apcupsd_status_is_queried = False
for item in self.variables:
timestamp = time()
value = None
if item.systemstatvariable.information == 0:
# cpu_percent
if hasattr(psutil, 'cpu_percent'):
value = psutil.cpu_percent()
timestamp = time()
elif item.systemstatvariable.information == 1:
# virtual_memory_total
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().total
timestamp = time()
elif item.systemstatvariable.information == 2:
# virtual_memory_available
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().available
timestamp = time()
elif item.systemstatvariable.information == 3:
# virtual_memory_percent
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().percent
timestamp = time()
elif item.systemstatvariable.information == 4:
# virtual_memory_used
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().used
timestamp = time()
elif item.systemstatvariable.information == 5:
# virtual_memory_free
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().free
timestamp = time()
elif item.systemstatvariable.information == 6:
# virtual_memory_active
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().active
timestamp = time()
elif item.systemstatvariable.information == 7:
# virtual_memory_inactive
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().inactive
timestamp = time()
elif item.systemstatvariable.information == 8:
# virtual_memory_buffers
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().buffers
timestamp = time()
elif item.systemstatvariable.information == 9:
# virtual_memory_cached
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().cached
timestamp = time()
elif item.systemstatvariable.information == 10:
# swap_memory_total
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().total
timestamp = time()
elif item.systemstatvariable.information == 11:
# swap_memory_used
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().used
timestamp = time()
elif item.systemstatvariable.information == 12:
# swap_memory_free
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().free
timestamp = time()
elif item.systemstatvariable.information == 13:
# swap_memory_percent
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().percent
timestamp = time()
elif item.systemstatvariable.information == 14:
# swap_memory_sin
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sin
timestamp = time()
elif item.systemstatvariable.information == 15:
# swap_memory_sout
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sout
timestamp = time()
elif item.systemstatvariable.information == 17:
# disk_usage_systemdisk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage('/').percent
timestamp = time()
elif item.systemstatvariable.information == 18:
# disk_usage_disk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage(item.systemstatvariable.parameter).percent
timestamp = time()
elif 100 <= item.systemstatvariable.information <= 105:
# APCUPSD Status
apcupsd_status = None
if not apcupsd_status_is_queried:
apcupsd_status = query_apsupsd_status()
apcupsd_status_is_queried = True
if apcupsd_status is not None:
if item.systemstatvariable.information == 100:
if 'STATUS' in apcupsd_status:
value = apcupsd_status['STATUS']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 101:
if 'LINEV' in apcupsd_status:
value = apcupsd_status['LINEV']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 102:
if 'BATTV' in apcupsd_status:
value = apcupsd_status['BATTV']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 103:
if 'BCHARGE' in apcupsd_status:
value = apcupsd_status['BCHARGE']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 104:
if 'TIMELEFT' in apcupsd_status:
value = apcupsd_status['TIMELEFT']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 105:
if 'LOADPCT' in apcupsd_status:
value = apcupsd_status['LOADPCT']
timestamp = apcupsd_status['timestamp']
else:
value = None
# update variable
if value is not None and item.update_value(value, timestamp):
output.append(item.create_recorded_data_element())
return output |
def set_option(self, name, value):
"""
Sets an option from an SConscript file.
"""
if not name in self.settable:
raise SCons.Errors.UserError("This option is not settable from a SConscript file: %s"%name)
if name == 'num_jobs':
try:
value = int(value)
if value < 1:
raise ValueError
except ValueError:
raise SCons.Errors.UserError("A positive integer is required: %s"%repr(value))
elif name == 'max_drift':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'duplicate':
try:
value = str(value)
except ValueError:
raise SCons.Errors.UserError("A string is required: %s"%repr(value))
if not value in SCons.Node.FS.Valid_Duplicates:
raise SCons.Errors.UserError("Not a valid duplication style: %s" % value)
# Set the duplicate style right away so it can affect linking
# of SConscript files.
SCons.Node.FS.set_duplicate(value)
elif name == 'diskcheck':
try:
value = diskcheck_convert(value)
except ValueError as v:
raise SCons.Errors.UserError("Not a valid diskcheck value: %s"%v)
if 'diskcheck' not in self.__dict__:
# No --diskcheck= option was specified on the command line.
# Set this right away so it can affect the rest of the
# file/Node lookups while processing the SConscript files.
SCons.Node.FS.set_diskcheck(value)
elif name == 'stack_size':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'md5_chunksize':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'warn':
if SCons.Util.is_String(value):
value = [value]
value = self.__SConscript_settings__.get(name, []) + value
SCons.Warnings.process_warn_strings(value)
self.__SConscript_settings__[name] = value | Sets an option from an SConscript file. | Below is the the instruction that describes the task:
### Input:
Sets an option from an SConscript file.
### Response:
def set_option(self, name, value):
"""
Sets an option from an SConscript file.
"""
if not name in self.settable:
raise SCons.Errors.UserError("This option is not settable from a SConscript file: %s"%name)
if name == 'num_jobs':
try:
value = int(value)
if value < 1:
raise ValueError
except ValueError:
raise SCons.Errors.UserError("A positive integer is required: %s"%repr(value))
elif name == 'max_drift':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'duplicate':
try:
value = str(value)
except ValueError:
raise SCons.Errors.UserError("A string is required: %s"%repr(value))
if not value in SCons.Node.FS.Valid_Duplicates:
raise SCons.Errors.UserError("Not a valid duplication style: %s" % value)
# Set the duplicate style right away so it can affect linking
# of SConscript files.
SCons.Node.FS.set_duplicate(value)
elif name == 'diskcheck':
try:
value = diskcheck_convert(value)
except ValueError as v:
raise SCons.Errors.UserError("Not a valid diskcheck value: %s"%v)
if 'diskcheck' not in self.__dict__:
# No --diskcheck= option was specified on the command line.
# Set this right away so it can affect the rest of the
# file/Node lookups while processing the SConscript files.
SCons.Node.FS.set_diskcheck(value)
elif name == 'stack_size':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'md5_chunksize':
try:
value = int(value)
except ValueError:
raise SCons.Errors.UserError("An integer is required: %s"%repr(value))
elif name == 'warn':
if SCons.Util.is_String(value):
value = [value]
value = self.__SConscript_settings__.get(name, []) + value
SCons.Warnings.process_warn_strings(value)
self.__SConscript_settings__[name] = value |
def run_parallel_with_display(wf, n_threads, display):
"""Adds a display to the parallel runner. Because messages come in
asynchronously now, we start an extra thread just for the display
routine."""
LogQ = Queue()
S = Scheduler(error_handler=display.error_handler)
threading.Thread(
target=patch,
args=(LogQ.source, sink_map(display)),
daemon=True).start()
W = Queue() \
>> branch(log_job_start >> LogQ.sink) \
>> thread_pool(*repeat(worker, n_threads)) \
>> branch(LogQ.sink)
result = S.run(W, get_workflow(wf))
LogQ.wait()
return result | Adds a display to the parallel runner. Because messages come in
asynchronously now, we start an extra thread just for the display
routine. | Below is the the instruction that describes the task:
### Input:
Adds a display to the parallel runner. Because messages come in
asynchronously now, we start an extra thread just for the display
routine.
### Response:
def run_parallel_with_display(wf, n_threads, display):
"""Adds a display to the parallel runner. Because messages come in
asynchronously now, we start an extra thread just for the display
routine."""
LogQ = Queue()
S = Scheduler(error_handler=display.error_handler)
threading.Thread(
target=patch,
args=(LogQ.source, sink_map(display)),
daemon=True).start()
W = Queue() \
>> branch(log_job_start >> LogQ.sink) \
>> thread_pool(*repeat(worker, n_threads)) \
>> branch(LogQ.sink)
result = S.run(W, get_workflow(wf))
LogQ.wait()
return result |
def next_frame_basic_stochastic_discrete():
"""Basic 2-frame conv model with stochastic discrete latent."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.batch_size = 4
hparams.video_num_target_frames = 6
hparams.scheduled_sampling_mode = "prob_inverse_lin"
hparams.scheduled_sampling_decay_steps = 40000
hparams.scheduled_sampling_max_prob = 1.0
hparams.dropout = 0.15
hparams.filter_double_steps = 3
hparams.hidden_size = 96
hparams.learning_rate_constant = 0.002
hparams.learning_rate_warmup_steps = 2000
hparams.learning_rate_schedule = "linear_warmup * constant"
hparams.concat_internal_states = True
hparams.video_modality_loss_cutoff = 0.03
hparams.add_hparam("bottleneck_bits", 128)
hparams.add_hparam("bottleneck_noise", 0.1)
hparams.add_hparam("discretize_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_max_sampling", 0.5)
hparams.add_hparam("latent_use_max_probability", 0.8)
hparams.add_hparam("full_latent_tower", False)
hparams.add_hparam("latent_predictor_state_size", 128)
hparams.add_hparam("latent_predictor_temperature", 1.0)
hparams.add_hparam("complex_addn", True)
hparams.add_hparam("recurrent_state_size", 64)
return hparams | Basic 2-frame conv model with stochastic discrete latent. | Below is the the instruction that describes the task:
### Input:
Basic 2-frame conv model with stochastic discrete latent.
### Response:
def next_frame_basic_stochastic_discrete():
"""Basic 2-frame conv model with stochastic discrete latent."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.batch_size = 4
hparams.video_num_target_frames = 6
hparams.scheduled_sampling_mode = "prob_inverse_lin"
hparams.scheduled_sampling_decay_steps = 40000
hparams.scheduled_sampling_max_prob = 1.0
hparams.dropout = 0.15
hparams.filter_double_steps = 3
hparams.hidden_size = 96
hparams.learning_rate_constant = 0.002
hparams.learning_rate_warmup_steps = 2000
hparams.learning_rate_schedule = "linear_warmup * constant"
hparams.concat_internal_states = True
hparams.video_modality_loss_cutoff = 0.03
hparams.add_hparam("bottleneck_bits", 128)
hparams.add_hparam("bottleneck_noise", 0.1)
hparams.add_hparam("discretize_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_max_sampling", 0.5)
hparams.add_hparam("latent_use_max_probability", 0.8)
hparams.add_hparam("full_latent_tower", False)
hparams.add_hparam("latent_predictor_state_size", 128)
hparams.add_hparam("latent_predictor_temperature", 1.0)
hparams.add_hparam("complex_addn", True)
hparams.add_hparam("recurrent_state_size", 64)
return hparams |
def from_json(cls, jsonmsg):
""" Create an object directly from a JSON string.
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` was generated from
"""
import json
msg = json.loads(jsonmsg)
obj = cls(**msg)
obj.validate()
return obj | Create an object directly from a JSON string.
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` was generated from | Below is the the instruction that describes the task:
### Input:
Create an object directly from a JSON string.
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` was generated from
### Response:
def from_json(cls, jsonmsg):
""" Create an object directly from a JSON string.
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` was generated from
"""
import json
msg = json.loads(jsonmsg)
obj = cls(**msg)
obj.validate()
return obj |
def write_title(self, title, level=1, id=None):
"""Writes a title header in the document body,
with an optional depth level
"""
if id:
self.write('<h{lv} id="{id}">{title}</h{lv}>',
title=title, lv=level, id=id)
else:
self.write('<h{lv}>{title}</h{lv}>',
title=title, lv=level) | Writes a title header in the document body,
with an optional depth level | Below is the the instruction that describes the task:
### Input:
Writes a title header in the document body,
with an optional depth level
### Response:
def write_title(self, title, level=1, id=None):
"""Writes a title header in the document body,
with an optional depth level
"""
if id:
self.write('<h{lv} id="{id}">{title}</h{lv}>',
title=title, lv=level, id=id)
else:
self.write('<h{lv}>{title}</h{lv}>',
title=title, lv=level) |
def bool(self, var, default=NOTSET):
"""
:rtype: bool
"""
return self.get_value(var, cast=bool, default=default) | :rtype: bool | Below is the the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def bool(self, var, default=NOTSET):
"""
:rtype: bool
"""
return self.get_value(var, cast=bool, default=default) |
def get_parameter_value_from_file_names(files, parameters=None, unique=False, sort=True):
"""
Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name
in the first dimension and the corresponding parameter value in the second.
The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and
mapped to the file name that occurred last in the files list.
Parameters
----------
files : list of strings
parameter : string or list of strings
unique : bool
sort : bool
Returns
-------
collections.OrderedDict
"""
# unique=False
logging.debug('Get the parameter: ' + str(parameters) + ' values from the file names of ' + str(len(files)) + ' files')
files_dict = collections.OrderedDict()
if parameters is None: # special case, no parameter defined
return files_dict
if isinstance(parameters, basestring):
parameters = (parameters, )
search_string = '_'.join(parameters)
for _ in parameters:
search_string += r'_(-?\d+)'
result = {}
for one_file in files:
parameter_values = re.findall(search_string, one_file)
if parameter_values:
if isinstance(parameter_values[0], tuple):
parameter_values = list(reduce(lambda t1, t2: t1 + t2, parameter_values))
parameter_values = [[int(i), ] for i in parameter_values] # convert string value to list with int
files_dict[one_file] = dict(zip(parameters, parameter_values))
if unique: # reduce to the files with different scan parameters
for key, value in files_dict.items():
if value not in result.values():
result[key] = value
else:
result[one_file] = files_dict[one_file]
return collections.OrderedDict(sorted(result.iteritems(), key=itemgetter(1)) if sort else files_dict) | Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name
in the first dimension and the corresponding parameter value in the second.
The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and
mapped to the file name that occurred last in the files list.
Parameters
----------
files : list of strings
parameter : string or list of strings
unique : bool
sort : bool
Returns
-------
collections.OrderedDict | Below is the the instruction that describes the task:
### Input:
Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name
in the first dimension and the corresponding parameter value in the second.
The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and
mapped to the file name that occurred last in the files list.
Parameters
----------
files : list of strings
parameter : string or list of strings
unique : bool
sort : bool
Returns
-------
collections.OrderedDict
### Response:
def get_parameter_value_from_file_names(files, parameters=None, unique=False, sort=True):
"""
Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name
in the first dimension and the corresponding parameter value in the second.
The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and
mapped to the file name that occurred last in the files list.
Parameters
----------
files : list of strings
parameter : string or list of strings
unique : bool
sort : bool
Returns
-------
collections.OrderedDict
"""
# unique=False
logging.debug('Get the parameter: ' + str(parameters) + ' values from the file names of ' + str(len(files)) + ' files')
files_dict = collections.OrderedDict()
if parameters is None: # special case, no parameter defined
return files_dict
if isinstance(parameters, basestring):
parameters = (parameters, )
search_string = '_'.join(parameters)
for _ in parameters:
search_string += r'_(-?\d+)'
result = {}
for one_file in files:
parameter_values = re.findall(search_string, one_file)
if parameter_values:
if isinstance(parameter_values[0], tuple):
parameter_values = list(reduce(lambda t1, t2: t1 + t2, parameter_values))
parameter_values = [[int(i), ] for i in parameter_values] # convert string value to list with int
files_dict[one_file] = dict(zip(parameters, parameter_values))
if unique: # reduce to the files with different scan parameters
for key, value in files_dict.items():
if value not in result.values():
result[key] = value
else:
result[one_file] = files_dict[one_file]
return collections.OrderedDict(sorted(result.iteritems(), key=itemgetter(1)) if sort else files_dict) |
def get_dataset_meta(label):
"""Gives you metadata for dataset chosen via 'label' param
:param label: label = key in data_url dict (that big dict containing all possible datasets)
:return: tuple (data_url, url, expected_hash, hash_path, relative_download_dir)
relative_download_dir says where will be downloaded the file from url and eventually unzipped
"""
data_url = data_urls[label]
if type(data_url) == str:
# back compatibility
data_url = [data_url]
if type(data_url) == list:
data_url.extend([None, None, None, None])
data_url = data_url[:4]
url, expected_hash, hash_path, relative_donwload_dir = data_url
if hash_path is None:
hash_path = label
# elif type(data_url) == dict:
return data_url, url, expected_hash, hash_path, relative_donwload_dir | Gives you metadata for dataset chosen via 'label' param
:param label: label = key in data_url dict (that big dict containing all possible datasets)
:return: tuple (data_url, url, expected_hash, hash_path, relative_download_dir)
relative_download_dir says where will be downloaded the file from url and eventually unzipped | Below is the the instruction that describes the task:
### Input:
Gives you metadata for dataset chosen via 'label' param
:param label: label = key in data_url dict (that big dict containing all possible datasets)
:return: tuple (data_url, url, expected_hash, hash_path, relative_download_dir)
relative_download_dir says where will be downloaded the file from url and eventually unzipped
### Response:
def get_dataset_meta(label):
"""Gives you metadata for dataset chosen via 'label' param
:param label: label = key in data_url dict (that big dict containing all possible datasets)
:return: tuple (data_url, url, expected_hash, hash_path, relative_download_dir)
relative_download_dir says where will be downloaded the file from url and eventually unzipped
"""
data_url = data_urls[label]
if type(data_url) == str:
# back compatibility
data_url = [data_url]
if type(data_url) == list:
data_url.extend([None, None, None, None])
data_url = data_url[:4]
url, expected_hash, hash_path, relative_donwload_dir = data_url
if hash_path is None:
hash_path = label
# elif type(data_url) == dict:
return data_url, url, expected_hash, hash_path, relative_donwload_dir |
def escape(msg):
"""Takes a raw IRC message and returns a girc-escaped message."""
msg = msg.replace(escape_character, 'girc-escaped-character')
for escape_key, irc_char in format_dict.items():
msg = msg.replace(irc_char, escape_character + escape_key)
# convert colour codes
new_msg = ''
while len(msg):
if msg.startswith(escape_character + 'c'):
new_msg += msg[:2]
msg = msg[2:]
if not len(msg):
new_msg += '[]'
continue
colours, msg = extract_irc_colours(msg)
new_msg += colours
else:
new_msg += msg[0]
msg = msg[1:]
new_msg = new_msg.replace('girc-escaped-character', escape_character + escape_character)
return new_msg | Takes a raw IRC message and returns a girc-escaped message. | Below is the the instruction that describes the task:
### Input:
Takes a raw IRC message and returns a girc-escaped message.
### Response:
def escape(msg):
"""Takes a raw IRC message and returns a girc-escaped message."""
msg = msg.replace(escape_character, 'girc-escaped-character')
for escape_key, irc_char in format_dict.items():
msg = msg.replace(irc_char, escape_character + escape_key)
# convert colour codes
new_msg = ''
while len(msg):
if msg.startswith(escape_character + 'c'):
new_msg += msg[:2]
msg = msg[2:]
if not len(msg):
new_msg += '[]'
continue
colours, msg = extract_irc_colours(msg)
new_msg += colours
else:
new_msg += msg[0]
msg = msg[1:]
new_msg = new_msg.replace('girc-escaped-character', escape_character + escape_character)
return new_msg |
def clone(cls, cluster_id_label, cluster_info):
"""
Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
"""
conn = Qubole.agent(version="v2")
return conn.post(cls.element_path(cluster_id_label) + '/clone', data=cluster_info) | Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`. | Below is the the instruction that describes the task:
### Input:
Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
### Response:
def clone(cls, cluster_id_label, cluster_info):
"""
Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
"""
conn = Qubole.agent(version="v2")
return conn.post(cls.element_path(cluster_id_label) + '/clone', data=cluster_info) |
def run_with_plugins(plugin_list):
"""
Carry out a test run with the supplied list of plugin instances.
The plugins are expected to identify the object to run.
Parameters:
plugin_list: a list of plugin instances (objects which implement some subset of PluginInterface)
Returns: exit code as an integer.
The default behaviour (which may be overridden by plugins) is to return a 0
exit code if the test run succeeded, and 1 if it failed.
"""
composite = core.PluginComposite(plugin_list)
to_run = composite.get_object_to_run()
test_run = core.TestRun(to_run, composite)
test_run.run()
return composite.get_exit_code() | Carry out a test run with the supplied list of plugin instances.
The plugins are expected to identify the object to run.
Parameters:
plugin_list: a list of plugin instances (objects which implement some subset of PluginInterface)
Returns: exit code as an integer.
The default behaviour (which may be overridden by plugins) is to return a 0
exit code if the test run succeeded, and 1 if it failed. | Below is the instruction that describes the task:
### Input:
Carry out a test run with the supplied list of plugin instances.
The plugins are expected to identify the object to run.
Parameters:
plugin_list: a list of plugin instances (objects which implement some subset of PluginInterface)
Returns: exit code as an integer.
The default behaviour (which may be overridden by plugins) is to return a 0
exit code if the test run succeeded, and 1 if it failed.
### Response:
def run_with_plugins(plugin_list):
    """
    Carry out a test run with the supplied list of plugin instances.
    The plugins are expected to identify the object to run.
    Parameters:
    plugin_list: a list of plugin instances (objects which implement some subset of PluginInterface)
    Returns: exit code as an integer.
    The default behaviour (which may be overridden by plugins) is to return a 0
    exit code if the test run succeeded, and 1 if it failed.
    """
    # treat all plugins as one composite plugin
    composite = core.PluginComposite(plugin_list)
    # the plugins decide which object the run targets
    to_run = composite.get_object_to_run()
    test_run = core.TestRun(to_run, composite)
    test_run.run()
    # the exit code is determined by the plugins after the run completes
    return composite.get_exit_code() |
def event_return(events):
'''
Return event data via SMTP
'''
for event in events:
ret = event.get('data', False)
if ret:
            returner(ret) | Return event data via SMTP | Below is the instruction that describes the task:
### Input:
Return event data via SMTP
### Response:
def event_return(events):
    '''
    Return event data via SMTP
    '''
    for event in events:
        # only forward events that actually carry a data payload
        ret = event.get('data', False)
        if ret:
            returner(ret) |
def prepare_logged(x, y):
"""
Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays
"""
xi = np.log2(x)
yi = np.log2(y)
xv = np.isfinite(xi)
yv = np.isfinite(yi)
global_min = min(xi[xv].min(), yi[yv].min())
global_max = max(xi[xv].max(), yi[yv].max())
xi[~xv] = global_min
yi[~yv] = global_min
return xi, yi | Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays | Below is the instruction that describes the task:
### Input:
Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays
### Response:
def prepare_logged(x, y):
    """
    Transform `x` and `y` to a log scale while dealing with zeros.
    This function scales `x` and `y` such that the points that are zero in one
    array are set to the min of the other array.
    When plotting expression data, frequently one sample will have reads in
    a particular feature but the other sample will not. Expression data also
    tends to look better on a log scale, but log(0) is undefined and therefore
    cannot be shown on a plot. This function allows these points to be shown,
    piled up along one side of the plot.
    :param x,y: NumPy arrays
    """
    xi = np.log2(x)
    yi = np.log2(y)
    # finite masks: log2(0) gives -inf, log2(negative) gives nan
    xv = np.isfinite(xi)
    yv = np.isfinite(yi)
    global_min = min(xi[xv].min(), yi[yv].min())
    # NOTE(review): global_max is computed but never used below
    global_max = max(xi[xv].max(), yi[yv].max())
    # pile non-finite points up at the shared minimum so they stay visible
    xi[~xv] = global_min
    yi[~yv] = global_min
    return xi, yi |
def _ExtractInterfaceMetadata(self, metadata):
"""Extracts network interface metadata.
Args:
metadata: dict, the metadata response with the new network interfaces.
Returns:
list, a list of NetworkInterface objects.
"""
interfaces = []
for network_interface in metadata:
mac_address = network_interface.get('mac')
interface = self.network_utils.GetNetworkInterface(mac_address)
ip_addresses = []
if interface:
ip_addresses.extend(network_interface.get('forwardedIps', []))
if self.ip_aliases:
ip_addresses.extend(network_interface.get('ipAliases', []))
if self.target_instance_ips:
ip_addresses.extend(network_interface.get('targetInstanceIps', []))
interfaces.append(NetworkDaemon.NetworkInterface(
interface, ip_addresses, network_interface.get('ip', [])))
else:
message = 'Network interface not found for MAC address: %s.'
self.logger.warning(message, mac_address)
return interfaces | Extracts network interface metadata.
Args:
metadata: dict, the metadata response with the new network interfaces.
Returns:
list, a list of NetworkInterface objects. | Below is the instruction that describes the task:
### Input:
Extracts network interface metadata.
Args:
metadata: dict, the metadata response with the new network interfaces.
Returns:
list, a list of NetworkInterface objects.
### Response:
def _ExtractInterfaceMetadata(self, metadata):
    """Extracts network interface metadata.
    Args:
      metadata: dict, the metadata response with the new network interfaces.
    Returns:
      list, a list of NetworkInterface objects.
    """
    interfaces = []
    for network_interface in metadata:
        # interfaces are matched to local devices by MAC address
        mac_address = network_interface.get('mac')
        interface = self.network_utils.GetNetworkInterface(mac_address)
        ip_addresses = []
        if interface:
            ip_addresses.extend(network_interface.get('forwardedIps', []))
            # alias and target-instance IPs are only collected when enabled
            if self.ip_aliases:
                ip_addresses.extend(network_interface.get('ipAliases', []))
            if self.target_instance_ips:
                ip_addresses.extend(network_interface.get('targetInstanceIps', []))
            interfaces.append(NetworkDaemon.NetworkInterface(
                interface, ip_addresses, network_interface.get('ip', [])))
        else:
            # no local device for this MAC: log and skip the entry
            message = 'Network interface not found for MAC address: %s.'
            self.logger.warning(message, mac_address)
    return interfaces |
def network_stats(self):
"""
Used by Flask to show informations on the network
"""
statistics = {}
mstp_networks = []
mstp_map = {}
ip_devices = []
bacoids = []
mstp_devices = []
for address, bacoid in self.whois_answer[0].keys():
if ":" in address:
net, mac = address.split(":")
mstp_networks.append(net)
mstp_devices.append(mac)
try:
mstp_map[net].append(mac)
except KeyError:
mstp_map[net] = []
mstp_map[net].append(mac)
else:
net = "ip"
mac = address
ip_devices.append(address)
bacoids.append((bacoid, address))
mstpnetworks = sorted(set(mstp_networks))
statistics["mstp_networks"] = mstpnetworks
statistics["ip_devices"] = sorted(ip_devices)
statistics["bacoids"] = sorted(bacoids)
statistics["mstp_map"] = mstp_map
statistics["timestamp"] = str(datetime.now())
statistics["number_of_devices"] = self.number_of_devices
statistics["number_of_registered_devices"] = len(self.registered_devices)
statistics["print_mstpnetworks"] = self.print_list(mstpnetworks)
        return statistics | Used by Flask to show information on the network | Below is the instruction that describes the task:
### Input:
Used by Flask to show informations on the network
### Response:
def network_stats(self):
    """
    Used by Flask to show information on the network
    """
    statistics = {}
    mstp_networks = []
    mstp_map = {}
    ip_devices = []
    bacoids = []
    mstp_devices = []
    for address, bacoid in self.whois_answer[0].keys():
        # MS/TP addresses look like "<network>:<mac>"; plain IPs have no colon
        if ":" in address:
            net, mac = address.split(":")
            mstp_networks.append(net)
            mstp_devices.append(mac)
            try:
                mstp_map[net].append(mac)
            except KeyError:
                # first device seen on this MS/TP network
                mstp_map[net] = []
                mstp_map[net].append(mac)
        else:
            net = "ip"
            mac = address
            ip_devices.append(address)
            bacoids.append((bacoid, address))
    mstpnetworks = sorted(set(mstp_networks))
    statistics["mstp_networks"] = mstpnetworks
    statistics["ip_devices"] = sorted(ip_devices)
    statistics["bacoids"] = sorted(bacoids)
    statistics["mstp_map"] = mstp_map
    statistics["timestamp"] = str(datetime.now())
    statistics["number_of_devices"] = self.number_of_devices
    statistics["number_of_registered_devices"] = len(self.registered_devices)
    statistics["print_mstpnetworks"] = self.print_list(mstpnetworks)
    return statistics |
def encode_request(name, value_list):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, value_list))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_int(len(value_list))
for value_list_item in value_list:
client_message.append_data(value_list_item)
client_message.update_frame_length()
    return client_message | Encode request into client_message | Below is the instruction that describes the task:
### Input:
Encode request into client_message
### Response:
def encode_request(name, value_list):
    """ Encode request into client_message"""
    client_message = ClientMessage(payload_size=calculate_size(name, value_list))
    client_message.set_message_type(REQUEST_TYPE)
    client_message.set_retryable(RETRYABLE)
    client_message.append_str(name)
    # length-prefixed list of data items
    client_message.append_int(len(value_list))
    for value_list_item in value_list:
        client_message.append_data(value_list_item)
    # recompute the frame length now that the payload is complete
    client_message.update_frame_length()
    return client_message |
def get_packages(self, offset=None, limit=None, api=None):
"""
Return list of packages that belong to this automation
:param offset: Pagination offset.
:param limit: Pagination limit.
:param api: sevenbridges Api instance.
:return: AutomationPackage collection
"""
api = api or self._API
return AutomationPackage.query(
automation=self.id, offset=offset, limit=limit, api=api
) | Return list of packages that belong to this automation
:param offset: Pagination offset.
:param limit: Pagination limit.
:param api: sevenbridges Api instance.
:return: AutomationPackage collection | Below is the instruction that describes the task:
### Input:
Return list of packages that belong to this automation
:param offset: Pagination offset.
:param limit: Pagination limit.
:param api: sevenbridges Api instance.
:return: AutomationPackage collection
### Response:
def get_packages(self, offset=None, limit=None, api=None):
    """
    Return list of packages that belong to this automation
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: sevenbridges Api instance.
    :return: AutomationPackage collection
    """
    # fall back to the class-level API session when none is supplied
    api = api or self._API
    return AutomationPackage.query(
        automation=self.id, offset=offset, limit=limit, api=api
    ) |
def to_timestamp(self, data):
"""Transform a datetime series into linux epoch.
Args:
data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`.
Returns:
pandas.Series
"""
result = pd.Series(index=data.index)
_slice = ~data[self.col_name].isnull()
result[_slice] = data[_slice][self.col_name].astype('int64')
return result | Transform a datetime series into linux epoch.
Args:
data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`.
Returns:
pandas.Series | Below is the instruction that describes the task:
### Input:
Transform a datetime series into linux epoch.
Args:
data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`.
Returns:
pandas.Series
### Response:
def to_timestamp(self, data):
    """Transform a datetime series into linux epoch.
    Args:
        data(pandas.DataFrame): DataFrame containing a column named as `self.col_name`.
    Returns:
        pandas.Series
    """
    result = pd.Series(index=data.index)
    # only convert non-null entries; nulls stay NaN in the result
    _slice = ~data[self.col_name].isnull()
    # NOTE(review): for datetime64[ns] columns astype('int64') yields
    # nanoseconds since the epoch, not seconds -- confirm intended unit
    result[_slice] = data[_slice][self.col_name].astype('int64')
    return result |
def transform(self, X):
"""Transform your data to zero mean unit variance."""
if not self.is_fit:
raise ValueError("The scaler has not been fit yet.")
        return (X-self.mean) / (self.std + 10e-7) | Transform your data to zero mean unit variance. | Below is the instruction that describes the task:
### Input:
Transform your data to zero mean unit variance.
### Response:
def transform(self, X):
    """Transform your data to zero mean unit variance."""
    if not self.is_fit:
        raise ValueError("The scaler has not been fit yet.")
    # epsilon guards against division by zero; note 10e-7 equals 1e-6
    return (X-self.mean) / (self.std + 10e-7) |
def deploy_deb(self,
file_name,
distribution,
component,
architecture,
parameters={}):
"""
Convenience method to deploy .deb packages
Keyword arguments:
file_name -- full path to local file that will be deployed
distribution -- debian distribution (e.g. 'wheezy')
component -- repository component (e.g. 'main')
architecture -- package architecture (e.g. 'i386')
parameters -- attach any additional metadata
"""
params = {
'deb.distribution': distribution,
'deb.component': component,
'deb.architecture': architecture
}
params.update(parameters)
self.deploy_file(file_name, parameters=params) | Convenience method to deploy .deb packages
Keyword arguments:
file_name -- full path to local file that will be deployed
distribution -- debian distribution (e.g. 'wheezy')
component -- repository component (e.g. 'main')
architecture -- package architecture (e.g. 'i386')
parameters -- attach any additional metadata | Below is the instruction that describes the task:
### Input:
Convenience method to deploy .deb packages
Keyword arguments:
file_name -- full path to local file that will be deployed
distribution -- debian distribution (e.g. 'wheezy')
component -- repository component (e.g. 'main')
architecture -- package architecture (e.g. 'i386')
parameters -- attach any additional metadata
### Response:
def deploy_deb(self,
               file_name,
               distribution,
               component,
               architecture,
               parameters={}):
    """
    Convenience method to deploy .deb packages
    Keyword arguments:
    file_name -- full path to local file that will be deployed
    distribution -- debian distribution (e.g. 'wheezy')
    component -- repository component (e.g. 'main')
    architecture -- package architecture (e.g. 'i386')
    parameters -- attach any additional metadata
    """
    # NOTE(review): mutable default argument; harmless here because it is
    # only read, but parameters=None would be the conventional form
    params = {
        'deb.distribution': distribution,
        'deb.component': component,
        'deb.architecture': architecture
    }
    # caller-supplied metadata can override the Debian coordinates above
    params.update(parameters)
    self.deploy_file(file_name, parameters=params) |
def sed_contains(path,
text,
limit='',
flags='g'):
'''
.. deprecated:: 0.17.0
Use :func:`search` instead.
Return True if the file at ``path`` contains ``text``. Utilizes sed to
perform the search (line-wise search).
Note: the ``p`` flag will be added to any flags you pass in.
CLI Example:
.. code-block:: bash
salt '*' file.contains /etc/crontab 'mymaintenance.sh'
'''
# Largely inspired by Fabric's contrib.files.contains()
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
before = _sed_esc(six.text_type(text), False)
limit = _sed_esc(six.text_type(limit), False)
options = '-n -r -e'
if sys.platform == 'darwin':
options = options.replace('-r', '-E')
cmd = ['sed']
cmd.extend(salt.utils.args.shlex_split(options))
cmd.append(
r'{limit}s/{before}/$/{flags}'.format(
limit='/{0}/ '.format(limit) if limit else '',
before=before,
flags='p{0}'.format(flags)
)
)
cmd.append(path)
result = __salt__['cmd.run'](cmd, python_shell=False)
return bool(result) | .. deprecated:: 0.17.0
Use :func:`search` instead.
Return True if the file at ``path`` contains ``text``. Utilizes sed to
perform the search (line-wise search).
Note: the ``p`` flag will be added to any flags you pass in.
CLI Example:
.. code-block:: bash
salt '*' file.contains /etc/crontab 'mymaintenance.sh' | Below is the instruction that describes the task:
### Input:
.. deprecated:: 0.17.0
Use :func:`search` instead.
Return True if the file at ``path`` contains ``text``. Utilizes sed to
perform the search (line-wise search).
Note: the ``p`` flag will be added to any flags you pass in.
CLI Example:
.. code-block:: bash
salt '*' file.contains /etc/crontab 'mymaintenance.sh'
### Response:
def sed_contains(path,
                 text,
                 limit='',
                 flags='g'):
    '''
    .. deprecated:: 0.17.0
    Use :func:`search` instead.
    Return True if the file at ``path`` contains ``text``. Utilizes sed to
    perform the search (line-wise search).
    Note: the ``p`` flag will be added to any flags you pass in.
    CLI Example:
    .. code-block:: bash
    salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    '''
    # Largely inspired by Fabric's contrib.files.contains()
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    # escape both pattern and limit for safe use inside a sed expression
    before = _sed_esc(six.text_type(text), False)
    limit = _sed_esc(six.text_type(limit), False)
    options = '-n -r -e'
    # BSD sed on macOS uses -E instead of GNU sed's -r for extended regex
    if sys.platform == 'darwin':
        options = options.replace('-r', '-E')
    cmd = ['sed']
    cmd.extend(salt.utils.args.shlex_split(options))
    cmd.append(
        r'{limit}s/{before}/$/{flags}'.format(
            limit='/{0}/ '.format(limit) if limit else '',
            before=before,
            flags='p{0}'.format(flags)
        )
    )
    cmd.append(path)
    # any printed output means the pattern matched at least one line
    result = __salt__['cmd.run'](cmd, python_shell=False)
    return bool(result) |
def number_of_contacts(records, direction=None, more=0):
"""
The number of contacts the user interacted with.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
more : int, default is 0
Counts only contacts with more than this number of interactions.
"""
if direction is None:
counter = Counter(r.correspondent_id for r in records)
else:
counter = Counter(r.correspondent_id for r in records if r.direction == direction)
return sum(1 for d in counter.values() if d > more) | The number of contacts the user interacted with.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
more : int, default is 0
Counts only contacts with more than this number of interactions. | Below is the instruction that describes the task:
### Input:
The number of contacts the user interacted with.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
more : int, default is 0
Counts only contacts with more than this number of interactions.
### Response:
def number_of_contacts(records, direction=None, more=0):
    """
    The number of contacts the user interacted with.
    Parameters
    ----------
    direction : str, optional
    Filters the records by their direction: ``None`` for all records,
    ``'in'`` for incoming, and ``'out'`` for outgoing.
    more : int, default is 0
    Counts only contacts with more than this number of interactions.
    """
    # count interactions per correspondent, optionally filtered by direction
    if direction is None:
        counter = Counter(r.correspondent_id for r in records)
    else:
        counter = Counter(r.correspondent_id for r in records if r.direction == direction)
    # keep only correspondents with strictly more than `more` interactions
    return sum(1 for d in counter.values() if d > more) |
def strings_in_dictionary(dictionary):
""" Used by default implementation for finding excerpt """
strings = [value for value in six.itervalues(dictionary) if not isinstance(value, dict)]
for child_dict in [dv for dv in six.itervalues(dictionary) if isinstance(dv, dict)]:
strings.extend(SearchResultProcessor.strings_in_dictionary(child_dict))
    return strings | Used by default implementation for finding excerpt | Below is the instruction that describes the task:
### Input:
Used by default implementation for finding excerpt
### Response:
def strings_in_dictionary(dictionary):
    """ Used by default implementation for finding excerpt """
    # collect the non-dict values at this level...
    strings = [value for value in six.itervalues(dictionary) if not isinstance(value, dict)]
    # ...then recurse into any nested dictionaries
    for child_dict in [dv for dv in six.itervalues(dictionary) if isinstance(dv, dict)]:
        strings.extend(SearchResultProcessor.strings_in_dictionary(child_dict))
    return strings |
def validate_additional_properties(self, valid_response, response):
"""Validates additional properties. In additional properties, we only
need to compare the values of the dict, not the keys
Args:
valid_response: An example response (for example generated in
_get_example_from_properties(self, spec))
Type is DICT
response: The actual dict coming from the response
Type is DICT
Returns:
A boolean - whether the actual response validates against the given example
"""
assert isinstance(valid_response, dict)
assert isinstance(response, dict)
# the type of the value of the first key/value in valid_response is our
# expected type - if it is a dict or list, we must go deeper
first_value = valid_response[list(valid_response)[0]]
# dict
if isinstance(first_value, dict):
# try to find a definition for that first value
definition = None
definition_name = self.get_dict_definition(first_value)
if definition_name is None:
definition = self._definition_from_example(first_value)
definition_name = 'self generated'
for item in response.values():
if not self.validate_definition(definition_name,
item,
definition=definition):
return False
return True
# TODO: list
if isinstance(first_value, list):
raise Exception("Not implemented yet")
# simple types
# all values must be of that type in both valid and actual response
try:
assert all(isinstance(y, type(first_value)) for _, y in response.items())
assert all(isinstance(y, type(first_value)) for _, y in valid_response.items())
return True
except Exception:
return False | Validates additional properties. In additional properties, we only
need to compare the values of the dict, not the keys
Args:
valid_response: An example response (for example generated in
_get_example_from_properties(self, spec))
Type is DICT
response: The actual dict coming from the response
Type is DICT
Returns:
A boolean - whether the actual response validates against the given example | Below is the instruction that describes the task:
### Input:
Validates additional properties. In additional properties, we only
need to compare the values of the dict, not the keys
Args:
valid_response: An example response (for example generated in
_get_example_from_properties(self, spec))
Type is DICT
response: The actual dict coming from the response
Type is DICT
Returns:
A boolean - whether the actual response validates against the given example
### Response:
def validate_additional_properties(self, valid_response, response):
    """Validates additional properties. In additional properties, we only
    need to compare the values of the dict, not the keys
    Args:
    valid_response: An example response (for example generated in
    _get_example_from_properties(self, spec))
    Type is DICT
    response: The actual dict coming from the response
    Type is DICT
    Returns:
    A boolean - whether the actual response validates against the given example
    """
    assert isinstance(valid_response, dict)
    assert isinstance(response, dict)
    # the type of the value of the first key/value in valid_response is our
    # expected type - if it is a dict or list, we must go deeper
    first_value = valid_response[list(valid_response)[0]]
    # dict
    if isinstance(first_value, dict):
        # try to find a definition for that first value
        definition = None
        definition_name = self.get_dict_definition(first_value)
        if definition_name is None:
            # no known definition: derive one from the example itself
            definition = self._definition_from_example(first_value)
            definition_name = 'self generated'
        # every value in the actual response must match that definition
        for item in response.values():
            if not self.validate_definition(definition_name,
                                            item,
                                            definition=definition):
                return False
        return True
    # TODO: list
    if isinstance(first_value, list):
        raise Exception("Not implemented yet")
    # simple types
    # all values must be of that type in both valid and actual response
    try:
        assert all(isinstance(y, type(first_value)) for _, y in response.items())
        assert all(isinstance(y, type(first_value)) for _, y in valid_response.items())
        return True
    except Exception:
        return False |
def addError(self, test, err, capt=None):
"""
After a test error, we want to record testcase run information.
"""
        self.__insert_test_result(constants.State.ERROR, test, err) | After a test error, we want to record testcase run information. | Below is the instruction that describes the task:
### Input:
After a test error, we want to record testcase run information.
### Response:
def addError(self, test, err, capt=None):
    """
    After a test error, we want to record testcase run information.
    """
    # capt is accepted for interface compatibility but not used here
    self.__insert_test_result(constants.State.ERROR, test, err) |
def extract_vars(*names,**kw):
"""Extract a set of variables by name from another frame.
:Parameters:
- `*names`: strings
One or more variable names which will be extracted from the caller's
frame.
:Keywords:
- `depth`: integer (0)
How many frames in the stack to walk when looking for your variables.
Examples:
In [2]: def func(x):
...: y = 1
...: print sorted(extract_vars('x','y').items())
...:
In [3]: func('hello')
[('x', 'hello'), ('y', 1)]
"""
depth = kw.get('depth',0)
callerNS = sys._getframe(depth+1).f_locals
return dict((k,callerNS[k]) for k in names) | Extract a set of variables by name from another frame.
:Parameters:
- `*names`: strings
One or more variable names which will be extracted from the caller's
frame.
:Keywords:
- `depth`: integer (0)
How many frames in the stack to walk when looking for your variables.
Examples:
In [2]: def func(x):
...: y = 1
...: print sorted(extract_vars('x','y').items())
...:
In [3]: func('hello')
[('x', 'hello'), ('y', 1)] | Below is the instruction that describes the task:
### Input:
Extract a set of variables by name from another frame.
:Parameters:
- `*names`: strings
One or more variable names which will be extracted from the caller's
frame.
:Keywords:
- `depth`: integer (0)
How many frames in the stack to walk when looking for your variables.
Examples:
In [2]: def func(x):
...: y = 1
...: print sorted(extract_vars('x','y').items())
...:
In [3]: func('hello')
[('x', 'hello'), ('y', 1)]
### Response:
def extract_vars(*names,**kw):
    """Extract a set of variables by name from another frame.
    :Parameters:
    - `*names`: strings
    One or more variable names which will be extracted from the caller's
    frame.
    :Keywords:
    - `depth`: integer (0)
    How many frames in the stack to walk when looking for your variables.
    Examples:
    In [2]: def func(x):
    ...: y = 1
    ...: print sorted(extract_vars('x','y').items())
    ...:
    In [3]: func('hello')
    [('x', 'hello'), ('y', 1)]
    """
    depth = kw.get('depth',0)
    # depth+1 skips extract_vars' own frame when walking the stack
    callerNS = sys._getframe(depth+1).f_locals
    return dict((k,callerNS[k]) for k in names) |
def get_first_property(elt, key, default=None, ctx=None):
"""Get first property related to one input key.
:param elt: first property elt. Not None methods.
:param str key: property key to get.
:param default: default value to return if key does not exist in elt.
properties
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
"""
result = default
properties = _get_properties(elt, keys=(key,), ctx=ctx, first=True)
# set value if key exists in properties
if key in properties:
result = properties[key]
return result | Get first property related to one input key.
:param elt: first property elt. Not None methods.
:param str key: property key to get.
:param default: default value to return if key does not exist in elt.
properties
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class. | Below is the instruction that describes the task:
### Input:
Get first property related to one input key.
:param elt: first property elt. Not None methods.
:param str key: property key to get.
:param default: default value to return if key does not exist in elt.
properties
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
### Response:
def get_first_property(elt, key, default=None, ctx=None):
    """Get first property related to one input key.
    :param elt: first property elt. Not None methods.
    :param str key: property key to get.
    :param default: default value to return if key does not exist in elt.
    properties
    :param ctx: elt ctx from where get properties. Equals elt if None. It
    allows to get function properties related to a class or instance if
    related function is defined in base class.
    """
    result = default
    # fetch only the requested key; first=True stops at the first match
    properties = _get_properties(elt, keys=(key,), ctx=ctx, first=True)
    # set value if key exists in properties
    if key in properties:
        result = properties[key]
    return result |
def parse_cli():
"""
The outline of this function needs to be like this:
1. parse arguments
2. validate arguments only, dont go into other logic/code
3. run application logic
"""
#
# 1. parse cli arguments
#
__docopt__ = """
usage: pykwalify -d FILE -s FILE ... [-e FILE ...]
[--strict-rule-validation] [--fix-ruby-style-regex] [--allow-assertions] [--encoding ENCODING]
[-v ...] [-q]
optional arguments:
-d FILE, --data-file FILE the file to be tested
-e FILE, --extension FILE file containing python extension
-s FILE, --schema-file FILE schema definition file
--fix-ruby-style-regex This flag fixes some of the quirks of ruby style regex
that is not compatible with python style regex
--strict-rule-validation enables strict validation of all keywords for all
Rule objects to find unsupported keyword usage
--allow-assertions By default assertions is disabled due to security risk.
Error will be raised if assertion is used in schema
but this flag is not used. This option enables assert keyword.
--encoding ENCODING Specify encoding to open data and schema files with.
-h, --help show this help message and exit
-q, --quiet suppress terminal output
-v, --verbose verbose terminal output (multiple -v increases verbosity)
--version display the version number and exit
"""
# Import pykwalify package
import pykwalify
args = docopt(__docopt__, version=pykwalify.__version__)
pykwalify.init_logging(1 if args["--quiet"] else args["--verbose"])
log = logging.getLogger(__name__)
#
# 2. validate arguments only, dont go into other code/logic
#
log.debug("Setting verbose level: %s", args["--verbose"])
log.debug("Arguments from CLI: %s", args)
return args | The outline of this function needs to be like this:
1. parse arguments
2. validate arguments only, dont go into other logic/code
3. run application logic | Below is the instruction that describes the task:
### Input:
The outline of this function needs to be like this:
1. parse arguments
2. validate arguments only, dont go into other logic/code
3. run application logic
### Response:
def parse_cli():
"""
The outline of this function needs to be like this:
1. parse arguments
2. validate arguments only, dont go into other logic/code
3. run application logic
"""
#
# 1. parse cli arguments
#
__docopt__ = """
usage: pykwalify -d FILE -s FILE ... [-e FILE ...]
[--strict-rule-validation] [--fix-ruby-style-regex] [--allow-assertions] [--encoding ENCODING]
[-v ...] [-q]
optional arguments:
-d FILE, --data-file FILE the file to be tested
-e FILE, --extension FILE file containing python extension
-s FILE, --schema-file FILE schema definition file
--fix-ruby-style-regex This flag fixes some of the quirks of ruby style regex
that is not compatible with python style regex
--strict-rule-validation enables strict validation of all keywords for all
Rule objects to find unsupported keyword usage
--allow-assertions By default assertions is disabled due to security risk.
Error will be raised if assertion is used in schema
but this flag is not used. This option enables assert keyword.
--encoding ENCODING Specify encoding to open data and schema files with.
-h, --help show this help message and exit
-q, --quiet suppress terminal output
-v, --verbose verbose terminal output (multiple -v increases verbosity)
--version display the version number and exit
"""
# Import pykwalify package
import pykwalify
args = docopt(__docopt__, version=pykwalify.__version__)
pykwalify.init_logging(1 if args["--quiet"] else args["--verbose"])
log = logging.getLogger(__name__)
#
# 2. validate arguments only, dont go into other code/logic
#
log.debug("Setting verbose level: %s", args["--verbose"])
log.debug("Arguments from CLI: %s", args)
return args |
def role_present(name, profile=None, **connection_args):
''''
Ensures that the keystone role exists
name
The name of the role that should be present
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Role "{0}" already exists'.format(name)}
# Check if role is already present
role = __salt__['keystone.role_get'](name=name, profile=profile,
**connection_args)
if 'Error' not in role:
return ret
else:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Role "{0}" will be added'.format(name)
return ret
# Create role
__salt__['keystone.role_create'](name, profile=profile,
**connection_args)
ret['comment'] = 'Role "{0}" has been added'.format(name)
ret['changes']['Role'] = 'Created'
return ret | Ensures that the keystone role exists
name
The name of the role that should be present | Below is the instruction that describes the task:
### Input:
Ensures that the keystone role exists
name
The name of the role that should be present
### Response:
def role_present(name, profile=None, **connection_args):
''''
Ensures that the keystone role exists
name
The name of the role that should be present
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Role "{0}" already exists'.format(name)}
# Check if role is already present
role = __salt__['keystone.role_get'](name=name, profile=profile,
**connection_args)
if 'Error' not in role:
return ret
else:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Role "{0}" will be added'.format(name)
return ret
# Create role
__salt__['keystone.role_create'](name, profile=profile,
**connection_args)
ret['comment'] = 'Role "{0}" has been added'.format(name)
ret['changes']['Role'] = 'Created'
return ret |
def init_widget(self):
""" Bind the on property to the checked state """
super(UiKitSlider, self).init_widget()
d = self.declaration
if d.min:
self.set_min(d.min)
if d.max:
self.set_max(d.max)
if d.progress:
self.set_progress(d.progress)
#: A really ugly way to add the target
#: would be nice if we could just pass the block pointer here :)
self.get_app().bridge.addTarget(
self.widget,
forControlEvents=UISlider.UIControlEventValueChanged,
andCallback=self.widget.getId(),
usingMethod="onValueChanged",
withValues=["value"]#,"selected"]
)
self.widget.onValueChanged.connect(self.on_checked_changed) | Bind the on property to the checked state | Below is the instruction that describes the task:
### Input:
Bind the on property to the checked state
### Response:
def init_widget(self):
""" Bind the on property to the checked state """
super(UiKitSlider, self).init_widget()
d = self.declaration
if d.min:
self.set_min(d.min)
if d.max:
self.set_max(d.max)
if d.progress:
self.set_progress(d.progress)
#: A really ugly way to add the target
#: would be nice if we could just pass the block pointer here :)
self.get_app().bridge.addTarget(
self.widget,
forControlEvents=UISlider.UIControlEventValueChanged,
andCallback=self.widget.getId(),
usingMethod="onValueChanged",
withValues=["value"]#,"selected"]
)
self.widget.onValueChanged.connect(self.on_checked_changed) |
def assoc(self, key, value):
"""
Return a modified PrettyContext with ``key`` set to ``value``
"""
return self._replace(user_ctx={
**self.user_ctx,
key: value,
}) | Return a modified PrettyContext with ``key`` set to ``value`` | Below is the instruction that describes the task:
### Input:
Return a modified PrettyContext with ``key`` set to ``value``
### Response:
def assoc(self, key, value):
"""
Return a modified PrettyContext with ``key`` set to ``value``
"""
return self._replace(user_ctx={
**self.user_ctx,
key: value,
}) |
def event(self, button):
"""
button has been clicked
"""
# cancel any pending timer
if self.timer:
self.timer.cancel()
if self.last_button != button:
if self.last_button:
# new button clicked process the one before.
self.trigger()
self.clicks = 1
else:
self.clicks += 1
# do we accept double clicks on this button?
# if not then just process the click
button_index = min(button, len(self.callbacks)) - 1
if not self.callbacks[button_index][1]:
# set things up correctly for the trigger
self.clicks = 1
self.last_button = button
self.trigger()
else:
# set a timeout to trigger the click
# this will be cancelled if we click again before it runs
self.last_button = button
self.timer = Timer(self.click_time, self.trigger)
self.timer.start() | button has been clicked | Below is the instruction that describes the task:
### Input:
button has been clicked
### Response:
def event(self, button):
"""
button has been clicked
"""
# cancel any pending timer
if self.timer:
self.timer.cancel()
if self.last_button != button:
if self.last_button:
# new button clicked process the one before.
self.trigger()
self.clicks = 1
else:
self.clicks += 1
# do we accept double clicks on this button?
# if not then just process the click
button_index = min(button, len(self.callbacks)) - 1
if not self.callbacks[button_index][1]:
# set things up correctly for the trigger
self.clicks = 1
self.last_button = button
self.trigger()
else:
# set a timeout to trigger the click
# this will be cancelled if we click again before it runs
self.last_button = button
self.timer = Timer(self.click_time, self.trigger)
self.timer.start() |
def import_module(module_name):
"""
Imports a module. A single point of truth for importing modules to
be documented by `pydoc`. In particular, it makes sure that the top
module in `module_name` can be imported by using only the paths in
`pydoc.import_path`.
If a module has already been imported, then its corresponding entry
in `sys.modules` is returned. This means that modules that have
changed on disk cannot be re-imported in the same process and have
its documentation updated.
"""
if import_path != sys.path:
# Such a kludge. Only restrict imports if the `import_path` has
# been changed. We don't want to always restrict imports, since
# providing a path to `imp.find_module` stops it from searching
# in special locations for built ins or frozen modules.
#
# The problem here is that this relies on the `sys.path` not being
# independently changed since the initialization of this module.
# If it is changed, then some packages may fail.
#
# Any other options available?
# Raises an exception if the parent module cannot be imported.
# This hopefully ensures that we only explicitly import modules
# contained in `pydoc.import_path`.
imp.find_module(module_name.split('.')[0], import_path)
if module_name in sys.modules:
return sys.modules[module_name]
else:
__import__(module_name)
return sys.modules[module_name] | Imports a module. A single point of truth for importing modules to
be documented by `pydoc`. In particular, it makes sure that the top
module in `module_name` can be imported by using only the paths in
`pydoc.import_path`.
If a module has already been imported, then its corresponding entry
in `sys.modules` is returned. This means that modules that have
changed on disk cannot be re-imported in the same process and have
its documentation updated. | Below is the instruction that describes the task:
### Input:
Imports a module. A single point of truth for importing modules to
be documented by `pydoc`. In particular, it makes sure that the top
module in `module_name` can be imported by using only the paths in
`pydoc.import_path`.
If a module has already been imported, then its corresponding entry
in `sys.modules` is returned. This means that modules that have
changed on disk cannot be re-imported in the same process and have
its documentation updated.
### Response:
def import_module(module_name):
"""
Imports a module. A single point of truth for importing modules to
be documented by `pydoc`. In particular, it makes sure that the top
module in `module_name` can be imported by using only the paths in
`pydoc.import_path`.
If a module has already been imported, then its corresponding entry
in `sys.modules` is returned. This means that modules that have
changed on disk cannot be re-imported in the same process and have
its documentation updated.
"""
if import_path != sys.path:
# Such a kludge. Only restrict imports if the `import_path` has
# been changed. We don't want to always restrict imports, since
# providing a path to `imp.find_module` stops it from searching
# in special locations for built ins or frozen modules.
#
# The problem here is that this relies on the `sys.path` not being
# independently changed since the initialization of this module.
# If it is changed, then some packages may fail.
#
# Any other options available?
# Raises an exception if the parent module cannot be imported.
# This hopefully ensures that we only explicitly import modules
# contained in `pydoc.import_path`.
imp.find_module(module_name.split('.')[0], import_path)
if module_name in sys.modules:
return sys.modules[module_name]
else:
__import__(module_name)
return sys.modules[module_name] |
def save(self, *args, **kwargs):
""" if order left blank """
if self.order == '' or self.order is None:
try:
self.order = self.get_auto_order_queryset().order_by("-order")[0].order + 1
except IndexError:
self.order = 0
super(BaseOrdered, self).save() | if order left blank | Below is the instruction that describes the task:
### Input:
if order left blank
### Response:
def save(self, *args, **kwargs):
""" if order left blank """
if self.order == '' or self.order is None:
try:
self.order = self.get_auto_order_queryset().order_by("-order")[0].order + 1
except IndexError:
self.order = 0
super(BaseOrdered, self).save() |
def delete_old_versions(
logger,
new_library_match,
id_nums,
token,
prod_folder,
host
):
"""
delete any other versions of the same library where:
it has the same major version
it has a smaller minor version
it lives in prod_folder
Parameters
----------
logger: logging object
configured in cli_commands.py
match: FilenameMatch object
match object with library_name_, major_version, minor_version
id_nums: dict
second output of get_library_mapping
token: string
Databricks API key with admin permissions
prod_folder: string
name of folder in Databricks UI containing production libraries
host: string
Databricks account url
(e.g. https://fake-organization.cloud.databricks.com)
Side Effects
------------
delete any other versions of the same library with the same major version
and smaller minor versions
"""
old_versions = []
for name, lib in id_nums.items():
if new_library_match.replace_version(lib['name_match'], logger):
old_versions.append(lib['name_match'].filename)
res = requests.post(
host + '/api/1.2/libraries/delete',
auth=('token', token),
data={'libraryId': lib['id_num']},
)
if res.status_code != 200:
raise APIError(res)
return old_versions | delete any other versions of the same library where:
it has the same major version
it has a smaller minor version
it lives in prod_folder
Parameters
----------
logger: logging object
configured in cli_commands.py
match: FilenameMatch object
match object with library_name_, major_version, minor_version
id_nums: dict
second output of get_library_mapping
token: string
Databricks API key with admin permissions
prod_folder: string
name of folder in Databricks UI containing production libraries
host: string
Databricks account url
(e.g. https://fake-organization.cloud.databricks.com)
Side Effects
------------
delete any other versions of the same library with the same major version
and smaller minor versions | Below is the instruction that describes the task:
### Input:
delete any other versions of the same library where:
it has the same major version
it has a smaller minor version
it lives in prod_folder
Parameters
----------
logger: logging object
configured in cli_commands.py
match: FilenameMatch object
match object with library_name_, major_version, minor_version
id_nums: dict
second output of get_library_mapping
token: string
Databricks API key with admin permissions
prod_folder: string
name of folder in Databricks UI containing production libraries
host: string
Databricks account url
(e.g. https://fake-organization.cloud.databricks.com)
Side Effects
------------
delete any other versions of the same library with the same major version
and smaller minor versions
### Response:
def delete_old_versions(
logger,
new_library_match,
id_nums,
token,
prod_folder,
host
):
"""
delete any other versions of the same library where:
it has the same major version
it has a smaller minor version
it lives in prod_folder
Parameters
----------
logger: logging object
configured in cli_commands.py
match: FilenameMatch object
match object with library_name_, major_version, minor_version
id_nums: dict
second output of get_library_mapping
token: string
Databricks API key with admin permissions
prod_folder: string
name of folder in Databricks UI containing production libraries
host: string
Databricks account url
(e.g. https://fake-organization.cloud.databricks.com)
Side Effects
------------
delete any other versions of the same library with the same major version
and smaller minor versions
"""
old_versions = []
for name, lib in id_nums.items():
if new_library_match.replace_version(lib['name_match'], logger):
old_versions.append(lib['name_match'].filename)
res = requests.post(
host + '/api/1.2/libraries/delete',
auth=('token', token),
data={'libraryId': lib['id_num']},
)
if res.status_code != 200:
raise APIError(res)
return old_versions |
def input(self, _in, out, **kwargs):
"""Process individual translation file."""
language_code = _re_language_code.search(_in.read()).group(
'language_code'
)
_in.seek(0) # move at the begining after matching the language
catalog = read_po(_in)
out.write('gettextCatalog.setStrings("{0}", '.format(language_code))
out.write(json.dumps({
key: value.string for key, value in catalog._messages.items()
if key and value.string
}))
out.write(');') | Process individual translation file. | Below is the the instruction that describes the task:
### Input:
Process individual translation file.
### Response:
def input(self, _in, out, **kwargs):
"""Process individual translation file."""
language_code = _re_language_code.search(_in.read()).group(
'language_code'
)
_in.seek(0) # move at the begining after matching the language
catalog = read_po(_in)
out.write('gettextCatalog.setStrings("{0}", '.format(language_code))
out.write(json.dumps({
key: value.string for key, value in catalog._messages.items()
if key and value.string
}))
out.write(');') |
def parse_solr_geo_range_as_pair(geo_box_str):
"""
:param geo_box_str: [-90,-180 TO 90,180]
:return: ("-90,-180", "90,180")
"""
pattern = "\\[(.*) TO (.*)\\]"
matcher = re.search(pattern, geo_box_str)
if matcher:
return matcher.group(1), matcher.group(2)
else:
raise Exception("Regex {0} could not parse {1}".format(pattern, geo_box_str)) | :param geo_box_str: [-90,-180 TO 90,180]
:return: ("-90,-180", "90,180") | Below is the instruction that describes the task:
### Input:
:param geo_box_str: [-90,-180 TO 90,180]
:return: ("-90,-180", "90,180")
### Response:
def parse_solr_geo_range_as_pair(geo_box_str):
"""
:param geo_box_str: [-90,-180 TO 90,180]
:return: ("-90,-180", "90,180")
"""
pattern = "\\[(.*) TO (.*)\\]"
matcher = re.search(pattern, geo_box_str)
if matcher:
return matcher.group(1), matcher.group(2)
else:
raise Exception("Regex {0} could not parse {1}".format(pattern, geo_box_str)) |
def delete_file(self, name):
"""Deletes the file identified by ``name`` along with its metadata.
The file is removed from both the local store and the remote store.
"""
if self.local_store:
lock = self.lock_manager.lock_for(name)
lock.lock_exclusive()
try:
self.local_store.delete_file(name)
finally:
lock.close()
if self.remote_store:
self.remote_store.delete_file(name) | Deletes the file identified by ``name`` along with its metadata.
The file is removed from both the local store and the remote store. | Below is the the instruction that describes the task:
### Input:
Deletes the file identified by ``name`` along with its metadata.
The file is removed from both the local store and the remote store.
### Response:
def delete_file(self, name):
"""Deletes the file identified by ``name`` along with its metadata.
The file is removed from both the local store and the remote store.
"""
if self.local_store:
lock = self.lock_manager.lock_for(name)
lock.lock_exclusive()
try:
self.local_store.delete_file(name)
finally:
lock.close()
if self.remote_store:
self.remote_store.delete_file(name) |
def hide_virtual_ip_holder_chassis_virtual_ipv6(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_virtual_ip_holder = ET.SubElement(config, "hide-virtual-ip-holder", xmlns="urn:brocade.com:mgmt:brocade-chassis")
chassis = ET.SubElement(hide_virtual_ip_holder, "chassis")
virtual_ipv6 = ET.SubElement(chassis, "virtual-ipv6")
virtual_ipv6.text = kwargs.pop('virtual_ipv6')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hide_virtual_ip_holder_chassis_virtual_ipv6(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_virtual_ip_holder = ET.SubElement(config, "hide-virtual-ip-holder", xmlns="urn:brocade.com:mgmt:brocade-chassis")
chassis = ET.SubElement(hide_virtual_ip_holder, "chassis")
virtual_ipv6 = ET.SubElement(chassis, "virtual-ipv6")
virtual_ipv6.text = kwargs.pop('virtual_ipv6')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def set_viewlimits(self, axes=None):
""" update xy limits of a plot"""
if axes is None:
axes = self.axes
xmin, xmax, ymin, ymax = self.data_range
if len(self.conf.zoom_lims) >1:
zlims = self.conf.zoom_lims[-1]
if axes in zlims:
xmin, xmax, ymin, ymax = zlims[axes]
xmin = max(self.data_range[0], xmin)
xmax = min(self.data_range[1], xmax)
ymin = max(self.data_range[2], ymin)
ymax = min(self.data_range[3], ymax)
if (xmax < self.data_range[0] or
xmin > self.data_range[1] or
ymax < self.data_range[2] or
ymin > self.data_range[3] ):
self.conf.zoom_lims.pop()
return
if abs(xmax-xmin) < 2:
xmin = int(0.5*(xmax+xmin) - 1)
xmax = xmin + 2
if abs(ymax-ymin) < 2:
ymin = int(0.5*(ymax+xmin) - 1)
ymax = ymin + 2
self.axes.set_xlim((xmin, xmax),emit=True)
self.axes.set_ylim((ymin, ymax),emit=True)
self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))
self.conf.datalimits = [xmin, xmax, ymin, ymax]
self.redraw() | update xy limits of a plot | Below is the instruction that describes the task:
### Input:
update xy limits of a plot
### Response:
def set_viewlimits(self, axes=None):
""" update xy limits of a plot"""
if axes is None:
axes = self.axes
xmin, xmax, ymin, ymax = self.data_range
if len(self.conf.zoom_lims) >1:
zlims = self.conf.zoom_lims[-1]
if axes in zlims:
xmin, xmax, ymin, ymax = zlims[axes]
xmin = max(self.data_range[0], xmin)
xmax = min(self.data_range[1], xmax)
ymin = max(self.data_range[2], ymin)
ymax = min(self.data_range[3], ymax)
if (xmax < self.data_range[0] or
xmin > self.data_range[1] or
ymax < self.data_range[2] or
ymin > self.data_range[3] ):
self.conf.zoom_lims.pop()
return
if abs(xmax-xmin) < 2:
xmin = int(0.5*(xmax+xmin) - 1)
xmax = xmin + 2
if abs(ymax-ymin) < 2:
ymin = int(0.5*(ymax+xmin) - 1)
ymax = ymin + 2
self.axes.set_xlim((xmin, xmax),emit=True)
self.axes.set_ylim((ymin, ymax),emit=True)
self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))
self.conf.datalimits = [xmin, xmax, ymin, ymax]
self.redraw() |
def update(self) -> None:
"""Call method |Parameter.update| of all "secondary" parameters.
Directly after initialisation, neither the primary (`control`)
parameters nor the secondary (`derived`) parameters of
application model |hstream_v1| are ready for usage:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1d')
>>> derived
nmbsegments(?)
c1(?)
c3(?)
c2(?)
Trying to update the values of the secondary parameters while the
primary ones are still not defined, raises errors like the following:
>>> model.parameters.update()
Traceback (most recent call last):
...
AttributeError: While trying to update parameter ``nmbsegments` \
of element `?``, the following error occurred: For variable `lag`, \
no value has been defined so far.
With proper values both for parameter |hstream_control.Lag| and
|hstream_control.Damp|, updating the derived parameters succeeds:
>>> lag(0.0)
>>> damp(0.0)
>>> model.parameters.update()
>>> derived
nmbsegments(0)
c1(0.0)
c3(0.0)
c2(1.0)
"""
for subpars in self.secondary_subpars:
for par in subpars:
try:
par.update()
except BaseException:
objecttools.augment_excmessage(
f'While trying to update parameter '
f'`{objecttools.elementphrase(par)}`') | Call method |Parameter.update| of all "secondary" parameters.
Directly after initialisation, neither the primary (`control`)
parameters nor the secondary (`derived`) parameters of
application model |hstream_v1| are ready for usage:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1d')
>>> derived
nmbsegments(?)
c1(?)
c3(?)
c2(?)
Trying to update the values of the secondary parameters while the
primary ones are still not defined, raises errors like the following:
>>> model.parameters.update()
Traceback (most recent call last):
...
AttributeError: While trying to update parameter ``nmbsegments` \
of element `?``, the following error occurred: For variable `lag`, \
no value has been defined so far.
With proper values both for parameter |hstream_control.Lag| and
|hstream_control.Damp|, updating the derived parameters succeeds:
>>> lag(0.0)
>>> damp(0.0)
>>> model.parameters.update()
>>> derived
nmbsegments(0)
c1(0.0)
c3(0.0)
c2(1.0) | Below is the instruction that describes the task:
### Input:
Call method |Parameter.update| of all "secondary" parameters.
Directly after initialisation, neither the primary (`control`)
parameters nor the secondary (`derived`) parameters of
application model |hstream_v1| are ready for usage:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1d')
>>> derived
nmbsegments(?)
c1(?)
c3(?)
c2(?)
Trying to update the values of the secondary parameters while the
primary ones are still not defined, raises errors like the following:
>>> model.parameters.update()
Traceback (most recent call last):
...
AttributeError: While trying to update parameter ``nmbsegments` \
of element `?``, the following error occurred: For variable `lag`, \
no value has been defined so far.
With proper values both for parameter |hstream_control.Lag| and
|hstream_control.Damp|, updating the derived parameters succeeds:
>>> lag(0.0)
>>> damp(0.0)
>>> model.parameters.update()
>>> derived
nmbsegments(0)
c1(0.0)
c3(0.0)
c2(1.0)
### Response:
def update(self) -> None:
"""Call method |Parameter.update| of all "secondary" parameters.
Directly after initialisation, neither the primary (`control`)
parameters nor the secondary (`derived`) parameters of
application model |hstream_v1| are ready for usage:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1d')
>>> derived
nmbsegments(?)
c1(?)
c3(?)
c2(?)
Trying to update the values of the secondary parameters while the
primary ones are still not defined, raises errors like the following:
>>> model.parameters.update()
Traceback (most recent call last):
...
AttributeError: While trying to update parameter ``nmbsegments` \
of element `?``, the following error occurred: For variable `lag`, \
no value has been defined so far.
With proper values both for parameter |hstream_control.Lag| and
|hstream_control.Damp|, updating the derived parameters succeeds:
>>> lag(0.0)
>>> damp(0.0)
>>> model.parameters.update()
>>> derived
nmbsegments(0)
c1(0.0)
c3(0.0)
c2(1.0)
"""
for subpars in self.secondary_subpars:
for par in subpars:
try:
par.update()
except BaseException:
objecttools.augment_excmessage(
f'While trying to update parameter '
f'`{objecttools.elementphrase(par)}`') |
def delete_namespaced_pod(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_pod # noqa: E501
delete a Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_pod(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_pod_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_pod_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | delete_namespaced_pod # noqa: E501
delete a Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_pod(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
delete_namespaced_pod # noqa: E501
delete a Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_pod(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_namespaced_pod(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_pod # noqa: E501
delete a Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_pod(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_pod_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_pod_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def _vcf_is_strelka(variant_file, variant_metadata):
"""Return True if variant_file given is in strelka format
"""
if "strelka" in variant_file.lower():
return True
elif "NORMAL" in variant_metadata["sample_info"].keys():
return True
else:
vcf_reader = vcf.Reader(open(variant_file, "r"))
try:
vcf_type = vcf_reader.metadata["content"]
except KeyError:
vcf_type = ""
if "strelka" in vcf_type.lower():
return True
return False | Return True if variant_file given is in strelka format | Below is the the instruction that describes the task:
### Input:
Return True if variant_file given is in strelka format
### Response:
def _vcf_is_strelka(variant_file, variant_metadata):
"""Return True if variant_file given is in strelka format
"""
if "strelka" in variant_file.lower():
return True
elif "NORMAL" in variant_metadata["sample_info"].keys():
return True
else:
vcf_reader = vcf.Reader(open(variant_file, "r"))
try:
vcf_type = vcf_reader.metadata["content"]
except KeyError:
vcf_type = ""
if "strelka" in vcf_type.lower():
return True
return False |
def _which_git_config(global_, cwd, user, password, output_encoding=None):
'''
Based on whether global or local config is desired, return a list of CLI
args to include in the git config command.
'''
if global_:
return ['--global']
version_ = _LooseVersion(version(versioninfo=False))
if version_ >= _LooseVersion('1.7.10.2'):
# --local added in 1.7.10.2
return ['--local']
else:
# For earlier versions, need to specify the path to the git config file
return ['--file', _git_config(cwd, user, password,
output_encoding=output_encoding)] | Based on whether global or local config is desired, return a list of CLI
args to include in the git config command. | Below is the the instruction that describes the task:
### Input:
Based on whether global or local config is desired, return a list of CLI
args to include in the git config command.
### Response:
def _which_git_config(global_, cwd, user, password, output_encoding=None):
'''
Based on whether global or local config is desired, return a list of CLI
args to include in the git config command.
'''
if global_:
return ['--global']
version_ = _LooseVersion(version(versioninfo=False))
if version_ >= _LooseVersion('1.7.10.2'):
# --local added in 1.7.10.2
return ['--local']
else:
# For earlier versions, need to specify the path to the git config file
return ['--file', _git_config(cwd, user, password,
output_encoding=output_encoding)] |
def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None):
"""Compute Monte Carlo standard errors for the expectations
This is a convenience function that calls the compute method for each batch size and returns the lowest ESS
over the used batch sizes.
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
"""
batch_size_generator = batch_size_generator or SquareRootSingleBatch()
compute_method = compute_method or BatchMeansMCSE()
batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))
return np.min(list(compute_method.compute_standard_error(chain, b) for b in batch_sizes)) | Compute Monte Carlo standard errors for the expectations
This is a convenience function that calls the compute method for each batch size and returns the lowest ESS
over the used batch sizes.
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method | Below is the the instruction that describes the task:
### Input:
Compute Monte Carlo standard errors for the expectations
This is a convenience function that calls the compute method for each batch size and returns the lowest ESS
over the used batch sizes.
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
### Response:
def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None):
"""Compute Monte Carlo standard errors for the expectations
This is a convenience function that calls the compute method for each batch size and returns the lowest ESS
over the used batch sizes.
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
"""
batch_size_generator = batch_size_generator or SquareRootSingleBatch()
compute_method = compute_method or BatchMeansMCSE()
batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))
return np.min(list(compute_method.compute_standard_error(chain, b) for b in batch_sizes)) |
def parse_connection_string_libpq(connection_string):
"""parse a postgresql connection string as defined in
http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING"""
fields = {}
while True:
connection_string = connection_string.strip()
if not connection_string:
break
if "=" not in connection_string:
raise ValueError("expecting key=value format in connection_string fragment {!r}".format(connection_string))
key, rem = connection_string.split("=", 1)
if rem.startswith("'"):
asis, value = False, ""
for i in range(1, len(rem)):
if asis:
value += rem[i]
asis = False
elif rem[i] == "'":
break # end of entry
elif rem[i] == "\\":
asis = True
else:
value += rem[i]
else:
raise ValueError("invalid connection_string fragment {!r}".format(rem))
connection_string = rem[i + 1:] # pylint: disable=undefined-loop-variable
else:
res = rem.split(None, 1)
if len(res) > 1:
value, connection_string = res
else:
value, connection_string = rem, ""
fields[key] = value
return fields | parse a postgresql connection string as defined in
http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING | Below is the the instruction that describes the task:
### Input:
parse a postgresql connection string as defined in
http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
### Response:
def parse_connection_string_libpq(connection_string):
"""parse a postgresql connection string as defined in
http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING"""
fields = {}
while True:
connection_string = connection_string.strip()
if not connection_string:
break
if "=" not in connection_string:
raise ValueError("expecting key=value format in connection_string fragment {!r}".format(connection_string))
key, rem = connection_string.split("=", 1)
if rem.startswith("'"):
asis, value = False, ""
for i in range(1, len(rem)):
if asis:
value += rem[i]
asis = False
elif rem[i] == "'":
break # end of entry
elif rem[i] == "\\":
asis = True
else:
value += rem[i]
else:
raise ValueError("invalid connection_string fragment {!r}".format(rem))
connection_string = rem[i + 1:] # pylint: disable=undefined-loop-variable
else:
res = rem.split(None, 1)
if len(res) > 1:
value, connection_string = res
else:
value, connection_string = rem, ""
fields[key] = value
return fields |
def make_payment(self, *, reference_code, description, tx_value, tx_tax, tx_tax_return_base, currency, buyer,
payer, credit_card, payment_method, payment_country, device_session_id, ip_address, cookie,
user_agent, language=None, shipping_address=None, extra_parameters=None, notify_url=None,
transaction_type=TransactionType.AUTHORIZATION_AND_CAPTURE):
"""
Authorization: used to verify if a credit card is active, if it has funds, etc.
The transaction is not complete until a transaction capture is sent (only available for accounts in Argentina,
Brazil, Peru).
Capture: terminates a previously authorized transaction.
This is when the account makes a debit to the card (only available for accounts in Argentina, Brazil, Peru).
Authorization and capture: this is the most used type of transaction.
This option sends the transaction amount to authorization and if it is approved immediately capture is performed.
Args:
reference_code: The reference code of the order. It represents the identifier of the transaction
in the shop’s system.
Alphanumeric. Min: 1 Max: 255.
description: The description of the order.
Alphanumeric. Min: 1 Max: 255.
tx_value: TX_VALUE, it is the total amount of the transaction. It can contain two decimal digits.
For example 10000.00 and 10000.
Alphanumeric. 64.
tx_tax: TX_TAX, it is the value of the VAT (Value Added Tax only valid for Colombia) of the transaction,
if no VAT is sent, the system will apply 19% automatically. It can contain two decimal digits.
Example 19000.00. In case you have no VAT you should fill out 0.
Alphanumeric. 64.
tx_tax_return_base: TX_TAX_RETURN_BASE, it is the base value on which VAT (only valid for Colombia)
is calculated. If you do not have VAT should be sent to 0.
Alphanumeric. 64.
currency: The ISO currency code associated with the amount.
http://developers.payulatam.com/en/api/variables_table.html
Alphanumeric. 3.
buyer: Buyer’s shipping address.
Example.
{
"merchantBuyerId": "1",
"fullName": "First name and second buyer name",
"emailAddress": "buyer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"shippingAddress": {
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
payer: Payer’s data.
Example.
{
"merchantPayerId": "1",
"fullName": "First name and second payer name",
"emailAddress": "payer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"billingAddress": {
"street1": "calle 93",
"street2": "125544",
"city": "Bogota",
"state": "Bogota DC",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
credit_card: Debit card’s data.
Example.
{
"number": "4097440000000004",
"securityCode": "321",
"expirationDate": "2022/12",
"name": "APPROVED"
}
payment_method: Payment method.
Alphanumeric. 32.
payment_country: Payment countries.
http://developers.payulatam.com/en/api/variables_table.html
device_session_id: The session identifier of the device where the transaction was performed from.
Alphanumeric. Max: 255.
ip_address: The IP address of the device where the transaction was performed from.
Alphanumeric. Max: 39.
cookie: The cookie stored on the device where the transaction was performed from.
Alphanumeric. Max: 255.
user_agent: The user agent of the browser from which the transaction was performed.
Alphanumeric. Max: 1024
language: The language used in the emails that are sent to the buyer and seller.
Alphanumeric. 2
shipping_address: The shipping address.
Example.
{
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "0000000",
"phone": "7563126"
}
extra_parameters: Additional parameters or data associated with a transaction. These parameters may vary
according to the payment means or shop’s preferences.
Example.
{
"INSTALLMENTS_NUMBER": 1
}
notify_url: The URL notification or order confirmation.
Alphanumeric. Max: 2048.
transaction_type:
Returns:
"""
if not isinstance(payment_country, Country):
payment_country = Country(payment_country)
if not isinstance(transaction_type, TransactionType):
transaction_type = TransactionType(transaction_type)
if not isinstance(payment_method, Franchise):
payment_method = Franchise(payment_method)
if not isinstance(currency, Currency):
currency = Currency(currency)
franchises = get_available_franchise_for_payment(payment_country, transaction_type)
if not franchises or payment_method not in franchises:
fmt = 'The credit card franchise {} with transaction type {} is not available for {}.'
raise CVVRequiredError(fmt.format(payment_method.value, transaction_type.value, payment_country.name))
payload = {
"language": self.client.language.value,
"command": PaymentCommand.SUBMIT_TRANSACTION.value,
"merchant": {
"apiKey": self.client.api_key,
"apiLogin": self.client.api_login
},
"transaction": {
"order": {
"accountId": self.client.account_id,
"referenceCode": reference_code,
"description": description,
"language": language or self.client.language.value,
"signature": self.client._get_signature(reference_code, tx_value, currency.value),
"notifyUrl": notify_url,
"additionalValues": {
"TX_VALUE": {
"value": tx_value,
"currency": currency.value
},
"TX_TAX": {
"value": tx_tax,
"currency": currency.value
},
"TX_TAX_RETURN_BASE": {
"value": tx_tax_return_base,
"currency": currency.value
}
},
"buyer": buyer,
"shippingAddress": shipping_address
},
"payer": payer,
"creditCard": credit_card,
"extraParameters": extra_parameters,
"type": transaction_type.value,
"paymentMethod": payment_method.value,
"paymentCountry": payment_country.value,
"deviceSessionId": device_session_id,
"ipAddress": ip_address,
"cookie": cookie,
"userAgent": user_agent
},
"test": self.client.is_test
}
return self.client._post(self.url, json=payload) | Authorization: used to verify if a credit card is active, if it has funds, etc.
The transaction is not complete until a transaction capture is sent (only available for accounts in Argentina,
Brazil, Peru).
Capture: terminates a previously authorized transaction.
This is when the account makes a debit to the card (only available for accounts in Argentina, Brazil, Peru).
Authorization and capture: this is the most used type of transaction.
This option sends the transaction amount to authorization and if it is approved immediately capture is performed.
Args:
reference_code: The reference code of the order. It represents the identifier of the transaction
in the shop’s system.
Alphanumeric. Min: 1 Max: 255.
description: The description of the order.
Alphanumeric. Min: 1 Max: 255.
tx_value: TX_VALUE, it is the total amount of the transaction. It can contain two decimal digits.
For example 10000.00 and 10000.
Alphanumeric. 64.
tx_tax: TX_TAX, it is the value of the VAT (Value Added Tax only valid for Colombia) of the transaction,
if no VAT is sent, the system will apply 19% automatically. It can contain two decimal digits.
Example 19000.00. In case you have no VAT you should fill out 0.
Alphanumeric. 64.
tx_tax_return_base: TX_TAX_RETURN_BASE, it is the base value on which VAT (only valid for Colombia)
is calculated. If you do not have VAT should be sent to 0.
Alphanumeric. 64.
currency: The ISO currency code associated with the amount.
http://developers.payulatam.com/en/api/variables_table.html
Alphanumeric. 3.
buyer: Buyer’s shipping address.
Example.
{
"merchantBuyerId": "1",
"fullName": "First name and second buyer name",
"emailAddress": "buyer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"shippingAddress": {
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
payer: Payer’s data.
Example.
{
"merchantPayerId": "1",
"fullName": "First name and second payer name",
"emailAddress": "payer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"billingAddress": {
"street1": "calle 93",
"street2": "125544",
"city": "Bogota",
"state": "Bogota DC",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
credit_card: Debit card’s data.
Example.
{
"number": "4097440000000004",
"securityCode": "321",
"expirationDate": "2022/12",
"name": "APPROVED"
}
payment_method: Payment method.
Alphanumeric. 32.
payment_country: Payment countries.
http://developers.payulatam.com/en/api/variables_table.html
device_session_id: The session identifier of the device where the transaction was performed from.
Alphanumeric. Max: 255.
ip_address: The IP address of the device where the transaction was performed from.
Alphanumeric. Max: 39.
cookie: The cookie stored on the device where the transaction was performed from.
Alphanumeric. Max: 255.
user_agent: The user agent of the browser from which the transaction was performed.
Alphanumeric. Max: 1024
language: The language used in the emails that are sent to the buyer and seller.
Alphanumeric. 2
shipping_address: The shipping address.
Example.
{
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "0000000",
"phone": "7563126"
}
extra_parameters: Additional parameters or data associated with a transaction. These parameters may vary
according to the payment means or shop’s preferences.
Example.
{
"INSTALLMENTS_NUMBER": 1
}
notify_url: The URL notification or order confirmation.
Alphanumeric. Max: 2048.
transaction_type:
Returns: | Below is the the instruction that describes the task:
### Input:
Authorization: used to verify if a credit card is active, if it has funds, etc.
The transaction is not complete until a transaction capture is sent (only available for accounts in Argentina,
Brazil, Peru).
Capture: terminates a previously authorized transaction.
This is when the account makes a debit to the card (only available for accounts in Argentina, Brazil, Peru).
Authorization and capture: this is the most used type of transaction.
This option sends the transaction amount to authorization and if it is approved immediately capture is performed.
Args:
reference_code: The reference code of the order. It represents the identifier of the transaction
in the shop’s system.
Alphanumeric. Min: 1 Max: 255.
description: The description of the order.
Alphanumeric. Min: 1 Max: 255.
tx_value: TX_VALUE, it is the total amount of the transaction. It can contain two decimal digits.
For example 10000.00 and 10000.
Alphanumeric. 64.
tx_tax: TX_TAX, it is the value of the VAT (Value Added Tax only valid for Colombia) of the transaction,
if no VAT is sent, the system will apply 19% automatically. It can contain two decimal digits.
Example 19000.00. In case you have no VAT you should fill out 0.
Alphanumeric. 64.
tx_tax_return_base: TX_TAX_RETURN_BASE, it is the base value on which VAT (only valid for Colombia)
is calculated. If you do not have VAT should be sent to 0.
Alphanumeric. 64.
currency: The ISO currency code associated with the amount.
http://developers.payulatam.com/en/api/variables_table.html
Alphanumeric. 3.
buyer: Buyer’s shipping address.
Example.
{
"merchantBuyerId": "1",
"fullName": "First name and second buyer name",
"emailAddress": "buyer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"shippingAddress": {
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
payer: Payer’s data.
Example.
{
"merchantPayerId": "1",
"fullName": "First name and second payer name",
"emailAddress": "payer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"billingAddress": {
"street1": "calle 93",
"street2": "125544",
"city": "Bogota",
"state": "Bogota DC",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
credit_card: Debit card’s data.
Example.
{
"number": "4097440000000004",
"securityCode": "321",
"expirationDate": "2022/12",
"name": "APPROVED"
}
payment_method: Payment method.
Alphanumeric. 32.
payment_country: Payment countries.
http://developers.payulatam.com/en/api/variables_table.html
device_session_id: The session identifier of the device where the transaction was performed from.
Alphanumeric. Max: 255.
ip_address: The IP address of the device where the transaction was performed from.
Alphanumeric. Max: 39.
cookie: The cookie stored on the device where the transaction was performed from.
Alphanumeric. Max: 255.
user_agent: The user agent of the browser from which the transaction was performed.
Alphanumeric. Max: 1024
language: The language used in the emails that are sent to the buyer and seller.
Alphanumeric. 2
shipping_address: The shipping address.
Example.
{
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "0000000",
"phone": "7563126"
}
extra_parameters: Additional parameters or data associated with a transaction. These parameters may vary
according to the payment means or shop’s preferences.
Example.
{
"INSTALLMENTS_NUMBER": 1
}
notify_url: The URL notification or order confirmation.
Alphanumeric. Max: 2048.
transaction_type:
Returns:
### Response:
def make_payment(self, *, reference_code, description, tx_value, tx_tax, tx_tax_return_base, currency, buyer,
payer, credit_card, payment_method, payment_country, device_session_id, ip_address, cookie,
user_agent, language=None, shipping_address=None, extra_parameters=None, notify_url=None,
transaction_type=TransactionType.AUTHORIZATION_AND_CAPTURE):
"""
Authorization: used to verify if a credit card is active, if it has funds, etc.
The transaction is not complete until a transaction capture is sent (only available for accounts in Argentina,
Brazil, Peru).
Capture: terminates a previously authorized transaction.
This is when the account makes a debit to the card (only available for accounts in Argentina, Brazil, Peru).
Authorization and capture: this is the most used type of transaction.
This option sends the transaction amount to authorization and if it is approved immediately capture is performed.
Args:
reference_code: The reference code of the order. It represents the identifier of the transaction
in the shop’s system.
Alphanumeric. Min: 1 Max: 255.
description: The description of the order.
Alphanumeric. Min: 1 Max: 255.
tx_value: TX_VALUE, it is the total amount of the transaction. It can contain two decimal digits.
For example 10000.00 and 10000.
Alphanumeric. 64.
tx_tax: TX_TAX, it is the value of the VAT (Value Added Tax only valid for Colombia) of the transaction,
if no VAT is sent, the system will apply 19% automatically. It can contain two decimal digits.
Example 19000.00. In case you have no VAT you should fill out 0.
Alphanumeric. 64.
tx_tax_return_base: TX_TAX_RETURN_BASE, it is the base value on which VAT (only valid for Colombia)
is calculated. If you do not have VAT should be sent to 0.
Alphanumeric. 64.
currency: The ISO currency code associated with the amount.
http://developers.payulatam.com/en/api/variables_table.html
Alphanumeric. 3.
buyer: Buyer’s shipping address.
Example.
{
"merchantBuyerId": "1",
"fullName": "First name and second buyer name",
"emailAddress": "buyer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"shippingAddress": {
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
payer: Payer’s data.
Example.
{
"merchantPayerId": "1",
"fullName": "First name and second payer name",
"emailAddress": "payer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"billingAddress": {
"street1": "calle 93",
"street2": "125544",
"city": "Bogota",
"state": "Bogota DC",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
credit_card: Debit card’s data.
Example.
{
"number": "4097440000000004",
"securityCode": "321",
"expirationDate": "2022/12",
"name": "APPROVED"
}
payment_method: Payment method.
Alphanumeric. 32.
payment_country: Payment countries.
http://developers.payulatam.com/en/api/variables_table.html
device_session_id: The session identifier of the device where the transaction was performed from.
Alphanumeric. Max: 255.
ip_address: The IP address of the device where the transaction was performed from.
Alphanumeric. Max: 39.
cookie: The cookie stored on the device where the transaction was performed from.
Alphanumeric. Max: 255.
user_agent: The user agent of the browser from which the transaction was performed.
Alphanumeric. Max: 1024
language: The language used in the emails that are sent to the buyer and seller.
Alphanumeric. 2
shipping_address: The shipping address.
Example.
{
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "0000000",
"phone": "7563126"
}
extra_parameters: Additional parameters or data associated with a transaction. These parameters may vary
according to the payment means or shop’s preferences.
Example.
{
"INSTALLMENTS_NUMBER": 1
}
notify_url: The URL notification or order confirmation.
Alphanumeric. Max: 2048.
transaction_type:
Returns:
"""
if not isinstance(payment_country, Country):
payment_country = Country(payment_country)
if not isinstance(transaction_type, TransactionType):
transaction_type = TransactionType(transaction_type)
if not isinstance(payment_method, Franchise):
payment_method = Franchise(payment_method)
if not isinstance(currency, Currency):
currency = Currency(currency)
franchises = get_available_franchise_for_payment(payment_country, transaction_type)
if not franchises or payment_method not in franchises:
fmt = 'The credit card franchise {} with transaction type {} is not available for {}.'
raise CVVRequiredError(fmt.format(payment_method.value, transaction_type.value, payment_country.name))
payload = {
"language": self.client.language.value,
"command": PaymentCommand.SUBMIT_TRANSACTION.value,
"merchant": {
"apiKey": self.client.api_key,
"apiLogin": self.client.api_login
},
"transaction": {
"order": {
"accountId": self.client.account_id,
"referenceCode": reference_code,
"description": description,
"language": language or self.client.language.value,
"signature": self.client._get_signature(reference_code, tx_value, currency.value),
"notifyUrl": notify_url,
"additionalValues": {
"TX_VALUE": {
"value": tx_value,
"currency": currency.value
},
"TX_TAX": {
"value": tx_tax,
"currency": currency.value
},
"TX_TAX_RETURN_BASE": {
"value": tx_tax_return_base,
"currency": currency.value
}
},
"buyer": buyer,
"shippingAddress": shipping_address
},
"payer": payer,
"creditCard": credit_card,
"extraParameters": extra_parameters,
"type": transaction_type.value,
"paymentMethod": payment_method.value,
"paymentCountry": payment_country.value,
"deviceSessionId": device_session_id,
"ipAddress": ip_address,
"cookie": cookie,
"userAgent": user_agent
},
"test": self.client.is_test
}
return self.client._post(self.url, json=payload) |
def get_lines(filename: str) -> Iterator[str]:
"""Create an iterator that returns the lines of a utf-8 encoded file."""
if filename.endswith('.gz'):
with gzip.open(filename, 'r') as f:
for line in f:
yield line.decode('utf-8')
else:
with open(filename, 'r', encoding='utf-8') as f:
for line in f:
yield line | Create an iterator that returns the lines of a utf-8 encoded file. | Below is the the instruction that describes the task:
### Input:
Create an iterator that returns the lines of a utf-8 encoded file.
### Response:
def get_lines(filename: str) -> Iterator[str]:
"""Create an iterator that returns the lines of a utf-8 encoded file."""
if filename.endswith('.gz'):
with gzip.open(filename, 'r') as f:
for line in f:
yield line.decode('utf-8')
else:
with open(filename, 'r', encoding='utf-8') as f:
for line in f:
yield line |
def save_intraday(data: pd.DataFrame, ticker: str, dt, typ='TRADE'):
"""
Check whether data is done for the day and save
Args:
data: data
ticker: ticker
dt: date
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Examples:
>>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'
>>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')
>>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')
>>> # Invalid exchange
>>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')
>>> # Invalid empty data
>>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')
>>> # Invalid date - too close
>>> cur_dt = utils.cur_time()
>>> save_intraday(sample, 'AAPL US Equity', cur_dt)
"""
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
logger = logs.get_logger(save_intraday, level='debug')
info = f'{ticker} / {cur_dt} / {typ}'
data_file = hist_file(ticker=ticker, dt=dt, typ=typ)
if not data_file: return
if data.empty:
logger.warning(f'data is empty for {info} ...')
return
exch = const.exch_info(ticker=ticker)
if exch.empty: return
end_time = pd.Timestamp(
const.market_timing(ticker=ticker, dt=dt, timing='FINISHED')
).tz_localize(exch.tz)
now = pd.Timestamp('now', tz=exch.tz) - pd.Timedelta('1H')
if end_time > now:
logger.debug(f'skip saving cause market close ({end_time}) < now - 1H ({now}) ...')
return
logger.info(f'saving data to {data_file} ...')
files.create_folder(data_file, is_file=True)
data.to_parquet(data_file) | Check whether data is done for the day and save
Args:
data: data
ticker: ticker
dt: date
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Examples:
>>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'
>>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')
>>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')
>>> # Invalid exchange
>>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')
>>> # Invalid empty data
>>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')
>>> # Invalid date - too close
>>> cur_dt = utils.cur_time()
>>> save_intraday(sample, 'AAPL US Equity', cur_dt) | Below is the the instruction that describes the task:
### Input:
Check whether data is done for the day and save
Args:
data: data
ticker: ticker
dt: date
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Examples:
>>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'
>>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')
>>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')
>>> # Invalid exchange
>>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')
>>> # Invalid empty data
>>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')
>>> # Invalid date - too close
>>> cur_dt = utils.cur_time()
>>> save_intraday(sample, 'AAPL US Equity', cur_dt)
### Response:
def save_intraday(data: pd.DataFrame, ticker: str, dt, typ='TRADE'):
"""
Check whether data is done for the day and save
Args:
data: data
ticker: ticker
dt: date
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Examples:
>>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'
>>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')
>>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')
>>> # Invalid exchange
>>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')
>>> # Invalid empty data
>>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')
>>> # Invalid date - too close
>>> cur_dt = utils.cur_time()
>>> save_intraday(sample, 'AAPL US Equity', cur_dt)
"""
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
logger = logs.get_logger(save_intraday, level='debug')
info = f'{ticker} / {cur_dt} / {typ}'
data_file = hist_file(ticker=ticker, dt=dt, typ=typ)
if not data_file: return
if data.empty:
logger.warning(f'data is empty for {info} ...')
return
exch = const.exch_info(ticker=ticker)
if exch.empty: return
end_time = pd.Timestamp(
const.market_timing(ticker=ticker, dt=dt, timing='FINISHED')
).tz_localize(exch.tz)
now = pd.Timestamp('now', tz=exch.tz) - pd.Timedelta('1H')
if end_time > now:
logger.debug(f'skip saving cause market close ({end_time}) < now - 1H ({now}) ...')
return
logger.info(f'saving data to {data_file} ...')
files.create_folder(data_file, is_file=True)
data.to_parquet(data_file) |
def ecg_wave_detector(ecg, rpeaks):
"""
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave.The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the ventricular repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
"""
q_waves = []
p_waves = []
q_waves_starts = []
s_waves = []
t_waves = []
t_waves_starts = []
t_waves_ends = []
for index, rpeak in enumerate(rpeaks[:-3]):
try:
epoch_before = np.array(ecg)[int(rpeaks[index-1]):int(rpeak)]
epoch_before = epoch_before[int(len(epoch_before)/2):len(epoch_before)]
epoch_before = list(reversed(epoch_before))
q_wave_index = np.min(find_peaks(epoch_before))
q_wave = rpeak - q_wave_index
p_wave_index = q_wave_index + np.argmax(epoch_before[q_wave_index:])
p_wave = rpeak - p_wave_index
inter_pq = epoch_before[q_wave_index:p_wave_index]
inter_pq_derivative = np.gradient(inter_pq, 2)
q_start_index = find_closest_in_list(len(inter_pq_derivative)/2, find_peaks(inter_pq_derivative))
q_start = q_wave - q_start_index
q_waves.append(q_wave)
p_waves.append(p_wave)
q_waves_starts.append(q_start)
except ValueError:
pass
except IndexError:
pass
try:
epoch_after = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
epoch_after = epoch_after[0:int(len(epoch_after)/2)]
s_wave_index = np.min(find_peaks(epoch_after))
s_wave = rpeak + s_wave_index
t_wave_index = s_wave_index + np.argmax(epoch_after[s_wave_index:])
t_wave = rpeak + t_wave_index
inter_st = epoch_after[s_wave_index:t_wave_index]
inter_st_derivative = np.gradient(inter_st, 2)
t_start_index = find_closest_in_list(len(inter_st_derivative)/2, find_peaks(inter_st_derivative))
t_start = s_wave + t_start_index
t_end = np.min(find_peaks(epoch_after[t_wave_index:]))
t_end = t_wave + t_end
s_waves.append(s_wave)
t_waves.append(t_wave)
t_waves_starts.append(t_start)
t_waves_ends.append(t_end)
except ValueError:
pass
except IndexError:
pass
# pd.Series(epoch_before).plot()
# t_waves = []
# for index, rpeak in enumerate(rpeaks[0:-1]):
#
# epoch = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
# pd.Series(epoch).plot()
#
# # T wave
# middle = (rpeaks[index+1] - rpeak) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak+quarter):int(rpeak+middle)]
#
# try:
# t_wave = int(rpeak+quarter) + np.argmax(epoch)
# t_waves.append(t_wave)
# except ValueError:
# pass
#
# p_waves = []
# for index, rpeak in enumerate(rpeaks[1:]):
# index += 1
# # Q wave
# middle = (rpeak - rpeaks[index-1]) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak-middle):int(rpeak-quarter)]
#
# try:
# p_wave = int(rpeak-quarter) + np.argmax(epoch)
# p_waves.append(p_wave)
# except ValueError:
# pass
#
# q_waves = []
# for index, p_wave in enumerate(p_waves):
# epoch = np.array(ecg)[int(p_wave):int(rpeaks[rpeaks>p_wave][0])]
#
# try:
# q_wave = p_wave + np.argmin(epoch)
# q_waves.append(q_wave)
# except ValueError:
# pass
#
# # TODO: manage to find the begininng of the Q and the end of the T wave so we can extract the QT interval
ecg_waves = {"T_Waves": t_waves,
"P_Waves": p_waves,
"Q_Waves": q_waves,
"S_Waves": s_waves,
"Q_Waves_Onsets": q_waves_starts,
"T_Waves_Onsets": t_waves_starts,
"T_Waves_Ends": t_waves_ends}
return(ecg_waves) | Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave.The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the ventricular repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_ | Below is the instruction that describes the task:
### Input:
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave.The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the ventricular repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
### Response:
def ecg_wave_detector(ecg, rpeaks):
"""
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave.The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the ventricular repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
"""
q_waves = []
p_waves = []
q_waves_starts = []
s_waves = []
t_waves = []
t_waves_starts = []
t_waves_ends = []
for index, rpeak in enumerate(rpeaks[:-3]):
try:
epoch_before = np.array(ecg)[int(rpeaks[index-1]):int(rpeak)]
epoch_before = epoch_before[int(len(epoch_before)/2):len(epoch_before)]
epoch_before = list(reversed(epoch_before))
q_wave_index = np.min(find_peaks(epoch_before))
q_wave = rpeak - q_wave_index
p_wave_index = q_wave_index + np.argmax(epoch_before[q_wave_index:])
p_wave = rpeak - p_wave_index
inter_pq = epoch_before[q_wave_index:p_wave_index]
inter_pq_derivative = np.gradient(inter_pq, 2)
q_start_index = find_closest_in_list(len(inter_pq_derivative)/2, find_peaks(inter_pq_derivative))
q_start = q_wave - q_start_index
q_waves.append(q_wave)
p_waves.append(p_wave)
q_waves_starts.append(q_start)
except ValueError:
pass
except IndexError:
pass
try:
epoch_after = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
epoch_after = epoch_after[0:int(len(epoch_after)/2)]
s_wave_index = np.min(find_peaks(epoch_after))
s_wave = rpeak + s_wave_index
t_wave_index = s_wave_index + np.argmax(epoch_after[s_wave_index:])
t_wave = rpeak + t_wave_index
inter_st = epoch_after[s_wave_index:t_wave_index]
inter_st_derivative = np.gradient(inter_st, 2)
t_start_index = find_closest_in_list(len(inter_st_derivative)/2, find_peaks(inter_st_derivative))
t_start = s_wave + t_start_index
t_end = np.min(find_peaks(epoch_after[t_wave_index:]))
t_end = t_wave + t_end
s_waves.append(s_wave)
t_waves.append(t_wave)
t_waves_starts.append(t_start)
t_waves_ends.append(t_end)
except ValueError:
pass
except IndexError:
pass
# pd.Series(epoch_before).plot()
# t_waves = []
# for index, rpeak in enumerate(rpeaks[0:-1]):
#
# epoch = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
# pd.Series(epoch).plot()
#
# # T wave
# middle = (rpeaks[index+1] - rpeak) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak+quarter):int(rpeak+middle)]
#
# try:
# t_wave = int(rpeak+quarter) + np.argmax(epoch)
# t_waves.append(t_wave)
# except ValueError:
# pass
#
# p_waves = []
# for index, rpeak in enumerate(rpeaks[1:]):
# index += 1
# # Q wave
# middle = (rpeak - rpeaks[index-1]) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak-middle):int(rpeak-quarter)]
#
# try:
# p_wave = int(rpeak-quarter) + np.argmax(epoch)
# p_waves.append(p_wave)
# except ValueError:
# pass
#
# q_waves = []
# for index, p_wave in enumerate(p_waves):
# epoch = np.array(ecg)[int(p_wave):int(rpeaks[rpeaks>p_wave][0])]
#
# try:
# q_wave = p_wave + np.argmin(epoch)
# q_waves.append(q_wave)
# except ValueError:
# pass
#
# # TODO: manage to find the begininng of the Q and the end of the T wave so we can extract the QT interval
ecg_waves = {"T_Waves": t_waves,
"P_Waves": p_waves,
"Q_Waves": q_waves,
"S_Waves": s_waves,
"Q_Waves_Onsets": q_waves_starts,
"T_Waves_Onsets": t_waves_starts,
"T_Waves_Ends": t_waves_ends}
return(ecg_waves) |
def report(self, request: 'Request'=None, state: Text=None):
"""
Report current exception to Sentry.
"""
self._make_context(request, state)
self.client.captureException()
self._clear_context() | Report current exception to Sentry. | Below is the instruction that describes the task:
### Input:
Report current exception to Sentry.
### Response:
def report(self, request: 'Request'=None, state: Text=None):
"""
Report current exception to Sentry.
"""
self._make_context(request, state)
self.client.captureException()
self._clear_context() |
def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs) | Add the default alternative dropdown with appropriate choices | Below is the instruction that describes the task:
### Input:
Add the default alternative dropdown with appropriate choices
### Response:
def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs) |
def get_axis_grid(self, ind):
"""
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)] | Returns the grid for a particular axis.
Args:
ind (int): Axis index. | Below is the instruction that describes the task:
### Input:
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
### Response:
def get_axis_grid(self, ind):
"""
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)] |
def p_try_statement_3(self, p):
"""try_statement : TRY block catch finally"""
p[0] = self.asttypes.Try(statements=p[2], catch=p[3], fin=p[4])
p[0].setpos(p) | try_statement : TRY block catch finally | Below is the instruction that describes the task:
### Input:
try_statement : TRY block catch finally
### Response:
def p_try_statement_3(self, p):
"""try_statement : TRY block catch finally"""
p[0] = self.asttypes.Try(statements=p[2], catch=p[3], fin=p[4])
p[0].setpos(p) |
def copy_unit_spike_features(self, sorting, unit_ids=None):
'''Copy unit spike features from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the spike features will be copied
unit_ids: (array_like, int)
The list (or single value) of unit_ids for which the spike features will be copied.
def get_unit_spike_features(self, unit_id, feature_name, start_frame=None, end_frame=None):
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
if isinstance(unit_ids, int):
curr_feature_names = sorting.get_unit_spike_feature_names(unit_id=unit_ids)
for curr_feature_name in curr_feature_names:
value = sorting.get_unit_spike_features(unit_id=unit_ids, feature_name=curr_feature_name)
self.set_unit_spike_features(unit_id=unit_ids, feature_name=curr_feature_name, value=value)
else:
for unit_id in unit_ids:
curr_feature_names = sorting.get_unit_spike_feature_names(unit_id=unit_id)
for curr_feature_name in curr_feature_names:
value = sorting.get_unit_spike_features(unit_id=unit_id, feature_name=curr_feature_name)
self.set_unit_spike_features(unit_id=unit_id, feature_name=curr_feature_name, value=value) | Copy unit spike features from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the spike features will be copied
unit_ids: (array_like, int)
The list (or single value) of unit_ids for which the spike features will be copied.
def get_unit_spike_features(self, unit_id, feature_name, start_frame=None, end_frame=None): | Below is the instruction that describes the task:
### Input:
Copy unit spike features from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the spike features will be copied
unit_ids: (array_like, int)
The list (or single value) of unit_ids for which the spike features will be copied.
def get_unit_spike_features(self, unit_id, feature_name, start_frame=None, end_frame=None):
### Response:
def copy_unit_spike_features(self, sorting, unit_ids=None):
'''Copy unit spike features from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the spike features will be copied
unit_ids: (array_like, int)
The list (or single value) of unit_ids for which the spike features will be copied.
def get_unit_spike_features(self, unit_id, feature_name, start_frame=None, end_frame=None):
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
if isinstance(unit_ids, int):
curr_feature_names = sorting.get_unit_spike_feature_names(unit_id=unit_ids)
for curr_feature_name in curr_feature_names:
value = sorting.get_unit_spike_features(unit_id=unit_ids, feature_name=curr_feature_name)
self.set_unit_spike_features(unit_id=unit_ids, feature_name=curr_feature_name, value=value)
else:
for unit_id in unit_ids:
curr_feature_names = sorting.get_unit_spike_feature_names(unit_id=unit_id)
for curr_feature_name in curr_feature_names:
value = sorting.get_unit_spike_features(unit_id=unit_id, feature_name=curr_feature_name)
self.set_unit_spike_features(unit_id=unit_id, feature_name=curr_feature_name, value=value) |
def this_year():
""" Return start and end date of this fiscal year """
since = TODAY
while since.month != 3 or since.day != 1:
since -= delta(days=1)
until = since + delta(years=1)
return Date(since), Date(until) | Return start and end date of this fiscal year | Below is the instruction that describes the task:
### Input:
Return start and end date of this fiscal year
### Response:
def this_year():
""" Return start and end date of this fiscal year """
since = TODAY
while since.month != 3 or since.day != 1:
since -= delta(days=1)
until = since + delta(years=1)
return Date(since), Date(until) |
def from_bytes(self, string):
"""Deserialize the binder's annotations from a byte string."""
msg = srsly.msgpack_loads(gzip.decompress(string))
self.attrs = msg["attrs"]
self.strings = set(msg["strings"])
lengths = numpy.fromstring(msg["lengths"], dtype="int32")
flat_spaces = numpy.fromstring(msg["spaces"], dtype=bool)
flat_tokens = numpy.fromstring(msg["tokens"], dtype="uint64")
shape = (flat_tokens.size // len(self.attrs), len(self.attrs))
flat_tokens = flat_tokens.reshape(shape)
flat_spaces = flat_spaces.reshape((flat_spaces.size, 1))
self.tokens = NumpyOps().unflatten(flat_tokens, lengths)
self.spaces = NumpyOps().unflatten(flat_spaces, lengths)
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape
return self | Deserialize the binder's annotations from a byte string. | Below is the instruction that describes the task:
### Input:
Deserialize the binder's annotations from a byte string.
### Response:
def from_bytes(self, string):
"""Deserialize the binder's annotations from a byte string."""
msg = srsly.msgpack_loads(gzip.decompress(string))
self.attrs = msg["attrs"]
self.strings = set(msg["strings"])
lengths = numpy.fromstring(msg["lengths"], dtype="int32")
flat_spaces = numpy.fromstring(msg["spaces"], dtype=bool)
flat_tokens = numpy.fromstring(msg["tokens"], dtype="uint64")
shape = (flat_tokens.size // len(self.attrs), len(self.attrs))
flat_tokens = flat_tokens.reshape(shape)
flat_spaces = flat_spaces.reshape((flat_spaces.size, 1))
self.tokens = NumpyOps().unflatten(flat_tokens, lengths)
self.spaces = NumpyOps().unflatten(flat_spaces, lengths)
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape
return self |
def as_json(self):
"""Return the proxy's properties in JSON format.
:rtype: dict
"""
info = {
'host': self.host,
'port': self.port,
'geo': {
'country': {'code': self._geo.code, 'name': self._geo.name},
'region': {
'code': self._geo.region_code,
'name': self._geo.region_name,
},
'city': self._geo.city_name,
},
'types': [],
'avg_resp_time': self.avg_resp_time,
'error_rate': self.error_rate,
}
order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731
for tp, lvl in sorted(self.types.items(), key=order):
info['types'].append({'type': tp, 'level': lvl or ''})
return info | Return the proxy's properties in JSON format.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Return the proxy's properties in JSON format.
:rtype: dict
### Response:
def as_json(self):
"""Return the proxy's properties in JSON format.
:rtype: dict
"""
info = {
'host': self.host,
'port': self.port,
'geo': {
'country': {'code': self._geo.code, 'name': self._geo.name},
'region': {
'code': self._geo.region_code,
'name': self._geo.region_name,
},
'city': self._geo.city_name,
},
'types': [],
'avg_resp_time': self.avg_resp_time,
'error_rate': self.error_rate,
}
order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731
for tp, lvl in sorted(self.types.items(), key=order):
info['types'].append({'type': tp, 'level': lvl or ''})
return info |
def _selu(attrs, inputs, proto_obj):
"""Selu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
return 'LeakyReLU', new_attrs, inputs | Selu function | Below is the instruction that describes the task:
### Input:
Selu function
### Response:
def _selu(attrs, inputs, proto_obj):
"""Selu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
return 'LeakyReLU', new_attrs, inputs |
def _dump_csv(self, rows):
"""Take a list of dicts and return it as a CSV value. The
.. versionchanged:: 4.0.0
:param list rows: A list of lists to return as a CSV
:rtype: str
"""
self.logger.debug('Writing %r', rows)
csv = self._maybe_import('csv')
buff = io.StringIO() if _PYTHON3 else io.BytesIO()
writer = csv.DictWriter(
buff,
sorted(set([k for r in rows for k in r.keys()])),
dialect='excel')
writer.writeheader()
writer.writerows(rows)
value = buff.getvalue()
buff.close()
return value | Take a list of dicts and return it as a CSV value. The
.. versionchanged:: 4.0.0
:param list rows: A list of lists to return as a CSV
:rtype: str | Below is the instruction that describes the task:
### Input:
Take a list of dicts and return it as a CSV value. The
.. versionchanged:: 4.0.0
:param list rows: A list of lists to return as a CSV
:rtype: str
### Response:
def _dump_csv(self, rows):
"""Take a list of dicts and return it as a CSV value. The
.. versionchanged:: 4.0.0
:param list rows: A list of lists to return as a CSV
:rtype: str
"""
self.logger.debug('Writing %r', rows)
csv = self._maybe_import('csv')
buff = io.StringIO() if _PYTHON3 else io.BytesIO()
writer = csv.DictWriter(
buff,
sorted(set([k for r in rows for k in r.keys()])),
dialect='excel')
writer.writeheader()
writer.writerows(rows)
value = buff.getvalue()
buff.close()
return value |
def _set_untagged_vlan(self, v, load=False):
"""
Setter method for untagged_vlan, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/untagged/untagged_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_untagged_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_untagged_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=untagged_vlan.untagged_vlan, is_container='container', presence=False, yang_name="untagged-vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure untagged VLAN for this logical interface', u'alt-name': u'vlan', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """untagged_vlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=untagged_vlan.untagged_vlan, is_container='container', presence=False, yang_name="untagged-vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure untagged VLAN for this logical interface', u'alt-name': u'vlan', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)""",
})
self.__untagged_vlan = t
if hasattr(self, '_set'):
self._set() | Setter method for untagged_vlan, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/untagged/untagged_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_untagged_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_untagged_vlan() directly. | Below is the instruction that describes the task:
### Input:
Setter method for untagged_vlan, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/untagged/untagged_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_untagged_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_untagged_vlan() directly.
### Response:
def _set_untagged_vlan(self, v, load=False):
"""
Setter method for untagged_vlan, mapped from YANG variable /interface/port_channel/logical_interface/port_channel/pc_cmd_container_dummy/service_instance_vlan_cmds_dummy_container/get_untagged_vlan_dummy/untagged/untagged_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_untagged_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_untagged_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=untagged_vlan.untagged_vlan, is_container='container', presence=False, yang_name="untagged-vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure untagged VLAN for this logical interface', u'alt-name': u'vlan', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """untagged_vlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=untagged_vlan.untagged_vlan, is_container='container', presence=False, yang_name="untagged-vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure untagged VLAN for this logical interface', u'alt-name': u'vlan', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lif', defining_module='brocade-lif', yang_type='container', is_config=True)""",
})
self.__untagged_vlan = t
if hasattr(self, '_set'):
self._set() |
def get_longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple
"""
if string is None:
return 0, ''
sub_string = ''
dict = {}
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in dict:
j = max(dict[string[i]], j)
dict[string[i]] = i + 1
if i - j + 1 > max_length:
max_length = i - j + 1
sub_string = string[j: i + 1]
return max_length, sub_string | Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple | Below is the instruction that describes the task:
### Input:
Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple
### Response:
def get_longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple
"""
if string is None:
return 0, ''
sub_string = ''
dict = {}
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in dict:
j = max(dict[string[i]], j)
dict[string[i]] = i + 1
if i - j + 1 > max_length:
max_length = i - j + 1
sub_string = string[j: i + 1]
return max_length, sub_string |
def _symlink_bcbio(args, script="bcbio_nextgen.py", env_name=None, prefix=None):
"""Ensure a bcbio-nextgen script symlink in final tool directory.
"""
if env_name:
bcbio_anaconda = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(sys.executable))),
"envs", env_name, "bin", script)
else:
bcbio_anaconda = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), script)
bindir = os.path.join(args.tooldir, "bin")
if not os.path.exists(bindir):
os.makedirs(bindir)
if prefix:
script = "%s_%s" % (prefix, script)
bcbio_final = os.path.join(bindir, script)
if not os.path.exists(bcbio_final):
if os.path.lexists(bcbio_final):
subprocess.check_call(["rm", "-f", bcbio_final])
subprocess.check_call(["ln", "-s", bcbio_anaconda, bcbio_final]) | Ensure a bcbio-nextgen script symlink in final tool directory. | Below is the instruction that describes the task:
### Input:
Ensure a bcbio-nextgen script symlink in final tool directory.
### Response:
def _symlink_bcbio(args, script="bcbio_nextgen.py", env_name=None, prefix=None):
"""Ensure a bcbio-nextgen script symlink in final tool directory.
"""
if env_name:
bcbio_anaconda = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(sys.executable))),
"envs", env_name, "bin", script)
else:
bcbio_anaconda = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), script)
bindir = os.path.join(args.tooldir, "bin")
if not os.path.exists(bindir):
os.makedirs(bindir)
if prefix:
script = "%s_%s" % (prefix, script)
bcbio_final = os.path.join(bindir, script)
if not os.path.exists(bcbio_final):
if os.path.lexists(bcbio_final):
subprocess.check_call(["rm", "-f", bcbio_final])
subprocess.check_call(["ln", "-s", bcbio_anaconda, bcbio_final]) |
def add(self, addend_mat, axis=1):
"""
In-place addition
:param addend_mat: A matrix to be added on the Sparse3DMatrix object
:param axis: The dimension along the addend_mat is added
:return: Nothing (as it performs in-place operations)
"""
if self.finalized:
if axis == 0:
raise NotImplementedError('The method is not yet implemented for the axis.')
elif axis == 1:
for hid in xrange(self.shape[1]):
self.data[hid] = self.data[hid] + addend_mat
elif axis == 2:
raise NotImplementedError('The method is not yet implemented for the axis.')
else:
raise RuntimeError('The axis should be 0, 1, or 2.')
else:
raise RuntimeError('The original matrix must be finalized.') | In-place addition
:param addend_mat: A matrix to be added on the Sparse3DMatrix object
:param axis: The dimension along the addend_mat is added
:return: Nothing (as it performs in-place operations) | Below is the instruction that describes the task:
### Input:
In-place addition
:param addend_mat: A matrix to be added on the Sparse3DMatrix object
:param axis: The dimension along the addend_mat is added
:return: Nothing (as it performs in-place operations)
### Response:
def add(self, addend_mat, axis=1):
"""
In-place addition
:param addend_mat: A matrix to be added on the Sparse3DMatrix object
:param axis: The dimension along the addend_mat is added
:return: Nothing (as it performs in-place operations)
"""
if self.finalized:
if axis == 0:
raise NotImplementedError('The method is not yet implemented for the axis.')
elif axis == 1:
for hid in xrange(self.shape[1]):
self.data[hid] = self.data[hid] + addend_mat
elif axis == 2:
raise NotImplementedError('The method is not yet implemented for the axis.')
else:
raise RuntimeError('The axis should be 0, 1, or 2.')
else:
raise RuntimeError('The original matrix must be finalized.') |
def describe_db_subnet_groups(name=None, filters=None, jmespath='DBSubnetGroups',
region=None, key=None, keyid=None, profile=None):
'''
Return a detailed listing of some, or all, DB Subnet Groups visible in the
current scope. Arbitrary subelements or subsections of the returned dataset
can be selected by passing in a valid JMSEPath filter as well.
CLI example::
salt myminion boto_rds.describe_db_subnet_groups
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
pag = conn.get_paginator('describe_db_subnet_groups')
args = {}
args.update({'DBSubnetGroupName': name}) if name else None
args.update({'Filters': filters}) if filters else None
pit = pag.paginate(**args)
pit = pit.search(jmespath) if jmespath else pit
return [p for p in pit] | Return a detailed listing of some, or all, DB Subnet Groups visible in the
current scope. Arbitrary subelements or subsections of the returned dataset
can be selected by passing in a valid JMSEPath filter as well.
CLI example::
salt myminion boto_rds.describe_db_subnet_groups | Below is the instruction that describes the task:
### Input:
Return a detailed listing of some, or all, DB Subnet Groups visible in the
current scope. Arbitrary subelements or subsections of the returned dataset
can be selected by passing in a valid JMSEPath filter as well.
CLI example::
salt myminion boto_rds.describe_db_subnet_groups
### Response:
def describe_db_subnet_groups(name=None, filters=None, jmespath='DBSubnetGroups',
region=None, key=None, keyid=None, profile=None):
'''
Return a detailed listing of some, or all, DB Subnet Groups visible in the
current scope. Arbitrary subelements or subsections of the returned dataset
can be selected by passing in a valid JMSEPath filter as well.
CLI example::
salt myminion boto_rds.describe_db_subnet_groups
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
pag = conn.get_paginator('describe_db_subnet_groups')
args = {}
args.update({'DBSubnetGroupName': name}) if name else None
args.update({'Filters': filters}) if filters else None
pit = pag.paginate(**args)
pit = pit.search(jmespath) if jmespath else pit
return [p for p in pit] |
def get_type_string(item):
"""Return type string of an object."""
if isinstance(item, DataFrame):
return "DataFrame"
if isinstance(item, Index):
return type(item).__name__
if isinstance(item, Series):
return "Series"
found = re.findall(r"<(?:type|class) '(\S*)'>",
to_text_string(type(item)))
if found:
return found[0] | Return type string of an object. | Below is the instruction that describes the task:
### Input:
Return type string of an object.
### Response:
def get_type_string(item):
"""Return type string of an object."""
if isinstance(item, DataFrame):
return "DataFrame"
if isinstance(item, Index):
return type(item).__name__
if isinstance(item, Series):
return "Series"
found = re.findall(r"<(?:type|class) '(\S*)'>",
to_text_string(type(item)))
if found:
return found[0] |
def plot_options(cls, obj, percent_size):
"""
Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options.
"""
obj = obj.last if isinstance(obj, HoloMap) else obj
plot = Store.registry[cls.backend].get(type(obj), None)
if not hasattr(plot, 'width') or not hasattr(plot, 'height'):
from .plot import BokehPlot
plot = BokehPlot
options = plot.lookup_options(obj, 'plot').options
width = options.get('width', plot.width)
height = options.get('height', plot.height)
if width is not None:
width = int(width)
if height is not None:
height = int(height)
return dict(options, **{'width': width, 'height': height}) | Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options. | Below is the the instruction that describes the task:
### Input:
Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options.
### Response:
def plot_options(cls, obj, percent_size):
"""
Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options.
"""
obj = obj.last if isinstance(obj, HoloMap) else obj
plot = Store.registry[cls.backend].get(type(obj), None)
if not hasattr(plot, 'width') or not hasattr(plot, 'height'):
from .plot import BokehPlot
plot = BokehPlot
options = plot.lookup_options(obj, 'plot').options
width = options.get('width', plot.width)
height = options.get('height', plot.height)
if width is not None:
width = int(width)
if height is not None:
height = int(height)
return dict(options, **{'width': width, 'height': height}) |
async def walk_query(obj, object_resolver, connection_resolver, errors, current_user=None, __naut_name=None, obey_auth=True, **filters):
"""
This function traverses a query and collects the corresponding
information in a dictionary.
"""
# if the object has no selection set
if not hasattr(obj, 'selection_set'):
# yell loudly
raise ValueError("Can only resolve objects, not primitive types")
# the name of the node
node_name = __naut_name or obj.name.value if obj.name else obj.operation
# the selected fields
selection_set = obj.selection_set.selections
def _build_arg_tree(arg):
"""
This function recursively builds the arguments for lists and single values
"""
# TODO: what about object arguments??
# if there is a single value
if hasattr(arg, 'value'):
# assign the value to the filter
return arg.value
# otherwise if there are multiple values for the argument
elif hasattr(arg, 'values'):
return [_build_arg_tree(node) for node in arg.values]
# for each argument on this node
for arg in obj.arguments:
# add it to the query filters
filters[arg.name.value] = _build_arg_tree(arg.value)
# the fields we have to ask for
fields = [field for field in selection_set if not field.selection_set]
# the links between objects
connections = [field for field in selection_set if field.selection_set]
try:
# resolve the model with the given fields
models = await object_resolver(node_name, [field.name.value for field in fields], current_user=current_user, obey_auth=obey_auth, **filters)
# if something went wrong resolving the object
except Exception as e:
# add the error as a string
errors.append(e.__str__())
# stop here
return None
# add connections to each matching model
for model in models:
# if is an id for the model
if 'pk' in model:
# for each connection
for connection in connections:
# the name of the connection
connection_name = connection.name.value
# the target of the connection
node = {
'name': node_name,
'pk': model['pk']
}
try:
# go through the connection
connected_ids, next_target = await connection_resolver(
connection_name,
node,
)
# if there are connections
if connected_ids:
# add the id filter to the list
filters['pk_in'] = connected_ids
# add the connection field
value = await walk_query(
connection,
object_resolver,
connection_resolver,
errors,
current_user=current_user,
obey_auth=obey_auth,
__naut_name=next_target,
**filters
)
# there were no connections
else:
value = []
# if something went wrong
except Exception as e:
# add the error as a string
errors.append(e.__str__())
# stop here
value = None
# set the connection to the appropriate value
model[connection_name] = value
# return the list of matching models
return models | This function traverses a query and collects the corresponding
information in a dictionary. | Below is the instruction that describes the task:
### Input:
This function traverses a query and collects the corresponding
information in a dictionary.
### Response:
async def walk_query(obj, object_resolver, connection_resolver, errors, current_user=None, __naut_name=None, obey_auth=True, **filters):
"""
This function traverses a query and collects the corresponding
information in a dictionary.
"""
# if the object has no selection set
if not hasattr(obj, 'selection_set'):
# yell loudly
raise ValueError("Can only resolve objects, not primitive types")
# the name of the node
node_name = __naut_name or obj.name.value if obj.name else obj.operation
# the selected fields
selection_set = obj.selection_set.selections
def _build_arg_tree(arg):
"""
This function recursively builds the arguments for lists and single values
"""
# TODO: what about object arguments??
# if there is a single value
if hasattr(arg, 'value'):
# assign the value to the filter
return arg.value
# otherwise if there are multiple values for the argument
elif hasattr(arg, 'values'):
return [_build_arg_tree(node) for node in arg.values]
# for each argument on this node
for arg in obj.arguments:
# add it to the query filters
filters[arg.name.value] = _build_arg_tree(arg.value)
# the fields we have to ask for
fields = [field for field in selection_set if not field.selection_set]
# the links between objects
connections = [field for field in selection_set if field.selection_set]
try:
# resolve the model with the given fields
models = await object_resolver(node_name, [field.name.value for field in fields], current_user=current_user, obey_auth=obey_auth, **filters)
# if something went wrong resolving the object
except Exception as e:
# add the error as a string
errors.append(e.__str__())
# stop here
return None
# add connections to each matching model
for model in models:
# if is an id for the model
if 'pk' in model:
# for each connection
for connection in connections:
# the name of the connection
connection_name = connection.name.value
# the target of the connection
node = {
'name': node_name,
'pk': model['pk']
}
try:
# go through the connection
connected_ids, next_target = await connection_resolver(
connection_name,
node,
)
# if there are connections
if connected_ids:
# add the id filter to the list
filters['pk_in'] = connected_ids
# add the connection field
value = await walk_query(
connection,
object_resolver,
connection_resolver,
errors,
current_user=current_user,
obey_auth=obey_auth,
__naut_name=next_target,
**filters
)
# there were no connections
else:
value = []
# if something went wrong
except Exception as e:
# add the error as a string
errors.append(e.__str__())
# stop here
value = None
# set the connection to the appropriate value
model[connection_name] = value
# return the list of matching models
return models |
def add_method(self, pattern):
"""Decorator to add new dispatch functions."""
def wrap(f):
def frozen_function(class_instance, f):
def _(pattern, *args, **kwargs):
return f(class_instance, pattern, *args, **kwargs)
return _
self.functions.append((frozen_function(self, f), pattern))
return f
return wrap | Decorator to add new dispatch functions. | Below is the instruction that describes the task:
### Input:
Decorator to add new dispatch functions.
### Response:
def add_method(self, pattern):
"""Decorator to add new dispatch functions."""
def wrap(f):
def frozen_function(class_instance, f):
def _(pattern, *args, **kwargs):
return f(class_instance, pattern, *args, **kwargs)
return _
self.functions.append((frozen_function(self, f), pattern))
return f
return wrap |
def grab(self, bbox=None):
"""Grabs an image directly to a buffer.
:param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates
of sub-region to capture.
:return: PIL RGB image
:raises: ValueError, if image data does not have 3 channels (RGB), each with 8
bits.
:rtype: Image
"""
w = Gdk.get_default_root_window()
if bbox is not None:
g = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
else:
g = w.get_geometry()
pb = Gdk.pixbuf_get_from_window(w, *g)
if pb.get_bits_per_sample() != 8:
raise ValueError('Expected 8 bits per pixel.')
elif pb.get_n_channels() != 3:
raise ValueError('Expected RGB image.')
# Read the entire buffer into a python bytes object.
# read_pixel_bytes: New in version 2.32.
pixel_bytes = pb.read_pixel_bytes().get_data() # type: bytes
width, height = g[2], g[3]
# Probably for SSE alignment reasons, the pixbuf has extra data in each line.
# The args after "raw" help handle this; see
# http://effbot.org/imagingbook/decoder.htm#the-raw-decoder
return Image.frombytes(
'RGB', (width, height), pixel_bytes, 'raw', 'RGB', pb.get_rowstride(), 1) | Grabs an image directly to a buffer.
:param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates
of sub-region to capture.
:return: PIL RGB image
:raises: ValueError, if image data does not have 3 channels (RGB), each with 8
bits.
:rtype: Image | Below is the instruction that describes the task:
### Input:
Grabs an image directly to a buffer.
:param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates
of sub-region to capture.
:return: PIL RGB image
:raises: ValueError, if image data does not have 3 channels (RGB), each with 8
bits.
:rtype: Image
### Response:
def grab(self, bbox=None):
"""Grabs an image directly to a buffer.
:param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates
of sub-region to capture.
:return: PIL RGB image
:raises: ValueError, if image data does not have 3 channels (RGB), each with 8
bits.
:rtype: Image
"""
w = Gdk.get_default_root_window()
if bbox is not None:
g = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
else:
g = w.get_geometry()
pb = Gdk.pixbuf_get_from_window(w, *g)
if pb.get_bits_per_sample() != 8:
raise ValueError('Expected 8 bits per pixel.')
elif pb.get_n_channels() != 3:
raise ValueError('Expected RGB image.')
# Read the entire buffer into a python bytes object.
# read_pixel_bytes: New in version 2.32.
pixel_bytes = pb.read_pixel_bytes().get_data() # type: bytes
width, height = g[2], g[3]
# Probably for SSE alignment reasons, the pixbuf has extra data in each line.
# The args after "raw" help handle this; see
# http://effbot.org/imagingbook/decoder.htm#the-raw-decoder
return Image.frombytes(
'RGB', (width, height), pixel_bytes, 'raw', 'RGB', pb.get_rowstride(), 1) |
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_date_fields(year, month, day)
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo) | Return a new datetime with new values for the specified fields. | Below is the instruction that describes the task:
### Input:
Return a new datetime with new values for the specified fields.
### Response:
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_date_fields(year, month, day)
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo) |
def add_mdp_includes(topology=None, kwargs=None):
"""Set the mdp *include* key in the *kwargs* dict.
1. Add the directory containing *topology*.
2. Add all directories appearing under the key *includes*
3. Generate a string of the form "-Idir1 -Idir2 ..." that
is stored under the key *include* (the corresponding
mdp parameter)
By default, the directories ``.`` and ``..`` are also added to the
*include* string for the mdp; when fed into
:func:`gromacs.cbook.edit_mdp` it will result in a line such as ::
include = -I. -I.. -I../topology_dir ....
Note that the user can always override the behaviour by setting
the *include* keyword herself; in this case this function does
nothing.
If no *kwargs* were supplied then a dict is generated with the
single *include* entry.
:Arguments:
*topology* : top filename
Topology file; the name of the enclosing directory is added
to the include path (if supplied) [``None``]
*kwargs* : dict
Optional dictionary of mdp keywords; will be modified in place.
If it contains the *includes* keyword with either a single string
or a list of strings then these paths will be added to the
include statement.
:Returns: *kwargs* with the *include* keyword added if it did not
exist previously; if the keyword already existed, nothing
happens.
.. Note:: The *kwargs* dict is **modified in place**. This
function is a bit of a hack. It might be removed once
all setup functions become methods in a nice class.
"""
if kwargs is None:
kwargs = {}
include_dirs = ['.', '..'] # should . & .. always be added?
if topology is not None:
# half-hack: find additional itps in the same directory as the topology
topology_dir = os.path.dirname(topology)
include_dirs.append(topology_dir)
include_dirs.extend(asiterable(kwargs.pop('includes', []))) # includes can be a list or a string
# 1. setdefault: we do nothing if user defined include
# 2. modify input in place!
kwargs.setdefault('include', _mdp_include_string(include_dirs))
return kwargs | Set the mdp *include* key in the *kwargs* dict.
1. Add the directory containing *topology*.
2. Add all directories appearing under the key *includes*
3. Generate a string of the form "-Idir1 -Idir2 ..." that
is stored under the key *include* (the corresponding
mdp parameter)
By default, the directories ``.`` and ``..`` are also added to the
*include* string for the mdp; when fed into
:func:`gromacs.cbook.edit_mdp` it will result in a line such as ::
include = -I. -I.. -I../topology_dir ....
Note that the user can always override the behaviour by setting
the *include* keyword herself; in this case this function does
nothing.
If no *kwargs* were supplied then a dict is generated with the
single *include* entry.
:Arguments:
*topology* : top filename
Topology file; the name of the enclosing directory is added
to the include path (if supplied) [``None``]
*kwargs* : dict
Optional dictionary of mdp keywords; will be modified in place.
If it contains the *includes* keyword with either a single string
or a list of strings then these paths will be added to the
include statement.
:Returns: *kwargs* with the *include* keyword added if it did not
exist previously; if the keyword already existed, nothing
happens.
.. Note:: The *kwargs* dict is **modified in place**. This
function is a bit of a hack. It might be removed once
all setup functions become methods in a nice class. | Below is the instruction that describes the task:
### Input:
Set the mdp *include* key in the *kwargs* dict.
1. Add the directory containing *topology*.
2. Add all directories appearing under the key *includes*
3. Generate a string of the form "-Idir1 -Idir2 ..." that
is stored under the key *include* (the corresponding
mdp parameter)
By default, the directories ``.`` and ``..`` are also added to the
*include* string for the mdp; when fed into
:func:`gromacs.cbook.edit_mdp` it will result in a line such as ::
include = -I. -I.. -I../topology_dir ....
Note that the user can always override the behaviour by setting
the *include* keyword herself; in this case this function does
nothing.
If no *kwargs* were supplied then a dict is generated with the
single *include* entry.
:Arguments:
*topology* : top filename
Topology file; the name of the enclosing directory is added
to the include path (if supplied) [``None``]
*kwargs* : dict
Optional dictionary of mdp keywords; will be modified in place.
If it contains the *includes* keyword with either a single string
or a list of strings then these paths will be added to the
include statement.
:Returns: *kwargs* with the *include* keyword added if it did not
exist previously; if the keyword already existed, nothing
happens.
.. Note:: The *kwargs* dict is **modified in place**. This
function is a bit of a hack. It might be removed once
all setup functions become methods in a nice class.
### Response:
def add_mdp_includes(topology=None, kwargs=None):
"""Set the mdp *include* key in the *kwargs* dict.
1. Add the directory containing *topology*.
2. Add all directories appearing under the key *includes*
3. Generate a string of the form "-Idir1 -Idir2 ..." that
is stored under the key *include* (the corresponding
mdp parameter)
By default, the directories ``.`` and ``..`` are also added to the
*include* string for the mdp; when fed into
:func:`gromacs.cbook.edit_mdp` it will result in a line such as ::
include = -I. -I.. -I../topology_dir ....
Note that the user can always override the behaviour by setting
the *include* keyword herself; in this case this function does
nothing.
If no *kwargs* were supplied then a dict is generated with the
single *include* entry.
:Arguments:
*topology* : top filename
Topology file; the name of the enclosing directory is added
to the include path (if supplied) [``None``]
*kwargs* : dict
Optional dictionary of mdp keywords; will be modified in place.
If it contains the *includes* keyword with either a single string
or a list of strings then these paths will be added to the
include statement.
:Returns: *kwargs* with the *include* keyword added if it did not
exist previously; if the keyword already existed, nothing
happens.
.. Note:: The *kwargs* dict is **modified in place**. This
function is a bit of a hack. It might be removed once
all setup functions become methods in a nice class.
"""
if kwargs is None:
kwargs = {}
include_dirs = ['.', '..'] # should . & .. always be added?
if topology is not None:
# half-hack: find additional itps in the same directory as the topology
topology_dir = os.path.dirname(topology)
include_dirs.append(topology_dir)
include_dirs.extend(asiterable(kwargs.pop('includes', []))) # includes can be a list or a string
# 1. setdefault: we do nothing if user defined include
# 2. modify input in place!
kwargs.setdefault('include', _mdp_include_string(include_dirs))
return kwargs |
def format(self):
"""The format of the image file.
An uppercase string corresponding to the
:attr:`PIL.ImageFile.ImageFile.format` attribute. Valid values include
``"JPEG"`` and ``"PNG"``.
"""
if self._format:
return self._format
elif self.pil_image:
return self.pil_image.format | The format of the image file.
An uppercase string corresponding to the
:attr:`PIL.ImageFile.ImageFile.format` attribute. Valid values include
``"JPEG"`` and ``"PNG"``. | Below is the instruction that describes the task:
### Input:
The format of the image file.
An uppercase string corresponding to the
:attr:`PIL.ImageFile.ImageFile.format` attribute. Valid values include
``"JPEG"`` and ``"PNG"``.
### Response:
def format(self):
"""The format of the image file.
An uppercase string corresponding to the
:attr:`PIL.ImageFile.ImageFile.format` attribute. Valid values include
``"JPEG"`` and ``"PNG"``.
"""
if self._format:
return self._format
elif self.pil_image:
return self.pil_image.format |
def load_dynamic_config(config_file=DEFAULT_DYNAMIC_CONFIG_FILE):
"""Load and parse dynamic config"""
dynamic_configurations = {}
# Insert config path so we can import it
sys.path.insert(0, path.dirname(path.abspath(config_file)))
try:
config_module = __import__('config')
dynamic_configurations = config_module.CONFIG
except ImportError:
# Provide a default if config not found
LOG.error('ImportError: Unable to load dynamic config. Check config.py file imports!')
return dynamic_configurations | Load and parse dynamic config | Below is the the instruction that describes the task:
### Input:
Load and parse dynamic config
### Response:
def load_dynamic_config(config_file=DEFAULT_DYNAMIC_CONFIG_FILE):
"""Load and parse dynamic config"""
dynamic_configurations = {}
# Insert config path so we can import it
sys.path.insert(0, path.dirname(path.abspath(config_file)))
try:
config_module = __import__('config')
dynamic_configurations = config_module.CONFIG
except ImportError:
# Provide a default if config not found
LOG.error('ImportError: Unable to load dynamic config. Check config.py file imports!')
return dynamic_configurations |
def _construct_node_from_actions(self,
current_node: Tree,
remaining_actions: List[List[str]],
add_var_function: bool) -> List[List[str]]:
"""
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
"""
if not remaining_actions:
logger.error("No actions left to construct current node: %s", current_node)
raise ParsingError("Incomplete action sequence")
left_side, right_side = remaining_actions.pop(0)
if left_side != current_node.label():
mismatch = True
multi_match_mapping = {str(key): [str(value) for value in values] for key,
values in self.get_multi_match_mapping().items()}
current_label = current_node.label()
if current_label in multi_match_mapping and left_side in multi_match_mapping[current_label]:
mismatch = False
if mismatch:
logger.error("Current node: %s", current_node)
logger.error("Next action: %s -> %s", left_side, right_side)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Current node does not match next action")
if right_side[0] == '[':
# This is a non-terminal expansion, with more than one child node.
for child_type in right_side[1:-1].split(', '):
if child_type.startswith("'lambda"):
# We need to special-case the handling of lambda here, because it's handled a
# bit weirdly in the action sequence. This is stripping off the single quotes
# around something like `'lambda x'`.
child_type = child_type[1:-1]
child_node = Tree(child_type, [])
current_node.append(child_node) # you add a child to an nltk.Tree with `append`
if not self.is_terminal(child_type):
remaining_actions = self._construct_node_from_actions(child_node,
remaining_actions,
add_var_function)
elif self.is_terminal(right_side):
# The current node is a pre-terminal; we'll add a single terminal child. We need to
# check first for whether we need to add a (var _) around the terminal node, though.
if add_var_function and right_side in self._lambda_variables:
right_side = f"(var {right_side})"
if add_var_function and right_side == 'var':
raise ParsingError('add_var_function was true, but action sequence already had var')
current_node.append(Tree(right_side, [])) # you add a child to an nltk.Tree with `append`
else:
# The only way this can happen is if you have a unary non-terminal production rule.
# That is almost certainly not what you want with this kind of grammar, so we'll crash.
# If you really do want this, open a PR with a valid use case.
raise ParsingError(f"Found a unary production rule: {left_side} -> {right_side}. "
"Are you sure you want a unary production rule in your grammar?")
return remaining_actions | Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned. | Below is the instruction that describes the task:
### Input:
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
### Response:
def _construct_node_from_actions(self,
current_node: Tree,
remaining_actions: List[List[str]],
add_var_function: bool) -> List[List[str]]:
"""
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
"""
if not remaining_actions:
logger.error("No actions left to construct current node: %s", current_node)
raise ParsingError("Incomplete action sequence")
left_side, right_side = remaining_actions.pop(0)
if left_side != current_node.label():
mismatch = True
multi_match_mapping = {str(key): [str(value) for value in values] for key,
values in self.get_multi_match_mapping().items()}
current_label = current_node.label()
if current_label in multi_match_mapping and left_side in multi_match_mapping[current_label]:
mismatch = False
if mismatch:
logger.error("Current node: %s", current_node)
logger.error("Next action: %s -> %s", left_side, right_side)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Current node does not match next action")
if right_side[0] == '[':
# This is a non-terminal expansion, with more than one child node.
for child_type in right_side[1:-1].split(', '):
if child_type.startswith("'lambda"):
# We need to special-case the handling of lambda here, because it's handled a
# bit weirdly in the action sequence. This is stripping off the single quotes
# around something like `'lambda x'`.
child_type = child_type[1:-1]
child_node = Tree(child_type, [])
current_node.append(child_node) # you add a child to an nltk.Tree with `append`
if not self.is_terminal(child_type):
remaining_actions = self._construct_node_from_actions(child_node,
remaining_actions,
add_var_function)
elif self.is_terminal(right_side):
# The current node is a pre-terminal; we'll add a single terminal child. We need to
# check first for whether we need to add a (var _) around the terminal node, though.
if add_var_function and right_side in self._lambda_variables:
right_side = f"(var {right_side})"
if add_var_function and right_side == 'var':
raise ParsingError('add_var_function was true, but action sequence already had var')
current_node.append(Tree(right_side, [])) # you add a child to an nltk.Tree with `append`
else:
# The only way this can happen is if you have a unary non-terminal production rule.
# That is almost certainly not what you want with this kind of grammar, so we'll crash.
# If you really do want this, open a PR with a valid use case.
raise ParsingError(f"Found a unary production rule: {left_side} -> {right_side}. "
"Are you sure you want a unary production rule in your grammar?")
return remaining_actions |
def fancy_transpose(data, roll=1):
"""Fancy transpose
This method transposes a multidimensional matrix.
Parameters
----------
data : np.ndarray
Input data array
roll : int
Roll direction and amount. Default (roll=1)
Returns
-------
np.ndarray transposed data
Notes
-----
Adjustment to numpy.transpose
Examples
--------
>>> from modopt.base.np_adjust import fancy_transpose
>>> x = np.arange(27).reshape(3, 3, 3)
>>> x
array([[[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8]],
[[ 9, 10, 11],
[12, 13, 14],
[15, 16, 17]],
[[18, 19, 20],
[21, 22, 23],
[24, 25, 26]]])
>>> fancy_transpose(x)
array([[[ 0, 3, 6],
[ 9, 12, 15],
[18, 21, 24]],
[[ 1, 4, 7],
[10, 13, 16],
[19, 22, 25]],
[[ 2, 5, 8],
[11, 14, 17],
[20, 23, 26]]])
>>> fancy_transpose(x, roll=-1)
array([[[ 0, 9, 18],
[ 1, 10, 19],
[ 2, 11, 20]],
[[ 3, 12, 21],
[ 4, 13, 22],
[ 5, 14, 23]],
[[ 6, 15, 24],
[ 7, 16, 25],
[ 8, 17, 26]]])
"""
axis_roll = np.roll(np.arange(data.ndim), roll)
return np.transpose(data, axes=axis_roll) | Fancy transpose
This method transposes a multidimensional matrix.
Parameters
----------
data : np.ndarray
Input data array
roll : int
Roll direction and amount. Default (roll=1)
Returns
-------
np.ndarray transposed data
Notes
-----
Adjustment to numpy.transpose
Examples
--------
>>> from modopt.base.np_adjust import fancy_transpose
>>> x = np.arange(27).reshape(3, 3, 3)
>>> x
array([[[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8]],
[[ 9, 10, 11],
[12, 13, 14],
[15, 16, 17]],
[[18, 19, 20],
[21, 22, 23],
[24, 25, 26]]])
>>> fancy_transpose(x)
array([[[ 0, 3, 6],
[ 9, 12, 15],
[18, 21, 24]],
[[ 1, 4, 7],
[10, 13, 16],
[19, 22, 25]],
[[ 2, 5, 8],
[11, 14, 17],
[20, 23, 26]]])
>>> fancy_transpose(x, roll=-1)
array([[[ 0, 9, 18],
[ 1, 10, 19],
[ 2, 11, 20]],
[[ 3, 12, 21],
[ 4, 13, 22],
[ 5, 14, 23]],
[[ 6, 15, 24],
[ 7, 16, 25],
        [ 8, 17, 26]]]) | Below is the instruction that describes the task:
### Input:
Fancy transpose
This method transposes a multidimensional matrix.
Parameters
----------
data : np.ndarray
Input data array
roll : int
Roll direction and amount. Default (roll=1)
Returns
-------
np.ndarray transposed data
Notes
-----
Adjustment to numpy.transpose
Examples
--------
>>> from modopt.base.np_adjust import fancy_transpose
>>> x = np.arange(27).reshape(3, 3, 3)
>>> x
array([[[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8]],
[[ 9, 10, 11],
[12, 13, 14],
[15, 16, 17]],
[[18, 19, 20],
[21, 22, 23],
[24, 25, 26]]])
>>> fancy_transpose(x)
array([[[ 0, 3, 6],
[ 9, 12, 15],
[18, 21, 24]],
[[ 1, 4, 7],
[10, 13, 16],
[19, 22, 25]],
[[ 2, 5, 8],
[11, 14, 17],
[20, 23, 26]]])
>>> fancy_transpose(x, roll=-1)
array([[[ 0, 9, 18],
[ 1, 10, 19],
[ 2, 11, 20]],
[[ 3, 12, 21],
[ 4, 13, 22],
[ 5, 14, 23]],
[[ 6, 15, 24],
[ 7, 16, 25],
[ 8, 17, 26]]])
### Response:
def fancy_transpose(data, roll=1):
"""Fancy transpose
This method transposes a multidimensional matrix.
Parameters
----------
data : np.ndarray
Input data array
roll : int
Roll direction and amount. Default (roll=1)
Returns
-------
np.ndarray transposed data
Notes
-----
Adjustment to numpy.transpose
Examples
--------
>>> from modopt.base.np_adjust import fancy_transpose
>>> x = np.arange(27).reshape(3, 3, 3)
>>> x
array([[[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8]],
[[ 9, 10, 11],
[12, 13, 14],
[15, 16, 17]],
[[18, 19, 20],
[21, 22, 23],
[24, 25, 26]]])
>>> fancy_transpose(x)
array([[[ 0, 3, 6],
[ 9, 12, 15],
[18, 21, 24]],
[[ 1, 4, 7],
[10, 13, 16],
[19, 22, 25]],
[[ 2, 5, 8],
[11, 14, 17],
[20, 23, 26]]])
>>> fancy_transpose(x, roll=-1)
array([[[ 0, 9, 18],
[ 1, 10, 19],
[ 2, 11, 20]],
[[ 3, 12, 21],
[ 4, 13, 22],
[ 5, 14, 23]],
[[ 6, 15, 24],
[ 7, 16, 25],
[ 8, 17, 26]]])
"""
axis_roll = np.roll(np.arange(data.ndim), roll)
return np.transpose(data, axes=axis_roll) |
def get_handler(ptr, method):
""" Dereference the pointer and return the handler method. """
obj = CACHE.get(ptr, None)
if obj is None:
raise BridgeReferenceError(
"Reference id={} never existed or has already been destroyed"
.format(ptr))
elif not hasattr(obj, method):
raise NotImplementedError("{}.{} is not implemented.".format(obj,
method))
    return obj, getattr(obj, method) | Dereference the pointer and return the handler method. | Below is the instruction that describes the task:
### Input:
Dereference the pointer and return the handler method.
### Response:
def get_handler(ptr, method):
""" Dereference the pointer and return the handler method. """
obj = CACHE.get(ptr, None)
if obj is None:
raise BridgeReferenceError(
"Reference id={} never existed or has already been destroyed"
.format(ptr))
elif not hasattr(obj, method):
raise NotImplementedError("{}.{} is not implemented.".format(obj,
method))
return obj, getattr(obj, method) |
def normalize(df, style = 'mean'):
""" Returns a normalized version of a DataFrame or Series
Parameters:
df - DataFrame or Series
The data to normalize
style - function or string, default 'mean'
The style to use when computing the norms. Takes 'mean' or 'minmax' to
do mean or min-max normalization respectively. User-defined functions that take
a pandas Series as input and return a normalized pandas Series are also accepted
"""
if style == 'mean':
df_mean,df_std = df.mean(),df.std()
return (df-df_mean)/df_std
elif style == 'minmax':
col_min,col_max = df.min(),df.max()
return (df-col_min)/(col_max-col_min)
else:
return style(df) | Returns a normalized version of a DataFrame or Series
Parameters:
df - DataFrame or Series
The data to normalize
style - function or string, default 'mean'
The style to use when computing the norms. Takes 'mean' or 'minmax' to
do mean or min-max normalization respectively. User-defined functions that take
    a pandas Series as input and return a normalized pandas Series are also accepted | Below is the instruction that describes the task:
### Input:
Returns a normalized version of a DataFrame or Series
Parameters:
df - DataFrame or Series
The data to normalize
style - function or string, default 'mean'
The style to use when computing the norms. Takes 'mean' or 'minmax' to
do mean or min-max normalization respectively. User-defined functions that take
a pandas Series as input and return a normalized pandas Series are also accepted
### Response:
def normalize(df, style = 'mean'):
""" Returns a normalized version of a DataFrame or Series
Parameters:
df - DataFrame or Series
The data to normalize
style - function or string, default 'mean'
The style to use when computing the norms. Takes 'mean' or 'minmax' to
do mean or min-max normalization respectively. User-defined functions that take
a pandas Series as input and return a normalized pandas Series are also accepted
"""
if style == 'mean':
df_mean,df_std = df.mean(),df.std()
return (df-df_mean)/df_std
elif style == 'minmax':
col_min,col_max = df.min(),df.max()
return (df-col_min)/(col_max-col_min)
else:
return style(df) |
def to_dot(self, name='EXPR'): # pragma: no cover
"""Convert to DOT language representation."""
parts = ['graph', name, '{', 'rankdir=BT;']
for ex in self.iter_dfs():
exid = ex.node.id()
if ex is Zero:
parts += ["n{} [label=0,shape=box];".format(exid)]
elif ex is One:
parts += ["n{} [label=1,shape=box];".format(exid)]
elif isinstance(ex, Literal):
parts += ['n{} [label="{}",shape=box];'.format(exid, ex)]
else:
parts += ["n{0} [label={1.ASTOP},shape=circle];".format(exid, ex)]
for ex in self.iter_dfs():
exid = ex.node.id()
if isinstance(ex, NotOp):
parts += ["n{} -- n{};".format(ex.x.node.id(), exid)]
elif isinstance(ex, ImpliesOp):
p, q = ex.xs
parts += ["n{} -- n{} [label=p];".format(p.node.id(), exid)]
parts += ["n{} -- n{} [label=q];".format(q.node.id(), exid)]
elif isinstance(ex, IfThenElseOp):
s, d1, d0 = ex.xs
parts += ["n{} -- n{} [label=s];".format(s.node.id(), exid)]
parts += ["n{} -- n{} [label=d1];".format(d1.node.id(), exid)]
parts += ["n{} -- n{} [label=d0];".format(d0.node.id(), exid)]
elif isinstance(ex, NaryOp):
for x in ex.xs:
parts += ["n{} -- n{};".format(x.node.id(), exid)]
parts.append('}')
    return " ".join(parts) | Convert to DOT language representation. | Below is the instruction that describes the task:
### Input:
Convert to DOT language representation.
### Response:
def to_dot(self, name='EXPR'): # pragma: no cover
"""Convert to DOT language representation."""
parts = ['graph', name, '{', 'rankdir=BT;']
for ex in self.iter_dfs():
exid = ex.node.id()
if ex is Zero:
parts += ["n{} [label=0,shape=box];".format(exid)]
elif ex is One:
parts += ["n{} [label=1,shape=box];".format(exid)]
elif isinstance(ex, Literal):
parts += ['n{} [label="{}",shape=box];'.format(exid, ex)]
else:
parts += ["n{0} [label={1.ASTOP},shape=circle];".format(exid, ex)]
for ex in self.iter_dfs():
exid = ex.node.id()
if isinstance(ex, NotOp):
parts += ["n{} -- n{};".format(ex.x.node.id(), exid)]
elif isinstance(ex, ImpliesOp):
p, q = ex.xs
parts += ["n{} -- n{} [label=p];".format(p.node.id(), exid)]
parts += ["n{} -- n{} [label=q];".format(q.node.id(), exid)]
elif isinstance(ex, IfThenElseOp):
s, d1, d0 = ex.xs
parts += ["n{} -- n{} [label=s];".format(s.node.id(), exid)]
parts += ["n{} -- n{} [label=d1];".format(d1.node.id(), exid)]
parts += ["n{} -- n{} [label=d0];".format(d0.node.id(), exid)]
elif isinstance(ex, NaryOp):
for x in ex.xs:
parts += ["n{} -- n{};".format(x.node.id(), exid)]
parts.append('}')
return " ".join(parts) |
def gen_zonal_stats(
vectors, raster,
layer=0,
band=1,
nodata=None,
affine=None,
stats=None,
all_touched=False,
categorical=False,
category_map=None,
add_stats=None,
zone_func=None,
raster_out=False,
prefix=None,
geojson_out=False, **kwargs):
"""Zonal statistics of raster values aggregated to vector geometries.
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
all_touched: bool, optional
Whether to include every raster cell touched by a geometry, or only
those having a center point within the polygon.
defaults to `False`
categorical: bool, optional
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
add_stats: dict
with names and functions of additional stats to compute, optional
zone_func: callable
function to apply to zone ndarray prior to computing stats
raster_out: boolean
Include the masked numpy array for each feature?, optional
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
Returns
-------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
GeoJSON-like Feature as python dict
"""
stats, run_count = check_stats(stats, categorical)
# Handle 1.0 deprecations
transform = kwargs.get('transform')
if transform:
warnings.warn("GDAL-style transforms will disappear in 1.0. "
"Use affine=Affine.from_gdal(*transform) instead",
DeprecationWarning)
if not affine:
affine = Affine.from_gdal(*transform)
cp = kwargs.get('copy_properties')
if cp:
warnings.warn("Use `geojson_out` to preserve feature properties",
DeprecationWarning)
band_num = kwargs.get('band_num')
if band_num:
warnings.warn("Use `band` to specify band number", DeprecationWarning)
band = band_num
with Raster(raster, affine, nodata, band) as rast:
features_iter = read_features(vectors, layer)
for _, feat in enumerate(features_iter):
geom = shape(feat['geometry'])
if 'Point' in geom.type:
geom = boxify_points(geom, rast)
geom_bounds = tuple(geom.bounds)
fsrc = rast.read(bounds=geom_bounds)
# rasterized geometry
rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)
# nodata mask
isnodata = (fsrc.array == fsrc.nodata)
# add nan mask (if necessary)
has_nan = (
np.issubdtype(fsrc.array.dtype, np.floating)
and np.isnan(fsrc.array.min()))
if has_nan:
isnodata = (isnodata | np.isnan(fsrc.array))
# Mask the source data array
# mask everything that is not a valid value or not within our geom
masked = np.ma.MaskedArray(
fsrc.array,
mask=(isnodata | ~rv_array))
# If we're on 64 bit platform and the array is an integer type
# make sure we cast to 64 bit to avoid overflow.
# workaround for https://github.com/numpy/numpy/issues/8433
if sysinfo.platform_bits == 64 and \
masked.dtype != np.int64 and \
issubclass(masked.dtype.type, np.integer):
masked = masked.astype(np.int64)
# execute zone_func on masked zone ndarray
if zone_func is not None:
if not callable(zone_func):
raise TypeError(('zone_func must be a callable '
'which accepts function a '
'single `zone_array` arg.'))
zone_func(masked)
if masked.compressed().size == 0:
# nothing here, fill with None and move on
feature_stats = dict([(stat, None) for stat in stats])
if 'count' in stats: # special case, zero makes sense here
feature_stats['count'] = 0
else:
if run_count:
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(zip([np.asscalar(k) for k in keys],
[np.asscalar(c) for c in counts]))
if categorical:
feature_stats = dict(pixel_count)
if category_map:
feature_stats = remap_categories(category_map, feature_stats)
else:
feature_stats = {}
if 'min' in stats:
feature_stats['min'] = float(masked.min())
if 'max' in stats:
feature_stats['max'] = float(masked.max())
if 'mean' in stats:
feature_stats['mean'] = float(masked.mean())
if 'count' in stats:
feature_stats['count'] = int(masked.count())
# optional
if 'sum' in stats:
feature_stats['sum'] = float(masked.sum())
if 'std' in stats:
feature_stats['std'] = float(masked.std())
if 'median' in stats:
feature_stats['median'] = float(np.median(masked.compressed()))
if 'majority' in stats:
feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
if 'minority' in stats:
feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
if 'unique' in stats:
feature_stats['unique'] = len(list(pixel_count.keys()))
if 'range' in stats:
try:
rmin = feature_stats['min']
except KeyError:
rmin = float(masked.min())
try:
rmax = feature_stats['max']
except KeyError:
rmax = float(masked.max())
feature_stats['range'] = rmax - rmin
for pctile in [s for s in stats if s.startswith('percentile_')]:
q = get_percentile(pctile)
pctarr = masked.compressed()
feature_stats[pctile] = np.percentile(pctarr, q)
if 'nodata' in stats or 'nan' in stats:
featmasked = np.ma.MaskedArray(fsrc.array, mask=(~rv_array))
if 'nodata' in stats:
feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
if 'nan' in stats:
feature_stats['nan'] = float(np.isnan(featmasked).sum()) if has_nan else 0
if add_stats is not None:
for stat_name, stat_func in add_stats.items():
feature_stats[stat_name] = stat_func(masked)
if raster_out:
feature_stats['mini_raster_array'] = masked
feature_stats['mini_raster_affine'] = fsrc.affine
feature_stats['mini_raster_nodata'] = fsrc.nodata
if prefix is not None:
prefixed_feature_stats = {}
for key, val in feature_stats.items():
newkey = "{}{}".format(prefix, key)
prefixed_feature_stats[newkey] = val
feature_stats = prefixed_feature_stats
if geojson_out:
for key, val in feature_stats.items():
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][key] = val
yield feat
else:
yield feature_stats | Zonal statistics of raster values aggregated to vector geometries.
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
all_touched: bool, optional
Whether to include every raster cell touched by a geometry, or only
those having a center point within the polygon.
defaults to `False`
categorical: bool, optional
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
add_stats: dict
with names and functions of additional stats to compute, optional
zone_func: callable
function to apply to zone ndarray prior to computing stats
raster_out: boolean
Include the masked numpy array for each feature?, optional
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
Returns
-------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
    GeoJSON-like Feature as python dict | Below is the instruction that describes the task:
### Input:
Zonal statistics of raster values aggregated to vector geometries.
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
all_touched: bool, optional
Whether to include every raster cell touched by a geometry, or only
those having a center point within the polygon.
defaults to `False`
categorical: bool, optional
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
add_stats: dict
with names and functions of additional stats to compute, optional
zone_func: callable
function to apply to zone ndarray prior to computing stats
raster_out: boolean
Include the masked numpy array for each feature?, optional
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
Returns
-------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
GeoJSON-like Feature as python dict
### Response:
def gen_zonal_stats(
vectors, raster,
layer=0,
band=1,
nodata=None,
affine=None,
stats=None,
all_touched=False,
categorical=False,
category_map=None,
add_stats=None,
zone_func=None,
raster_out=False,
prefix=None,
geojson_out=False, **kwargs):
"""Zonal statistics of raster values aggregated to vector geometries.
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
all_touched: bool, optional
Whether to include every raster cell touched by a geometry, or only
those having a center point within the polygon.
defaults to `False`
categorical: bool, optional
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
add_stats: dict
with names and functions of additional stats to compute, optional
zone_func: callable
function to apply to zone ndarray prior to computing stats
raster_out: boolean
Include the masked numpy array for each feature?, optional
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
Returns
-------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
GeoJSON-like Feature as python dict
"""
stats, run_count = check_stats(stats, categorical)
# Handle 1.0 deprecations
transform = kwargs.get('transform')
if transform:
warnings.warn("GDAL-style transforms will disappear in 1.0. "
"Use affine=Affine.from_gdal(*transform) instead",
DeprecationWarning)
if not affine:
affine = Affine.from_gdal(*transform)
cp = kwargs.get('copy_properties')
if cp:
warnings.warn("Use `geojson_out` to preserve feature properties",
DeprecationWarning)
band_num = kwargs.get('band_num')
if band_num:
warnings.warn("Use `band` to specify band number", DeprecationWarning)
band = band_num
with Raster(raster, affine, nodata, band) as rast:
features_iter = read_features(vectors, layer)
for _, feat in enumerate(features_iter):
geom = shape(feat['geometry'])
if 'Point' in geom.type:
geom = boxify_points(geom, rast)
geom_bounds = tuple(geom.bounds)
fsrc = rast.read(bounds=geom_bounds)
# rasterized geometry
rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)
# nodata mask
isnodata = (fsrc.array == fsrc.nodata)
# add nan mask (if necessary)
has_nan = (
np.issubdtype(fsrc.array.dtype, np.floating)
and np.isnan(fsrc.array.min()))
if has_nan:
isnodata = (isnodata | np.isnan(fsrc.array))
# Mask the source data array
# mask everything that is not a valid value or not within our geom
masked = np.ma.MaskedArray(
fsrc.array,
mask=(isnodata | ~rv_array))
# If we're on 64 bit platform and the array is an integer type
# make sure we cast to 64 bit to avoid overflow.
# workaround for https://github.com/numpy/numpy/issues/8433
if sysinfo.platform_bits == 64 and \
masked.dtype != np.int64 and \
issubclass(masked.dtype.type, np.integer):
masked = masked.astype(np.int64)
# execute zone_func on masked zone ndarray
if zone_func is not None:
if not callable(zone_func):
raise TypeError(('zone_func must be a callable '
'which accepts function a '
'single `zone_array` arg.'))
zone_func(masked)
if masked.compressed().size == 0:
# nothing here, fill with None and move on
feature_stats = dict([(stat, None) for stat in stats])
if 'count' in stats: # special case, zero makes sense here
feature_stats['count'] = 0
else:
if run_count:
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(zip([np.asscalar(k) for k in keys],
[np.asscalar(c) for c in counts]))
if categorical:
feature_stats = dict(pixel_count)
if category_map:
feature_stats = remap_categories(category_map, feature_stats)
else:
feature_stats = {}
if 'min' in stats:
feature_stats['min'] = float(masked.min())
if 'max' in stats:
feature_stats['max'] = float(masked.max())
if 'mean' in stats:
feature_stats['mean'] = float(masked.mean())
if 'count' in stats:
feature_stats['count'] = int(masked.count())
# optional
if 'sum' in stats:
feature_stats['sum'] = float(masked.sum())
if 'std' in stats:
feature_stats['std'] = float(masked.std())
if 'median' in stats:
feature_stats['median'] = float(np.median(masked.compressed()))
if 'majority' in stats:
feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
if 'minority' in stats:
feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
if 'unique' in stats:
feature_stats['unique'] = len(list(pixel_count.keys()))
if 'range' in stats:
try:
rmin = feature_stats['min']
except KeyError:
rmin = float(masked.min())
try:
rmax = feature_stats['max']
except KeyError:
rmax = float(masked.max())
feature_stats['range'] = rmax - rmin
for pctile in [s for s in stats if s.startswith('percentile_')]:
q = get_percentile(pctile)
pctarr = masked.compressed()
feature_stats[pctile] = np.percentile(pctarr, q)
if 'nodata' in stats or 'nan' in stats:
featmasked = np.ma.MaskedArray(fsrc.array, mask=(~rv_array))
if 'nodata' in stats:
feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
if 'nan' in stats:
feature_stats['nan'] = float(np.isnan(featmasked).sum()) if has_nan else 0
if add_stats is not None:
for stat_name, stat_func in add_stats.items():
feature_stats[stat_name] = stat_func(masked)
if raster_out:
feature_stats['mini_raster_array'] = masked
feature_stats['mini_raster_affine'] = fsrc.affine
feature_stats['mini_raster_nodata'] = fsrc.nodata
if prefix is not None:
prefixed_feature_stats = {}
for key, val in feature_stats.items():
newkey = "{}{}".format(prefix, key)
prefixed_feature_stats[newkey] = val
feature_stats = prefixed_feature_stats
if geojson_out:
for key, val in feature_stats.items():
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][key] = val
yield feat
else:
yield feature_stats |
def SetHeaders(self, soap_headers, http_headers):
"""Set the headers for the underlying client.
Args:
soap_headers: A SOAP element for the SOAP headers.
http_headers: A dictionary for the http headers.
"""
self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers) | Set the headers for the underlying client.
Args:
soap_headers: A SOAP element for the SOAP headers.
  http_headers: A dictionary for the http headers. | Below is the instruction that describes the task:
### Input:
Set the headers for the underlying client.
Args:
soap_headers: A SOAP element for the SOAP headers.
http_headers: A dictionary for the http headers.
### Response:
def SetHeaders(self, soap_headers, http_headers):
"""Set the headers for the underlying client.
Args:
soap_headers: A SOAP element for the SOAP headers.
http_headers: A dictionary for the http headers.
"""
self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers) |
def _setup_explorer(self, capabilities):
"""Setup Internet Explorer webdriver
:param capabilities: capabilities object
:returns: a new local Internet Explorer driver
"""
explorer_driver = self.config.get('Driver', 'explorer_driver_path')
self.logger.debug("Explorer driver path given in properties: %s", explorer_driver)
return webdriver.Ie(explorer_driver, capabilities=capabilities) | Setup Internet Explorer webdriver
:param capabilities: capabilities object
:returns: a new local Internet Explorer driver | Below is the instruction that describes the task:
### Input:
Setup Internet Explorer webdriver
:param capabilities: capabilities object
:returns: a new local Internet Explorer driver
### Response:
def _setup_explorer(self, capabilities):
"""Setup Internet Explorer webdriver
:param capabilities: capabilities object
:returns: a new local Internet Explorer driver
"""
explorer_driver = self.config.get('Driver', 'explorer_driver_path')
self.logger.debug("Explorer driver path given in properties: %s", explorer_driver)
return webdriver.Ie(explorer_driver, capabilities=capabilities) |
def get_files(input_file):
"""
Initializes an index of files to generate, returns the base
directory and index.
"""
file_index = {}
base_dir = None
if os.path.isfile(input_file):
file_index[input_file] = None
base_dir = os.path.dirname(input_file)
elif os.path.isdir(input_file):
base_dir = input_file
for inf in glob.glob(input_file + s.SBP_EXTENSION):
file_index[os.path.abspath(inf)] = None
for inf in glob.glob(input_file + '/*'):
base, index = get_files(os.path.abspath(inf))
z = file_index.copy()
z.update(index)
file_index = z
return (base_dir, file_index) | Initializes an index of files to generate, returns the base
directory and index. | Below is the instruction that describes the task:
### Input:
Initializes an index of files to generate, returns the base
directory and index.
### Response:
def get_files(input_file):
"""
Initializes an index of files to generate, returns the base
directory and index.
"""
file_index = {}
base_dir = None
if os.path.isfile(input_file):
file_index[input_file] = None
base_dir = os.path.dirname(input_file)
elif os.path.isdir(input_file):
base_dir = input_file
for inf in glob.glob(input_file + s.SBP_EXTENSION):
file_index[os.path.abspath(inf)] = None
for inf in glob.glob(input_file + '/*'):
base, index = get_files(os.path.abspath(inf))
z = file_index.copy()
z.update(index)
file_index = z
return (base_dir, file_index) |
def github_gfonts_ttFont(ttFont, license):
"""Get a TTFont object of a font downloaded
from Google Fonts git repository.
"""
if not license:
return
from fontbakery.utils import download_file
from fontTools.ttLib import TTFont
from urllib.request import HTTPError
LICENSE_DIRECTORY = {
"OFL.txt": "ofl",
"UFL.txt": "ufl",
"LICENSE.txt": "apache"
}
filename = os.path.basename(ttFont.reader.file.name)
fontname = filename.split('-')[0].lower()
url = ("https://github.com/google/fonts/raw/master"
"/{}/{}/{}").format(LICENSE_DIRECTORY[license],
fontname,
filename)
try:
fontfile = download_file(url)
return TTFont(fontfile)
except HTTPError:
return None | Get a TTFont object of a font downloaded
from Google Fonts git repository. | Below is the instruction that describes the task:
### Input:
Get a TTFont object of a font downloaded
from Google Fonts git repository.
### Response:
def github_gfonts_ttFont(ttFont, license):
"""Get a TTFont object of a font downloaded
from Google Fonts git repository.
"""
if not license:
return
from fontbakery.utils import download_file
from fontTools.ttLib import TTFont
from urllib.request import HTTPError
LICENSE_DIRECTORY = {
"OFL.txt": "ofl",
"UFL.txt": "ufl",
"LICENSE.txt": "apache"
}
filename = os.path.basename(ttFont.reader.file.name)
fontname = filename.split('-')[0].lower()
url = ("https://github.com/google/fonts/raw/master"
"/{}/{}/{}").format(LICENSE_DIRECTORY[license],
fontname,
filename)
try:
fontfile = download_file(url)
return TTFont(fontfile)
except HTTPError:
return None |
def execute_bytecode(self,
origin: Address,
gas_price: int,
gas: int,
to: Address,
sender: Address,
value: int,
data: bytes,
code: bytes,
code_address: Address=None,
) -> BaseComputation:
"""
Execute raw bytecode in the context of the current state of
the virtual machine.
"""
if origin is None:
origin = sender
# Construct a message
message = Message(
gas=gas,
to=to,
sender=sender,
value=value,
data=data,
code=code,
code_address=code_address,
)
# Construction a tx context
transaction_context = self.state.get_transaction_context_class()(
gas_price=gas_price,
origin=origin,
)
# Execute it in the VM
return self.state.get_computation(message, transaction_context).apply_computation(
self.state,
message,
transaction_context,
) | Execute raw bytecode in the context of the current state of
the virtual machine. | Below is the instruction that describes the task:
### Input:
Execute raw bytecode in the context of the current state of
the virtual machine.
### Response:
def execute_bytecode(self,
origin: Address,
gas_price: int,
gas: int,
to: Address,
sender: Address,
value: int,
data: bytes,
code: bytes,
code_address: Address=None,
) -> BaseComputation:
"""
Execute raw bytecode in the context of the current state of
the virtual machine.
"""
if origin is None:
origin = sender
# Construct a message
message = Message(
gas=gas,
to=to,
sender=sender,
value=value,
data=data,
code=code,
code_address=code_address,
)
# Construction a tx context
transaction_context = self.state.get_transaction_context_class()(
gas_price=gas_price,
origin=origin,
)
# Execute it in the VM
return self.state.get_computation(message, transaction_context).apply_computation(
self.state,
message,
transaction_context,
) |
def get_from_cache(self):
"""See if this rule has already been built and cached."""
for item in self.rule.output_files:
dstpath = os.path.join(self.buildroot, item)
self.linkorcopy(
self.cachemgr.path_in_cache(item, self._metahash()),
            dstpath) | See if this rule has already been built and cached. | Below is the instruction that describes the task:
### Input:
See if this rule has already been built and cached.
### Response:
def get_from_cache(self):
"""See if this rule has already been built and cached."""
for item in self.rule.output_files:
dstpath = os.path.join(self.buildroot, item)
self.linkorcopy(
self.cachemgr.path_in_cache(item, self._metahash()),
dstpath) |
def table(self):
"""Return a large string of the entire table ready to be printed to the terminal."""
dimensions = max_dimensions(self.table_data, self.padding_left, self.padding_right)[:3]
        return flatten(self.gen_table(*dimensions)) | Return a large string of the entire table ready to be printed to the terminal. | Below is the instruction that describes the task:
### Input:
Return a large string of the entire table ready to be printed to the terminal.
### Response:
def table(self):
"""Return a large string of the entire table ready to be printed to the terminal."""
dimensions = max_dimensions(self.table_data, self.padding_left, self.padding_right)[:3]
return flatten(self.gen_table(*dimensions)) |
def apply_diff(text, diff, reverse=False, verify=True):
"""
SOME EXAMPLES OF diff
#@@ -1 +1 @@
#-before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
#+before china goes live (end January developer release, June general audience release) , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -0,0 +1,3 @@
+before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+
+kward has the details.
@@ -1 +1 @@
-before china goes live (end January developer release, June general audience release), the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+before china goes live , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -3 +3 ,6 @@
-kward has the details.+kward has the details.
+
+Target Release Dates :
+https://mana.mozilla.org/wiki/display/PM/Firefox+OS+Wave+Launch+Cross+Functional+View
+
+Content Team Engagement & Tasks : https://appreview.etherpad.mozilla.org/40
"""
if not diff:
return text
output = text
hunks = [
(new_diff[start_hunk], new_diff[start_hunk+1:end_hunk])
for new_diff in [[d.lstrip() for d in diff if d.lstrip() and d != "\\ No newline at end of file"] + ["@@"]] # ANOTHER REPAIR
for start_hunk, end_hunk in pairwise(i for i, l in enumerate(new_diff) if l.startswith('@@'))
]
for header, hunk_body in (reversed(hunks) if reverse else hunks):
matches = DIFF_PREFIX.match(header.strip())
if not matches:
if not _Log:
_late_import()
_Log.error("Can not handle \n---\n{{diff}}\n---\n", diff=diff)
removes = tuple(int(i.strip()) for i in matches.group(1).split(",")) # EXPECTING start_line, length TO REMOVE
remove = Data(start=removes[0], length=1 if len(removes) == 1 else removes[1]) # ASSUME FIRST LINE
adds = tuple(int(i.strip()) for i in matches.group(2).split(",")) # EXPECTING start_line, length TO ADD
add = Data(start=adds[0], length=1 if len(adds) == 1 else adds[1])
if add.length == 0 and add.start == 0:
add.start = remove.start
def repair_hunk(hunk_body):
# THE LAST DELETED LINE MAY MISS A "\n" MEANING THE FIRST
# ADDED LINE WILL BE APPENDED TO THE LAST DELETED LINE
# EXAMPLE: -kward has the details.+kward has the details.
# DETECT THIS PROBLEM FOR THIS HUNK AND FIX THE DIFF
if reverse:
last_lines = [
o
for b, o in zip(reversed(hunk_body), reversed(output))
if b != "+" + o
]
if not last_lines:
return hunk_body
last_line = last_lines[0]
for problem_index, problem_line in enumerate(hunk_body):
if problem_line.startswith('-') and problem_line.endswith('+' + last_line):
split_point = len(problem_line) - (len(last_line) + 1)
break
elif problem_line.startswith('+' + last_line + "-"):
split_point = len(last_line) + 1
break
else:
return hunk_body
else:
if not output:
return hunk_body
last_line = output[-1]
for problem_index, problem_line in enumerate(hunk_body):
if problem_line.startswith('+') and problem_line.endswith('-' + last_line):
split_point = len(problem_line) - (len(last_line) + 1)
break
elif problem_line.startswith('-' + last_line + "+"):
split_point = len(last_line) + 1
break
else:
return hunk_body
new_hunk_body = (
hunk_body[:problem_index] +
[problem_line[:split_point], problem_line[split_point:]] +
hunk_body[problem_index + 1:]
)
return new_hunk_body
hunk_body = repair_hunk(hunk_body)
if reverse:
new_output = (
output[:add.start - 1] +
[d[1:] for d in hunk_body if d and d[0] == '-'] +
output[add.start + add.length - 1:]
)
else:
new_output = (
output[:add.start - 1] +
[d[1:] for d in hunk_body if d and d[0] == '+'] +
output[add.start + remove.length - 1:]
)
output = new_output
if verify:
original = apply_diff(output, diff, not reverse, False)
if set(text) != set(original): # bugzilla-etl diffs are a jumble
for t, o in zip_longest(text, original):
if t in ['reports: https://goo.gl/70o6w6\r']:
break # KNOWN INCONSISTENCIES
if t != o:
if not _Log:
_late_import()
_Log.error("logical verification check failed")
break
return output | SOME EXAMPLES OF diff
#@@ -1 +1 @@
#-before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
#+before china goes live (end January developer release, June general audience release) , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -0,0 +1,3 @@
+before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+
+kward has the details.
@@ -1 +1 @@
-before china goes live (end January developer release, June general audience release), the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+before china goes live , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -3 +3 ,6 @@
-kward has the details.+kward has the details.
+
+Target Release Dates :
+https://mana.mozilla.org/wiki/display/PM/Firefox+OS+Wave+Launch+Cross+Functional+View
+
+Content Team Engagement & Tasks : https://appreview.etherpad.mozilla.org/40 | Below is the instruction that describes the task:
### Input:
SOME EXAMPLES OF diff
#@@ -1 +1 @@
#-before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
#+before china goes live (end January developer release, June general audience release) , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -0,0 +1,3 @@
+before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+
+kward has the details.
@@ -1 +1 @@
-before china goes live (end January developer release, June general audience release), the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+before china goes live , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -3 +3 ,6 @@
-kward has the details.+kward has the details.
+
+Target Release Dates :
+https://mana.mozilla.org/wiki/display/PM/Firefox+OS+Wave+Launch+Cross+Functional+View
+
+Content Team Engagement & Tasks : https://appreview.etherpad.mozilla.org/40
### Response:
def apply_diff(text, diff, reverse=False, verify=True):
"""
SOME EXAMPLES OF diff
#@@ -1 +1 @@
#-before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
#+before china goes live (end January developer release, June general audience release) , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -0,0 +1,3 @@
+before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+
+kward has the details.
@@ -1 +1 @@
-before china goes live (end January developer release, June general audience release), the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+before china goes live , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -3 +3 ,6 @@
-kward has the details.+kward has the details.
+
+Target Release Dates :
+https://mana.mozilla.org/wiki/display/PM/Firefox+OS+Wave+Launch+Cross+Functional+View
+
+Content Team Engagement & Tasks : https://appreview.etherpad.mozilla.org/40
"""
if not diff:
return text
output = text
hunks = [
(new_diff[start_hunk], new_diff[start_hunk+1:end_hunk])
for new_diff in [[d.lstrip() for d in diff if d.lstrip() and d != "\\ No newline at end of file"] + ["@@"]] # ANOTHER REPAIR
for start_hunk, end_hunk in pairwise(i for i, l in enumerate(new_diff) if l.startswith('@@'))
]
for header, hunk_body in (reversed(hunks) if reverse else hunks):
matches = DIFF_PREFIX.match(header.strip())
if not matches:
if not _Log:
_late_import()
_Log.error("Can not handle \n---\n{{diff}}\n---\n", diff=diff)
removes = tuple(int(i.strip()) for i in matches.group(1).split(",")) # EXPECTING start_line, length TO REMOVE
remove = Data(start=removes[0], length=1 if len(removes) == 1 else removes[1]) # ASSUME FIRST LINE
adds = tuple(int(i.strip()) for i in matches.group(2).split(",")) # EXPECTING start_line, length TO ADD
add = Data(start=adds[0], length=1 if len(adds) == 1 else adds[1])
if add.length == 0 and add.start == 0:
add.start = remove.start
def repair_hunk(hunk_body):
# THE LAST DELETED LINE MAY MISS A "\n" MEANING THE FIRST
# ADDED LINE WILL BE APPENDED TO THE LAST DELETED LINE
# EXAMPLE: -kward has the details.+kward has the details.
# DETECT THIS PROBLEM FOR THIS HUNK AND FIX THE DIFF
if reverse:
last_lines = [
o
for b, o in zip(reversed(hunk_body), reversed(output))
if b != "+" + o
]
if not last_lines:
return hunk_body
last_line = last_lines[0]
for problem_index, problem_line in enumerate(hunk_body):
if problem_line.startswith('-') and problem_line.endswith('+' + last_line):
split_point = len(problem_line) - (len(last_line) + 1)
break
elif problem_line.startswith('+' + last_line + "-"):
split_point = len(last_line) + 1
break
else:
return hunk_body
else:
if not output:
return hunk_body
last_line = output[-1]
for problem_index, problem_line in enumerate(hunk_body):
if problem_line.startswith('+') and problem_line.endswith('-' + last_line):
split_point = len(problem_line) - (len(last_line) + 1)
break
elif problem_line.startswith('-' + last_line + "+"):
split_point = len(last_line) + 1
break
else:
return hunk_body
new_hunk_body = (
hunk_body[:problem_index] +
[problem_line[:split_point], problem_line[split_point:]] +
hunk_body[problem_index + 1:]
)
return new_hunk_body
hunk_body = repair_hunk(hunk_body)
if reverse:
new_output = (
output[:add.start - 1] +
[d[1:] for d in hunk_body if d and d[0] == '-'] +
output[add.start + add.length - 1:]
)
else:
new_output = (
output[:add.start - 1] +
[d[1:] for d in hunk_body if d and d[0] == '+'] +
output[add.start + remove.length - 1:]
)
output = new_output
if verify:
original = apply_diff(output, diff, not reverse, False)
if set(text) != set(original): # bugzilla-etl diffs are a jumble
for t, o in zip_longest(text, original):
if t in ['reports: https://goo.gl/70o6w6\r']:
break # KNOWN INCONSISTENCIES
if t != o:
if not _Log:
_late_import()
_Log.error("logical verification check failed")
break
return output |
def get_data_model():
"""
try to grab the up to date data model document from the EarthRef site.
if that fails, try to get the data model document from the PmagPy directory on the user's computer.
if that fails, return False.
data_model is a set of nested dictionaries that looks like this:
{'magic_contributions':
{'group_userid': {'data_status': 'Optional', 'data_type': 'String(10)'}, 'activate': {'data_status': 'Optional', 'data_type': 'String(1)'}, ....},
'er_synthetics':
{'synthetic_type': {'data_status': 'Required', 'data_type': 'String(50)'}, 'er_citation_names': {'data_status': 'Required', 'data_type': 'List(500)'}, ...},
....
}
the top level keys are the file types.
the second level keys are the possible headers for that file type.
the third level keys are data_type and data_status for that header.
"""
#print("-I- getting data model, please be patient!!!!")
url = 'http://earthref.org/services/MagIC-data-model.txt'
offline = True # always get cached data model, as 2.5 is now static
#try:
# data = urllib2.urlopen(url)
#except urllib2.URLError:
# print '-W- Unable to fetch data model online\nTrying to use cached data model instead'
# offline = True
#except httplib.BadStatusLine:
# print '-W- Website: {} not responding\nTrying to use cached data model instead'.format(url)
# offline = True
if offline:
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
if file_type in ('bad file', 'empty_file'):
print('-W- Unable to read online data model.\nTrying to use cached data model instead')
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
ref_dicts = [d for d in data_model if d['column_nmb'] != '>>>>>>>>>>']
file_types = [d['field_name'] for d in data_model if d['column_nmb'] == 'tab delimited']
file_types.insert(0, file_type)
complete_ref = {}
dictionary = {}
n = 0
for d in ref_dicts:
if d['field_name'] in file_types:
complete_ref[file_types[n]] = dictionary
n += 1
dictionary = {}
else:
dictionary[d['field_name_oracle']] = {'data_type': d['data_type'], 'data_status': d['data_status']}
return complete_ref | try to grab the up to date data model document from the EarthRef site.
if that fails, try to get the data model document from the PmagPy directory on the user's computer.
if that fails, return False.
data_model is a set of nested dictionaries that looks like this:
{'magic_contributions':
{'group_userid': {'data_status': 'Optional', 'data_type': 'String(10)'}, 'activate': {'data_status': 'Optional', 'data_type': 'String(1)'}, ....},
'er_synthetics':
{'synthetic_type': {'data_status': 'Required', 'data_type': 'String(50)'}, 'er_citation_names': {'data_status': 'Required', 'data_type': 'List(500)'}, ...},
....
}
the top level keys are the file types.
the second level keys are the possible headers for that file type.
the third level keys are data_type and data_status for that header. | Below is the instruction that describes the task:
### Input:
try to grab the up to date data model document from the EarthRef site.
if that fails, try to get the data model document from the PmagPy directory on the user's computer.
if that fails, return False.
data_model is a set of nested dictionaries that looks like this:
{'magic_contributions':
{'group_userid': {'data_status': 'Optional', 'data_type': 'String(10)'}, 'activate': {'data_status': 'Optional', 'data_type': 'String(1)'}, ....},
'er_synthetics':
{'synthetic_type': {'data_status': 'Required', 'data_type': 'String(50)'}, 'er_citation_names': {'data_status': 'Required', 'data_type': 'List(500)'}, ...},
....
}
the top level keys are the file types.
the second level keys are the possible headers for that file type.
the third level keys are data_type and data_status for that header.
### Response:
def get_data_model():
"""
try to grab the up to date data model document from the EarthRef site.
if that fails, try to get the data model document from the PmagPy directory on the user's computer.
if that fails, return False.
data_model is a set of nested dictionaries that looks like this:
{'magic_contributions':
{'group_userid': {'data_status': 'Optional', 'data_type': 'String(10)'}, 'activate': {'data_status': 'Optional', 'data_type': 'String(1)'}, ....},
'er_synthetics':
{'synthetic_type': {'data_status': 'Required', 'data_type': 'String(50)'}, 'er_citation_names': {'data_status': 'Required', 'data_type': 'List(500)'}, ...},
....
}
the top level keys are the file types.
the second level keys are the possible headers for that file type.
the third level keys are data_type and data_status for that header.
"""
#print("-I- getting data model, please be patient!!!!")
url = 'http://earthref.org/services/MagIC-data-model.txt'
offline = True # always get cached data model, as 2.5 is now static
#try:
# data = urllib2.urlopen(url)
#except urllib2.URLError:
# print '-W- Unable to fetch data model online\nTrying to use cached data model instead'
# offline = True
#except httplib.BadStatusLine:
# print '-W- Website: {} not responding\nTrying to use cached data model instead'.format(url)
# offline = True
if offline:
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
if file_type in ('bad file', 'empty_file'):
print('-W- Unable to read online data model.\nTrying to use cached data model instead')
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
ref_dicts = [d for d in data_model if d['column_nmb'] != '>>>>>>>>>>']
file_types = [d['field_name'] for d in data_model if d['column_nmb'] == 'tab delimited']
file_types.insert(0, file_type)
complete_ref = {}
dictionary = {}
n = 0
for d in ref_dicts:
if d['field_name'] in file_types:
complete_ref[file_types[n]] = dictionary
n += 1
dictionary = {}
else:
dictionary[d['field_name_oracle']] = {'data_type': d['data_type'], 'data_status': d['data_status']}
return complete_ref |
def upload_sticker_file(self, user_id, png_sticker):
"""
Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success.
https://core.telegram.org/bots/api#uploadstickerfile
Parameters:
:param user_id: User identifier of sticker file owner
:type user_id: int
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files »
:type png_sticker: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns the uploaded File on success
:rtype: pytgbot.api_types.receivable.media.File
"""
from pytgbot.api_types.sendable.files import InputFile
assert_type_or_raise(user_id, int, parameter_name="user_id")
assert_type_or_raise(png_sticker, InputFile, parameter_name="png_sticker")
result = self.do("uploadStickerFile", user_id=user_id, png_sticker=png_sticker)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.media import File
try:
return File.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type File", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result | Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success.
https://core.telegram.org/bots/api#uploadstickerfile
Parameters:
:param user_id: User identifier of sticker file owner
:type user_id: int
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files »
:type png_sticker: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns the uploaded File on success
:rtype: pytgbot.api_types.receivable.media.File | Below is the instruction that describes the task:
### Input:
Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success.
https://core.telegram.org/bots/api#uploadstickerfile
Parameters:
:param user_id: User identifier of sticker file owner
:type user_id: int
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files »
:type png_sticker: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns the uploaded File on success
:rtype: pytgbot.api_types.receivable.media.File
### Response:
def upload_sticker_file(self, user_id, png_sticker):
"""
Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success.
https://core.telegram.org/bots/api#uploadstickerfile
Parameters:
:param user_id: User identifier of sticker file owner
:type user_id: int
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files »
:type png_sticker: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns the uploaded File on success
:rtype: pytgbot.api_types.receivable.media.File
"""
from pytgbot.api_types.sendable.files import InputFile
assert_type_or_raise(user_id, int, parameter_name="user_id")
assert_type_or_raise(png_sticker, InputFile, parameter_name="png_sticker")
result = self.do("uploadStickerFile", user_id=user_id, png_sticker=png_sticker)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.media import File
try:
return File.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type File", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result |
def td_sp(points, speed_threshold):
""" Top-Down Speed-Based Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
speed_threshold (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory
"""
if len(points) <= 2:
return points
else:
max_speed_threshold = 0
found_index = 0
for i in range(1, len(points)-1):
dt1 = time_dist(points[i], points[i-1])
if dt1 == 0:
dt1 = 0.000000001
vim = loc_dist(points[i], points[i-1]) / dt1
dt2 = time_dist(points[i+1], points[i])
if dt2 == 0:
dt2 = 0.000000001
vi_ = loc_dist(points[i+1], points[i]) / dt2
if abs(vi_ - vim) > max_speed_threshold:
max_speed_threshold = abs(vi_ - vim)
found_index = i
if max_speed_threshold > speed_threshold:
one = td_sp(points[:found_index], speed_threshold)
two = td_sp(points[found_index:], speed_threshold)
one.extend(two)
return one
else:
return [points[0], points[-1]] | Top-Down Speed-Based Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
speed_threshold (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory | Below is the instruction that describes the task:
### Input:
Top-Down Speed-Based Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
speed_threshold (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory
### Response:
def td_sp(points, speed_threshold):
""" Top-Down Speed-Based Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
speed_threshold (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory
"""
if len(points) <= 2:
return points
else:
max_speed_threshold = 0
found_index = 0
for i in range(1, len(points)-1):
dt1 = time_dist(points[i], points[i-1])
if dt1 == 0:
dt1 = 0.000000001
vim = loc_dist(points[i], points[i-1]) / dt1
dt2 = time_dist(points[i+1], points[i])
if dt2 == 0:
dt2 = 0.000000001
vi_ = loc_dist(points[i+1], points[i]) / dt2
if abs(vi_ - vim) > max_speed_threshold:
max_speed_threshold = abs(vi_ - vim)
found_index = i
if max_speed_threshold > speed_threshold:
one = td_sp(points[:found_index], speed_threshold)
two = td_sp(points[found_index:], speed_threshold)
one.extend(two)
return one
else:
return [points[0], points[-1]] |
def delete_consent_id(self, consent_id: str) -> dict:
"""Delete documents with this consent_id, calls the DELETE /consent/{consentId} endpoint.
>>> from las import Client
>>> client = Client(endpoint='<api endpoint>')
>>> client.delete_consent_id('<consent id>')
:param consent_id: Delete documents with this consent_id
:type consent_id: str
:return: Delete consent id response from REST API
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
body = json.dumps({}).encode()
uri, headers = self._create_signing_headers('DELETE', f'/consents/{consent_id}', body)
delete_consent_id_consent = requests.delete(
url=uri.geturl(),
headers=headers,
data=body
)
response = _json_decode(delete_consent_id_consent)
return response | Delete documents with this consent_id, calls the DELETE /consent/{consentId} endpoint.
>>> from las import Client
>>> client = Client(endpoint='<api endpoint>')
>>> client.delete_consent_id('<consent id>')
:param consent_id: Delete documents with this consent_id
:type consent_id: str
:return: Delete consent id response from REST API
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests | Below is the the instruction that describes the task:
### Input:
Delete documents with this consent_id, calls the DELETE /consent/{consentId} endpoint.
>>> from las import Client
>>> client = Client(endpoint='<api endpoint>')
>>> client.delete_consent_id('<consent id>')
:param consent_id: Delete documents with this consent_id
:type consent_id: str
:return: Delete consent id response from REST API
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
### Response:
def delete_consent_id(self, consent_id: str) -> dict:
    """Delete documents with this consent_id, calls the DELETE /consent/{consentId} endpoint.
    >>> from las import Client
    >>> client = Client(endpoint='<api endpoint>')
    >>> client.delete_consent_id('<consent id>')
    :param consent_id: Delete documents with this consent_id
    :type consent_id: str
    :return: Delete consent id response from REST API
    :rtype: dict
    :raises InvalidCredentialsException: If the credentials are invalid
    :raises TooManyRequestsException: If limit of requests per second is reached
    :raises LimitExceededException: If limit of total requests per month is reached
    :raises requests.exception.RequestException: If error was raised by requests
    """
    # The DELETE request carries an empty JSON body, which still has to be signed.
    payload = json.dumps({}).encode()
    uri, headers = self._create_signing_headers('DELETE', f'/consents/{consent_id}', payload)
    raw_response = requests.delete(
        url=uri.geturl(),
        headers=headers,
        data=payload,
    )
    return _json_decode(raw_response)
def files_set_public_or_private(self, request, set_public, files_queryset, folders_queryset):
"""
Action which enables or disables permissions for selected files and files in selected folders to clipboard (set them private or public).
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
# We define it like that so that we can modify it inside the set_files
# function
files_count = [0]
def set_files(files):
for f in files:
if f.is_public != set_public:
f.is_public = set_public
f.save()
files_count[0] += 1
def set_folders(folders):
for f in folders:
set_files(f.files)
set_folders(f.children.all())
set_files(files_queryset)
set_folders(folders_queryset)
if set_public:
self.message_user(request, _("Successfully disabled permissions for %(count)d files.") % {
"count": files_count[0],
})
else:
self.message_user(request, _("Successfully enabled permissions for %(count)d files.") % {
"count": files_count[0],
})
return None | Action which enables or disables permissions for selected files and files in selected folders to clipboard (set them private or public). | Below is the the instruction that describes the task:
### Input:
Action which enables or disables permissions for selected files and files in selected folders to clipboard (set them private or public).
### Response:
def files_set_public_or_private(self, request, set_public, files_queryset, folders_queryset):
    """
    Action which enables or disables permissions for selected files and files in selected folders to clipboard (set them private or public).
    """
    if not self.has_change_permission(request):
        raise PermissionDenied
    if request.method != 'POST':
        return None

    check_files_edit_permissions(request, files_queryset)
    check_folder_edit_permissions(request, folders_queryset)

    # Single-element list so the nested closures below can mutate the counter.
    changed = [0]

    def apply_to_files(file_iterable):
        for file_obj in file_iterable:
            if file_obj.is_public != set_public:
                file_obj.is_public = set_public
                file_obj.save()
                changed[0] += 1

    def apply_to_folders(folder_iterable):
        # Recurse depth-first through the folder tree.
        for folder in folder_iterable:
            apply_to_files(folder.files)
            apply_to_folders(folder.children.all())

    apply_to_files(files_queryset)
    apply_to_folders(folders_queryset)

    # Making files public disables permission checks; making them private enables them.
    if set_public:
        message = _("Successfully disabled permissions for %(count)d files.")
    else:
        message = _("Successfully enabled permissions for %(count)d files.")
    self.message_user(request, message % {"count": changed[0]})
    return None
def ensure_vbounds(self, use_margins=None):
"""Ensure the cursor is within vertical screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
cursor is bounded by top and and bottom
margins, instead of ``[0; lines - 1]``.
"""
if (use_margins or mo.DECOM in self.mode) and self.margins is not None:
top, bottom = self.margins
else:
top, bottom = 0, self.lines - 1
self.cursor.y = min(max(top, self.cursor.y), bottom) | Ensure the cursor is within vertical screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
cursor is bounded by top and and bottom
margins, instead of ``[0; lines - 1]``. | Below is the the instruction that describes the task:
### Input:
Ensure the cursor is within vertical screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
cursor is bounded by top and and bottom
margins, instead of ``[0; lines - 1]``.
### Response:
def ensure_vbounds(self, use_margins=None):
    """Ensure the cursor is within vertical screen bounds.
    :param bool use_margins: when ``True`` or when
                             :data:`~pyte.modes.DECOM` is set,
                             cursor is bounded by top and bottom
                             margins, instead of ``[0; lines - 1]``.
    """
    # ``or`` short-circuits, so DECOM is only consulted when use_margins is falsy.
    margins_active = use_margins or mo.DECOM in self.mode
    if margins_active and self.margins is not None:
        top, bottom = self.margins
    else:
        top, bottom = 0, self.lines - 1
    # Clamp the cursor row into [top, bottom].
    self.cursor.y = min(max(top, self.cursor.y), bottom)
def unregister(self, entity_class, entity):
"""
Unregisters the given entity for the given class and discards its
state information.
"""
EntityState.release(entity, self)
self.__entity_set_map[entity_class].remove(entity) | Unregisters the given entity for the given class and discards its
state information. | Below is the the instruction that describes the task:
### Input:
Unregisters the given entity for the given class and discards its
state information.
### Response:
def unregister(self, entity_class, entity):
    """
    Unregisters the given entity for the given class and discards its
    state information.
    """
    # Release the tracked state first, then detach the entity from its class set.
    EntityState.release(entity, self)
    registered_entities = self.__entity_set_map[entity_class]
    registered_entities.remove(entity)
def get_client_parameters(username: str, ip_address: str, user_agent: str) -> dict:
"""
Get query parameters for filtering AccessAttempt queryset.
This method returns a dict that guarantees iteration order for keys and values,
and can so be used in e.g. the generation of hash keys or other deterministic functions.
"""
filter_kwargs = dict()
if settings.AXES_ONLY_USER_FAILURES:
# 1. Only individual usernames can be tracked with parametrization
filter_kwargs['username'] = username
else:
if settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
# 2. A combination of username and IP address can be used as well
filter_kwargs['username'] = username
filter_kwargs['ip_address'] = ip_address
else:
# 3. Default case is to track the IP address only, which is the most secure option
filter_kwargs['ip_address'] = ip_address
if settings.AXES_USE_USER_AGENT:
# 4. The HTTP User-Agent can be used to track e.g. one browser
filter_kwargs['user_agent'] = user_agent
return filter_kwargs | Get query parameters for filtering AccessAttempt queryset.
This method returns a dict that guarantees iteration order for keys and values,
and can so be used in e.g. the generation of hash keys or other deterministic functions. | Below is the the instruction that describes the task:
### Input:
Get query parameters for filtering AccessAttempt queryset.
This method returns a dict that guarantees iteration order for keys and values,
and can so be used in e.g. the generation of hash keys or other deterministic functions.
### Response:
def get_client_parameters(username: str, ip_address: str, user_agent: str) -> dict:
    """
    Get query parameters for filtering AccessAttempt queryset.
    This method returns a dict that guarantees iteration order for keys and values,
    and can so be used in e.g. the generation of hash keys or other deterministic functions.
    """
    # Insertion order matters for the determinism guarantee above.
    filter_kwargs = {}
    if settings.AXES_ONLY_USER_FAILURES:
        # 1. Only individual usernames can be tracked with parametrization
        filter_kwargs['username'] = username
    elif settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
        # 2. A combination of username and IP address can be used as well
        filter_kwargs['username'] = username
        filter_kwargs['ip_address'] = ip_address
    else:
        # 3. Default case is to track the IP address only, which is the most secure option
        filter_kwargs['ip_address'] = ip_address
    if settings.AXES_USE_USER_AGENT:
        # 4. The HTTP User-Agent can be used to track e.g. one browser
        filter_kwargs['user_agent'] = user_agent
    return filter_kwargs
def get_worksheet(gc, gfile_id, wks_name, write_access=False, new_sheet_dimensions=(1000, 100)):
"""DOCS..."""
spsh = gc.open_by_key(gfile_id)
# if worksheet name is not provided , take first worksheet
if wks_name is None:
wks = spsh.sheet1
# if worksheet name provided and exist in given spreadsheet
else:
try:
wks = spsh.worksheet(wks_name)
except:
#rows, cols = new_sheet_dimensions
wks = spsh.add_worksheet(
wks_name, *new_sheet_dimensions) if write_access == True else None
    return wks | DOCS... | Below is the instruction that describes the task:
### Input:
DOCS...
### Response:
def get_worksheet(gc, gfile_id, wks_name, write_access=False, new_sheet_dimensions=(1000, 100)):
    """Return a worksheet of the spreadsheet identified by ``gfile_id``.

    :param gc: authorized gspread client used to open the spreadsheet
    :param gfile_id: key/id of the Google spreadsheet file
    :param wks_name: worksheet name; when ``None`` the first worksheet is used
    :param write_access: when ``True``, a missing worksheet is created instead
                         of returning ``None``
    :param new_sheet_dimensions: ``(rows, cols)`` used when creating a new sheet
    :return: the worksheet object, or ``None`` when ``wks_name`` does not exist
             and ``write_access`` is falsy
    """
    spsh = gc.open_by_key(gfile_id)
    # If no worksheet name is provided, take the first worksheet.
    if wks_name is None:
        return spsh.sheet1
    try:
        # Worksheet name provided and it exists in the given spreadsheet.
        return spsh.worksheet(wks_name)
    except Exception:
        # NOTE(review): was a bare ``except:``; catching Exception keeps the
        # best-effort lookup without swallowing KeyboardInterrupt/SystemExit.
        if write_access:
            return spsh.add_worksheet(wks_name, *new_sheet_dimensions)
        return None
def get_instruction(self, idx, off=None):
"""
Get a particular instruction by using (default) the index of the address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
if off is not None:
idx = self.off_to_pos(off)
if self.cached_instructions is None:
self.get_instructions()
return self.cached_instructions[idx] | Get a particular instruction by using (default) the index of the address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object | Below is the the instruction that describes the task:
### Input:
Get a particular instruction by using (default) the index of the address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
### Response:
def get_instruction(self, idx, off=None):
    """
    Get a particular instruction by using (default) the index of the address if specified
    :param idx: index of the instruction (the position in the list of the instruction)
    :type idx: int
    :param off: address of the instruction
    :type off: int
    :rtype: an :class:`Instruction` object
    """
    # An explicit address overrides the positional index.
    position = idx if off is None else self.off_to_pos(off)
    # Lazily populate the instruction cache on first access.
    if self.cached_instructions is None:
        self.get_instructions()
    return self.cached_instructions[position]
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
        return self.keys_to_values(keys) | Filter data according to the given arguments. | Below is the instruction that describes the task:
### Input:
Filter data according to the given arguments.
### Response:
def filter(self, **kwargs):
    """
    Filter data according to the given arguments.
    """
    # Resolve matching keys, then map them straight to their values.
    return self.keys_to_values(self.filter_keys(**kwargs))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.