code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def DocbookXInclude(env, target, source, *args, **kw):
"""
A pseudo-Builder, for resolving XIncludes in a separate processing step.
"""
# Init list of targets/sources
target, source = __extend_targets_sources(target, source)
# Setup builder
__builder = __select_builder(__xinclude_lxml_builder,__xinclude_libxml2_builder,__xmllint_builder)
# Create targets
result = []
for t,s in zip(target,source):
result.extend(__builder.__call__(env, t, s, **kw))
return result | A pseudo-Builder, for resolving XIncludes in a separate processing step. | Below is the the instruction that describes the task:
### Input:
A pseudo-Builder, for resolving XIncludes in a separate processing step.
### Response:
def DocbookXInclude(env, target, source, *args, **kw):
"""
A pseudo-Builder, for resolving XIncludes in a separate processing step.
"""
# Init list of targets/sources
target, source = __extend_targets_sources(target, source)
# Setup builder
__builder = __select_builder(__xinclude_lxml_builder,__xinclude_libxml2_builder,__xmllint_builder)
# Create targets
result = []
for t,s in zip(target,source):
result.extend(__builder.__call__(env, t, s, **kw))
return result |
def temperature(temp: Number, unit: str = 'C') -> str:
"""
Formats a temperature element into a string with both C and F values
Used for both Temp and Dew
Ex: 34°C (93°F)
"""
unit = unit.upper()
if not (temp and unit in ('C', 'F')):
return ''
if unit == 'C':
converted = temp.value * 1.8 + 32
converted = str(int(round(converted))) + '°F' # type: ignore
elif unit == 'F':
converted = (temp.value - 32) / 1.8
converted = str(int(round(converted))) + '°C' # type: ignore
return f'{temp.value}°{unit} ({converted})' | Formats a temperature element into a string with both C and F values
Used for both Temp and Dew
Ex: 34°C (93°F) | Below is the the instruction that describes the task:
### Input:
Formats a temperature element into a string with both C and F values
Used for both Temp and Dew
Ex: 34°C (93°F)
### Response:
def temperature(temp: Number, unit: str = 'C') -> str:
"""
Formats a temperature element into a string with both C and F values
Used for both Temp and Dew
Ex: 34°C (93°F)
"""
unit = unit.upper()
if not (temp and unit in ('C', 'F')):
return ''
if unit == 'C':
converted = temp.value * 1.8 + 32
converted = str(int(round(converted))) + '°F' # type: ignore
elif unit == 'F':
converted = (temp.value - 32) / 1.8
converted = str(int(round(converted))) + '°C' # type: ignore
return f'{temp.value}°{unit} ({converted})' |
def serialize_text(self):
'''Returns a serialized form of the Namepace.
All the elements in the namespace are sorted by
URI, joined to the associated prefix with a colon and
separated with spaces.
:return: bytes
'''
if self._uri_to_prefix is None or len(self._uri_to_prefix) == 0:
return b''
od = collections.OrderedDict(sorted(self._uri_to_prefix.items()))
data = []
for uri in od:
data.append(uri + ':' + od[uri])
return ' '.join(data).encode('utf-8') | Returns a serialized form of the Namepace.
All the elements in the namespace are sorted by
URI, joined to the associated prefix with a colon and
separated with spaces.
:return: bytes | Below is the the instruction that describes the task:
### Input:
Returns a serialized form of the Namepace.
All the elements in the namespace are sorted by
URI, joined to the associated prefix with a colon and
separated with spaces.
:return: bytes
### Response:
def serialize_text(self):
'''Returns a serialized form of the Namepace.
All the elements in the namespace are sorted by
URI, joined to the associated prefix with a colon and
separated with spaces.
:return: bytes
'''
if self._uri_to_prefix is None or len(self._uri_to_prefix) == 0:
return b''
od = collections.OrderedDict(sorted(self._uri_to_prefix.items()))
data = []
for uri in od:
data.append(uri + ':' + od[uri])
return ' '.join(data).encode('utf-8') |
def get_all_rooted_subtrees_as_lists(self, start_location=None):
"""Return a list of all rooted subtrees (each as a list of Location objects)."""
if start_location is not None and start_location not in self._location_to_children:
raise AssertionError(u'Received invalid start_location {} that was not present '
u'in the tree. Present root locations of complex @optional '
u'queries (ones that expand vertex fields within) are: {}'
.format(start_location, self._location_to_children.keys()))
if start_location is None:
start_location = self._root_location
if len(self._location_to_children[start_location]) == 0:
# Node with no children only returns a singleton list containing the null set.
return [[]]
current_children = sorted(self._location_to_children[start_location])
# Recursively find all rooted subtrees of each of the children of the current node.
location_to_list_of_subtrees = {
location: list(self.get_all_rooted_subtrees_as_lists(location))
for location in current_children
}
# All subsets of direct child Location objects
all_location_subsets = [
list(subset)
for subset in itertools.chain(*[
itertools.combinations(current_children, x)
for x in range(0, len(current_children) + 1)
])
]
# For every possible subset of the children, and every combination of the chosen
# subtrees within, create a list of subtree Location lists.
new_subtrees_as_lists = []
for location_subset in all_location_subsets:
all_child_subtree_possibilities = [
location_to_list_of_subtrees[location]
for location in location_subset
]
all_child_subtree_combinations = itertools.product(*all_child_subtree_possibilities)
for child_subtree_combination in all_child_subtree_combinations:
merged_child_subtree_combination = list(itertools.chain(*child_subtree_combination))
new_subtree_as_list = location_subset + merged_child_subtree_combination
new_subtrees_as_lists.append(new_subtree_as_list)
return new_subtrees_as_lists | Return a list of all rooted subtrees (each as a list of Location objects). | Below is the the instruction that describes the task:
### Input:
Return a list of all rooted subtrees (each as a list of Location objects).
### Response:
def get_all_rooted_subtrees_as_lists(self, start_location=None):
"""Return a list of all rooted subtrees (each as a list of Location objects)."""
if start_location is not None and start_location not in self._location_to_children:
raise AssertionError(u'Received invalid start_location {} that was not present '
u'in the tree. Present root locations of complex @optional '
u'queries (ones that expand vertex fields within) are: {}'
.format(start_location, self._location_to_children.keys()))
if start_location is None:
start_location = self._root_location
if len(self._location_to_children[start_location]) == 0:
# Node with no children only returns a singleton list containing the null set.
return [[]]
current_children = sorted(self._location_to_children[start_location])
# Recursively find all rooted subtrees of each of the children of the current node.
location_to_list_of_subtrees = {
location: list(self.get_all_rooted_subtrees_as_lists(location))
for location in current_children
}
# All subsets of direct child Location objects
all_location_subsets = [
list(subset)
for subset in itertools.chain(*[
itertools.combinations(current_children, x)
for x in range(0, len(current_children) + 1)
])
]
# For every possible subset of the children, and every combination of the chosen
# subtrees within, create a list of subtree Location lists.
new_subtrees_as_lists = []
for location_subset in all_location_subsets:
all_child_subtree_possibilities = [
location_to_list_of_subtrees[location]
for location in location_subset
]
all_child_subtree_combinations = itertools.product(*all_child_subtree_possibilities)
for child_subtree_combination in all_child_subtree_combinations:
merged_child_subtree_combination = list(itertools.chain(*child_subtree_combination))
new_subtree_as_list = location_subset + merged_child_subtree_combination
new_subtrees_as_lists.append(new_subtree_as_list)
return new_subtrees_as_lists |
def _symbol_or_keyword_handler(c, ctx, is_field_name=False):
"""Handles the start of an unquoted text token.
This may be an operator (if in an s-expression), an identifier symbol, or a keyword.
"""
in_sexp = ctx.container.ion_type is IonType.SEXP
if c not in _IDENTIFIER_STARTS:
if in_sexp and c in _OPERATORS:
c_next, _ = yield
ctx.queue.unread(c_next)
yield ctx.immediate_transition(_operator_symbol_handler(c, ctx))
_illegal_character(c, ctx)
assert not ctx.value
ctx.set_unicode().set_ion_type(IonType.SYMBOL)
val = ctx.value
val.append(c)
maybe_null = c == _N_LOWER
maybe_nan = maybe_null
maybe_true = c == _T_LOWER
maybe_false = c == _F_LOWER
c, self = yield
trans = ctx.immediate_transition(self)
keyword_trans = None
match_index = 0
while True:
def check_keyword(name, keyword_sequence, ion_type, value, match_transition=lambda: None):
maybe_keyword = True
transition = None
if match_index < len(keyword_sequence):
maybe_keyword = c == keyword_sequence[match_index]
else:
transition = match_transition()
if transition is not None:
pass
elif _ends_value(c):
if is_field_name:
_illegal_character(c, ctx, '%s keyword as field name not allowed.' % (name,))
transition = ctx.event_transition(IonEvent, IonEventType.SCALAR, ion_type, value)
elif c == _COLON:
message = ''
if is_field_name:
message = '%s keyword as field name not allowed.' % (name,)
_illegal_character(c, ctx, message)
elif in_sexp and c in _OPERATORS:
transition = ctx.event_transition(IonEvent, IonEventType.SCALAR, ion_type, value)
else:
maybe_keyword = False
return maybe_keyword, transition
if maybe_null:
def check_null_dot():
transition = None
found = c == _DOT
if found:
if is_field_name:
_illegal_character(c, ctx, "Illegal character in field name.")
transition = ctx.immediate_transition(_typed_null_handler(c, ctx))
return transition
maybe_null, keyword_trans = check_keyword('null', _NULL_SUFFIX.sequence,
IonType.NULL, None, check_null_dot)
if maybe_nan:
maybe_nan, keyword_trans = check_keyword('nan', _NAN_SUFFIX, IonType.FLOAT, _NAN)
elif maybe_true:
maybe_true, keyword_trans = check_keyword('true', _TRUE_SUFFIX, IonType.BOOL, True)
elif maybe_false:
maybe_false, keyword_trans = check_keyword('false', _FALSE_SUFFIX, IonType.BOOL, False)
if maybe_null or maybe_nan or maybe_true or maybe_false:
if keyword_trans is not None:
trans = keyword_trans
else:
val.append(c)
match_index += 1
else:
if c in _SYMBOL_TOKEN_TERMINATORS:
# This might be an annotation or a field name
ctx.set_pending_symbol(val)
trans = ctx.immediate_transition(ctx.whence)
elif _ends_value(c) or (in_sexp and c in _OPERATORS):
trans = ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, val.as_symbol())
else:
trans = ctx.immediate_transition(_unquoted_symbol_handler(c, ctx, is_field_name=is_field_name))
c, _ = yield trans | Handles the start of an unquoted text token.
This may be an operator (if in an s-expression), an identifier symbol, or a keyword. | Below is the the instruction that describes the task:
### Input:
Handles the start of an unquoted text token.
This may be an operator (if in an s-expression), an identifier symbol, or a keyword.
### Response:
def _symbol_or_keyword_handler(c, ctx, is_field_name=False):
"""Handles the start of an unquoted text token.
This may be an operator (if in an s-expression), an identifier symbol, or a keyword.
"""
in_sexp = ctx.container.ion_type is IonType.SEXP
if c not in _IDENTIFIER_STARTS:
if in_sexp and c in _OPERATORS:
c_next, _ = yield
ctx.queue.unread(c_next)
yield ctx.immediate_transition(_operator_symbol_handler(c, ctx))
_illegal_character(c, ctx)
assert not ctx.value
ctx.set_unicode().set_ion_type(IonType.SYMBOL)
val = ctx.value
val.append(c)
maybe_null = c == _N_LOWER
maybe_nan = maybe_null
maybe_true = c == _T_LOWER
maybe_false = c == _F_LOWER
c, self = yield
trans = ctx.immediate_transition(self)
keyword_trans = None
match_index = 0
while True:
def check_keyword(name, keyword_sequence, ion_type, value, match_transition=lambda: None):
maybe_keyword = True
transition = None
if match_index < len(keyword_sequence):
maybe_keyword = c == keyword_sequence[match_index]
else:
transition = match_transition()
if transition is not None:
pass
elif _ends_value(c):
if is_field_name:
_illegal_character(c, ctx, '%s keyword as field name not allowed.' % (name,))
transition = ctx.event_transition(IonEvent, IonEventType.SCALAR, ion_type, value)
elif c == _COLON:
message = ''
if is_field_name:
message = '%s keyword as field name not allowed.' % (name,)
_illegal_character(c, ctx, message)
elif in_sexp and c in _OPERATORS:
transition = ctx.event_transition(IonEvent, IonEventType.SCALAR, ion_type, value)
else:
maybe_keyword = False
return maybe_keyword, transition
if maybe_null:
def check_null_dot():
transition = None
found = c == _DOT
if found:
if is_field_name:
_illegal_character(c, ctx, "Illegal character in field name.")
transition = ctx.immediate_transition(_typed_null_handler(c, ctx))
return transition
maybe_null, keyword_trans = check_keyword('null', _NULL_SUFFIX.sequence,
IonType.NULL, None, check_null_dot)
if maybe_nan:
maybe_nan, keyword_trans = check_keyword('nan', _NAN_SUFFIX, IonType.FLOAT, _NAN)
elif maybe_true:
maybe_true, keyword_trans = check_keyword('true', _TRUE_SUFFIX, IonType.BOOL, True)
elif maybe_false:
maybe_false, keyword_trans = check_keyword('false', _FALSE_SUFFIX, IonType.BOOL, False)
if maybe_null or maybe_nan or maybe_true or maybe_false:
if keyword_trans is not None:
trans = keyword_trans
else:
val.append(c)
match_index += 1
else:
if c in _SYMBOL_TOKEN_TERMINATORS:
# This might be an annotation or a field name
ctx.set_pending_symbol(val)
trans = ctx.immediate_transition(ctx.whence)
elif _ends_value(c) or (in_sexp and c in _OPERATORS):
trans = ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, val.as_symbol())
else:
trans = ctx.immediate_transition(_unquoted_symbol_handler(c, ctx, is_field_name=is_field_name))
c, _ = yield trans |
def _make_symbol_function(handle, name, func_name):
"""Create a symbol function by handle and function name."""
code, doc_str = _generate_symbol_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
symbol_function = local[func_name]
symbol_function.__name__ = func_name
symbol_function.__doc__ = doc_str
symbol_function.__module__ = 'mxnet.symbol'
return symbol_function | Create a symbol function by handle and function name. | Below is the the instruction that describes the task:
### Input:
Create a symbol function by handle and function name.
### Response:
def _make_symbol_function(handle, name, func_name):
"""Create a symbol function by handle and function name."""
code, doc_str = _generate_symbol_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
symbol_function = local[func_name]
symbol_function.__name__ = func_name
symbol_function.__doc__ = doc_str
symbol_function.__module__ = 'mxnet.symbol'
return symbol_function |
def hash(symbol, hash_type='sha1'):
""" create a hash code from symbol """
code = hashlib.new(hash_type)
code.update(str(symbol).encode('utf-8'))
return code.hexdigest() | create a hash code from symbol | Below is the the instruction that describes the task:
### Input:
create a hash code from symbol
### Response:
def hash(symbol, hash_type='sha1'):
""" create a hash code from symbol """
code = hashlib.new(hash_type)
code.update(str(symbol).encode('utf-8'))
return code.hexdigest() |
def add(self, key, value):
"""Add an entry to a list preference
Add `value` to the list of entries for the `key` preference.
"""
if not key in self.prefs:
self.prefs[key] = []
self.prefs[key].append(value) | Add an entry to a list preference
Add `value` to the list of entries for the `key` preference. | Below is the the instruction that describes the task:
### Input:
Add an entry to a list preference
Add `value` to the list of entries for the `key` preference.
### Response:
def add(self, key, value):
"""Add an entry to a list preference
Add `value` to the list of entries for the `key` preference.
"""
if not key in self.prefs:
self.prefs[key] = []
self.prefs[key].append(value) |
def visit_Dict(self, node):
"""
Process dict arguments.
"""
if self.should_check_whitelist(node):
for key in node.keys:
if key.s in self.whitelist or key.s.startswith("debug_"):
continue
self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(key.s)))
if self.should_check_extra_exception(node):
for value in node.values:
self.check_exception_arg(value)
super(LoggingVisitor, self).generic_visit(node) | Process dict arguments. | Below is the the instruction that describes the task:
### Input:
Process dict arguments.
### Response:
def visit_Dict(self, node):
"""
Process dict arguments.
"""
if self.should_check_whitelist(node):
for key in node.keys:
if key.s in self.whitelist or key.s.startswith("debug_"):
continue
self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(key.s)))
if self.should_check_extra_exception(node):
for value in node.values:
self.check_exception_arg(value)
super(LoggingVisitor, self).generic_visit(node) |
def add_etag(self, overwrite=False, weak=False):
"""Add an etag for the current response if there is none yet."""
if overwrite or "etag" not in self.headers:
self.set_etag(generate_etag(self.get_data()), weak) | Add an etag for the current response if there is none yet. | Below is the the instruction that describes the task:
### Input:
Add an etag for the current response if there is none yet.
### Response:
def add_etag(self, overwrite=False, weak=False):
"""Add an etag for the current response if there is none yet."""
if overwrite or "etag" not in self.headers:
self.set_etag(generate_etag(self.get_data()), weak) |
def generate(ctx, url, *args, **kwargs):
"""
Generate preview for URL.
"""
file_previews = ctx.obj['file_previews']
options = {}
metadata = kwargs['metadata']
width = kwargs['width']
height = kwargs['height']
output_format = kwargs['format']
if metadata:
options['metadata'] = metadata.split(',')
if width:
options.setdefault('size', {})
options['size']['width'] = width
if height:
options.setdefault('size', {})
options['size']['height'] = height
if output_format:
options['format'] = output_format
results = file_previews.generate(url, **options)
click.echo(results) | Generate preview for URL. | Below is the the instruction that describes the task:
### Input:
Generate preview for URL.
### Response:
def generate(ctx, url, *args, **kwargs):
"""
Generate preview for URL.
"""
file_previews = ctx.obj['file_previews']
options = {}
metadata = kwargs['metadata']
width = kwargs['width']
height = kwargs['height']
output_format = kwargs['format']
if metadata:
options['metadata'] = metadata.split(',')
if width:
options.setdefault('size', {})
options['size']['width'] = width
if height:
options.setdefault('size', {})
options['size']['height'] = height
if output_format:
options['format'] = output_format
results = file_previews.generate(url, **options)
click.echo(results) |
def _class_dispatch(args, kwargs):
"""See 'class_multimethod'."""
_ = kwargs
if not args:
raise ValueError(
"Multimethods must be passed at least one positional arg.")
if not isinstance(args[0], type):
raise TypeError(
"class_multimethod must be called with a type, not instance.")
return args[0] | See 'class_multimethod'. | Below is the the instruction that describes the task:
### Input:
See 'class_multimethod'.
### Response:
def _class_dispatch(args, kwargs):
"""See 'class_multimethod'."""
_ = kwargs
if not args:
raise ValueError(
"Multimethods must be passed at least one positional arg.")
if not isinstance(args[0], type):
raise TypeError(
"class_multimethod must be called with a type, not instance.")
return args[0] |
def averageOnTime(vectors, numSamples=None):
"""
Returns the average on-time, averaged over all on-time runs.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns: (scalar average on-time of all outputs,
list containing frequency counts of each encountered on-time)
"""
# Special case given a 1 dimensional vector: it represents a single column
if vectors.ndim == 1:
vectors.shape = (-1,1)
numTimeSteps = len(vectors)
numElements = len(vectors[0])
# How many samples will we look at?
if numSamples is None:
numSamples = numElements
countOn = range(numElements)
else:
countOn = numpy.random.randint(0, numElements, numSamples)
# Compute the on-times and accumulate the frequency counts of each on-time
# encountered
sumOfLengths = 0.0
onTimeFreqCounts = None
n = 0
for i in countOn:
(onTime, segments, durations) = _listOfOnTimesInVec(vectors[:,i])
if onTime != 0.0:
sumOfLengths += onTime
n += segments
onTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts)
# Return the average on time of each element that was on.
if n > 0:
return (sumOfLengths/n, onTimeFreqCounts)
else:
return (0.0, onTimeFreqCounts) | Returns the average on-time, averaged over all on-time runs.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns: (scalar average on-time of all outputs,
list containing frequency counts of each encountered on-time) | Below is the the instruction that describes the task:
### Input:
Returns the average on-time, averaged over all on-time runs.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns: (scalar average on-time of all outputs,
list containing frequency counts of each encountered on-time)
### Response:
def averageOnTime(vectors, numSamples=None):
"""
Returns the average on-time, averaged over all on-time runs.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns: (scalar average on-time of all outputs,
list containing frequency counts of each encountered on-time)
"""
# Special case given a 1 dimensional vector: it represents a single column
if vectors.ndim == 1:
vectors.shape = (-1,1)
numTimeSteps = len(vectors)
numElements = len(vectors[0])
# How many samples will we look at?
if numSamples is None:
numSamples = numElements
countOn = range(numElements)
else:
countOn = numpy.random.randint(0, numElements, numSamples)
# Compute the on-times and accumulate the frequency counts of each on-time
# encountered
sumOfLengths = 0.0
onTimeFreqCounts = None
n = 0
for i in countOn:
(onTime, segments, durations) = _listOfOnTimesInVec(vectors[:,i])
if onTime != 0.0:
sumOfLengths += onTime
n += segments
onTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts)
# Return the average on time of each element that was on.
if n > 0:
return (sumOfLengths/n, onTimeFreqCounts)
else:
return (0.0, onTimeFreqCounts) |
def detach(self, attachments):
"""Remove an attachment, or a list of attachments, from this item. If the item has already been saved, the
attachments will be deleted on the server immediately. If the item has not yet been saved, the attachments will
simply not be created on the server the item is saved.
Removing attachments from an existing item will update the changekey of the item.
"""
if not is_iterable(attachments, generators_allowed=True):
attachments = [attachments]
for a in attachments:
if a.parent_item is not self:
raise ValueError('Attachment does not belong to this item')
if self.id:
# Item is already created. Detach the attachment server-side now
a.detach()
if a in self.attachments:
self.attachments.remove(a) | Remove an attachment, or a list of attachments, from this item. If the item has already been saved, the
attachments will be deleted on the server immediately. If the item has not yet been saved, the attachments will
simply not be created on the server the item is saved.
Removing attachments from an existing item will update the changekey of the item. | Below is the the instruction that describes the task:
### Input:
Remove an attachment, or a list of attachments, from this item. If the item has already been saved, the
attachments will be deleted on the server immediately. If the item has not yet been saved, the attachments will
simply not be created on the server the item is saved.
Removing attachments from an existing item will update the changekey of the item.
### Response:
def detach(self, attachments):
"""Remove an attachment, or a list of attachments, from this item. If the item has already been saved, the
attachments will be deleted on the server immediately. If the item has not yet been saved, the attachments will
simply not be created on the server the item is saved.
Removing attachments from an existing item will update the changekey of the item.
"""
if not is_iterable(attachments, generators_allowed=True):
attachments = [attachments]
for a in attachments:
if a.parent_item is not self:
raise ValueError('Attachment does not belong to this item')
if self.id:
# Item is already created. Detach the attachment server-side now
a.detach()
if a in self.attachments:
self.attachments.remove(a) |
def prt_tsv(self, prt, goea_results, **kws):
"""Write tab-separated table data"""
prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
tsv_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws)
RPT.prt_tsv(prt, tsv_data, **kws) | Write tab-separated table data | Below is the the instruction that describes the task:
### Input:
Write tab-separated table data
### Response:
def prt_tsv(self, prt, goea_results, **kws):
"""Write tab-separated table data"""
prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
tsv_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws)
RPT.prt_tsv(prt, tsv_data, **kws) |
def from_wif_or_ewif_hex(wif_hex: str, password: Optional[str] = None) -> SigningKeyType:
"""
Return SigningKey instance from Duniter WIF or EWIF in hexadecimal format
:param wif_hex: WIF or EWIF string in hexadecimal format
:param password: Password of EWIF encrypted seed
"""
wif_bytes = Base58Encoder.decode(wif_hex)
fi = wif_bytes[0:1]
if fi == b"\x01":
return SigningKey.from_wif_hex(wif_hex)
elif fi == b"\x02" and password is not None:
return SigningKey.from_ewif_hex(wif_hex, password)
else:
raise Exception("Error: Bad format: not WIF nor EWIF") | Return SigningKey instance from Duniter WIF or EWIF in hexadecimal format
:param wif_hex: WIF or EWIF string in hexadecimal format
:param password: Password of EWIF encrypted seed | Below is the the instruction that describes the task:
### Input:
Return SigningKey instance from Duniter WIF or EWIF in hexadecimal format
:param wif_hex: WIF or EWIF string in hexadecimal format
:param password: Password of EWIF encrypted seed
### Response:
def from_wif_or_ewif_hex(wif_hex: str, password: Optional[str] = None) -> SigningKeyType:
"""
Return SigningKey instance from Duniter WIF or EWIF in hexadecimal format
:param wif_hex: WIF or EWIF string in hexadecimal format
:param password: Password of EWIF encrypted seed
"""
wif_bytes = Base58Encoder.decode(wif_hex)
fi = wif_bytes[0:1]
if fi == b"\x01":
return SigningKey.from_wif_hex(wif_hex)
elif fi == b"\x02" and password is not None:
return SigningKey.from_ewif_hex(wif_hex, password)
else:
raise Exception("Error: Bad format: not WIF nor EWIF") |
def unregisterWalkthrough(self, walkthrough):
"""
Unregisters the inputed walkthrough from the application walkthroug
list.
:param walkthrough | <XWalkthrough>
"""
if type(walkthrough) in (str, unicode):
walkthrough = self.findWalkthrough(walkthrough)
try:
self._walkthroughs.remove(walkthrough)
except ValueError:
pass | Unregisters the inputed walkthrough from the application walkthroug
list.
:param walkthrough | <XWalkthrough> | Below is the the instruction that describes the task:
### Input:
Unregisters the inputed walkthrough from the application walkthroug
list.
:param walkthrough | <XWalkthrough>
### Response:
def unregisterWalkthrough(self, walkthrough):
"""
Unregisters the inputed walkthrough from the application walkthroug
list.
:param walkthrough | <XWalkthrough>
"""
if type(walkthrough) in (str, unicode):
walkthrough = self.findWalkthrough(walkthrough)
try:
self._walkthroughs.remove(walkthrough)
except ValueError:
pass |
def refresh(self):
"""Refresh the widgets from the current font"""
font = self.current_font
# refresh btn_bold
self.btn_bold.blockSignals(True)
self.btn_bold.setChecked(font.weight() > 50)
self.btn_bold.blockSignals(False)
# refresh btn_italic
self.btn_italic.blockSignals(True)
self.btn_italic.setChecked(font.italic())
self.btn_italic.blockSignals(False)
# refresh font size
self.spin_box.blockSignals(True)
self.spin_box.setValue(font.pointSize())
self.spin_box.blockSignals(False) | Refresh the widgets from the current font | Below is the the instruction that describes the task:
### Input:
Refresh the widgets from the current font
### Response:
def refresh(self):
"""Refresh the widgets from the current font"""
font = self.current_font
# refresh btn_bold
self.btn_bold.blockSignals(True)
self.btn_bold.setChecked(font.weight() > 50)
self.btn_bold.blockSignals(False)
# refresh btn_italic
self.btn_italic.blockSignals(True)
self.btn_italic.setChecked(font.italic())
self.btn_italic.blockSignals(False)
# refresh font size
self.spin_box.blockSignals(True)
self.spin_box.setValue(font.pointSize())
self.spin_box.blockSignals(False) |
def should_show_thanks_page_to(participant):
"""In the context of the /ad route, should the participant be shown
the thanks.html page instead of ad.html?
"""
if participant is None:
return False
status = participant.status
marked_done = participant.end_time is not None
ready_for_external_submission = (
status in ("overrecruited", "working") and marked_done
)
assignment_complete = status in ("submitted", "approved")
return assignment_complete or ready_for_external_submission | In the context of the /ad route, should the participant be shown
the thanks.html page instead of ad.html? | Below is the the instruction that describes the task:
### Input:
In the context of the /ad route, should the participant be shown
the thanks.html page instead of ad.html?
### Response:
def should_show_thanks_page_to(participant):
"""In the context of the /ad route, should the participant be shown
the thanks.html page instead of ad.html?
"""
if participant is None:
return False
status = participant.status
marked_done = participant.end_time is not None
ready_for_external_submission = (
status in ("overrecruited", "working") and marked_done
)
assignment_complete = status in ("submitted", "approved")
return assignment_complete or ready_for_external_submission |
def language(cls):
"""
Return language of the comic as a human-readable language name instead
of a 2-character ISO639-1 code.
"""
lang = 'Unknown (%s)' % cls.lang
if pycountry is None:
if cls.lang in languages.Languages:
lang = languages.Languages[cls.lang]
else:
try:
lang = pycountry.languages.get(alpha2 = cls.lang).name
except KeyError:
try:
lang = pycountry.languages.get(iso639_1_code = cls.lang).name
except KeyError:
pass
return lang | Return language of the comic as a human-readable language name instead
of a 2-character ISO639-1 code. | Below is the the instruction that describes the task:
### Input:
Return language of the comic as a human-readable language name instead
of a 2-character ISO639-1 code.
### Response:
def language(cls):
"""
Return language of the comic as a human-readable language name instead
of a 2-character ISO639-1 code.
"""
lang = 'Unknown (%s)' % cls.lang
if pycountry is None:
if cls.lang in languages.Languages:
lang = languages.Languages[cls.lang]
else:
try:
lang = pycountry.languages.get(alpha2 = cls.lang).name
except KeyError:
try:
lang = pycountry.languages.get(iso639_1_code = cls.lang).name
except KeyError:
pass
return lang |
def download_dataset(self, owner, id, **kwargs):
    """
    Download dataset as a .zip of all files originally uploaded.

    For clean, extracted data use `GET:/sql` or `GET:/sparql` instead.
    Synchronous by default; pass a `callback` keyword function to make the
    request asynchronously, in which case the request thread is returned.

    :param callback function: callback for asynchronous requests (optional)
    :param str owner: user name of the dataset creator, e.g. `jonloyens`
        in https://data.world/jonloyens/an-intro-to-dataworld-dataset (required)
    :param str id: dataset unique identifier, e.g.
        `an-intro-to-dataworld-dataset` in the URL above (required)
    :return: None, or the request thread when called asynchronously
    """
    # Only the payload (not the full HTTP response) is wanted.
    kwargs['_return_http_data_only'] = True
    # With or without a callback the lower-level call is invoked
    # identically and its result returned: it is the callback that makes
    # it return a thread instead of data.
    return self.download_dataset_with_http_info(owner, id, **kwargs)
This endpoint will return a .zip containing all files within the dataset as originally uploaded. If you are interested retrieving clean data extracted from those files by data.world, check out `GET:/sql` and `GET:/sparql`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_dataset(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Download dataset
This endpoint will return a .zip containing all files within the dataset as originally uploaded. If you are interested retrieving clean data extracted from those files by data.world, check out `GET:/sql` and `GET:/sparql`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_dataset(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def download_dataset(self, owner, id, **kwargs):
"""
Download dataset
This endpoint will return a .zip containing all files within the dataset as originally uploaded. If you are interested retrieving clean data extracted from those files by data.world, check out `GET:/sql` and `GET:/sparql`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_dataset(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.download_dataset_with_http_info(owner, id, **kwargs)
else:
(data) = self.download_dataset_with_http_info(owner, id, **kwargs)
return data |
def datasets(self, libref: str = '') -> str:
    """Query a libref via PROC DATASETS; the results show information
    about the libref including its members.

    :param libref: the libref to query; empty means the session default
    :return: in batch mode, the HTML results dict or trimmed log text;
        otherwise results are displayed/printed and None is returned
    """
    # Assemble the SAS code to submit, optionally scoped to one libref.
    pieces = ["proc datasets"]
    if libref:
        pieces.append(" dd=" + libref)
    pieces.append("; quit;")
    code = "".join(pieces)
    if self.nosub:
        # Dry-run mode: just show what would be submitted.
        print(code)
        return
    if self.results.lower() == 'html':
        ll = self._io.submit(code, "html")
        if self.batch:
            return ll
        self.DISPLAY(self.HTML(ll['LST']))
        return
    ll = self._io.submit(code, "text")
    # Trim the submit-marker tail off the log.
    log = ll['LOG'].rsplit(";*\';*\";*/;\n")[0]
    if self.batch:
        return log
    print(log)
:param libref: the libref to query
:return: | Below is the the instruction that describes the task:
### Input:
This method is used to query a libref. The results show information about the libref including members.
:param libref: the libref to query
:return:
### Response:
def datasets(self, libref: str = '') -> str:
"""
This method is used to query a libref. The results show information about the libref including members.
:param libref: the libref to query
:return:
"""
code = "proc datasets"
if libref:
code += " dd=" + libref
code += "; quit;"
if self.nosub:
print(code)
else:
if self.results.lower() == 'html':
ll = self._io.submit(code, "html")
if not self.batch:
self.DISPLAY(self.HTML(ll['LST']))
else:
return ll
else:
ll = self._io.submit(code, "text")
if self.batch:
return ll['LOG'].rsplit(";*\';*\";*/;\n")[0]
else:
print(ll['LOG'].rsplit(";*\';*\";*/;\n")[0]) |
def format_text(text):
    """Remove newlines, but preserve paragraph breaks; also wrap any bare
    http(s) URLs in coloured HTML anchor tags."""
    # Collapse whitespace runs inside each paragraph to single spaces,
    # then rejoin paragraphs and drop trailing newlines.
    paragraphs = [" ".join(chunk.split()) for chunk in text.split("\n\n")]
    result = "\n\n".join(paragraphs).rstrip("\n")
    # URL pattern with a lookahead that skips URLs already inside a tag.
    pattern = (r"(https?:\/\/(?:w{1,3}.)?[^\s]*?(?:\.[a-z]+)+)"
               r"(?![^<]*?(?:<\/\w+>|\/?>))")
    if re.search(pattern, result):
        html = r"<a href='\1'><font color='FF00CC'>\1</font></a>"
        result = re.sub(pattern, html, result)
    return result
### Input:
Remove newlines, but preserve paragraphs
### Response:
def format_text(text):
"""Remove newlines, but preserve paragraphs"""
result = ""
for paragraph in text.split("\n\n"):
result += " ".join(paragraph.split()) + "\n\n"
result = result.rstrip("\n") # Remove last newlines
# converting links to HTML
pattern = r"(https?:\/\/(?:w{1,3}.)?[^\s]*?(?:\.[a-z]+)+)"
pattern += r"(?![^<]*?(?:<\/\w+>|\/?>))"
if re.search(pattern, result):
html = r"<a href='\1'><font color='FF00CC'>\1</font></a>"
result = re.sub(pattern, html, result)
return result |
def scaleToSeconds(requestContext, seriesList, seconds):
    """
    Takes one metric or a wildcard seriesList and returns "value per seconds"
    where seconds is a last argument to this functions.
    Useful in conjunction with derivative or integral function if you want
    to normalize its result to a known resolution for arbitrary retentions
    """
    for series in seriesList:
        # Rename the series so the applied transform is visible to callers.
        new_name = "scaleToSeconds(%s,%d)" % (series.name, seconds)
        series.name = new_name
        series.pathExpression = new_name
        # Convert per-step values into per-`seconds` values.
        scale = seconds * 1.0 / series.step
        for index, datapoint in enumerate(series):
            series[index] = safeMul(datapoint, scale)
    return seriesList
where seconds is a last argument to this functions.
Useful in conjunction with derivative or integral function if you want
to normalize its result to a known resolution for arbitrary retentions | Below is the the instruction that describes the task:
### Input:
Takes one metric or a wildcard seriesList and returns "value per seconds"
where seconds is a last argument to this functions.
Useful in conjunction with derivative or integral function if you want
to normalize its result to a known resolution for arbitrary retentions
### Response:
def scaleToSeconds(requestContext, seriesList, seconds):
"""
Takes one metric or a wildcard seriesList and returns "value per seconds"
where seconds is a last argument to this functions.
Useful in conjunction with derivative or integral function if you want
to normalize its result to a known resolution for arbitrary retentions
"""
for series in seriesList:
series.name = "scaleToSeconds(%s,%d)" % (series.name, seconds)
series.pathExpression = series.name
factor = seconds * 1.0 / series.step
for i, value in enumerate(series):
series[i] = safeMul(value, factor)
return seriesList |
def on_recv_rsp(self, rsp_pb):
    """Receive-response callback: unpack the account push and forward it
    to the notify object, if one is registered."""
    ret_code, msg, _ = SubAccPush.unpack_rsp(rsp_pb)
    notify_target = self._notify_obj
    if notify_target is not None:
        notify_target.on_async_sub_acc_push(ret_code, msg)
    return ret_code, msg
### Input:
receive response callback function
### Response:
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, msg, _= SubAccPush.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_sub_acc_push(ret_code, msg)
return ret_code, msg |
def notify_done(self, error=False, run_done_callbacks=True):
    """Mark a session done and fire the done callbacks when appropriate.

    On error, force-complete every session and clear the session count.
    Otherwise decrement the session count and bail out early while any
    session is still in flight. In either completion case the done
    callbacks (if requested) run and the done event is set.
    """
    if error:
        # Abort path: force-complete everything so waiters are released.
        for session in self._sessions.values():
            session.set_done()
        self._session_count = 0
    else:
        self._update_session_count(-1)
        # Any still-running session means we are not globally done yet.
        if any(not session.is_done() for session in self._sessions.values()):
            return
    if run_done_callbacks:
        self._run_done_callbacks()
    self._done_event.set()
then run the done callbacks | Below is the the instruction that describes the task:
### Input:
if error clear all sessions otherwise check to see if all other sessions are complete
then run the done callbacks
### Response:
def notify_done(self, error=False, run_done_callbacks=True):
''' if error clear all sessions otherwise check to see if all other sessions are complete
then run the done callbacks
'''
if error:
for _session in self._sessions.values():
_session.set_done()
self._session_count = 0
else:
self._update_session_count(-1)
for _session in self._sessions.values():
if not _session.is_done():
return
if run_done_callbacks:
self._run_done_callbacks()
self._done_event.set() |
def plot_network(
        self, state_sizes=None, state_scale=1.0, state_colors='#ff5500', state_labels='auto',
        arrow_scale=1.0, arrow_curvature=1.0, arrow_labels='weights', arrow_label_format='%10.2f',
        max_width=12, max_height=12, figpadding=0.2, xticks=False, yticks=False, show_frame=False,
        **textkwargs):
    """
    Draws a network using discs and curved arrows.
    The thicknesses and labels of the arrows are taken from the off-diagonal matrix elements
    in A.

    Parameters
    ----------
    state_sizes : array-like of length n, optional
        Relative disc areas, normalised by their maximum; None gives all
        discs equal size.
    state_scale : float
        Global scaling factor for disc sizes.
    state_colors : str, list of str, or 1D numeric array
        One colour for all discs, one per disc, or numeric values mapped
        through the binary colormap (after normalising by their maximum).
    state_labels : 'auto', None, or list of str
        'auto' numbers the states 0..n-1; None suppresses node labels.
    arrow_scale, arrow_curvature : float
        Scaling factors for arrow width and curvature.
    arrow_labels : 'weights', None, or ndarray
        Text drawn on the arrows; 'weights' uses the entries of self.A.
    arrow_label_format : str
        %-style format applied to each arrow label.
    max_width, max_height : int
        Maximum figure dimensions.
    figpadding : float
        Fractional padding added around the node positions.
    xticks, yticks, show_frame : bool
        Axis decoration toggles.
    textkwargs
        Forwarded to matplotlib text(); the extra key 'arrow_label_size'
        is popped off and used for the arrow labels instead.

    Returns
    -------
    The matplotlib figure the network was drawn into.
    """
    # Set the default values for the text dictionary
    from matplotlib import pyplot as _plt
    textkwargs.setdefault('size', None)
    textkwargs.setdefault('horizontalalignment', 'center')
    textkwargs.setdefault('verticalalignment', 'center')
    textkwargs.setdefault('color', 'black')
    # remove the temporary key 'arrow_label_size' as it cannot be parsed by plt.text!
    arrow_label_size = textkwargs.pop('arrow_label_size', textkwargs['size'])
    if self.pos is None:
        self.layout_automatic()
    # number of nodes
    n = len(self.pos)
    # get bounds and pad figure
    xmin = _np.min(self.pos[:, 0])
    xmax = _np.max(self.pos[:, 0])
    Dx = xmax - xmin
    xmin -= Dx * figpadding
    xmax += Dx * figpadding
    Dx *= 1 + figpadding
    ymin = _np.min(self.pos[:, 1])
    ymax = _np.max(self.pos[:, 1])
    Dy = ymax - ymin
    ymin -= Dy * figpadding
    ymax += Dy * figpadding
    Dy *= 1 + figpadding
    # sizes of nodes
    if state_sizes is None:
        state_sizes = 0.5 * state_scale * \
            min(Dx, Dy)**2 * _np.ones(n) / float(n)
    else:
        state_sizes = 0.5 * state_scale * \
            min(Dx, Dy)**2 * state_sizes / (_np.max(state_sizes) * float(n))
    # automatic arrow rescaling
    # (normalise by the largest off-diagonal weight and network size)
    arrow_scale *= 1.0 / \
        (_np.max(self.A - _np.diag(_np.diag(self.A))) * _sqrt(n))
    # size figure
    if (Dx / max_width > Dy / max_height):
        figsize = (max_width, Dy * (max_width / Dx))
    else:
        figsize = (Dx / Dy * max_height, max_height)
    if self.ax is None:
        logger.debug("creating new figure")
        fig = _plt.figure(None, figsize=figsize)
        self.ax = fig.add_subplot(111)
    else:
        fig = self.ax.figure
        # Re-use an existing axes: stretch the data limits so the drawing
        # respects the axes' aspect ratio.
        window_extend = self.ax.get_window_extent()
        axes_ratio = window_extend.height / window_extend.width
        data_ratio = (ymax - ymin) / (xmax - xmin)
        q = axes_ratio / data_ratio
        if q > 1.0:
            ymin *= q
            ymax *= q
        else:
            xmin /= q
            xmax /= q
    if not xticks:
        self.ax.get_xaxis().set_ticks([])
    if not yticks:
        self.ax.get_yaxis().set_ticks([])
    # show or suppress frame
    self.ax.set_frame_on(show_frame)
    # set node labels
    if state_labels is None:
        pass
    elif isinstance(state_labels, str) and state_labels == 'auto':
        state_labels = [str(i) for i in _np.arange(n)]
    else:
        if len(state_labels) != n:
            raise ValueError("length of state_labels({}) has to match length of states({})."
                             .format(len(state_labels), n))
    # set node colors
    if state_colors is None:
        state_colors = '#ff5500' # None is not acceptable
    if isinstance(state_colors, str):
        state_colors = [state_colors] * n
    if isinstance(state_colors, list) and not len(state_colors) == n:
        raise ValueError("Mistmatch between nstates and nr. state_colors (%u vs %u)" % (n, len(state_colors)))
    try:
        # Numeric colour values: map through the binary colormap.
        colorscales = _types.ensure_ndarray(state_colors, ndim=1, kind='numeric')
        colorscales /= colorscales.max()
        state_colors = [_plt.cm.binary(int(256.0 * colorscales[i])) for i in range(n)]
    except AssertionError:
        # assume we have a list of strings now.
        logger.debug("could not cast 'state_colors' to numeric values.")
    # set arrow labels
    if isinstance(arrow_labels, _np.ndarray):
        L = arrow_labels
        if isinstance(arrow_labels[0,0], str):
            arrow_label_format = '%s'
    elif isinstance(arrow_labels, str) and arrow_labels.lower() == 'weights':
        L = self.A[:, :]
    elif arrow_labels is None:
        L = _np.empty(_np.shape(self.A), dtype=object)
        L[:, :] = ''
        arrow_label_format = '%s'
    else:
        raise ValueError('invalid arrow labels')
    # draw circles
    circles = []
    for i in range(n):
        # choose color
        c = _plt.Circle(
            self.pos[i], radius=_sqrt(
                0.5 * state_sizes[i]) / 2.0,
            color=state_colors[i], zorder=2)
        circles.append(c)
        self.ax.add_artist(c)
        # add annotation
        if state_labels is not None:
            self.ax.text(self.pos[i][0], self.pos[i][1], state_labels[i], zorder=3, **textkwargs)
    assert len(circles) == n, "%i != %i" % (len(circles), n)
    # draw arrows
    # (both directions i->j and j->i, clipped against the node discs)
    for i in range(n):
        for j in range(i + 1, n):
            if (abs(self.A[i, j]) > 0):
                self._draw_arrow(
                    self.pos[i, 0], self.pos[i, 1], self.pos[j, 0], self.pos[j, 1], Dx, Dy,
                    label=arrow_label_format%L[i, j], width=arrow_scale * self.A[i, j],
                    arrow_curvature=arrow_curvature, patchA=circles[i], patchB=circles[j],
                    shrinkA=3, shrinkB=0, arrow_label_size=arrow_label_size)
            if (abs(self.A[j, i]) > 0):
                self._draw_arrow(
                    self.pos[j, 0], self.pos[j, 1], self.pos[i, 0], self.pos[i, 1], Dx, Dy,
                    label=arrow_label_format%L[j, i], width=arrow_scale * self.A[j, i],
                    arrow_curvature=arrow_curvature, patchA=circles[j], patchB=circles[i],
                    shrinkA=3, shrinkB=0, arrow_label_size=arrow_label_size)
    # plot
    self.ax.set_xlim(xmin, xmax)
    self.ax.set_ylim(ymin, ymax)
    return fig
The thicknesses and labels of the arrows are taken from the off-diagonal matrix elements
in A. | Below is the the instruction that describes the task:
### Input:
Draws a network using discs and curved arrows.
The thicknesses and labels of the arrows are taken from the off-diagonal matrix elements
in A.
### Response:
def plot_network(
self, state_sizes=None, state_scale=1.0, state_colors='#ff5500', state_labels='auto',
arrow_scale=1.0, arrow_curvature=1.0, arrow_labels='weights', arrow_label_format='%10.2f',
max_width=12, max_height=12, figpadding=0.2, xticks=False, yticks=False, show_frame=False,
**textkwargs):
"""
Draws a network using discs and curved arrows.
The thicknesses and labels of the arrows are taken from the off-diagonal matrix elements
in A.
"""
# Set the default values for the text dictionary
from matplotlib import pyplot as _plt
textkwargs.setdefault('size', None)
textkwargs.setdefault('horizontalalignment', 'center')
textkwargs.setdefault('verticalalignment', 'center')
textkwargs.setdefault('color', 'black')
# remove the temporary key 'arrow_label_size' as it cannot be parsed by plt.text!
arrow_label_size = textkwargs.pop('arrow_label_size', textkwargs['size'])
if self.pos is None:
self.layout_automatic()
# number of nodes
n = len(self.pos)
# get bounds and pad figure
xmin = _np.min(self.pos[:, 0])
xmax = _np.max(self.pos[:, 0])
Dx = xmax - xmin
xmin -= Dx * figpadding
xmax += Dx * figpadding
Dx *= 1 + figpadding
ymin = _np.min(self.pos[:, 1])
ymax = _np.max(self.pos[:, 1])
Dy = ymax - ymin
ymin -= Dy * figpadding
ymax += Dy * figpadding
Dy *= 1 + figpadding
# sizes of nodes
if state_sizes is None:
state_sizes = 0.5 * state_scale * \
min(Dx, Dy)**2 * _np.ones(n) / float(n)
else:
state_sizes = 0.5 * state_scale * \
min(Dx, Dy)**2 * state_sizes / (_np.max(state_sizes) * float(n))
# automatic arrow rescaling
arrow_scale *= 1.0 / \
(_np.max(self.A - _np.diag(_np.diag(self.A))) * _sqrt(n))
# size figure
if (Dx / max_width > Dy / max_height):
figsize = (max_width, Dy * (max_width / Dx))
else:
figsize = (Dx / Dy * max_height, max_height)
if self.ax is None:
logger.debug("creating new figure")
fig = _plt.figure(None, figsize=figsize)
self.ax = fig.add_subplot(111)
else:
fig = self.ax.figure
window_extend = self.ax.get_window_extent()
axes_ratio = window_extend.height / window_extend.width
data_ratio = (ymax - ymin) / (xmax - xmin)
q = axes_ratio / data_ratio
if q > 1.0:
ymin *= q
ymax *= q
else:
xmin /= q
xmax /= q
if not xticks:
self.ax.get_xaxis().set_ticks([])
if not yticks:
self.ax.get_yaxis().set_ticks([])
# show or suppress frame
self.ax.set_frame_on(show_frame)
# set node labels
if state_labels is None:
pass
elif isinstance(state_labels, str) and state_labels == 'auto':
state_labels = [str(i) for i in _np.arange(n)]
else:
if len(state_labels) != n:
raise ValueError("length of state_labels({}) has to match length of states({})."
.format(len(state_labels), n))
# set node colors
if state_colors is None:
state_colors = '#ff5500' # None is not acceptable
if isinstance(state_colors, str):
state_colors = [state_colors] * n
if isinstance(state_colors, list) and not len(state_colors) == n:
raise ValueError("Mistmatch between nstates and nr. state_colors (%u vs %u)" % (n, len(state_colors)))
try:
colorscales = _types.ensure_ndarray(state_colors, ndim=1, kind='numeric')
colorscales /= colorscales.max()
state_colors = [_plt.cm.binary(int(256.0 * colorscales[i])) for i in range(n)]
except AssertionError:
# assume we have a list of strings now.
logger.debug("could not cast 'state_colors' to numeric values.")
# set arrow labels
if isinstance(arrow_labels, _np.ndarray):
L = arrow_labels
if isinstance(arrow_labels[0,0], str):
arrow_label_format = '%s'
elif isinstance(arrow_labels, str) and arrow_labels.lower() == 'weights':
L = self.A[:, :]
elif arrow_labels is None:
L = _np.empty(_np.shape(self.A), dtype=object)
L[:, :] = ''
arrow_label_format = '%s'
else:
raise ValueError('invalid arrow labels')
# draw circles
circles = []
for i in range(n):
# choose color
c = _plt.Circle(
self.pos[i], radius=_sqrt(
0.5 * state_sizes[i]) / 2.0,
color=state_colors[i], zorder=2)
circles.append(c)
self.ax.add_artist(c)
# add annotation
if state_labels is not None:
self.ax.text(self.pos[i][0], self.pos[i][1], state_labels[i], zorder=3, **textkwargs)
assert len(circles) == n, "%i != %i" % (len(circles), n)
# draw arrows
for i in range(n):
for j in range(i + 1, n):
if (abs(self.A[i, j]) > 0):
self._draw_arrow(
self.pos[i, 0], self.pos[i, 1], self.pos[j, 0], self.pos[j, 1], Dx, Dy,
label=arrow_label_format%L[i, j], width=arrow_scale * self.A[i, j],
arrow_curvature=arrow_curvature, patchA=circles[i], patchB=circles[j],
shrinkA=3, shrinkB=0, arrow_label_size=arrow_label_size)
if (abs(self.A[j, i]) > 0):
self._draw_arrow(
self.pos[j, 0], self.pos[j, 1], self.pos[i, 0], self.pos[i, 1], Dx, Dy,
label=arrow_label_format%L[j, i], width=arrow_scale * self.A[j, i],
arrow_curvature=arrow_curvature, patchA=circles[j], patchB=circles[i],
shrinkA=3, shrinkB=0, arrow_label_size=arrow_label_size)
# plot
self.ax.set_xlim(xmin, xmax)
self.ax.set_ylim(ymin, ymax)
return fig |
def numpy():
    '''Lazily import the numpy module, caching it on LazyImport.'''
    if LazyImport.numpy_module is not None:
        return LazyImport.numpy_module
    # Prefer PyPy's numpypy port, then fall back to regular numpy.
    for module_name in ('numpypy', 'numpy'):
        try:
            LazyImport.numpy_module = __import__(module_name)
            return LazyImport.numpy_module
        except ImportError:
            continue
    raise ImportError('The numpy module is required')
### Input:
Lazily import the numpy module
### Response:
def numpy():
'''Lazily import the numpy module'''
if LazyImport.numpy_module is None:
try:
LazyImport.numpy_module = __import__('numpypy')
except ImportError:
try:
LazyImport.numpy_module = __import__('numpy')
except ImportError:
raise ImportError('The numpy module is required')
return LazyImport.numpy_module |
def config_path(self, value):
    """Set config_path.

    A falsy ``value`` (e.g. None or '') is normalised to the empty
    string. Validation happens *before* assignment, so a failed set
    leaves the previous path intact (the original assigned first and
    raised afterwards, corrupting state).

    :raises BadArgumentError: if ``value`` is truthy but not a string
    """
    value = value or ''
    if not isinstance(value, str):
        # Reject non-strings without clobbering the existing path.
        raise BadArgumentError("config_path must be string: {}".format(
            value))
    self._config_path = value
### Input:
Set config_path
### Response:
def config_path(self, value):
"""Set config_path"""
self._config_path = value or ''
if not isinstance(self._config_path, str):
raise BadArgumentError("config_path must be string: {}".format(
self._config_path)) |
def get_missing_options(self):
    """
    Get a list of options that are required, but with default values
    of None.
    """
    missing = []
    for option in self._options.values():
        if option.required and option.value is None:
            missing.append(option.name)
    return missing
of None. | Below is the the instruction that describes the task:
### Input:
Get a list of options that are required, but with default values
of None.
### Response:
def get_missing_options(self):
"""
Get a list of options that are required, but with default values
of None.
"""
return [option.name for option in self._options.values() if option.required and option.value is None] |
def xlsx_to_csv(self, infile, worksheet=0, delimiter=","):
    """ Convert xlsx to easier format first, since we want to use the
    convenience of the CSV library

    :param infile: NOTE(review): currently unused -- the workbook is
        loaded from self.getInputFile() instead; confirm which source
        is intended.
    :param worksheet: index of the worksheet to extract (default: first)
    :param delimiter: field separator for the generated CSV text
    :return: a StringIO buffer, rewound to the start of the CSV data
    """
    # Python 2 code: relies on ``print >>buffer`` and types.StringTypes.
    wb = load_workbook(self.getInputFile())
    sheet = wb.worksheets[worksheet]
    buffer = StringIO()
    # extract all rows
    for n, row in enumerate(sheet.rows):
        line = []
        for cell in row:
            value = cell.value
            if type(value) in types.StringTypes:
                value = value.encode("utf8")  # unicode -> UTF-8 bytes
            if value is None:
                value = ""  # empty cells become empty fields
            line.append(str(value))
        print >>buffer, delimiter.join(line)
    buffer.seek(0)  # rewind so callers can read from the top
    return buffer
convenience of the CSV library | Below is the the instruction that describes the task:
### Input:
Convert xlsx to easier format first, since we want to use the
convenience of the CSV library
### Response:
def xlsx_to_csv(self, infile, worksheet=0, delimiter=","):
""" Convert xlsx to easier format first, since we want to use the
convenience of the CSV library
"""
wb = load_workbook(self.getInputFile())
sheet = wb.worksheets[worksheet]
buffer = StringIO()
# extract all rows
for n, row in enumerate(sheet.rows):
line = []
for cell in row:
value = cell.value
if type(value) in types.StringTypes:
value = value.encode("utf8")
if value is None:
value = ""
line.append(str(value))
print >>buffer, delimiter.join(line)
buffer.seek(0)
return buffer |
def search(self, name):
    """Search node with given name based on regexp; the basic method
    (find) uses equality instead.  Returns the first match or None."""
    matcher = re.compile(name)
    for candidate in self.climb():
        if matcher.search(candidate.name):
            return candidate
    return None
### Input:
Search node with given name based on regexp, basic method (find) uses equality
### Response:
def search(self, name):
""" Search node with given name based on regexp, basic method (find) uses equality"""
for node in self.climb():
if re.search(name, node.name):
return node
return None |
def _total_seconds(t):
'''
Takes a `datetime.timedelta` object and returns the delta in seconds.
>>> _total_seconds(datetime.timedelta(23, 42, 123456))
1987242
>>> _total_seconds(datetime.timedelta(23, 42, 654321))
1987243
'''
return sum([
int(t.days * 86400 + t.seconds),
int(round(t.microseconds / 1000000.0))
]) | Takes a `datetime.timedelta` object and returns the delta in seconds.
>>> _total_seconds(datetime.timedelta(23, 42, 123456))
1987242
>>> _total_seconds(datetime.timedelta(23, 42, 654321))
1987243 | Below is the the instruction that describes the task:
### Input:
Takes a `datetime.timedelta` object and returns the delta in seconds.
>>> _total_seconds(datetime.timedelta(23, 42, 123456))
1987242
>>> _total_seconds(datetime.timedelta(23, 42, 654321))
1987243
### Response:
def _total_seconds(t):
'''
Takes a `datetime.timedelta` object and returns the delta in seconds.
>>> _total_seconds(datetime.timedelta(23, 42, 123456))
1987242
>>> _total_seconds(datetime.timedelta(23, 42, 654321))
1987243
'''
return sum([
int(t.days * 86400 + t.seconds),
int(round(t.microseconds / 1000000.0))
]) |
def calc_abc_interval(self,
                      conf_percentage,
                      init_vals,
                      epsilon=0.001,
                      **fit_kwargs):
    """
    Calculates Approximate Bootstrap Confidence Intervals for one's model.
    Parameters
    ----------
    conf_percentage : scalar in the interval (0.0, 100.0).
        Denotes the confidence-level for the returned endpoints. For
        instance, to calculate a 95% confidence interval, pass `95`.
    init_vals : 1D ndarray.
        The initial values used to estimate the one's choice model.
    epsilon : positive float, optional.
        Should denote the 'very small' value being used to calculate the
        desired finite difference approximations to the various influence
        functions. Should be close to zero.
        Default == 0.001.
    fit_kwargs : additional keyword arguments, optional.
        Should contain any additional kwargs used to alter the default
        behavior of `model_obj.fit_mle` and thereby enforce conformity with
        how the MLE was obtained. Will be passed directly to
        `model_obj.fit_mle`.
    Returns
    -------
    None. Will store the ABC intervals as `self.abc_interval`.
    """
    # Progress message with a timestamp: the ABC computation can be slow.
    print("Calculating Approximate Bootstrap Confidence (ABC) Intervals")
    print(time.strftime("%a %m-%d-%Y %I:%M%p"))
    sys.stdout.flush()
    # Get the alpha % that corresponds to the given confidence percentage.
    alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
    # Create the column names for the dataframe of confidence intervals
    single_column_names =\
        ['{:.3g}%'.format(alpha / 2.0),
         '{:.3g}%'.format(100 - alpha / 2.0)]
    # Calculate the ABC confidence intervals
    conf_intervals =\
        abc.calc_abc_interval(self.model_obj,
                              self.mle_params.values,
                              init_vals,
                              conf_percentage,
                              epsilon=epsilon,
                              **fit_kwargs)
    # Store the ABC confidence intervals, one row per estimated parameter.
    self.abc_interval = pd.DataFrame(conf_intervals.T,
                                     index=self.mle_params.index,
                                     columns=single_column_names)
    return None
Parameters
----------
conf_percentage : scalar in the interval (0.0, 100.0).
Denotes the confidence-level for the returned endpoints. For
instance, to calculate a 95% confidence interval, pass `95`.
init_vals : 1D ndarray.
The initial values used to estimate the one's choice model.
epsilon : positive float, optional.
Should denote the 'very small' value being used to calculate the
desired finite difference approximations to the various influence
functions. Should be close to zero.
Default == sys.float_info.epsilon.
fit_kwargs : additional keyword arguments, optional.
Should contain any additional kwargs used to alter the default
behavior of `model_obj.fit_mle` and thereby enforce conformity with
how the MLE was obtained. Will be passed directly to
`model_obj.fit_mle`.
Returns
-------
None. Will store the ABC intervals as `self.abc_interval`. | Below is the the instruction that describes the task:
### Input:
Calculates Approximate Bootstrap Confidence Intervals for one's model.
Parameters
----------
conf_percentage : scalar in the interval (0.0, 100.0).
Denotes the confidence-level for the returned endpoints. For
instance, to calculate a 95% confidence interval, pass `95`.
init_vals : 1D ndarray.
The initial values used to estimate the one's choice model.
epsilon : positive float, optional.
Should denote the 'very small' value being used to calculate the
desired finite difference approximations to the various influence
functions. Should be close to zero.
Default == sys.float_info.epsilon.
fit_kwargs : additional keyword arguments, optional.
Should contain any additional kwargs used to alter the default
behavior of `model_obj.fit_mle` and thereby enforce conformity with
how the MLE was obtained. Will be passed directly to
`model_obj.fit_mle`.
Returns
-------
None. Will store the ABC intervals as `self.abc_interval`.
### Response:
def calc_abc_interval(self,
conf_percentage,
init_vals,
epsilon=0.001,
**fit_kwargs):
"""
Calculates Approximate Bootstrap Confidence Intervals for one's model.
Parameters
----------
conf_percentage : scalar in the interval (0.0, 100.0).
Denotes the confidence-level for the returned endpoints. For
instance, to calculate a 95% confidence interval, pass `95`.
init_vals : 1D ndarray.
The initial values used to estimate the one's choice model.
epsilon : positive float, optional.
Should denote the 'very small' value being used to calculate the
desired finite difference approximations to the various influence
functions. Should be close to zero.
Default == sys.float_info.epsilon.
fit_kwargs : additional keyword arguments, optional.
Should contain any additional kwargs used to alter the default
behavior of `model_obj.fit_mle` and thereby enforce conformity with
how the MLE was obtained. Will be passed directly to
`model_obj.fit_mle`.
Returns
-------
None. Will store the ABC intervals as `self.abc_interval`.
"""
print("Calculating Approximate Bootstrap Confidence (ABC) Intervals")
print(time.strftime("%a %m-%d-%Y %I:%M%p"))
sys.stdout.flush()
# Get the alpha % that corresponds to the given confidence percentage.
alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
# Create the column names for the dataframe of confidence intervals
single_column_names =\
['{:.3g}%'.format(alpha / 2.0),
'{:.3g}%'.format(100 - alpha / 2.0)]
# Calculate the ABC confidence intervals
conf_intervals =\
abc.calc_abc_interval(self.model_obj,
self.mle_params.values,
init_vals,
conf_percentage,
epsilon=epsilon,
**fit_kwargs)
# Store the ABC confidence intervals
self.abc_interval = pd.DataFrame(conf_intervals.T,
index=self.mle_params.index,
columns=single_column_names)
return None |
def get_jids():
'''
Return a dict mapping all job ids to job information
'''
ret = {}
for jid, job, _, _ in _walk_through(_job_dir()):
ret[jid] = salt.utils.jid.format_jid_instance(jid, job)
if __opts__.get('job_cache_store_endtime'):
endtime = get_endtime(jid)
if endtime:
ret[jid]['EndTime'] = endtime
return ret | Return a dict mapping all job ids to job information | Below is the the instruction that describes the task:
### Input:
Return a dict mapping all job ids to job information
### Response:
def get_jids():
'''
Return a dict mapping all job ids to job information
'''
ret = {}
for jid, job, _, _ in _walk_through(_job_dir()):
ret[jid] = salt.utils.jid.format_jid_instance(jid, job)
if __opts__.get('job_cache_store_endtime'):
endtime = get_endtime(jid)
if endtime:
ret[jid]['EndTime'] = endtime
return ret |
def get_remembered_identity(self, subject_context):
"""
Using the specified subject context map intended to build a ``Subject``
instance, returns any previously remembered identifiers for the subject
for automatic identity association (aka 'Remember Me').
"""
rmm = self.remember_me_manager
if rmm is not None:
try:
return rmm.get_remembered_identifiers(subject_context)
except Exception as ex:
msg = ("Delegate RememberMeManager instance of type [" +
rmm.__class__.__name__ + "] raised an exception during "
"get_remembered_identifiers().")
logger.warning(msg, exc_info=True)
return None | Using the specified subject context map intended to build a ``Subject``
instance, returns any previously remembered identifiers for the subject
for automatic identity association (aka 'Remember Me'). | Below is the the instruction that describes the task:
### Input:
Using the specified subject context map intended to build a ``Subject``
instance, returns any previously remembered identifiers for the subject
for automatic identity association (aka 'Remember Me').
### Response:
def get_remembered_identity(self, subject_context):
"""
Using the specified subject context map intended to build a ``Subject``
instance, returns any previously remembered identifiers for the subject
for automatic identity association (aka 'Remember Me').
"""
rmm = self.remember_me_manager
if rmm is not None:
try:
return rmm.get_remembered_identifiers(subject_context)
except Exception as ex:
msg = ("Delegate RememberMeManager instance of type [" +
rmm.__class__.__name__ + "] raised an exception during "
"get_remembered_identifiers().")
logger.warning(msg, exc_info=True)
return None |
def validate(self):
"""
Returns `True` if valid.
"""
# Don't call the validator again, if it was already called for the
# current input.
if self.validation_state != ValidationState.UNKNOWN:
return self.validation_state == ValidationState.VALID
# Validate first. If not valid, set validation exception.
if self.validator:
try:
self.validator.validate(self.document)
except ValidationError as e:
# Set cursor position (don't allow invalid values.)
cursor_position = e.cursor_position
self.cursor_position = min(max(0, cursor_position), len(self.text))
self.validation_state = ValidationState.INVALID
self.validation_error = e
return False
self.validation_state = ValidationState.VALID
self.validation_error = None
return True | Returns `True` if valid. | Below is the the instruction that describes the task:
### Input:
Returns `True` if valid.
### Response:
def validate(self):
"""
Returns `True` if valid.
"""
# Don't call the validator again, if it was already called for the
# current input.
if self.validation_state != ValidationState.UNKNOWN:
return self.validation_state == ValidationState.VALID
# Validate first. If not valid, set validation exception.
if self.validator:
try:
self.validator.validate(self.document)
except ValidationError as e:
# Set cursor position (don't allow invalid values.)
cursor_position = e.cursor_position
self.cursor_position = min(max(0, cursor_position), len(self.text))
self.validation_state = ValidationState.INVALID
self.validation_error = e
return False
self.validation_state = ValidationState.VALID
self.validation_error = None
return True |
def parse_pipeline(pipeline_str):
"""Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list
"""
if os.path.exists(pipeline_str):
logger.debug("Found pipeline file: {}".format(pipeline_str))
with open(pipeline_str) as fh:
pipeline_str = "".join([x.strip() for x in fh.readlines()])
logger.info(colored_print("Resulting pipeline string:\n"))
logger.info(colored_print(pipeline_str + "\n"))
# Perform pipeline insanity checks
insanity_checks(pipeline_str)
logger.debug("Parsing pipeline string: {}".format(pipeline_str))
pipeline_links = []
lane = 1
# Add unique identifiers to each process to allow a correct connection
# between forks with same processes
pipeline_str_modified, identifiers_to_tags = add_unique_identifiers(
pipeline_str)
# Get number of forks in the pipeline
nforks = pipeline_str_modified.count(FORK_TOKEN)
logger.debug("Found {} fork(s)".format(nforks))
# If there are no forks, connect the pipeline as purely linear
if not nforks:
logger.debug("Detected linear pipeline string : {}".format(
pipeline_str))
linear_pipeline = ["__init__"] + pipeline_str_modified.split()
pipeline_links.extend(linear_connection(linear_pipeline, lane))
# Removes unique identifiers used for correctly assign fork parents with
# a possible same process name
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links
for i in range(nforks):
logger.debug("Processing fork {} in lane {}".format(i, lane))
# Split the pipeline at each fork start position. fields[-1] will
# hold the process after the fork. fields[-2] will hold the processes
# before the fork.
fields = pipeline_str_modified.split(FORK_TOKEN, i + 1)
# Get the processes before the fork. This may be empty when the
# fork is at the beginning of the pipeline.
previous_process = fields[-2].split(LANE_TOKEN)[-1].split()
logger.debug("Previous processes string: {}".format(fields[-2]))
logger.debug("Previous processes list: {}".format(previous_process))
# Get lanes after the fork
next_lanes = get_lanes(fields[-1])
logger.debug("Next lanes object: {}".format(next_lanes))
# Get the immediate targets of the fork
fork_sink = [x[0] for x in next_lanes]
logger.debug("The fork sinks into the processes: {}".format(fork_sink))
# The first fork is a special case, where the processes before AND
# after the fork (until the start of another fork) are added to
# the ``pipeline_links`` variable. Otherwise, only the processes
# after the fork will be added
if i == 0:
# If there are no previous process, the fork is at the beginning
# of the pipeline string. In this case, inject the special
# "init" process.
if not previous_process:
previous_process = ["__init__"]
lane = 0
else:
previous_process = ["__init__"] + previous_process
# Add the linear modules before the fork
pipeline_links.extend(
linear_connection(previous_process, lane))
fork_source = previous_process[-1]
logger.debug("Fork source is set to: {}".format(fork_source))
fork_lane = get_source_lane(previous_process, pipeline_links)
logger.debug("Fork lane is set to: {}".format(fork_lane))
# Add the forking modules
pipeline_links.extend(
fork_connection(fork_source, fork_sink, fork_lane, lane))
# Add the linear connections in the subsequent lanes
pipeline_links.extend(
linear_lane_connection(next_lanes, lane))
lane += len(fork_sink)
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links | Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list | Below is the the instruction that describes the task:
### Input:
Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list
### Response:
def parse_pipeline(pipeline_str):
"""Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list
"""
if os.path.exists(pipeline_str):
logger.debug("Found pipeline file: {}".format(pipeline_str))
with open(pipeline_str) as fh:
pipeline_str = "".join([x.strip() for x in fh.readlines()])
logger.info(colored_print("Resulting pipeline string:\n"))
logger.info(colored_print(pipeline_str + "\n"))
# Perform pipeline insanity checks
insanity_checks(pipeline_str)
logger.debug("Parsing pipeline string: {}".format(pipeline_str))
pipeline_links = []
lane = 1
# Add unique identifiers to each process to allow a correct connection
# between forks with same processes
pipeline_str_modified, identifiers_to_tags = add_unique_identifiers(
pipeline_str)
# Get number of forks in the pipeline
nforks = pipeline_str_modified.count(FORK_TOKEN)
logger.debug("Found {} fork(s)".format(nforks))
# If there are no forks, connect the pipeline as purely linear
if not nforks:
logger.debug("Detected linear pipeline string : {}".format(
pipeline_str))
linear_pipeline = ["__init__"] + pipeline_str_modified.split()
pipeline_links.extend(linear_connection(linear_pipeline, lane))
# Removes unique identifiers used for correctly assign fork parents with
# a possible same process name
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links
for i in range(nforks):
logger.debug("Processing fork {} in lane {}".format(i, lane))
# Split the pipeline at each fork start position. fields[-1] will
# hold the process after the fork. fields[-2] will hold the processes
# before the fork.
fields = pipeline_str_modified.split(FORK_TOKEN, i + 1)
# Get the processes before the fork. This may be empty when the
# fork is at the beginning of the pipeline.
previous_process = fields[-2].split(LANE_TOKEN)[-1].split()
logger.debug("Previous processes string: {}".format(fields[-2]))
logger.debug("Previous processes list: {}".format(previous_process))
# Get lanes after the fork
next_lanes = get_lanes(fields[-1])
logger.debug("Next lanes object: {}".format(next_lanes))
# Get the immediate targets of the fork
fork_sink = [x[0] for x in next_lanes]
logger.debug("The fork sinks into the processes: {}".format(fork_sink))
# The first fork is a special case, where the processes before AND
# after the fork (until the start of another fork) are added to
# the ``pipeline_links`` variable. Otherwise, only the processes
# after the fork will be added
if i == 0:
# If there are no previous process, the fork is at the beginning
# of the pipeline string. In this case, inject the special
# "init" process.
if not previous_process:
previous_process = ["__init__"]
lane = 0
else:
previous_process = ["__init__"] + previous_process
# Add the linear modules before the fork
pipeline_links.extend(
linear_connection(previous_process, lane))
fork_source = previous_process[-1]
logger.debug("Fork source is set to: {}".format(fork_source))
fork_lane = get_source_lane(previous_process, pipeline_links)
logger.debug("Fork lane is set to: {}".format(fork_lane))
# Add the forking modules
pipeline_links.extend(
fork_connection(fork_source, fork_sink, fork_lane, lane))
# Add the linear connections in the subsequent lanes
pipeline_links.extend(
linear_lane_connection(next_lanes, lane))
lane += len(fork_sink)
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links |
def _validate_field(param, fields):
""" Ensure the field exists on the model """
if '/' not in param.field and param.field not in fields:
raise InvalidQueryParams(**{
'detail': 'The filter query param of "%s" is not possible. The '
'resource requested does not have a "%s" field. Please '
'modify your request & retry.' % (param, param.field),
'links': LINK,
'parameter': PARAM,
}) | Ensure the field exists on the model | Below is the the instruction that describes the task:
### Input:
Ensure the field exists on the model
### Response:
def _validate_field(param, fields):
""" Ensure the field exists on the model """
if '/' not in param.field and param.field not in fields:
raise InvalidQueryParams(**{
'detail': 'The filter query param of "%s" is not possible. The '
'resource requested does not have a "%s" field. Please '
'modify your request & retry.' % (param, param.field),
'links': LINK,
'parameter': PARAM,
}) |
def follow_double_underscores(obj, field_name=None, excel_dialect=True, eval_python=False, index_error_value=None):
'''Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators
>>> from django.contrib.auth.models import Permission
>>> import math
>>> p = Permission.objects.all()[0]
>>> follow_double_underscores(p, 'content_type__name') == p.content_type.name
True
>>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name))
True
'''
if not obj:
return obj
if isinstance(field_name, list):
split_fields = field_name
else:
split_fields = re_model_instance_dot.split(field_name)
if False and eval_python:
try:
return eval(field_name, {'datetime': datetime, 'math': math, 'collections': collections}, {'obj': obj})
except IndexError:
return index_error_value
except:
pass
if len(split_fields) <= 1:
if hasattr(obj, split_fields[0]):
value = getattr(obj, split_fields[0])
elif hasattr(obj, split_fields[0] + '_id'):
value = getattr(obj, split_fields[0] + '_id')
elif hasattr(obj, split_fields[0] + '_set'):
value = getattr(obj, split_fields[0] + '_set')
elif split_fields[0] in obj.__dict__:
value = obj.__dict__.get(split_fields[0])
elif eval_python:
value = eval('obj.' + split_fields[0])
else:
return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value)
if value and excel_dialect and isinstance(value, datetime.datetime):
value = value.strftime('%Y-%m-%d %H:%M:%S')
return value
return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value) | Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators
>>> from django.contrib.auth.models import Permission
>>> import math
>>> p = Permission.objects.all()[0]
>>> follow_double_underscores(p, 'content_type__name') == p.content_type.name
True
>>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name))
True | Below is the the instruction that describes the task:
### Input:
Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators
>>> from django.contrib.auth.models import Permission
>>> import math
>>> p = Permission.objects.all()[0]
>>> follow_double_underscores(p, 'content_type__name') == p.content_type.name
True
>>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name))
True
### Response:
def follow_double_underscores(obj, field_name=None, excel_dialect=True, eval_python=False, index_error_value=None):
'''Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators
>>> from django.contrib.auth.models import Permission
>>> import math
>>> p = Permission.objects.all()[0]
>>> follow_double_underscores(p, 'content_type__name') == p.content_type.name
True
>>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name))
True
'''
if not obj:
return obj
if isinstance(field_name, list):
split_fields = field_name
else:
split_fields = re_model_instance_dot.split(field_name)
if False and eval_python:
try:
return eval(field_name, {'datetime': datetime, 'math': math, 'collections': collections}, {'obj': obj})
except IndexError:
return index_error_value
except:
pass
if len(split_fields) <= 1:
if hasattr(obj, split_fields[0]):
value = getattr(obj, split_fields[0])
elif hasattr(obj, split_fields[0] + '_id'):
value = getattr(obj, split_fields[0] + '_id')
elif hasattr(obj, split_fields[0] + '_set'):
value = getattr(obj, split_fields[0] + '_set')
elif split_fields[0] in obj.__dict__:
value = obj.__dict__.get(split_fields[0])
elif eval_python:
value = eval('obj.' + split_fields[0])
else:
return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value)
if value and excel_dialect and isinstance(value, datetime.datetime):
value = value.strftime('%Y-%m-%d %H:%M:%S')
return value
return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value) |
def _domain_event_watchdog_cb(conn, domain, action, opaque):
'''
Domain watchdog events handler
'''
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_WATCHDOG_', action)
}) | Domain watchdog events handler | Below is the the instruction that describes the task:
### Input:
Domain watchdog events handler
### Response:
def _domain_event_watchdog_cb(conn, domain, action, opaque):
'''
Domain watchdog events handler
'''
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_WATCHDOG_', action)
}) |
def compress_amount(n):
"""\
Compress 64-bit integer values, preferring a smaller size for whole
numbers (base-10), so as to achieve run-length encoding gains on real-
world data. The basic algorithm:
* If the amount is 0, return 0
* Divide the amount (in base units) evenly by the largest power of 10
possible; call the exponent e (e is max 9)
* If e<9, the last digit of the resulting number cannot be 0; store it
as d, and drop it (divide by 10); regardless, call the result n
* Output 1 + 10*(9*n + d - 1) + e
* If e==9, we only know the resulting number is not zero, so output
1 + 10*(n - 1) + 9.
(This is decodable, as d is in [1-9] and e is in [0-9].)"""
if not n: return 0
e = 0
while (n % 10) == 0 and e < 9:
n = n // 10
e = e + 1
if e < 9:
n, d = divmod(n, 10);
return 1 + (n*9 + d - 1)*10 + e
else:
return 1 + (n - 1)*10 + 9 | \
Compress 64-bit integer values, preferring a smaller size for whole
numbers (base-10), so as to achieve run-length encoding gains on real-
world data. The basic algorithm:
* If the amount is 0, return 0
* Divide the amount (in base units) evenly by the largest power of 10
possible; call the exponent e (e is max 9)
* If e<9, the last digit of the resulting number cannot be 0; store it
as d, and drop it (divide by 10); regardless, call the result n
* Output 1 + 10*(9*n + d - 1) + e
* If e==9, we only know the resulting number is not zero, so output
1 + 10*(n - 1) + 9.
(This is decodable, as d is in [1-9] and e is in [0-9].) | Below is the the instruction that describes the task:
### Input:
\
Compress 64-bit integer values, preferring a smaller size for whole
numbers (base-10), so as to achieve run-length encoding gains on real-
world data. The basic algorithm:
* If the amount is 0, return 0
* Divide the amount (in base units) evenly by the largest power of 10
possible; call the exponent e (e is max 9)
* If e<9, the last digit of the resulting number cannot be 0; store it
as d, and drop it (divide by 10); regardless, call the result n
* Output 1 + 10*(9*n + d - 1) + e
* If e==9, we only know the resulting number is not zero, so output
1 + 10*(n - 1) + 9.
(This is decodable, as d is in [1-9] and e is in [0-9].)
### Response:
def compress_amount(n):
"""\
Compress 64-bit integer values, preferring a smaller size for whole
numbers (base-10), so as to achieve run-length encoding gains on real-
world data. The basic algorithm:
* If the amount is 0, return 0
* Divide the amount (in base units) evenly by the largest power of 10
possible; call the exponent e (e is max 9)
* If e<9, the last digit of the resulting number cannot be 0; store it
as d, and drop it (divide by 10); regardless, call the result n
* Output 1 + 10*(9*n + d - 1) + e
* If e==9, we only know the resulting number is not zero, so output
1 + 10*(n - 1) + 9.
(This is decodable, as d is in [1-9] and e is in [0-9].)"""
if not n: return 0
e = 0
while (n % 10) == 0 and e < 9:
n = n // 10
e = e + 1
if e < 9:
n, d = divmod(n, 10);
return 1 + (n*9 + d - 1)*10 + e
else:
return 1 + (n - 1)*10 + 9 |
def _initEphemerals(self):
"""
Initialize attributes that are not saved with the checkpoint.
"""
self._firstComputeCall = True
self._accuracy = None
self._protoScores = None
self._categoryDistances = None
self._knn = knn_classifier.KNNClassifier(**self.knnParams)
for x in ('_partitions', '_useAuxiliary', '_doSphering',
'_scanInfo', '_protoScores'):
if not hasattr(self, x):
setattr(self, x, None) | Initialize attributes that are not saved with the checkpoint. | Below is the the instruction that describes the task:
### Input:
Initialize attributes that are not saved with the checkpoint.
### Response:
def _initEphemerals(self):
"""
Initialize attributes that are not saved with the checkpoint.
"""
self._firstComputeCall = True
self._accuracy = None
self._protoScores = None
self._categoryDistances = None
self._knn = knn_classifier.KNNClassifier(**self.knnParams)
for x in ('_partitions', '_useAuxiliary', '_doSphering',
'_scanInfo', '_protoScores'):
if not hasattr(self, x):
setattr(self, x, None) |
def DyStrData(cls,name, regx, index = 0):
''' set dynamic value from the string data of response
@param name: glob parameter name
@param regx: re._pattern_type
e.g.
DyStrData("a",re.compile('123'))
'''
text = Markup(cls.__trackinfo["response_body"]).unescape()
if not text:
return
if not isinstance(regx, re._pattern_type):
raise Exception("DyStrData need the arg which have compiled the regular expression.")
values = regx.findall(text)
result = ""
if len(values)>index:
result = values[index]
cls.glob.update({name:result}) | set dynamic value from the string data of response
@param name: glob parameter name
@param regx: re._pattern_type
e.g.
DyStrData("a",re.compile('123')) | Below is the the instruction that describes the task:
### Input:
set dynamic value from the string data of response
@param name: glob parameter name
@param regx: re._pattern_type
e.g.
DyStrData("a",re.compile('123'))
### Response:
def DyStrData(cls,name, regx, index = 0):
''' set dynamic value from the string data of response
@param name: glob parameter name
@param regx: re._pattern_type
e.g.
DyStrData("a",re.compile('123'))
'''
text = Markup(cls.__trackinfo["response_body"]).unescape()
if not text:
return
if not isinstance(regx, re._pattern_type):
raise Exception("DyStrData need the arg which have compiled the regular expression.")
values = regx.findall(text)
result = ""
if len(values)>index:
result = values[index]
cls.glob.update({name:result}) |
def conditional(self, result, obj):
'''Check all file item with given conditions.'''
fileonly = (self.opt.last_modified_before is not None) or (self.opt.last_modified_after is not None)
if obj['is_dir']:
if not fileonly:
result.append(obj)
return
if (self.opt.last_modified_before is not None) and obj['last_modified'] >= self.opt.last_modified_before:
return
if (self.opt.last_modified_after is not None) and obj['last_modified'] <= self.opt.last_modified_after:
return
result.append(obj) | Check all file item with given conditions. | Below is the the instruction that describes the task:
### Input:
Check all file item with given conditions.
### Response:
def conditional(self, result, obj):
'''Check all file item with given conditions.'''
fileonly = (self.opt.last_modified_before is not None) or (self.opt.last_modified_after is not None)
if obj['is_dir']:
if not fileonly:
result.append(obj)
return
if (self.opt.last_modified_before is not None) and obj['last_modified'] >= self.opt.last_modified_before:
return
if (self.opt.last_modified_after is not None) and obj['last_modified'] <= self.opt.last_modified_after:
return
result.append(obj) |
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata) | Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target. | Below is the the instruction that describes the task:
### Input:
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
### Response:
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata) |
def body(self, body):
"""
Defines response body data.
Arguments:
body (str|bytes): response body to use.
Returns:
self: ``pook.Response`` current instance.
"""
if isinstance(body, bytes):
body = body.decode('utf-8')
self._body = body | Defines response body data.
Arguments:
body (str|bytes): response body to use.
Returns:
self: ``pook.Response`` current instance. | Below is the the instruction that describes the task:
### Input:
Defines response body data.
Arguments:
body (str|bytes): response body to use.
Returns:
self: ``pook.Response`` current instance.
### Response:
def body(self, body):
"""
Defines response body data.
Arguments:
body (str|bytes): response body to use.
Returns:
self: ``pook.Response`` current instance.
"""
if isinstance(body, bytes):
body = body.decode('utf-8')
self._body = body |
def makeGLMModel(model, coefs, threshold=.5):
"""
Create a custom GLM model using the given coefficients.
Needs to be passed source model trained on the dataset to extract the dataset information from.
:param model: source model, used for extracting dataset information
:param coefs: dictionary containing model coefficients
:param threshold: (optional, only for binomial) decision threshold used for classification
"""
model_json = h2o.api(
"POST /3/MakeGLMModel",
data={"model": model._model_json["model_id"]["name"],
"names": list(coefs.keys()),
"beta": list(coefs.values()),
"threshold": threshold}
)
m = H2OGeneralizedLinearEstimator()
m._resolve_model(model_json["model_id"]["name"], model_json)
return m | Create a custom GLM model using the given coefficients.
Needs to be passed source model trained on the dataset to extract the dataset information from.
:param model: source model, used for extracting dataset information
:param coefs: dictionary containing model coefficients
:param threshold: (optional, only for binomial) decision threshold used for classification | Below is the the instruction that describes the task:
### Input:
Create a custom GLM model using the given coefficients.
Needs to be passed source model trained on the dataset to extract the dataset information from.
:param model: source model, used for extracting dataset information
:param coefs: dictionary containing model coefficients
:param threshold: (optional, only for binomial) decision threshold used for classification
### Response:
def makeGLMModel(model, coefs, threshold=.5):
"""
Create a custom GLM model using the given coefficients.
Needs to be passed source model trained on the dataset to extract the dataset information from.
:param model: source model, used for extracting dataset information
:param coefs: dictionary containing model coefficients
:param threshold: (optional, only for binomial) decision threshold used for classification
"""
model_json = h2o.api(
"POST /3/MakeGLMModel",
data={"model": model._model_json["model_id"]["name"],
"names": list(coefs.keys()),
"beta": list(coefs.values()),
"threshold": threshold}
)
m = H2OGeneralizedLinearEstimator()
m._resolve_model(model_json["model_id"]["name"], model_json)
return m |
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word] | Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` | Below is the the instruction that describes the task:
### Input:
Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()`
### Response:
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word] |
def available_backend_simulators(self, access_token=None, user_id=None):
"""
Get the backend simulators available to use in the QX Platform
"""
if access_token:
self.req.credential.set_token(access_token)
if user_id:
self.req.credential.set_user_id(user_id)
if not self.check_credentials():
raise CredentialsError('credentials invalid')
else:
ret = self.req.get('/Backends')
if (ret is not None) and (isinstance(ret, dict)):
return []
return [backend for backend in ret
if backend.get('status') == 'on' and
backend.get('simulator') is True] | Get the backend simulators available to use in the QX Platform | Below is the the instruction that describes the task:
### Input:
Get the backend simulators available to use in the QX Platform
### Response:
def available_backend_simulators(self, access_token=None, user_id=None):
"""
Get the backend simulators available to use in the QX Platform
"""
if access_token:
self.req.credential.set_token(access_token)
if user_id:
self.req.credential.set_user_id(user_id)
if not self.check_credentials():
raise CredentialsError('credentials invalid')
else:
ret = self.req.get('/Backends')
if (ret is not None) and (isinstance(ret, dict)):
return []
return [backend for backend in ret
if backend.get('status') == 'on' and
backend.get('simulator') is True] |
def _make_git(config_info):
"""This function initializes and Git SCM tool object."""
git_args = {}
def _add_value(value, key):
args_key, args_value = _GIT_ARG_FNS[key](value)
git_args[args_key] = args_value
devpipeline_core.toolsupport.args_builder("git", config_info, _GIT_ARGS, _add_value)
if git_args.get("uri"):
return devpipeline_scm.make_simple_scm(Git(git_args), config_info)
else:
raise Exception("No git uri ({})".format(config_info.config.name)) | This function initializes and Git SCM tool object. | Below is the the instruction that describes the task:
### Input:
This function initializes and Git SCM tool object.
### Response:
def _make_git(config_info):
"""This function initializes and Git SCM tool object."""
git_args = {}
def _add_value(value, key):
args_key, args_value = _GIT_ARG_FNS[key](value)
git_args[args_key] = args_value
devpipeline_core.toolsupport.args_builder("git", config_info, _GIT_ARGS, _add_value)
if git_args.get("uri"):
return devpipeline_scm.make_simple_scm(Git(git_args), config_info)
else:
raise Exception("No git uri ({})".format(config_info.config.name)) |
def wet_records(wet_filepath):
"""Generate WETRecords from filepath."""
if wet_filepath.endswith('.gz'):
fopen = gzip.open
else:
fopen = tf.gfile.GFile
with fopen(wet_filepath) as f:
for record in wet_records_from_file_obj(f):
yield record | Generate WETRecords from filepath. | Below is the the instruction that describes the task:
### Input:
Generate WETRecords from filepath.
### Response:
def wet_records(wet_filepath):
"""Generate WETRecords from filepath."""
if wet_filepath.endswith('.gz'):
fopen = gzip.open
else:
fopen = tf.gfile.GFile
with fopen(wet_filepath) as f:
for record in wet_records_from_file_obj(f):
yield record |
def isglove(filepath):
""" Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
>>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
False
"""
with ensure_open(filepath, 'r') as f:
header_line = f.readline()
vector_line = f.readline()
try:
num_vectors, num_dim = header_line.split()
return int(num_dim)
except (ValueError, TypeError):
pass
vector = vector_line.split()[1:]
if len(vector) % 10:
print(vector)
print(len(vector) % 10)
return False
try:
vector = np.array([float(x) for x in vector])
except (ValueError, TypeError):
return False
if np.all(np.abs(vector) < 12.):
return len(vector)
return False | Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
>>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
False | Below is the the instruction that describes the task:
### Input:
Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
>>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
False
### Response:
def isglove(filepath):
""" Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
>>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
False
"""
with ensure_open(filepath, 'r') as f:
header_line = f.readline()
vector_line = f.readline()
try:
num_vectors, num_dim = header_line.split()
return int(num_dim)
except (ValueError, TypeError):
pass
vector = vector_line.split()[1:]
if len(vector) % 10:
print(vector)
print(len(vector) % 10)
return False
try:
vector = np.array([float(x) for x in vector])
except (ValueError, TypeError):
return False
if np.all(np.abs(vector) < 12.):
return len(vector)
return False |
def _computModelDelay(self):
""" Computes the amount of time (if any) to delay the run of this model.
This can be determined by two mutually exclusive parameters:
delay and sleepModelRange.
'delay' specifies the number of seconds a model should be delayed. If a list
is specified, the appropriate amount of delay is determined by using the
model's modelIndex property.
However, this doesn't work when testing orphaned models, because the
modelIndex will be the same for every recovery attempt. Therefore, every
recovery attempt will also be delayed and potentially orphaned.
'sleepModelRange' doesn't use the modelIndex property for a model, but rather
sees which order the model is in the database, and uses that to determine
whether or not a model should be delayed.
"""
# 'delay' and 'sleepModelRange' are mutually exclusive
if self._params['delay'] is not None \
and self._params['sleepModelRange'] is not None:
raise RuntimeError("Only one of 'delay' or "
"'sleepModelRange' may be specified")
# Get the sleepModel range
if self._sleepModelRange is not None:
range, delay = self._sleepModelRange.split(':')
delay = float(delay)
range = map(int, range.split(','))
modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)
modelIDs.sort()
range[1] = min(range[1], len(modelIDs))
# If the model is in range, add the delay
if self._modelID in modelIDs[range[0]:range[1]]:
self._delay = delay
else:
self._delay = self._params['delay'] | Computes the amount of time (if any) to delay the run of this model.
This can be determined by two mutually exclusive parameters:
delay and sleepModelRange.
'delay' specifies the number of seconds a model should be delayed. If a list
is specified, the appropriate amount of delay is determined by using the
model's modelIndex property.
However, this doesn't work when testing orphaned models, because the
modelIndex will be the same for every recovery attempt. Therefore, every
recovery attempt will also be delayed and potentially orphaned.
'sleepModelRange' doesn't use the modelIndex property for a model, but rather
sees which order the model is in the database, and uses that to determine
whether or not a model should be delayed. | Below is the the instruction that describes the task:
### Input:
Computes the amount of time (if any) to delay the run of this model.
This can be determined by two mutually exclusive parameters:
delay and sleepModelRange.
'delay' specifies the number of seconds a model should be delayed. If a list
is specified, the appropriate amount of delay is determined by using the
model's modelIndex property.
However, this doesn't work when testing orphaned models, because the
modelIndex will be the same for every recovery attempt. Therefore, every
recovery attempt will also be delayed and potentially orphaned.
'sleepModelRange' doesn't use the modelIndex property for a model, but rather
sees which order the model is in the database, and uses that to determine
whether or not a model should be delayed.
### Response:
def _computModelDelay(self):
""" Computes the amount of time (if any) to delay the run of this model.
This can be determined by two mutually exclusive parameters:
delay and sleepModelRange.
'delay' specifies the number of seconds a model should be delayed. If a list
is specified, the appropriate amount of delay is determined by using the
model's modelIndex property.
However, this doesn't work when testing orphaned models, because the
modelIndex will be the same for every recovery attempt. Therefore, every
recovery attempt will also be delayed and potentially orphaned.
'sleepModelRange' doesn't use the modelIndex property for a model, but rather
sees which order the model is in the database, and uses that to determine
whether or not a model should be delayed.
"""
# 'delay' and 'sleepModelRange' are mutually exclusive
if self._params['delay'] is not None \
and self._params['sleepModelRange'] is not None:
raise RuntimeError("Only one of 'delay' or "
"'sleepModelRange' may be specified")
# Get the sleepModel range
if self._sleepModelRange is not None:
range, delay = self._sleepModelRange.split(':')
delay = float(delay)
range = map(int, range.split(','))
modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)
modelIDs.sort()
range[1] = min(range[1], len(modelIDs))
# If the model is in range, add the delay
if self._modelID in modelIDs[range[0]:range[1]]:
self._delay = delay
else:
self._delay = self._params['delay'] |
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close() | Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience. | Below is the the instruction that describes the task:
### Input:
Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
### Response:
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close() |
def log(self, string):
"""
appends input string to log file and sends it to log function (self.log_function)
Returns:
"""
self.log_data.append(string)
if self.log_function is None:
print(string)
else:
self.log_function(string) | appends input string to log file and sends it to log function (self.log_function)
Returns: | Below is the the instruction that describes the task:
### Input:
appends input string to log file and sends it to log function (self.log_function)
Returns:
### Response:
def log(self, string):
"""
appends input string to log file and sends it to log function (self.log_function)
Returns:
"""
self.log_data.append(string)
if self.log_function is None:
print(string)
else:
self.log_function(string) |
def main():
"""
Simple stdin/stdout interface.
"""
if len(sys.argv) == 2 and sys.argv[1] in Locales:
locale = sys.argv[1]
convertfunc = convert
elif len(sys.argv) == 3 and sys.argv[1] == '-w' and sys.argv[2] in Locales:
locale = sys.argv[2]
convertfunc = convert_for_mw
else:
thisfile = __file__ if __name__ == '__main__' else 'python -mzhconv'
print("usage: %s [-w] {zh-cn|zh-tw|zh-hk|zh-sg|zh-hans|zh-hant|zh} < input > output" % thisfile)
sys.exit(1)
loaddict()
ln = sys.stdin.readline()
while ln:
l = ln.rstrip('\r\n')
if sys.version_info[0] < 3:
l = unicode(l, 'utf-8')
res = convertfunc(l, locale)
if sys.version_info[0] < 3:
print(res.encode('utf-8'))
else:
print(res)
ln = sys.stdin.readline() | Simple stdin/stdout interface. | Below is the the instruction that describes the task:
### Input:
Simple stdin/stdout interface.
### Response:
def main():
"""
Simple stdin/stdout interface.
"""
if len(sys.argv) == 2 and sys.argv[1] in Locales:
locale = sys.argv[1]
convertfunc = convert
elif len(sys.argv) == 3 and sys.argv[1] == '-w' and sys.argv[2] in Locales:
locale = sys.argv[2]
convertfunc = convert_for_mw
else:
thisfile = __file__ if __name__ == '__main__' else 'python -mzhconv'
print("usage: %s [-w] {zh-cn|zh-tw|zh-hk|zh-sg|zh-hans|zh-hant|zh} < input > output" % thisfile)
sys.exit(1)
loaddict()
ln = sys.stdin.readline()
while ln:
l = ln.rstrip('\r\n')
if sys.version_info[0] < 3:
l = unicode(l, 'utf-8')
res = convertfunc(l, locale)
if sys.version_info[0] < 3:
print(res.encode('utf-8'))
else:
print(res)
ln = sys.stdin.readline() |
def _input_as_multiline_string(self, data):
"""Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
data_file = open(filename, 'w')
data_file.write(data)
data_file.close()
return filename | Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass). | Below is the the instruction that describes the task:
### Input:
Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
### Response:
def _input_as_multiline_string(self, data):
"""Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
data_file = open(filename, 'w')
data_file.write(data)
data_file.close()
return filename |
def add_reporter(self, reporter):
"""Add a MetricReporter"""
with self._lock:
reporter.init(list(self.metrics.values()))
self._reporters.append(reporter) | Add a MetricReporter | Below is the the instruction that describes the task:
### Input:
Add a MetricReporter
### Response:
def add_reporter(self, reporter):
"""Add a MetricReporter"""
with self._lock:
reporter.init(list(self.metrics.values()))
self._reporters.append(reporter) |
def _get_question_map(self, question_id):
"""get question map from questions matching question_id
This can make sense of both Section assigned Ids or normal Question/Item Ids
"""
if question_id.get_authority() == ASSESSMENT_AUTHORITY:
key = '_id'
match_value = ObjectId(question_id.get_identifier())
else:
key = 'questionId'
match_value = str(question_id)
for question_map in self._my_map['questions']:
if question_map[key] == match_value:
return question_map
raise errors.NotFound() | get question map from questions matching question_id
This can make sense of both Section assigned Ids or normal Question/Item Ids | Below is the the instruction that describes the task:
### Input:
get question map from questions matching question_id
This can make sense of both Section assigned Ids or normal Question/Item Ids
### Response:
def _get_question_map(self, question_id):
"""get question map from questions matching question_id
This can make sense of both Section assigned Ids or normal Question/Item Ids
"""
if question_id.get_authority() == ASSESSMENT_AUTHORITY:
key = '_id'
match_value = ObjectId(question_id.get_identifier())
else:
key = 'questionId'
match_value = str(question_id)
for question_map in self._my_map['questions']:
if question_map[key] == match_value:
return question_map
raise errors.NotFound() |
def _create_binary(self, binary_tgt, results_dir):
"""Create a .pex file for the specified binary target."""
# Note that we rebuild a chroot from scratch, instead of using the REQUIREMENTS_PEX
# and PYTHON_SOURCES products, because those products are already-built pexes, and there's
# no easy way to merge them into a single pex file (for example, they each have a __main__.py,
# metadata, and so on, which the merging code would have to handle specially).
interpreter = self.context.products.get_data(PythonInterpreter)
with temporary_dir() as tmpdir:
# Create the pex_info for the binary.
run_info_dict = self.context.run_tracker.run_info.get_as_dict()
build_properties = PexInfo.make_build_properties()
build_properties.update(run_info_dict)
pex_info = binary_tgt.pexinfo.copy()
pex_info.build_properties = build_properties
pex_builder = PexBuilderWrapper.Factory.create(
builder=PEXBuilder(path=tmpdir, interpreter=interpreter, pex_info=pex_info, copy=True),
log=self.context.log)
if binary_tgt.shebang:
self.context.log.info('Found Python binary target {} with customized shebang, using it: {}'
.format(binary_tgt.name, binary_tgt.shebang))
pex_builder.set_shebang(binary_tgt.shebang)
else:
self.context.log.debug('No customized shebang found for {}'.format(binary_tgt.name))
# Find which targets provide sources and which specify requirements.
source_tgts = []
req_tgts = []
constraint_tgts = []
for tgt in binary_tgt.closure(exclude_scopes=Scopes.COMPILE):
if has_python_sources(tgt) or has_resources(tgt):
source_tgts.append(tgt)
elif has_python_requirements(tgt):
req_tgts.append(tgt)
if is_python_target(tgt):
constraint_tgts.append(tgt)
# Add interpreter compatibility constraints to pex info. This will first check the targets for any
# constraints, and if they do not have any will resort to the global constraints.
pex_builder.add_interpreter_constraints_from(constraint_tgts)
# Dump everything into the builder's chroot.
for tgt in source_tgts:
pex_builder.add_sources_from(tgt)
# We need to ensure that we are resolving for only the current platform if we are
# including local python dist targets that have native extensions.
self._python_native_code_settings.check_build_for_current_platform_only(self.context.targets())
pex_builder.add_requirement_libs_from(req_tgts, platforms=binary_tgt.platforms)
# Build the .pex file.
pex_path = os.path.join(results_dir, '{}.pex'.format(binary_tgt.name))
pex_builder.build(pex_path)
return pex_path | Create a .pex file for the specified binary target. | Below is the the instruction that describes the task:
### Input:
Create a .pex file for the specified binary target.
### Response:
def _create_binary(self, binary_tgt, results_dir):
"""Create a .pex file for the specified binary target."""
# Note that we rebuild a chroot from scratch, instead of using the REQUIREMENTS_PEX
# and PYTHON_SOURCES products, because those products are already-built pexes, and there's
# no easy way to merge them into a single pex file (for example, they each have a __main__.py,
# metadata, and so on, which the merging code would have to handle specially).
interpreter = self.context.products.get_data(PythonInterpreter)
with temporary_dir() as tmpdir:
# Create the pex_info for the binary.
run_info_dict = self.context.run_tracker.run_info.get_as_dict()
build_properties = PexInfo.make_build_properties()
build_properties.update(run_info_dict)
pex_info = binary_tgt.pexinfo.copy()
pex_info.build_properties = build_properties
pex_builder = PexBuilderWrapper.Factory.create(
builder=PEXBuilder(path=tmpdir, interpreter=interpreter, pex_info=pex_info, copy=True),
log=self.context.log)
if binary_tgt.shebang:
self.context.log.info('Found Python binary target {} with customized shebang, using it: {}'
.format(binary_tgt.name, binary_tgt.shebang))
pex_builder.set_shebang(binary_tgt.shebang)
else:
self.context.log.debug('No customized shebang found for {}'.format(binary_tgt.name))
# Find which targets provide sources and which specify requirements.
source_tgts = []
req_tgts = []
constraint_tgts = []
for tgt in binary_tgt.closure(exclude_scopes=Scopes.COMPILE):
if has_python_sources(tgt) or has_resources(tgt):
source_tgts.append(tgt)
elif has_python_requirements(tgt):
req_tgts.append(tgt)
if is_python_target(tgt):
constraint_tgts.append(tgt)
# Add interpreter compatibility constraints to pex info. This will first check the targets for any
# constraints, and if they do not have any will resort to the global constraints.
pex_builder.add_interpreter_constraints_from(constraint_tgts)
# Dump everything into the builder's chroot.
for tgt in source_tgts:
pex_builder.add_sources_from(tgt)
# We need to ensure that we are resolving for only the current platform if we are
# including local python dist targets that have native extensions.
self._python_native_code_settings.check_build_for_current_platform_only(self.context.targets())
pex_builder.add_requirement_libs_from(req_tgts, platforms=binary_tgt.platforms)
# Build the .pex file.
pex_path = os.path.join(results_dir, '{}.pex'.format(binary_tgt.name))
pex_builder.build(pex_path)
return pex_path |
def convert_hexstr_to_bigbase(hexstr, alphabet=ALPHABET, bigbase=BIGBASE):
r"""
Packs a long hexstr into a shorter length string with a larger base
Ignore:
# Determine the length savings with lossless conversion
import sympy as sy
consts = dict(hexbase=16, hexlen=256, bigbase=27)
symbols = sy.symbols('hexbase, hexlen, bigbase, newlen')
haexbase, hexlen, bigbase, newlen = symbols
eqn = sy.Eq(16 ** hexlen, bigbase ** newlen)
newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
print('newlen_ans = %r' % (newlen_ans,))
# for a 27 char alphabet we can get 216
print('Required length for lossless conversion len2 = %r' % (len2,))
def info(base, len):
bits = base ** len
print('base = %r' % (base,))
print('len = %r' % (len,))
print('bits = %r' % (bits,))
info(16, 256)
info(27, 16)
info(27, 64)
info(27, 216)
"""
x = int(hexstr, 16) # first convert to base 16
if x == 0:
return '0'
sign = 1 if x > 0 else -1
x *= sign
digits = []
while x:
digits.append(alphabet[x % bigbase])
x //= bigbase
if sign < 0:
digits.append('-')
digits.reverse()
newbase_str = ''.join(digits)
return newbase_str | r"""
Packs a long hexstr into a shorter length string with a larger base
Ignore:
# Determine the length savings with lossless conversion
import sympy as sy
consts = dict(hexbase=16, hexlen=256, bigbase=27)
symbols = sy.symbols('hexbase, hexlen, bigbase, newlen')
haexbase, hexlen, bigbase, newlen = symbols
eqn = sy.Eq(16 ** hexlen, bigbase ** newlen)
newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
print('newlen_ans = %r' % (newlen_ans,))
# for a 27 char alphabet we can get 216
print('Required length for lossless conversion len2 = %r' % (len2,))
def info(base, len):
bits = base ** len
print('base = %r' % (base,))
print('len = %r' % (len,))
print('bits = %r' % (bits,))
info(16, 256)
info(27, 16)
info(27, 64)
info(27, 216) | Below is the the instruction that describes the task:
### Input:
r"""
Packs a long hexstr into a shorter length string with a larger base
Ignore:
# Determine the length savings with lossless conversion
import sympy as sy
consts = dict(hexbase=16, hexlen=256, bigbase=27)
symbols = sy.symbols('hexbase, hexlen, bigbase, newlen')
haexbase, hexlen, bigbase, newlen = symbols
eqn = sy.Eq(16 ** hexlen, bigbase ** newlen)
newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
print('newlen_ans = %r' % (newlen_ans,))
# for a 27 char alphabet we can get 216
print('Required length for lossless conversion len2 = %r' % (len2,))
def info(base, len):
bits = base ** len
print('base = %r' % (base,))
print('len = %r' % (len,))
print('bits = %r' % (bits,))
info(16, 256)
info(27, 16)
info(27, 64)
info(27, 216)
### Response:
def convert_hexstr_to_bigbase(hexstr, alphabet=ALPHABET, bigbase=BIGBASE):
r"""
Packs a long hexstr into a shorter length string with a larger base
Ignore:
# Determine the length savings with lossless conversion
import sympy as sy
consts = dict(hexbase=16, hexlen=256, bigbase=27)
symbols = sy.symbols('hexbase, hexlen, bigbase, newlen')
haexbase, hexlen, bigbase, newlen = symbols
eqn = sy.Eq(16 ** hexlen, bigbase ** newlen)
newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
print('newlen_ans = %r' % (newlen_ans,))
# for a 27 char alphabet we can get 216
print('Required length for lossless conversion len2 = %r' % (len2,))
def info(base, len):
bits = base ** len
print('base = %r' % (base,))
print('len = %r' % (len,))
print('bits = %r' % (bits,))
info(16, 256)
info(27, 16)
info(27, 64)
info(27, 216)
"""
x = int(hexstr, 16) # first convert to base 16
if x == 0:
return '0'
sign = 1 if x > 0 else -1
x *= sign
digits = []
while x:
digits.append(alphabet[x % bigbase])
x //= bigbase
if sign < 0:
digits.append('-')
digits.reverse()
newbase_str = ''.join(digits)
return newbase_str |
def save_pointings(self):
"""Print the currently defined FOVs"""
import tkFileDialog
f=tkFileDialog.asksaveasfile()
i=0
if self.pointing_format.get()=='CFHT PH':
f.write("""<?xml version = "1.0"?>
<!DOCTYPE ASTRO SYSTEM "http://vizier.u-strasbg.fr/xml/astrores.dtd">
<ASTRO ID="v0.8" xmlns:ASTRO="http://vizier.u-strasbg.fr/doc/astrores.htx">
<TABLE ID="Table">
<NAME>Fixed Targets</NAME>
<TITLE>Fixed Targets for CFHT QSO</TITLE>
<!-- Definition of each field -->
<FIELD name="NAME" datatype="A" width="20">
<DESCRIPTION>Name of target</DESCRIPTION>
</FIELD>
<FIELD name="RA" ref="" datatype="A" width="11" unit=""h:m:s"">
<DESCRIPTION>Right ascension of target</DESCRIPTION>
</FIELD>
<FIELD name="DEC" ref="" datatype="A" width="11" unit=""d:m:s"">
<DESCRIPTION>Declination of target</DESCRIPTION>
</FIELD>
<FIELD name="EPOCH" datatype="F" width="6">
<DESCRIPTION>Epoch of coordinates</DESCRIPTION>
</FIELD>
<FIELD name="POINT" datatype="A" width="5">
<DESCRIPTION>Pointing name</DESCRIPTION>
</FIELD>
<!-- Data table -->
<DATA><CSV headlines="4" colsep="|"><![CDATA[
NAME |RA |DEC |EPOCH |POINT|
|hh:mm:ss.ss|+dd:mm:ss.s| | |
12345678901234567890|12345678901|12345678901|123456|12345|
--------------------|-----------|-----------|------|-----|\n""")
if self.pointing_format.get()=='Palomar':
f.write("index\n")
for pointing in self.pointings:
i=i+1
name=pointing["label"]["text"]
(sra,sdec)=str(pointing["camera"]).split()
ra=sra.split(":")
dec=sdec.split(":")
dec[0]=str(int(dec[0]))
if int(dec[0])>=0:
dec[0]='+'+dec[0]
if self.pointing_format.get()=='Palomar':
f.write( "%5d %16s %2s %2s %4s %3s %2s %4s 2000\n" % (i, name,
ra[0].zfill(2),
ra[1].zfill(2),
ra[2].zfill(2),
dec[0].zfill(3),
dec[1].zfill(2),
dec[2].zfill(2)))
elif self.pointing_format.get()=='CFHT PH':
#f.write("%f %f\n" % (pointing["camera"].ra,pointing["camera"].dec))
f.write("%-20s|%11s|%11s|%6.1f|%-5d|\n" % (name,sra,sdec,2000.0,1))
elif self.pointing_format.get()=='KPNO/CTIO':
str1 = sra.replace(":"," ")
str2 = sdec.replace(":"," ")
f.write("%16s %16s %16s 2000\n" % ( name, str1, str2) )
elif self.pointing_format.get()=='SSim':
ra = []
dec= []
for ccd in pointing["camera"].getGeometry():
ra.append(ccd[0])
ra.append(ccd[2])
dec.append(ccd[1])
dec.append(ccd[3])
import math
dra=math.degrees(math.fabs(max(ra)-min(ra)))
ddec=math.degrees(math.fabs(max(dec)-min(dec)))
f.write("%f %f %16s %16s DATE 1.00 1.00 500 FILE\n" % (dra, ddec, sra, sdec ) )
if self.pointing_format.get()=='CFHT PH':
f.write("""]]</CSV></DATA>
</TABLE>
</ASTRO>
""")
f.close() | Print the currently defined FOVs | Below is the the instruction that describes the task:
### Input:
Print the currently defined FOVs
### Response:
def save_pointings(self):
"""Print the currently defined FOVs"""
import tkFileDialog
f=tkFileDialog.asksaveasfile()
i=0
if self.pointing_format.get()=='CFHT PH':
f.write("""<?xml version = "1.0"?>
<!DOCTYPE ASTRO SYSTEM "http://vizier.u-strasbg.fr/xml/astrores.dtd">
<ASTRO ID="v0.8" xmlns:ASTRO="http://vizier.u-strasbg.fr/doc/astrores.htx">
<TABLE ID="Table">
<NAME>Fixed Targets</NAME>
<TITLE>Fixed Targets for CFHT QSO</TITLE>
<!-- Definition of each field -->
<FIELD name="NAME" datatype="A" width="20">
<DESCRIPTION>Name of target</DESCRIPTION>
</FIELD>
<FIELD name="RA" ref="" datatype="A" width="11" unit=""h:m:s"">
<DESCRIPTION>Right ascension of target</DESCRIPTION>
</FIELD>
<FIELD name="DEC" ref="" datatype="A" width="11" unit=""d:m:s"">
<DESCRIPTION>Declination of target</DESCRIPTION>
</FIELD>
<FIELD name="EPOCH" datatype="F" width="6">
<DESCRIPTION>Epoch of coordinates</DESCRIPTION>
</FIELD>
<FIELD name="POINT" datatype="A" width="5">
<DESCRIPTION>Pointing name</DESCRIPTION>
</FIELD>
<!-- Data table -->
<DATA><CSV headlines="4" colsep="|"><![CDATA[
NAME |RA |DEC |EPOCH |POINT|
|hh:mm:ss.ss|+dd:mm:ss.s| | |
12345678901234567890|12345678901|12345678901|123456|12345|
--------------------|-----------|-----------|------|-----|\n""")
if self.pointing_format.get()=='Palomar':
f.write("index\n")
for pointing in self.pointings:
i=i+1
name=pointing["label"]["text"]
(sra,sdec)=str(pointing["camera"]).split()
ra=sra.split(":")
dec=sdec.split(":")
dec[0]=str(int(dec[0]))
if int(dec[0])>=0:
dec[0]='+'+dec[0]
if self.pointing_format.get()=='Palomar':
f.write( "%5d %16s %2s %2s %4s %3s %2s %4s 2000\n" % (i, name,
ra[0].zfill(2),
ra[1].zfill(2),
ra[2].zfill(2),
dec[0].zfill(3),
dec[1].zfill(2),
dec[2].zfill(2)))
elif self.pointing_format.get()=='CFHT PH':
#f.write("%f %f\n" % (pointing["camera"].ra,pointing["camera"].dec))
f.write("%-20s|%11s|%11s|%6.1f|%-5d|\n" % (name,sra,sdec,2000.0,1))
elif self.pointing_format.get()=='KPNO/CTIO':
str1 = sra.replace(":"," ")
str2 = sdec.replace(":"," ")
f.write("%16s %16s %16s 2000\n" % ( name, str1, str2) )
elif self.pointing_format.get()=='SSim':
ra = []
dec= []
for ccd in pointing["camera"].getGeometry():
ra.append(ccd[0])
ra.append(ccd[2])
dec.append(ccd[1])
dec.append(ccd[3])
import math
dra=math.degrees(math.fabs(max(ra)-min(ra)))
ddec=math.degrees(math.fabs(max(dec)-min(dec)))
f.write("%f %f %16s %16s DATE 1.00 1.00 500 FILE\n" % (dra, ddec, sra, sdec ) )
if self.pointing_format.get()=='CFHT PH':
f.write("""]]</CSV></DATA>
</TABLE>
</ASTRO>
""")
f.close() |
def minmax(low, high):
"""Test that the data items fall within range: low <= x <= high."""
def decorator(function):
"""Decorate a function with args."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
"""Wrap the function."""
series = function(*args, **kwargs)
lo_pass = low <= series
hi_pass = series <= high
return lo_pass & hi_pass
return wrapper
return decorator | Test that the data items fall within range: low <= x <= high. | Below is the instruction that describes the task:
### Input:
Test that the data items fall within range: low <= x <= high.
### Response:
def minmax(low, high):
"""Test that the data items fall within range: low <= x <= high."""
def decorator(function):
"""Decorate a function with args."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
"""Wrap the function."""
series = function(*args, **kwargs)
lo_pass = low <= series
hi_pass = series <= high
return lo_pass & hi_pass
return wrapper
return decorator |
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index | Calculate index of frame, starting at 0 for the newest frame within
this thread | Below is the instruction that describes the task:
### Input:
Calculate index of frame, starting at 0 for the newest frame within
this thread
### Response:
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index |
def _pushMessages(self):
""" Internal callback used to make sure the msg list keeps moving. """
# This continues to get itself called until no msgs are left in list.
self.showStatus('')
if len(self._statusMsgsToShow) > 0:
self.top.after(200, self._pushMessages) | Internal callback used to make sure the msg list keeps moving. | Below is the instruction that describes the task:
### Input:
Internal callback used to make sure the msg list keeps moving.
### Response:
def _pushMessages(self):
""" Internal callback used to make sure the msg list keeps moving. """
# This continues to get itself called until no msgs are left in list.
self.showStatus('')
if len(self._statusMsgsToShow) > 0:
self.top.after(200, self._pushMessages) |
def _do_else(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @else
"""
if '@if' not in rule[OPTIONS]:
log.error("@else with no @if (%s", rule[INDEX][rule[LINENO]])
val = rule[OPTIONS].pop('@if', True)
if not val:
rule[CODESTR] = c_codestr
self.manage_children(
rule, p_selectors, p_parents, p_children, scope, media) | Implements @else | Below is the instruction that describes the task:
### Input:
Implements @else
### Response:
def _do_else(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @else
"""
if '@if' not in rule[OPTIONS]:
log.error("@else with no @if (%s", rule[INDEX][rule[LINENO]])
val = rule[OPTIONS].pop('@if', True)
if not val:
rule[CODESTR] = c_codestr
self.manage_children(
rule, p_selectors, p_parents, p_children, scope, media) |
def radius_of_gyration(neurite):
'''Calculate and return radius of gyration of a given neurite.'''
centre_mass = neurite_centre_of_mass(neurite)
sum_sqr_distance = 0
N = 0
dist_sqr = [distance_sqr(centre_mass, s) for s in nm.iter_segments(neurite)]
sum_sqr_distance = np.sum(dist_sqr)
N = len(dist_sqr)
return np.sqrt(sum_sqr_distance / N) | Calculate and return radius of gyration of a given neurite. | Below is the instruction that describes the task:
### Input:
Calculate and return radius of gyration of a given neurite.
### Response:
def radius_of_gyration(neurite):
'''Calculate and return radius of gyration of a given neurite.'''
centre_mass = neurite_centre_of_mass(neurite)
sum_sqr_distance = 0
N = 0
dist_sqr = [distance_sqr(centre_mass, s) for s in nm.iter_segments(neurite)]
sum_sqr_distance = np.sum(dist_sqr)
N = len(dist_sqr)
return np.sqrt(sum_sqr_distance / N) |
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1])))) | Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types. | Below is the instruction that describes the task:
### Input:
Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
### Response:
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1])))) |
def generate(self):
"""
Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded.
"""
if (self.discover(CONSTANTS.STATS_CSV_LIST_FILE) and self.discover(CONSTANTS.PLOTS_CSV_LIST_FILE) and self.discover(CONSTANTS.CDF_PLOTS_CSV_LIST_FILE) and
self.collect() and self.collect_datasources() and self.collect_cdf_datasources()):
for stats in self.reports[0].stats:
metric_label = stats.replace('.stats.csv', '')
stats_0 = os.path.join(self.reports[0].local_location, stats)
stats_1 = os.path.join(self.reports[1].local_location, stats)
report0_stats = {}
report1_stats = {}
if naarad.utils.is_valid_file(stats_0) and naarad.utils.is_valid_file(stats_1):
report0 = csv.DictReader(open(stats_0))
for row in report0:
report0_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
report0_stats['__headers__'] = report0._fieldnames
report1 = csv.DictReader(open(stats_1))
for row in report1:
report1_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
report1_stats['__headers__'] = report1._fieldnames
common_stats = sorted(set(report0_stats['__headers__']) & set(report1_stats['__headers__']))
common_submetrics = sorted(set(report0_stats.keys()) & set(report1_stats.keys()))
for submetric in common_submetrics:
if submetric != '__headers__':
for stat in common_stats:
if stat != CONSTANTS.SUBMETRIC_HEADER:
diff_metric = reduce(defaultdict.__getitem__, [stats.split('.')[0], submetric, stat], self.diff_data)
diff_metric[0] = float(report0_stats[submetric][stat])
diff_metric[1] = float(report1_stats[submetric][stat])
diff_metric['absolute_diff'] = naarad.utils.normalize_float_for_display(diff_metric[1] - diff_metric[0])
if diff_metric[0] == 0:
if diff_metric['absolute_diff'] == '0.0':
diff_metric['percent_diff'] = 0.0
else:
diff_metric['percent_diff'] = 'N/A'
else:
diff_metric['percent_diff'] = naarad.utils.normalize_float_for_display((diff_metric[1] - diff_metric[0]) * 100 / diff_metric[0])
# check whether there is a SLA failure
if ((metric_label in self.sla_map.keys()) and (submetric in self.sla_map[metric_label].keys()) and
(stat in self.sla_map[metric_label][submetric].keys())):
self.check_sla(self.sla_map[metric_label][submetric][stat], diff_metric)
else:
return False
self.plot_diff()
diff_html = ''
if self.diff_data:
diff_html = self.generate_diff_html()
client_html = self.generate_client_charting_page(self.reports[0].datasource)
if diff_html != '':
with open(os.path.join(self.output_directory, CONSTANTS.DIFF_REPORT_FILE), 'w') as diff_file:
diff_file.write(diff_html)
with open(os.path.join(self.output_directory, CONSTANTS.CLIENT_CHARTING_FILE), 'w') as client_file:
client_file.write(client_html)
return True | Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded. | Below is the instruction that describes the task:
### Input:
Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded.
### Response:
def generate(self):
"""
Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded.
"""
if (self.discover(CONSTANTS.STATS_CSV_LIST_FILE) and self.discover(CONSTANTS.PLOTS_CSV_LIST_FILE) and self.discover(CONSTANTS.CDF_PLOTS_CSV_LIST_FILE) and
self.collect() and self.collect_datasources() and self.collect_cdf_datasources()):
for stats in self.reports[0].stats:
metric_label = stats.replace('.stats.csv', '')
stats_0 = os.path.join(self.reports[0].local_location, stats)
stats_1 = os.path.join(self.reports[1].local_location, stats)
report0_stats = {}
report1_stats = {}
if naarad.utils.is_valid_file(stats_0) and naarad.utils.is_valid_file(stats_1):
report0 = csv.DictReader(open(stats_0))
for row in report0:
report0_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
report0_stats['__headers__'] = report0._fieldnames
report1 = csv.DictReader(open(stats_1))
for row in report1:
report1_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
report1_stats['__headers__'] = report1._fieldnames
common_stats = sorted(set(report0_stats['__headers__']) & set(report1_stats['__headers__']))
common_submetrics = sorted(set(report0_stats.keys()) & set(report1_stats.keys()))
for submetric in common_submetrics:
if submetric != '__headers__':
for stat in common_stats:
if stat != CONSTANTS.SUBMETRIC_HEADER:
diff_metric = reduce(defaultdict.__getitem__, [stats.split('.')[0], submetric, stat], self.diff_data)
diff_metric[0] = float(report0_stats[submetric][stat])
diff_metric[1] = float(report1_stats[submetric][stat])
diff_metric['absolute_diff'] = naarad.utils.normalize_float_for_display(diff_metric[1] - diff_metric[0])
if diff_metric[0] == 0:
if diff_metric['absolute_diff'] == '0.0':
diff_metric['percent_diff'] = 0.0
else:
diff_metric['percent_diff'] = 'N/A'
else:
diff_metric['percent_diff'] = naarad.utils.normalize_float_for_display((diff_metric[1] - diff_metric[0]) * 100 / diff_metric[0])
# check whether there is a SLA failure
if ((metric_label in self.sla_map.keys()) and (submetric in self.sla_map[metric_label].keys()) and
(stat in self.sla_map[metric_label][submetric].keys())):
self.check_sla(self.sla_map[metric_label][submetric][stat], diff_metric)
else:
return False
self.plot_diff()
diff_html = ''
if self.diff_data:
diff_html = self.generate_diff_html()
client_html = self.generate_client_charting_page(self.reports[0].datasource)
if diff_html != '':
with open(os.path.join(self.output_directory, CONSTANTS.DIFF_REPORT_FILE), 'w') as diff_file:
diff_file.write(diff_html)
with open(os.path.join(self.output_directory, CONSTANTS.CLIENT_CHARTING_FILE), 'w') as client_file:
client_file.write(client_html)
return True |
def publish(config, source=None, template=None, destination=None, jinja_env=None, no_write=False):
"""Given a config, performs an end-to-end publishing pipeline and returns the result:
linking -> compiling -> templating -> writing
NOTE: at most one of source and template can be None. If both are None, the publisher
effectively has nothing to do; an exception is raised.
PARAMETERS:
config -- Config; a context that includes variables, compiler options, and templater
information.
source -- str; path to a source file, relative to the current working directory. If None,
the publisher effectively becomes a templating engine.
template -- str; path to a Jinja template file. Templar treats the path as relative to the
list of template directories in config. If the template cannot be found relative
to those directories, Templar finally tries the path relative to the current
directory.
If template is None, the publisher effectively becomes a linker and compiler.
destination -- str; path for the destination file.
jinja_env -- jinja2.Environment; if None, a Jinja2 Environment is created with a
FileSystemLoader that is configured with config.template_dirs. Otherwise, the
given Jinja2 Environment is used to retrieve and render the template.
no_write -- bool; if True, the result is not written to a file or printed. If False and
destination is provided, the result is written to the provided destination file.
RETURNS:
str; the result of the publishing pipeline.
"""
if not isinstance(config, Config):
raise PublishError(
"config must be a Config object, "
"but instead was type '{}'".format(type(config).__name__))
if source is None and template is None:
raise PublishError('When publishing, source and template cannot both be omitted.')
variables = config.variables
if source:
# Linking stage.
all_block, extracted_variables = linker.link(source)
variables.update(extracted_variables)
# Compiling stage.
block_variables = {}
for rule in config.rules:
if rule.applies(source, destination):
if isinstance(rule, VariableRule):
variables.update(rule.apply(str(all_block)))
else:
all_block.apply_rule(rule)
block_variables.update(linker.get_block_dict(all_block))
variables['blocks'] = block_variables # Blocks are namespaced with 'blocks'.
# Templating stage.
if template:
if not jinja_env:
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(config.template_dirs))
jinja_template = jinja_env.get_template(template)
result = jinja_template.render(variables)
# Handle recursive evaluation of Jinja expressions.
iterations = 0
while config.recursively_evaluate_jinja_expressions \
and iterations < _MAX_JINJA_RECURSIVE_DEPTH + 1 \
and _jinja_expression_re.search(result):
if iterations == _MAX_JINJA_RECURSIVE_DEPTH:
raise PublishError('\n'.join([
'Recursive Jinja expression evaluation exceeded the allowed '
'number of iterations. Last state of template:',
result]))
jinja_env = jinja2.Environment(loader=jinja2.DictLoader({'intermediate': result}))
jinja_template = jinja_env.get_template('intermediate')
result = jinja_template.render(variables)
iterations += 1
else:
# template is None implies source is not None, so variables['blocks'] must exist.
result = variables['blocks']['all']
# Writing stage.
if not no_write and destination:
destination_dir = os.path.dirname(destination)
if destination_dir != '' and not os.path.isdir(destination_dir):
os.makedirs(destination_dir)
with open(destination, 'w') as f:
f.write(result)
return result | Given a config, performs an end-to-end publishing pipeline and returns the result:
linking -> compiling -> templating -> writing
NOTE: at most one of source and template can be None. If both are None, the publisher
effectively has nothing to do; an exception is raised.
PARAMETERS:
config -- Config; a context that includes variables, compiler options, and templater
information.
source -- str; path to a source file, relative to the current working directory. If None,
the publisher effectively becomes a templating engine.
template -- str; path to a Jinja template file. Templar treats the path as relative to the
list of template directories in config. If the template cannot be found relative
to those directories, Templar finally tries the path relative to the current
directory.
If template is None, the publisher effectively becomes a linker and compiler.
destination -- str; path for the destination file.
jinja_env -- jinja2.Environment; if None, a Jinja2 Environment is created with a
FileSystemLoader that is configured with config.template_dirs. Otherwise, the
given Jinja2 Environment is used to retrieve and render the template.
no_write -- bool; if True, the result is not written to a file or printed. If False and
destination is provided, the result is written to the provided destination file.
RETURNS:
str; the result of the publishing pipeline. | Below is the instruction that describes the task:
### Input:
Given a config, performs an end-to-end publishing pipeline and returns the result:
linking -> compiling -> templating -> writing
NOTE: at most one of source and template can be None. If both are None, the publisher
effectively has nothing to do; an exception is raised.
PARAMETERS:
config -- Config; a context that includes variables, compiler options, and templater
information.
source -- str; path to a source file, relative to the current working directory. If None,
the publisher effectively becomes a templating engine.
template -- str; path to a Jinja template file. Templar treats the path as relative to the
list of template directories in config. If the template cannot be found relative
to those directories, Templar finally tries the path relative to the current
directory.
If template is None, the publisher effectively becomes a linker and compiler.
destination -- str; path for the destination file.
jinja_env -- jinja2.Environment; if None, a Jinja2 Environment is created with a
FileSystemLoader that is configured with config.template_dirs. Otherwise, the
given Jinja2 Environment is used to retrieve and render the template.
no_write -- bool; if True, the result is not written to a file or printed. If False and
destination is provided, the result is written to the provided destination file.
RETURNS:
str; the result of the publishing pipeline.
### Response:
def publish(config, source=None, template=None, destination=None, jinja_env=None, no_write=False):
"""Given a config, performs an end-to-end publishing pipeline and returns the result:
linking -> compiling -> templating -> writing
NOTE: at most one of source and template can be None. If both are None, the publisher
effectively has nothing to do; an exception is raised.
PARAMETERS:
config -- Config; a context that includes variables, compiler options, and templater
information.
source -- str; path to a source file, relative to the current working directory. If None,
the publisher effectively becomes a templating engine.
template -- str; path to a Jinja template file. Templar treats the path as relative to the
list of template directories in config. If the template cannot be found relative
to those directories, Templar finally tries the path relative to the current
directory.
If template is None, the publisher effectively becomes a linker and compiler.
destination -- str; path for the destination file.
jinja_env -- jinja2.Environment; if None, a Jinja2 Environment is created with a
FileSystemLoader that is configured with config.template_dirs. Otherwise, the
given Jinja2 Environment is used to retrieve and render the template.
no_write -- bool; if True, the result is not written to a file or printed. If False and
destination is provided, the result is written to the provided destination file.
RETURNS:
str; the result of the publishing pipeline.
"""
if not isinstance(config, Config):
raise PublishError(
"config must be a Config object, "
"but instead was type '{}'".format(type(config).__name__))
if source is None and template is None:
raise PublishError('When publishing, source and template cannot both be omitted.')
variables = config.variables
if source:
# Linking stage.
all_block, extracted_variables = linker.link(source)
variables.update(extracted_variables)
# Compiling stage.
block_variables = {}
for rule in config.rules:
if rule.applies(source, destination):
if isinstance(rule, VariableRule):
variables.update(rule.apply(str(all_block)))
else:
all_block.apply_rule(rule)
block_variables.update(linker.get_block_dict(all_block))
variables['blocks'] = block_variables # Blocks are namespaced with 'blocks'.
# Templating stage.
if template:
if not jinja_env:
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(config.template_dirs))
jinja_template = jinja_env.get_template(template)
result = jinja_template.render(variables)
# Handle recursive evaluation of Jinja expressions.
iterations = 0
while config.recursively_evaluate_jinja_expressions \
and iterations < _MAX_JINJA_RECURSIVE_DEPTH + 1 \
and _jinja_expression_re.search(result):
if iterations == _MAX_JINJA_RECURSIVE_DEPTH:
raise PublishError('\n'.join([
'Recursive Jinja expression evaluation exceeded the allowed '
'number of iterations. Last state of template:',
result]))
jinja_env = jinja2.Environment(loader=jinja2.DictLoader({'intermediate': result}))
jinja_template = jinja_env.get_template('intermediate')
result = jinja_template.render(variables)
iterations += 1
else:
# template is None implies source is not None, so variables['blocks'] must exist.
result = variables['blocks']['all']
# Writing stage.
if not no_write and destination:
destination_dir = os.path.dirname(destination)
if destination_dir != '' and not os.path.isdir(destination_dir):
os.makedirs(destination_dir)
with open(destination, 'w') as f:
f.write(result)
return result |
def _total_jxn_counts(fns):
"""Count the total unique coverage junction for junctions in a set of
SJ.out.tab files."""
df = pd.read_table(fns[0], header=None, names=COLUMN_NAMES)
df.index = (df.chrom + ':' + df.start.astype(int).astype(str) + '-' +
df.end.astype(int).astype(str))
counts = df.unique_junction_reads
for fn in fns[1:]:
df = pd.read_table(fn, header=None, names=COLUMN_NAMES)
df.index = (df.chrom + ':' + df.start.astype(int).astype(str) + '-' +
df.end.astype(int).astype(str))
counts = counts.add(df.unique_junction_reads, fill_value=0)
return counts | Count the total unique coverage junction for junctions in a set of
SJ.out.tab files. | Below is the instruction that describes the task:
### Input:
Count the total unique coverage junction for junctions in a set of
SJ.out.tab files.
### Response:
def _total_jxn_counts(fns):
"""Count the total unique coverage junction for junctions in a set of
SJ.out.tab files."""
df = pd.read_table(fns[0], header=None, names=COLUMN_NAMES)
df.index = (df.chrom + ':' + df.start.astype(int).astype(str) + '-' +
df.end.astype(int).astype(str))
counts = df.unique_junction_reads
for fn in fns[1:]:
df = pd.read_table(fn, header=None, names=COLUMN_NAMES)
df.index = (df.chrom + ':' + df.start.astype(int).astype(str) + '-' +
df.end.astype(int).astype(str))
counts = counts.add(df.unique_junction_reads, fill_value=0)
return counts |
def get_versions(cls, bucket, key, desc=True):
"""Fetch all versions of a specific object.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param desc: Sort results desc if True, asc otherwise.
:returns: The query to execute to fetch all versions.
"""
filters = [
cls.bucket_id == as_bucket_id(bucket),
cls.key == key,
]
order = cls.created.desc() if desc else cls.created.asc()
return cls.query.filter(*filters).order_by(cls.key, order) | Fetch all versions of a specific object.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param desc: Sort results desc if True, asc otherwise.
:returns: The query to execute to fetch all versions. | Below is the instruction that describes the task:
### Input:
Fetch all versions of a specific object.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param desc: Sort results desc if True, asc otherwise.
:returns: The query to execute to fetch all versions.
### Response:
def get_versions(cls, bucket, key, desc=True):
"""Fetch all versions of a specific object.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param desc: Sort results desc if True, asc otherwise.
:returns: The query to execute to fetch all versions.
"""
filters = [
cls.bucket_id == as_bucket_id(bucket),
cls.key == key,
]
order = cls.created.desc() if desc else cls.created.asc()
return cls.query.filter(*filters).order_by(cls.key, order) |
def patch_cmdline_parser():
"""
Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line arguments
for later processing in the :py:class:`law.config.Config`.
"""
# store original functions
_init = luigi.cmdline_parser.CmdlineParser.__init__
# patch init
def __init__(self, cmdline_args):
_init(self, cmdline_args)
self.cmdline_args = cmdline_args
luigi.cmdline_parser.CmdlineParser.__init__ = __init__ | Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line arguments
for later processing in the :py:class:`law.config.Config`. | Below is the instruction that describes the task:
### Input:
Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line arguments
for later processing in the :py:class:`law.config.Config`.
### Response:
def patch_cmdline_parser():
"""
Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line arguments
for later processing in the :py:class:`law.config.Config`.
"""
# store original functions
_init = luigi.cmdline_parser.CmdlineParser.__init__
# patch init
def __init__(self, cmdline_args):
_init(self, cmdline_args)
self.cmdline_args = cmdline_args
luigi.cmdline_parser.CmdlineParser.__init__ = __init__ |
def get_output_tags(self):
"""
Return an escaped string of comma separated tag_name: tag_value pairs
Tags should be sorted by key before being sent for best performance. The sort should
match that from the Go bytes. Compare function (http://golang.org/pkg/bytes/#Compare).
"""
# Sort the tags in lexicographically by tag name
sorted_tags = sorted(self.tags.items())
# Finally render, escape and return the tag string
return u",".join(u"{0}={1}".format(format_string(k), format_string(v)) for k, v in sorted_tags) | Return an escaped string of comma separated tag_name: tag_value pairs
Tags should be sorted by key before being sent for best performance. The sort should
match that from the Go bytes. Compare function (http://golang.org/pkg/bytes/#Compare). | Below is the instruction that describes the task:
### Input:
Return an escaped string of comma separated tag_name: tag_value pairs
Tags should be sorted by key before being sent for best performance. The sort should
match that from the Go bytes. Compare function (http://golang.org/pkg/bytes/#Compare).
### Response:
def get_output_tags(self):
"""
Return an escaped string of comma separated tag_name: tag_value pairs
Tags should be sorted by key before being sent for best performance. The sort should
match that from the Go bytes. Compare function (http://golang.org/pkg/bytes/#Compare).
"""
# Sort the tags in lexicographically by tag name
sorted_tags = sorted(self.tags.items())
# Finally render, escape and return the tag string
return u",".join(u"{0}={1}".format(format_string(k), format_string(v)) for k, v in sorted_tags) |
def build_static(self, *args, **options):
"""
Builds the static files directory as well as robots.txt and favicon.ico
"""
logger.debug("Building static directory")
if self.verbosity > 1:
self.stdout.write("Building static directory")
management.call_command(
"collectstatic",
interactive=False,
verbosity=0
)
# Set the target directory inside the filesystem.
target_dir = path.join(
self.build_dir,
settings.STATIC_URL.lstrip('/')
)
target_dir = smart_text(target_dir)
if os.path.exists(self.static_root) and settings.STATIC_URL:
if getattr(settings, 'BAKERY_GZIP', False):
self.copytree_and_gzip(self.static_root, target_dir)
# if gzip isn't enabled, just copy the tree straight over
else:
logger.debug("Copying {}{} to {}{}".format("osfs://", self.static_root, self.fs_name, target_dir))
copy.copy_dir("osfs:///", self.static_root, self.fs, target_dir)
# If they exist in the static directory, copy the robots.txt
# and favicon.ico files down to the root so they will work
# on the live website.
robots_src = path.join(target_dir, 'robots.txt')
if self.fs.exists(robots_src):
robots_target = path.join(self.build_dir, 'robots.txt')
logger.debug("Copying {}{} to {}{}".format(self.fs_name, robots_src, self.fs_name, robots_target))
self.fs.copy(robots_src, robots_target)
favicon_src = path.join(target_dir, 'favicon.ico')
if self.fs.exists(favicon_src):
favicon_target = path.join(self.build_dir, 'favicon.ico')
logger.debug("Copying {}{} to {}{}".format(self.fs_name, favicon_src, self.fs_name, favicon_target))
self.fs.copy(favicon_src, favicon_target) | Builds the static files directory as well as robots.txt and favicon.ico | Below is the instruction that describes the task:
### Input:
Builds the static files directory as well as robots.txt and favicon.ico
### Response:
def build_static(self, *args, **options):
"""
Builds the static files directory as well as robots.txt and favicon.ico
"""
logger.debug("Building static directory")
if self.verbosity > 1:
self.stdout.write("Building static directory")
management.call_command(
"collectstatic",
interactive=False,
verbosity=0
)
# Set the target directory inside the filesystem.
target_dir = path.join(
self.build_dir,
settings.STATIC_URL.lstrip('/')
)
target_dir = smart_text(target_dir)
if os.path.exists(self.static_root) and settings.STATIC_URL:
if getattr(settings, 'BAKERY_GZIP', False):
self.copytree_and_gzip(self.static_root, target_dir)
# if gzip isn't enabled, just copy the tree straight over
else:
logger.debug("Copying {}{} to {}{}".format("osfs://", self.static_root, self.fs_name, target_dir))
copy.copy_dir("osfs:///", self.static_root, self.fs, target_dir)
# If they exist in the static directory, copy the robots.txt
# and favicon.ico files down to the root so they will work
# on the live website.
robots_src = path.join(target_dir, 'robots.txt')
if self.fs.exists(robots_src):
robots_target = path.join(self.build_dir, 'robots.txt')
logger.debug("Copying {}{} to {}{}".format(self.fs_name, robots_src, self.fs_name, robots_target))
self.fs.copy(robots_src, robots_target)
favicon_src = path.join(target_dir, 'favicon.ico')
if self.fs.exists(favicon_src):
favicon_target = path.join(self.build_dir, 'favicon.ico')
logger.debug("Copying {}{} to {}{}".format(self.fs_name, favicon_src, self.fs_name, favicon_target))
self.fs.copy(favicon_src, favicon_target) |
def mark_pausing(self):
    """Requests that the service move to the Paused state, without waiting for it to do so.
    Raises if the service is not currently in the Running state.
    """
    # The transition is guarded by the instance lock; _set_state(new, required)
    # performs the check-and-set, raising when the current state is not _RUNNING.
    with self._lock:
        self._set_state(self._PAUSING, self._RUNNING) | Requests that the service move to the Paused state, without waiting for it to do so.
Raises if the service is not currently in the Running state. | Below is the instruction that describes the task:
### Input:
Requests that the service move to the Paused state, without waiting for it to do so.
Raises if the service is not currently in the Running state.
### Response:
def mark_pausing(self):
"""Requests that the service move to the Paused state, without waiting for it to do so.
Raises if the service is not currently in the Running state.
"""
with self._lock:
self._set_state(self._PAUSING, self._RUNNING) |
def PDFEmitter(target, source, env):
    """Strips any .aux or .log files from the input source list.
    These are created by the TeX Builder that in all likelihood was
    used to generate the .dvi file we're using as input, and we only
    care about the .dvi file.
    Returns the (target, source) pair expected of an SCons emitter.
    """
    def strip_suffixes(n):
        # Keep every node whose extension is not .aux/.log.
        return not SCons.Util.splitext(str(n))[1] in ['.aux', '.log']
    source = [src for src in source if strip_suffixes(src)]
    return (target, source) | Strips any .aux or .log files from the input source list.
These are created by the TeX Builder that in all likelihood was
used to generate the .dvi file we're using as input, and we only
care about the .dvi file. | Below is the instruction that describes the task:
### Input:
Strips any .aux or .log files from the input source list.
These are created by the TeX Builder that in all likelihood was
used to generate the .dvi file we're using as input, and we only
care about the .dvi file.
### Response:
def PDFEmitter(target, source, env):
"""Strips any .aux or .log files from the input source list.
These are created by the TeX Builder that in all likelihood was
used to generate the .dvi file we're using as input, and we only
care about the .dvi file.
"""
def strip_suffixes(n):
return not SCons.Util.splitext(str(n))[1] in ['.aux', '.log']
source = [src for src in source if strip_suffixes(src)]
return (target, source) |
def check_backup_count_and_state(self, site):
    """Look up basebackups from the object store, prune any extra
    backups (and the WAL that only they needed) and return the
    start-time of the latest backup, or None when there are none."""
    basebackups = self.get_remote_basebackups_info(site)
    self.log.debug("Found %r basebackups", basebackups)
    if basebackups:
        last_backup_time = basebackups[-1]["metadata"]["start-time"]
    else:
        last_backup_time = None
    allowed_basebackup_count = self.config["backup_sites"][site]["basebackup_count"]
    if allowed_basebackup_count is None:
        # No limit configured: keep everything we currently have.
        allowed_basebackup_count = len(basebackups)
    while len(basebackups) > allowed_basebackup_count:
        self.log.warning("Too many basebackups: %d > %d, %r, starting to get rid of %r",
                         len(basebackups), allowed_basebackup_count, basebackups, basebackups[0]["name"])
        # Always prune the oldest backup first.
        basebackup_to_be_deleted = basebackups.pop(0)
        pg_version = basebackup_to_be_deleted["metadata"].get("pg-version")
        last_wal_segment_still_needed = 0
        if basebackups:
            last_wal_segment_still_needed = basebackups[0]["metadata"]["start-wal-segment"]
        if last_wal_segment_still_needed:
            # Drop WAL segments that predate the oldest remaining basebackup.
            self.delete_remote_wal_before(last_wal_segment_still_needed, site, pg_version)
        self.delete_remote_basebackup(site, basebackup_to_be_deleted["name"], basebackup_to_be_deleted["metadata"])
    self.state["backup_sites"][site]["basebackups"] = basebackups
    return last_backup_time | Look up basebackups from the object store, prune any extra
backups and return the datetime of the latest backup. | Below is the instruction that describes the task:
### Input:
Look up basebackups from the object store, prune any extra
backups and return the datetime of the latest backup.
### Response:
def check_backup_count_and_state(self, site):
"""Look up basebackups from the object store, prune any extra
backups and return the datetime of the latest backup."""
basebackups = self.get_remote_basebackups_info(site)
self.log.debug("Found %r basebackups", basebackups)
if basebackups:
last_backup_time = basebackups[-1]["metadata"]["start-time"]
else:
last_backup_time = None
allowed_basebackup_count = self.config["backup_sites"][site]["basebackup_count"]
if allowed_basebackup_count is None:
allowed_basebackup_count = len(basebackups)
while len(basebackups) > allowed_basebackup_count:
self.log.warning("Too many basebackups: %d > %d, %r, starting to get rid of %r",
len(basebackups), allowed_basebackup_count, basebackups, basebackups[0]["name"])
basebackup_to_be_deleted = basebackups.pop(0)
pg_version = basebackup_to_be_deleted["metadata"].get("pg-version")
last_wal_segment_still_needed = 0
if basebackups:
last_wal_segment_still_needed = basebackups[0]["metadata"]["start-wal-segment"]
if last_wal_segment_still_needed:
self.delete_remote_wal_before(last_wal_segment_still_needed, site, pg_version)
self.delete_remote_basebackup(site, basebackup_to_be_deleted["name"], basebackup_to_be_deleted["metadata"])
self.state["backup_sites"][site]["basebackups"] = basebackups
return last_backup_time |
def get_standard_form(self, data):
    """Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation.
    data : a text in the given scheme.
    """
    if self.synonym_map is None:
        return data
    # Imported inside the function -- presumably to avoid a circular
    # import at module load time; confirm before moving it to the top.
    from indic_transliteration import sanscript
    # Round-trip through devanAgarI so synonymous spellings collapse to one form.
    return sanscript.transliterate(data=sanscript.transliterate(_from=self.name, _to=sanscript.DEVANAGARI, data=data), _from=sanscript.DEVANAGARI, _to=self.name) | Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation.
data : a text in the given scheme. | Below is the instruction that describes the task:
### Input:
Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation.
data : a text in the given scheme.
### Response:
def get_standard_form(self, data):
"""Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation.
data : a text in the given scheme.
"""
if self.synonym_map is None:
return data
from indic_transliteration import sanscript
return sanscript.transliterate(data=sanscript.transliterate(_from=self.name, _to=sanscript.DEVANAGARI, data=data), _from=sanscript.DEVANAGARI, _to=self.name) |
def invite_user(self, new_member, inviter=None, roster=None):
    """Invites a new member to the chatroom.
    new_member : dict with at least a 'JID' key; mutated in place with a
        'STATUS' of 'ACTIVE' or 'INVITED' and a default 'NICK'.
    inviter : optional JID to notify when the user is already a member.
    roster : optional pre-fetched roster; fetched from the client if omitted.
    """
    roster = roster or self.client.getRoster()
    jid = new_member['JID']
    logger.info('roster %s %s' % (jid, roster.getSubscription(jid)))
    # Already subscribed ('both'/'to') means the user is effectively a member.
    if jid in roster.keys() and roster.getSubscription(jid) in ['both', 'to']:
        new_member['STATUS'] = 'ACTIVE'
        if inviter:
            self.send_message('%s is already a member' % (jid,), inviter)
    else:
        new_member['STATUS'] = 'INVITED'
        self.broadcast('inviting %s to the room' % (jid,))
        #Add nickname according to http://xmpp.org/extensions/xep-0172.html
        subscribe_presence = xmpp.dispatcher.Presence(to=jid, typ='subscribe')
        if 'NICK' in self.params:
            subscribe_presence.addChild(name='nick', namespace=xmpp.protocol.NS_NICK, payload=self.params['NICK'])
        self.client.send(subscribe_presence)
    if not self.is_member(new_member):
        # Default the nickname to the JID's local part.
        new_member.setdefault('NICK', jid.split('@')[0])
        self.params['MEMBERS'].append(new_member) | Invites a new member to the chatroom | Below is the the instruction that describes the task:
### Input:
Invites a new member to the chatroom
### Response:
def invite_user(self, new_member, inviter=None, roster=None):
"""Invites a new member to the chatroom"""
roster = roster or self.client.getRoster()
jid = new_member['JID']
logger.info('roster %s %s' % (jid, roster.getSubscription(jid)))
if jid in roster.keys() and roster.getSubscription(jid) in ['both', 'to']:
new_member['STATUS'] = 'ACTIVE'
if inviter:
self.send_message('%s is already a member' % (jid,), inviter)
else:
new_member['STATUS'] = 'INVITED'
self.broadcast('inviting %s to the room' % (jid,))
#Add nickname according to http://xmpp.org/extensions/xep-0172.html
subscribe_presence = xmpp.dispatcher.Presence(to=jid, typ='subscribe')
if 'NICK' in self.params:
subscribe_presence.addChild(name='nick', namespace=xmpp.protocol.NS_NICK, payload=self.params['NICK'])
self.client.send(subscribe_presence)
if not self.is_member(new_member):
new_member.setdefault('NICK', jid.split('@')[0])
self.params['MEMBERS'].append(new_member) |
def get_user_details(self, response):
    """Return user details from GitHub account"""
    # NOTE(review): despite the docstring, the payload shape
    # (account['json_metadata'] -> profile.name) looks like a Steem-style
    # blockchain account rather than GitHub -- confirm against the backend.
    account = response['account']
    # json_metadata arrives as a JSON string (possibly empty/None); decode
    # it and store the parsed dict back onto the account in place.
    metadata = json.loads(account.get('json_metadata') or '{}')
    account['json_metadata'] = metadata
    return {
        'id': account['id'],
        'username': account['name'],
        'name': metadata.get("profile", {}).get('name', ''),
        'account': account,
    } | Return user details from GitHub account | Below is the the instruction that describes the task:
### Input:
Return user details from GitHub account
### Response:
def get_user_details(self, response):
"""Return user details from GitHub account"""
account = response['account']
metadata = json.loads(account.get('json_metadata') or '{}')
account['json_metadata'] = metadata
return {
'id': account['id'],
'username': account['name'],
'name': metadata.get("profile", {}).get('name', ''),
'account': account,
} |
def get_referenced_object(self):
"""
:rtype: core.BunqModel
:raise: BunqException
"""
if self._BillingInvoice is not None:
return self._BillingInvoice
if self._DraftPayment is not None:
return self._DraftPayment
if self._MasterCardAction is not None:
return self._MasterCardAction
if self._Payment is not None:
return self._Payment
if self._PaymentBatch is not None:
return self._PaymentBatch
if self._RequestResponse is not None:
return self._RequestResponse
if self._ScheduleInstance is not None:
return self._ScheduleInstance
if self._TabResultResponse is not None:
return self._TabResultResponse
if self._WhitelistResult is not None:
return self._WhitelistResult
raise exception.BunqException(self._ERROR_NULL_FIELDS) | :rtype: core.BunqModel
:raise: BunqException | Below is the instruction that describes the task:
### Input:
:rtype: core.BunqModel
:raise: BunqException
### Response:
def get_referenced_object(self):
"""
:rtype: core.BunqModel
:raise: BunqException
"""
if self._BillingInvoice is not None:
return self._BillingInvoice
if self._DraftPayment is not None:
return self._DraftPayment
if self._MasterCardAction is not None:
return self._MasterCardAction
if self._Payment is not None:
return self._Payment
if self._PaymentBatch is not None:
return self._PaymentBatch
if self._RequestResponse is not None:
return self._RequestResponse
if self._ScheduleInstance is not None:
return self._ScheduleInstance
if self._TabResultResponse is not None:
return self._TabResultResponse
if self._WhitelistResult is not None:
return self._WhitelistResult
raise exception.BunqException(self._ERROR_NULL_FIELDS) |
def exit_and_fail(self, msg=None, out=None):
    """Exits the runtime with a nonzero exit code, indicating failure.
    :param msg: A string message to print to stderr or another custom file descriptor before exiting.
    (Optional)
    :param out: The file descriptor to emit `msg` to. (Optional)
    """
    # Thin wrapper over exit() with the canonical failure exit code.
    self.exit(result=PANTS_FAILED_EXIT_CODE, msg=msg, out=out) | Exits the runtime with a nonzero exit code, indicating failure.
:param msg: A string message to print to stderr or another custom file descriptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional) | Below is the instruction that describes the task:
### Input:
Exits the runtime with a nonzero exit code, indicating failure.
:param msg: A string message to print to stderr or another custom file descriptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional)
### Response:
def exit_and_fail(self, msg=None, out=None):
"""Exits the runtime with a nonzero exit code, indicating failure.
:param msg: A string message to print to stderr or another custom file descriptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional)
"""
self.exit(result=PANTS_FAILED_EXIT_CODE, msg=msg, out=out) |
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context
    :returns: StyleSheetContext for this StyleSheetInstance
    :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
    """
    # Lazily construct the context on first use and cache it thereafter.
    if self._context is None:
        self._context = StyleSheetContext(self._version, assistant_sid=self._solution['assistant_sid'], )
    return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: StyleSheetContext for this StyleSheetInstance
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext | Below is the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: StyleSheetContext for this StyleSheetInstance
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: StyleSheetContext for this StyleSheetInstance
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
"""
if self._context is None:
self._context = StyleSheetContext(self._version, assistant_sid=self._solution['assistant_sid'], )
return self._context |
def read_structure(self, lpBaseAddress, stype):
    """
    Reads a ctypes structure from the memory of the process.
    @see: L{read}
    @type lpBaseAddress: int
    @param lpBaseAddress: Memory address to begin reading.
    @type stype: class ctypes.Structure or a subclass.
    @param stype: Structure definition.
    @rtype: int
    @return: Structure instance filled in with data
    read from the process memory.
    @raise WindowsError: On error an exception is raised.
    """
    # NOTE(review): `long` only exists on Python 2; on Python 3 this check
    # raises NameError. Confirm the target interpreter before reusing.
    if type(lpBaseAddress) not in (type(0), type(long(0))):
        lpBaseAddress = ctypes.cast(lpBaseAddress, ctypes.c_void_p)
    # Copy sizeof(stype) raw bytes out of the process, then reinterpret
    # the buffer as an instance of `stype`.
    data = self.read(lpBaseAddress, ctypes.sizeof(stype))
    buff = ctypes.create_string_buffer(data)
    ptr = ctypes.cast(ctypes.pointer(buff), ctypes.POINTER(stype))
    return ptr.contents | Reads a ctypes structure from the memory of the process.
@see: L{read}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type stype: class ctypes.Structure or a subclass.
@param stype: Structure definition.
@rtype: int
@return: Structure instance filled in with data
read from the process memory.
@raise WindowsError: On error an exception is raised. | Below is the instruction that describes the task:
### Input:
Reads a ctypes structure from the memory of the process.
@see: L{read}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type stype: class ctypes.Structure or a subclass.
@param stype: Structure definition.
@rtype: int
@return: Structure instance filled in with data
read from the process memory.
@raise WindowsError: On error an exception is raised.
### Response:
def read_structure(self, lpBaseAddress, stype):
"""
Reads a ctypes structure from the memory of the process.
@see: L{read}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type stype: class ctypes.Structure or a subclass.
@param stype: Structure definition.
@rtype: int
@return: Structure instance filled in with data
read from the process memory.
@raise WindowsError: On error an exception is raised.
"""
if type(lpBaseAddress) not in (type(0), type(long(0))):
lpBaseAddress = ctypes.cast(lpBaseAddress, ctypes.c_void_p)
data = self.read(lpBaseAddress, ctypes.sizeof(stype))
buff = ctypes.create_string_buffer(data)
ptr = ctypes.cast(ctypes.pointer(buff), ctypes.POINTER(stype))
return ptr.contents |
def validate_template_name(self, key, value):
    """Validate template name.
    :param key: The template path.
    :param value: The template name.
    :raises ValueError: If template name is wrong.
    :returns: The validated template name unchanged.
    """
    # Only names registered in the PAGES_TEMPLATES setting are accepted.
    if value not in dict(current_app.config['PAGES_TEMPLATES']):
        raise ValueError(
            'Template "{0}" does not exist.'.format(value))
    return value | Validate template name.
:param key: The template path.
:param value: The template name.
:raises ValueError: If template name is wrong. | Below is the instruction that describes the task:
### Input:
Validate template name.
:param key: The template path.
:param value: The template name.
:raises ValueError: If template name is wrong.
### Response:
def validate_template_name(self, key, value):
"""Validate template name.
:param key: The template path.
:param value: The template name.
:raises ValueError: If template name is wrong.
"""
if value not in dict(current_app.config['PAGES_TEMPLATES']):
raise ValueError(
'Template "{0}" does not exist.'.format(value))
return value |
def save(self, *args, **kwargs):
    """sets uuid for url
    :param args: inline arguments (optional)
    :param kwargs: keyword arguments (optional)
    :return: `super.save()`
    """
    # Generate the UUID only on first save so the obfuscated URL stays stable.
    if not self.id: # this is a totally new instance, create uuid value
        self.url_uuid = str(uuid.uuid4()).replace("-", "")
    super(ObfuscatedUrlInfo, self).save(*args, **kwargs) | sets uuid for url
:param args: inline arguments (optional)
:param kwargs: keyword arguments (optional)
:return: `super.save()` | Below is the instruction that describes the task:
### Input:
sets uuid for url
:param args: inline arguments (optional)
:param kwargs: keyword arguments (optional)
:return: `super.save()`
### Response:
def save(self, *args, **kwargs):
"""sets uuid for url
:param args: inline arguments (optional)
:param kwargs: keyword arguments (optional)
:return: `super.save()`
"""
if not self.id: # this is a totally new instance, create uuid value
self.url_uuid = str(uuid.uuid4()).replace("-", "")
super(ObfuscatedUrlInfo, self).save(*args, **kwargs) |
def prepare_replicant_order_object(manager, snapshot_schedule, location,
                                   tier, volume, volume_type):
    """Prepare the order object which is submitted to the placeOrder() method
    :param manager: The File or Block manager calling this function
    :param snapshot_schedule: The primary volume's snapshot
    schedule to use for replication
    :param location: The location for the ordered replicant volume
    :param tier: The tier (IOPS per GB) of the primary volume
    :param volume: The primary volume as a SoftLayer_Network_Storage object
    :param volume_type: The type of the primary volume ('file' or 'block')
    :return: Returns the order object for the
    Product_Order service's placeOrder() method
    :raises SoftLayerError: when the volume (or its snapshot space) is set
    for cancellation, the datacenter/category/storage-type is invalid, or
    snapshot capacity is missing.
    """
    # Ensure the primary volume and snapshot space are not set for cancellation
    if 'billingItem' not in volume\
            or volume['billingItem']['cancellationDate'] != '':
        raise exceptions.SoftLayerError(
            'This volume is set for cancellation; '
            'unable to order replicant volume')
    for child in volume['billingItem']['activeChildren']:
        if child['categoryCode'] == 'storage_snapshot_space'\
                and child['cancellationDate'] != '':
            raise exceptions.SoftLayerError(
                'The snapshot space for this volume is set for '
                'cancellation; unable to order replicant volume')
    # Find the ID for the requested location
    try:
        location_id = get_location_id(manager, location)
    except ValueError:
        raise exceptions.SoftLayerError(
            "Invalid datacenter name specified. "
            "Please provide the lower case short name (e.g.: dal09)")
    # Get sizes and properties needed for the order
    volume_size = int(volume['capacityGb'])
    billing_item_category_code = volume['billingItem']['categoryCode']
    if billing_item_category_code == 'storage_as_a_service':
        order_type_is_saas = True
    elif billing_item_category_code == 'storage_service_enterprise':
        order_type_is_saas = False
    else:
        raise exceptions.SoftLayerError(
            "A replicant volume cannot be ordered for a primary volume with a "
            "billing item category code of '%s'" % billing_item_category_code)
    if 'snapshotCapacityGb' in volume:
        snapshot_size = int(volume['snapshotCapacityGb'])
    else:
        raise exceptions.SoftLayerError(
            "Snapshot capacity not found for the given primary volume")
    snapshot_schedule_id = find_snapshot_schedule_id(
        volume,
        'SNAPSHOT_' + snapshot_schedule
    )
    # Use the volume's billing item category code to get the product package
    package = get_package(manager, billing_item_category_code)
    # Find prices based on the primary volume's type and billing item category
    if order_type_is_saas: # 'storage_as_a_service' package
        complex_type = 'SoftLayer_Container_Product_Order_'\
                       'Network_Storage_AsAService'
        volume_storage_type = volume['storageType']['keyName']
        if 'ENDURANCE' in volume_storage_type:
            volume_is_performance = False
            if tier is None:
                # Derive the tier from the primary volume when not given.
                tier = find_endurance_tier_iops_per_gb(volume)
            prices = [
                find_price_by_category(package, billing_item_category_code),
                find_price_by_category(package, 'storage_' + volume_type),
                find_saas_endurance_space_price(package, volume_size, tier),
                find_saas_endurance_tier_price(package, tier),
                find_saas_snapshot_space_price(
                    package, snapshot_size, tier=tier),
                find_saas_replication_price(package, tier=tier)
            ]
        elif 'PERFORMANCE' in volume_storage_type:
            if not _staas_version_is_v2_or_above(volume):
                raise exceptions.SoftLayerError(
                    "A replica volume cannot be ordered for this performance "
                    "volume since it does not support Encryption at Rest.")
            volume_is_performance = True
            iops = int(volume['provisionedIops'])
            prices = [
                find_price_by_category(package, billing_item_category_code),
                find_price_by_category(package, 'storage_' + volume_type),
                find_saas_perform_space_price(package, volume_size),
                find_saas_perform_iops_price(package, volume_size, iops),
                find_saas_snapshot_space_price(
                    package, snapshot_size, iops=iops),
                find_saas_replication_price(package, iops=iops)
            ]
        else:
            raise exceptions.SoftLayerError(
                "Storage volume does not have a valid storage type "
                "(with an appropriate keyName to indicate the "
                "volume is a PERFORMANCE or an ENDURANCE volume)")
    else: # 'storage_service_enterprise' package
        complex_type = 'SoftLayer_Container_Product_Order_'\
                       'Network_Storage_Enterprise'
        volume_is_performance = False
        if tier is None:
            tier = find_endurance_tier_iops_per_gb(volume)
        prices = [
            find_price_by_category(package, billing_item_category_code),
            find_price_by_category(package, 'storage_' + volume_type),
            find_ent_space_price(package, 'endurance', volume_size, tier),
            find_ent_endurance_tier_price(package, tier),
            find_ent_space_price(package, 'snapshot', snapshot_size, tier),
            find_ent_space_price(package, 'replication', volume_size, tier)
        ]
    # Determine if hourly billing should be used
    hourly_billing_flag = utils.lookup(volume, 'billingItem', 'hourlyFlag')
    if hourly_billing_flag is None:
        hourly_billing_flag = False
    # Build and return the order object
    replicant_order = {
        'complexType': complex_type,
        'packageId': package['id'],
        'prices': prices,
        'quantity': 1,
        'location': location_id,
        'originVolumeId': volume['id'],
        'originVolumeScheduleId': snapshot_schedule_id,
        'useHourlyPricing': hourly_billing_flag
    }
    if order_type_is_saas:
        # SaaS orders carry the size (and IOPS for performance volumes) inline.
        replicant_order['volumeSize'] = volume_size
        if volume_is_performance:
            replicant_order['iops'] = iops
    return replicant_order | Prepare the order object which is submitted to the placeOrder() method
:param manager: The File or Block manager calling this function
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:param volume: The primary volume as a SoftLayer_Network_Storage object
:param volume_type: The type of the primary volume ('file' or 'block')
:return: Returns the order object for the
Product_Order service's placeOrder() method | Below is the instruction that describes the task:
### Input:
Prepare the order object which is submitted to the placeOrder() method
:param manager: The File or Block manager calling this function
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:param volume: The primary volume as a SoftLayer_Network_Storage object
:param volume_type: The type of the primary volume ('file' or 'block')
:return: Returns the order object for the
Product_Order service's placeOrder() method
### Response:
def prepare_replicant_order_object(manager, snapshot_schedule, location,
tier, volume, volume_type):
"""Prepare the order object which is submitted to the placeOrder() method
:param manager: The File or Block manager calling this function
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:param volume: The primary volume as a SoftLayer_Network_Storage object
:param volume_type: The type of the primary volume ('file' or 'block')
:return: Returns the order object for the
Product_Order service's placeOrder() method
"""
# Ensure the primary volume and snapshot space are not set for cancellation
if 'billingItem' not in volume\
or volume['billingItem']['cancellationDate'] != '':
raise exceptions.SoftLayerError(
'This volume is set for cancellation; '
'unable to order replicant volume')
for child in volume['billingItem']['activeChildren']:
if child['categoryCode'] == 'storage_snapshot_space'\
and child['cancellationDate'] != '':
raise exceptions.SoftLayerError(
'The snapshot space for this volume is set for '
'cancellation; unable to order replicant volume')
# Find the ID for the requested location
try:
location_id = get_location_id(manager, location)
except ValueError:
raise exceptions.SoftLayerError(
"Invalid datacenter name specified. "
"Please provide the lower case short name (e.g.: dal09)")
# Get sizes and properties needed for the order
volume_size = int(volume['capacityGb'])
billing_item_category_code = volume['billingItem']['categoryCode']
if billing_item_category_code == 'storage_as_a_service':
order_type_is_saas = True
elif billing_item_category_code == 'storage_service_enterprise':
order_type_is_saas = False
else:
raise exceptions.SoftLayerError(
"A replicant volume cannot be ordered for a primary volume with a "
"billing item category code of '%s'" % billing_item_category_code)
if 'snapshotCapacityGb' in volume:
snapshot_size = int(volume['snapshotCapacityGb'])
else:
raise exceptions.SoftLayerError(
"Snapshot capacity not found for the given primary volume")
snapshot_schedule_id = find_snapshot_schedule_id(
volume,
'SNAPSHOT_' + snapshot_schedule
)
# Use the volume's billing item category code to get the product package
package = get_package(manager, billing_item_category_code)
# Find prices based on the primary volume's type and billing item category
if order_type_is_saas: # 'storage_as_a_service' package
complex_type = 'SoftLayer_Container_Product_Order_'\
'Network_Storage_AsAService'
volume_storage_type = volume['storageType']['keyName']
if 'ENDURANCE' in volume_storage_type:
volume_is_performance = False
if tier is None:
tier = find_endurance_tier_iops_per_gb(volume)
prices = [
find_price_by_category(package, billing_item_category_code),
find_price_by_category(package, 'storage_' + volume_type),
find_saas_endurance_space_price(package, volume_size, tier),
find_saas_endurance_tier_price(package, tier),
find_saas_snapshot_space_price(
package, snapshot_size, tier=tier),
find_saas_replication_price(package, tier=tier)
]
elif 'PERFORMANCE' in volume_storage_type:
if not _staas_version_is_v2_or_above(volume):
raise exceptions.SoftLayerError(
"A replica volume cannot be ordered for this performance "
"volume since it does not support Encryption at Rest.")
volume_is_performance = True
iops = int(volume['provisionedIops'])
prices = [
find_price_by_category(package, billing_item_category_code),
find_price_by_category(package, 'storage_' + volume_type),
find_saas_perform_space_price(package, volume_size),
find_saas_perform_iops_price(package, volume_size, iops),
find_saas_snapshot_space_price(
package, snapshot_size, iops=iops),
find_saas_replication_price(package, iops=iops)
]
else:
raise exceptions.SoftLayerError(
"Storage volume does not have a valid storage type "
"(with an appropriate keyName to indicate the "
"volume is a PERFORMANCE or an ENDURANCE volume)")
else: # 'storage_service_enterprise' package
complex_type = 'SoftLayer_Container_Product_Order_'\
'Network_Storage_Enterprise'
volume_is_performance = False
if tier is None:
tier = find_endurance_tier_iops_per_gb(volume)
prices = [
find_price_by_category(package, billing_item_category_code),
find_price_by_category(package, 'storage_' + volume_type),
find_ent_space_price(package, 'endurance', volume_size, tier),
find_ent_endurance_tier_price(package, tier),
find_ent_space_price(package, 'snapshot', snapshot_size, tier),
find_ent_space_price(package, 'replication', volume_size, tier)
]
# Determine if hourly billing should be used
hourly_billing_flag = utils.lookup(volume, 'billingItem', 'hourlyFlag')
if hourly_billing_flag is None:
hourly_billing_flag = False
# Build and return the order object
replicant_order = {
'complexType': complex_type,
'packageId': package['id'],
'prices': prices,
'quantity': 1,
'location': location_id,
'originVolumeId': volume['id'],
'originVolumeScheduleId': snapshot_schedule_id,
'useHourlyPricing': hourly_billing_flag
}
if order_type_is_saas:
replicant_order['volumeSize'] = volume_size
if volume_is_performance:
replicant_order['iops'] = iops
return replicant_order |
def full_path_from_dirrecord(self, rec, rockridge=False):
# type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str
'''
A method to get the absolute path of a directory record.
Parameters:
rec - The directory record to get the full path for.
rockridge - Whether to get the rock ridge full path.
Returns:
A string representing the absolute path to the file on the ISO.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
ret = b''
if isinstance(rec, dr.DirectoryRecord):
encoding = 'utf-8'
if self.joliet_vd is not None and id(rec.vd) == id(self.joliet_vd):
encoding = 'utf-16_be'
slash = '/'.encode(encoding)
# A root entry has no Rock Ridge entry, even on a Rock Ridge ISO. Just
# always return / here.
if rec.is_root:
return '/'
if rockridge and rec.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidInput('Cannot generate a Rock Ridge path on a non-Rock Ridge ISO')
parent = rec # type: Optional[dr.DirectoryRecord]
while parent is not None:
if not parent.is_root:
if rockridge and parent.rock_ridge is not None:
ret = slash + parent.rock_ridge.name() + ret
else:
ret = slash + parent.file_identifier() + ret
parent = parent.parent
else:
if rec.parent is None:
return '/'
if rec.file_ident is not None:
encoding = rec.file_ident.encoding
else:
encoding = 'utf-8'
slash = '/'.encode(encoding)
udfparent = rec # type: Optional[udfmod.UDFFileEntry]
while udfparent is not None:
ident = udfparent.file_identifier()
if ident != b'/':
ret = slash + ident + ret
udfparent = udfparent.parent
if sys.version_info >= (3, 0):
# Python 3, just return the encoded version
return ret.decode(encoding)
# Python 2.
return ret.decode(encoding).encode('utf-8') | A method to get the absolute path of a directory record.
Parameters:
rec - The directory record to get the full path for.
rockridge - Whether to get the rock ridge full path.
Returns:
A string representing the absolute path to the file on the ISO. | Below is the the instruction that describes the task:
### Input:
A method to get the absolute path of a directory record.
Parameters:
rec - The directory record to get the full path for.
rockridge - Whether to get the rock ridge full path.
Returns:
A string representing the absolute path to the file on the ISO.
### Response:
def full_path_from_dirrecord(self, rec, rockridge=False):
# type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str
'''
A method to get the absolute path of a directory record.
Parameters:
rec - The directory record to get the full path for.
rockridge - Whether to get the rock ridge full path.
Returns:
A string representing the absolute path to the file on the ISO.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
ret = b''
if isinstance(rec, dr.DirectoryRecord):
encoding = 'utf-8'
if self.joliet_vd is not None and id(rec.vd) == id(self.joliet_vd):
encoding = 'utf-16_be'
slash = '/'.encode(encoding)
# A root entry has no Rock Ridge entry, even on a Rock Ridge ISO. Just
# always return / here.
if rec.is_root:
return '/'
if rockridge and rec.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidInput('Cannot generate a Rock Ridge path on a non-Rock Ridge ISO')
parent = rec # type: Optional[dr.DirectoryRecord]
while parent is not None:
if not parent.is_root:
if rockridge and parent.rock_ridge is not None:
ret = slash + parent.rock_ridge.name() + ret
else:
ret = slash + parent.file_identifier() + ret
parent = parent.parent
else:
if rec.parent is None:
return '/'
if rec.file_ident is not None:
encoding = rec.file_ident.encoding
else:
encoding = 'utf-8'
slash = '/'.encode(encoding)
udfparent = rec # type: Optional[udfmod.UDFFileEntry]
while udfparent is not None:
ident = udfparent.file_identifier()
if ident != b'/':
ret = slash + ident + ret
udfparent = udfparent.parent
if sys.version_info >= (3, 0):
# Python 3, just return the encoded version
return ret.decode(encoding)
# Python 2.
return ret.decode(encoding).encode('utf-8') |
def digest_auth(
qop=None, user="user", passwd="passwd", algorithm="MD5", stale_after="never"
):
"""Prompts the user for authorization using Digest Auth + Algorithm.
allow settings the stale_after argument.
---
tags:
- Auth
parameters:
- in: path
name: qop
type: string
description: auth or auth-int
- in: path
name: user
type: string
- in: path
name: passwd
type: string
- in: path
name: algorithm
type: string
description: MD5, SHA-256, SHA-512
default: MD5
- in: path
name: stale_after
type: string
default: never
produces:
- application/json
responses:
200:
description: Sucessful authentication.
401:
description: Unsuccessful authentication.
"""
require_cookie_handling = request.args.get("require-cookie", "").lower() in (
"1",
"t",
"true",
)
if algorithm not in ("MD5", "SHA-256", "SHA-512"):
algorithm = "MD5"
if qop not in ("auth", "auth-int"):
qop = None
authorization = request.headers.get("Authorization")
credentials = None
if authorization:
credentials = parse_authorization_header(authorization)
if (
not authorization
or not credentials
or credentials.type.lower() != "digest"
or (require_cookie_handling and "Cookie" not in request.headers)
):
response = digest_challenge_response(app, qop, algorithm)
response.set_cookie("stale_after", value=stale_after)
response.set_cookie("fake", value="fake_value")
return response
if require_cookie_handling and request.cookies.get("fake") != "fake_value":
response = jsonify({"errors": ["missing cookie set on challenge"]})
response.set_cookie("fake", value="fake_value")
response.status_code = 403
return response
current_nonce = credentials.get("nonce")
stale_after_value = None
if "stale_after" in request.cookies:
stale_after_value = request.cookies.get("stale_after")
if (
"last_nonce" in request.cookies
and current_nonce == request.cookies.get("last_nonce")
or stale_after_value == "0"
):
response = digest_challenge_response(app, qop, algorithm, True)
response.set_cookie("stale_after", value=stale_after)
response.set_cookie("last_nonce", value=current_nonce)
response.set_cookie("fake", value="fake_value")
return response
if not check_digest_auth(user, passwd):
response = digest_challenge_response(app, qop, algorithm, False)
response.set_cookie("stale_after", value=stale_after)
response.set_cookie("last_nonce", value=current_nonce)
response.set_cookie("fake", value="fake_value")
return response
response = jsonify(authenticated=True, user=user)
response.set_cookie("fake", value="fake_value")
if stale_after_value:
response.set_cookie(
"stale_after", value=next_stale_after_value(stale_after_value)
)
return response | Prompts the user for authorization using Digest Auth + Algorithm.
allow settings the stale_after argument.
---
tags:
- Auth
parameters:
- in: path
name: qop
type: string
description: auth or auth-int
- in: path
name: user
type: string
- in: path
name: passwd
type: string
- in: path
name: algorithm
type: string
description: MD5, SHA-256, SHA-512
default: MD5
- in: path
name: stale_after
type: string
default: never
produces:
- application/json
responses:
200:
description: Sucessful authentication.
401:
description: Unsuccessful authentication. | Below is the the instruction that describes the task:
### Input:
Prompts the user for authorization using Digest Auth + Algorithm.
allow settings the stale_after argument.
---
tags:
- Auth
parameters:
- in: path
name: qop
type: string
description: auth or auth-int
- in: path
name: user
type: string
- in: path
name: passwd
type: string
- in: path
name: algorithm
type: string
description: MD5, SHA-256, SHA-512
default: MD5
- in: path
name: stale_after
type: string
default: never
produces:
- application/json
responses:
200:
description: Sucessful authentication.
401:
description: Unsuccessful authentication.
### Response:
def digest_auth(
qop=None, user="user", passwd="passwd", algorithm="MD5", stale_after="never"
):
"""Prompts the user for authorization using Digest Auth + Algorithm.
allow settings the stale_after argument.
---
tags:
- Auth
parameters:
- in: path
name: qop
type: string
description: auth or auth-int
- in: path
name: user
type: string
- in: path
name: passwd
type: string
- in: path
name: algorithm
type: string
description: MD5, SHA-256, SHA-512
default: MD5
- in: path
name: stale_after
type: string
default: never
produces:
- application/json
responses:
200:
description: Sucessful authentication.
401:
description: Unsuccessful authentication.
"""
require_cookie_handling = request.args.get("require-cookie", "").lower() in (
"1",
"t",
"true",
)
if algorithm not in ("MD5", "SHA-256", "SHA-512"):
algorithm = "MD5"
if qop not in ("auth", "auth-int"):
qop = None
authorization = request.headers.get("Authorization")
credentials = None
if authorization:
credentials = parse_authorization_header(authorization)
if (
not authorization
or not credentials
or credentials.type.lower() != "digest"
or (require_cookie_handling and "Cookie" not in request.headers)
):
response = digest_challenge_response(app, qop, algorithm)
response.set_cookie("stale_after", value=stale_after)
response.set_cookie("fake", value="fake_value")
return response
if require_cookie_handling and request.cookies.get("fake") != "fake_value":
response = jsonify({"errors": ["missing cookie set on challenge"]})
response.set_cookie("fake", value="fake_value")
response.status_code = 403
return response
current_nonce = credentials.get("nonce")
stale_after_value = None
if "stale_after" in request.cookies:
stale_after_value = request.cookies.get("stale_after")
if (
"last_nonce" in request.cookies
and current_nonce == request.cookies.get("last_nonce")
or stale_after_value == "0"
):
response = digest_challenge_response(app, qop, algorithm, True)
response.set_cookie("stale_after", value=stale_after)
response.set_cookie("last_nonce", value=current_nonce)
response.set_cookie("fake", value="fake_value")
return response
if not check_digest_auth(user, passwd):
response = digest_challenge_response(app, qop, algorithm, False)
response.set_cookie("stale_after", value=stale_after)
response.set_cookie("last_nonce", value=current_nonce)
response.set_cookie("fake", value="fake_value")
return response
response = jsonify(authenticated=True, user=user)
response.set_cookie("fake", value="fake_value")
if stale_after_value:
response.set_cookie(
"stale_after", value=next_stale_after_value(stale_after_value)
)
return response |
def sink(wrapped):
"""Creates an SPL operator with a single input port.
A SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead.
"""
if not inspect.isfunction(wrapped):
raise TypeError('A function is required')
return _wrapforsplop(_OperatorType.Sink, wrapped, 'position', False) | Creates an SPL operator with a single input port.
A SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead. | Below is the the instruction that describes the task:
### Input:
Creates an SPL operator with a single input port.
A SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead.
### Response:
def sink(wrapped):
"""Creates an SPL operator with a single input port.
A SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead.
"""
if not inspect.isfunction(wrapped):
raise TypeError('A function is required')
return _wrapforsplop(_OperatorType.Sink, wrapped, 'position', False) |
def save(self, *args, **kwargs):
"""Auto-generate a slug from the name."""
self._create_slug()
self._create_date_slug()
self._render_content()
# Call ``_set_published`` the *first* time this Entry is published.
# NOTE: if this is unpublished, and then republished, this method won't
# get called; e.g. the date won't get changed and the
# ``entry_published`` signal won't get re-sent.
send_published_signal = False
if self.published and self.published_on is None:
send_published_signal = self._set_published()
super(Entry, self).save(*args, **kwargs)
# We need an ID before we can send this signal.
if send_published_signal:
entry_published.send(sender=self, entry=self) | Auto-generate a slug from the name. | Below is the the instruction that describes the task:
### Input:
Auto-generate a slug from the name.
### Response:
def save(self, *args, **kwargs):
"""Auto-generate a slug from the name."""
self._create_slug()
self._create_date_slug()
self._render_content()
# Call ``_set_published`` the *first* time this Entry is published.
# NOTE: if this is unpublished, and then republished, this method won't
# get called; e.g. the date won't get changed and the
# ``entry_published`` signal won't get re-sent.
send_published_signal = False
if self.published and self.published_on is None:
send_published_signal = self._set_published()
super(Entry, self).save(*args, **kwargs)
# We need an ID before we can send this signal.
if send_published_signal:
entry_published.send(sender=self, entry=self) |
def from_NoteContainer(notes, width=80, tuning=None):
"""Return a string made out of ASCII tablature representing a
NoteContainer object or list of note strings / Note objects.
Throw a FingerError if no playable fingering can be found.
'tuning' should be a StringTuning object or None for the default tuning.
To force a certain fingering you can use a 'string' and 'fret' attribute
on one or more of the Notes. If the fingering is valid, it will get used
instead of the default one.
"""
if tuning is None:
tuning = default_tuning
result = begin_track(tuning)
l = len(result[0])
w = max(4, (width - l) - 1)
fingerings = tuning.find_fingering(notes)
if fingerings != []:
# Do an attribute check
f = []
attr = []
for note in notes:
if hasattr(note, 'string') and hasattr(note, 'fret'):
n = tuning.get_Note(note.string, note.fret)
if n is not None and int(n) == int(note):
f += (note.string, note.fret)
attr.append(int(note))
# See if there are any possible fingerings with the attributes
# that are set.
fres = []
if f != []:
for x in fingerings:
found = True
for pos in f:
if pos not in x:
found = False
if found:
fres.append(x)
# Use best fingering.
if fres != []:
f = fres[0]
else:
# Use default fingering if attributes don't make sense
f = fingerings[0]
# Build {string: fret} result
res = {}
for (string, fret) in f:
res[string] = str(fret)
maxfret = max(res.values())
# Produce ASCII
for i in range(len(result)):
if i not in res.keys():
result[i] += '-' * w + '|'
else:
d = w - len(res[i])
result[i] += '-' * (d / 2) + res[i]
d = (w - d / 2) - len(res[i])
result[i] += '-' * d + '|'
else:
raise FingerError('No playable fingering found for: %s' % notes)
result.reverse()
return os.linesep.join(result) | Return a string made out of ASCII tablature representing a
NoteContainer object or list of note strings / Note objects.
Throw a FingerError if no playable fingering can be found.
'tuning' should be a StringTuning object or None for the default tuning.
To force a certain fingering you can use a 'string' and 'fret' attribute
on one or more of the Notes. If the fingering is valid, it will get used
instead of the default one. | Below is the the instruction that describes the task:
### Input:
Return a string made out of ASCII tablature representing a
NoteContainer object or list of note strings / Note objects.
Throw a FingerError if no playable fingering can be found.
'tuning' should be a StringTuning object or None for the default tuning.
To force a certain fingering you can use a 'string' and 'fret' attribute
on one or more of the Notes. If the fingering is valid, it will get used
instead of the default one.
### Response:
def from_NoteContainer(notes, width=80, tuning=None):
"""Return a string made out of ASCII tablature representing a
NoteContainer object or list of note strings / Note objects.
Throw a FingerError if no playable fingering can be found.
'tuning' should be a StringTuning object or None for the default tuning.
To force a certain fingering you can use a 'string' and 'fret' attribute
on one or more of the Notes. If the fingering is valid, it will get used
instead of the default one.
"""
if tuning is None:
tuning = default_tuning
result = begin_track(tuning)
l = len(result[0])
w = max(4, (width - l) - 1)
fingerings = tuning.find_fingering(notes)
if fingerings != []:
# Do an attribute check
f = []
attr = []
for note in notes:
if hasattr(note, 'string') and hasattr(note, 'fret'):
n = tuning.get_Note(note.string, note.fret)
if n is not None and int(n) == int(note):
f += (note.string, note.fret)
attr.append(int(note))
# See if there are any possible fingerings with the attributes
# that are set.
fres = []
if f != []:
for x in fingerings:
found = True
for pos in f:
if pos not in x:
found = False
if found:
fres.append(x)
# Use best fingering.
if fres != []:
f = fres[0]
else:
# Use default fingering if attributes don't make sense
f = fingerings[0]
# Build {string: fret} result
res = {}
for (string, fret) in f:
res[string] = str(fret)
maxfret = max(res.values())
# Produce ASCII
for i in range(len(result)):
if i not in res.keys():
result[i] += '-' * w + '|'
else:
d = w - len(res[i])
result[i] += '-' * (d / 2) + res[i]
d = (w - d / 2) - len(res[i])
result[i] += '-' * d + '|'
else:
raise FingerError('No playable fingering found for: %s' % notes)
result.reverse()
return os.linesep.join(result) |
def structure_attrs_fromtuple(self, obj, cl):
# type: (Tuple, Type[T]) -> T
"""Load an attrs class from a sequence (tuple)."""
conv_obj = [] # A list of converter parameters.
for a, value in zip(cl.__attrs_attrs__, obj): # type: ignore
# We detect the type by the metadata.
converted = self._structure_attr_from_tuple(a, a.name, value)
conv_obj.append(converted)
return cl(*conv_obj) | Load an attrs class from a sequence (tuple). | Below is the the instruction that describes the task:
### Input:
Load an attrs class from a sequence (tuple).
### Response:
def structure_attrs_fromtuple(self, obj, cl):
# type: (Tuple, Type[T]) -> T
"""Load an attrs class from a sequence (tuple)."""
conv_obj = [] # A list of converter parameters.
for a, value in zip(cl.__attrs_attrs__, obj): # type: ignore
# We detect the type by the metadata.
converted = self._structure_attr_from_tuple(a, a.name, value)
conv_obj.append(converted)
return cl(*conv_obj) |
def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style) | push a new level on the stack with a style dictionnary containing style:value pairs | Below is the the instruction that describes the task:
### Input:
push a new level on the stack with a style dictionnary containing style:value pairs
### Response:
def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style) |
def _signal_handler(self, signal_interupt, frame): # pylint: disable=W0613
"""Handle singal interrupt.
Args:
signal_interupt ([type]): [Description]
frame ([type]): [Description]
"""
if self.container is not None:
print('{}{}Stopping docker container.'.format(c.Style.BRIGHT, c.Fore.YELLOW))
self.container.stop()
print('{}{}Interrupt signal received.'.format(c.Style.BRIGHT, c.Fore.RED))
self.log.error('tcrun received an interrupt signal and will now exit.')
sys.exit(1) | Handle singal interrupt.
Args:
signal_interupt ([type]): [Description]
frame ([type]): [Description] | Below is the the instruction that describes the task:
### Input:
Handle singal interrupt.
Args:
signal_interupt ([type]): [Description]
frame ([type]): [Description]
### Response:
def _signal_handler(self, signal_interupt, frame): # pylint: disable=W0613
"""Handle singal interrupt.
Args:
signal_interupt ([type]): [Description]
frame ([type]): [Description]
"""
if self.container is not None:
print('{}{}Stopping docker container.'.format(c.Style.BRIGHT, c.Fore.YELLOW))
self.container.stop()
print('{}{}Interrupt signal received.'.format(c.Style.BRIGHT, c.Fore.RED))
self.log.error('tcrun received an interrupt signal and will now exit.')
sys.exit(1) |
def issubset(self, other):
"""
Check if the contents of `self` is a subset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items <= other.items | Check if the contents of `self` is a subset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet` | Below is the the instruction that describes the task:
### Input:
Check if the contents of `self` is a subset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
### Response:
def issubset(self, other):
"""
Check if the contents of `self` is a subset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items <= other.items |
def GetValue(self, identifier, default_value=None):
"""Retrieves a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
default_value (object): default value.
Returns:
object: value or default value if not available.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, py2to3.STRING_TYPES):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
return self._values.get(identifier, default_value) | Retrieves a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
default_value (object): default value.
Returns:
object: value or default value if not available.
Raises:
TypeError: if the identifier is not a string type. | Below is the the instruction that describes the task:
### Input:
Retrieves a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
default_value (object): default value.
Returns:
object: value or default value if not available.
Raises:
TypeError: if the identifier is not a string type.
### Response:
def GetValue(self, identifier, default_value=None):
"""Retrieves a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
default_value (object): default value.
Returns:
object: value or default value if not available.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, py2to3.STRING_TYPES):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
return self._values.get(identifier, default_value) |
def db004(self, value=None):
""" Corresponds to IDD Field `db004`
Dry-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db004`'.format(value))
self._db004 = value | Corresponds to IDD Field `db004`
Dry-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | Below is the the instruction that describes the task:
### Input:
Corresponds to IDD Field `db004`
Dry-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def db004(self, value=None):
""" Corresponds to IDD Field `db004`
Dry-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db004`'.format(value))
self._db004 = value |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.