code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def safe_int(val, default=None):
"""
Returns int() of val if val is not convertable to int use default
instead
:param val:
:param default:
"""
try:
val = int(val)
except (ValueError, TypeError):
val = default
return val | Returns int() of val if val is not convertable to int use default
instead
:param val:
:param default: | Below is the the instruction that describes the task:
### Input:
Returns int() of val if val is not convertable to int use default
instead
:param val:
:param default:
### Response:
def safe_int(val, default=None):
"""
Returns int() of val if val is not convertable to int use default
instead
:param val:
:param default:
"""
try:
val = int(val)
except (ValueError, TypeError):
val = default
return val |
def is_break_tag(self, el):
"""Check if tag is an element we should break on."""
name = el.name
return name in self.break_tags or name in self.user_break_tags | Check if tag is an element we should break on. | Below is the the instruction that describes the task:
### Input:
Check if tag is an element we should break on.
### Response:
def is_break_tag(self, el):
"""Check if tag is an element we should break on."""
name = el.name
return name in self.break_tags or name in self.user_break_tags |
def resetaA(self,pot=None,type=None):
"""
NAME:
resetaA
PURPOSE:
re-set up an actionAngle module for this Orbit
INPUT:
(none)
OUTPUT:
True if reset happened, False otherwise
HISTORY:
2014-01-06 - Written - Bovy (IAS)
"""
try:
delattr(self._orb,'_aA')
except AttributeError:
return False
else:
return True | NAME:
resetaA
PURPOSE:
re-set up an actionAngle module for this Orbit
INPUT:
(none)
OUTPUT:
True if reset happened, False otherwise
HISTORY:
2014-01-06 - Written - Bovy (IAS) | Below is the the instruction that describes the task:
### Input:
NAME:
resetaA
PURPOSE:
re-set up an actionAngle module for this Orbit
INPUT:
(none)
OUTPUT:
True if reset happened, False otherwise
HISTORY:
2014-01-06 - Written - Bovy (IAS)
### Response:
def resetaA(self,pot=None,type=None):
"""
NAME:
resetaA
PURPOSE:
re-set up an actionAngle module for this Orbit
INPUT:
(none)
OUTPUT:
True if reset happened, False otherwise
HISTORY:
2014-01-06 - Written - Bovy (IAS)
"""
try:
delattr(self._orb,'_aA')
except AttributeError:
return False
else:
return True |
def hrs_84_and_db12_8_or_20_6(self, value=None):
""" Corresponds to IDD Field `hrs_84_and_db12_8_or_20_6`
Number of hours between 8 AM and 4 PM (inclusive) with dry-bulb temperature between 12.8 and 20.6 C
Args:
value (float): value for IDD Field `hrs_84_and_db12_8_or_20_6`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `hrs_84_and_db12_8_or_20_6`'.format(value))
self._hrs_84_and_db12_8_or_20_6 = value | Corresponds to IDD Field `hrs_84_and_db12_8_or_20_6`
Number of hours between 8 AM and 4 PM (inclusive) with dry-bulb temperature between 12.8 and 20.6 C
Args:
value (float): value for IDD Field `hrs_84_and_db12_8_or_20_6`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | Below is the the instruction that describes the task:
### Input:
Corresponds to IDD Field `hrs_84_and_db12_8_or_20_6`
Number of hours between 8 AM and 4 PM (inclusive) with dry-bulb temperature between 12.8 and 20.6 C
Args:
value (float): value for IDD Field `hrs_84_and_db12_8_or_20_6`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def hrs_84_and_db12_8_or_20_6(self, value=None):
""" Corresponds to IDD Field `hrs_84_and_db12_8_or_20_6`
Number of hours between 8 AM and 4 PM (inclusive) with dry-bulb temperature between 12.8 and 20.6 C
Args:
value (float): value for IDD Field `hrs_84_and_db12_8_or_20_6`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `hrs_84_and_db12_8_or_20_6`'.format(value))
self._hrs_84_and_db12_8_or_20_6 = value |
def _value(obj):
"""
make sure to get a float
"""
# TODO: this is ugly and makes everything ugly
# can we handle this with a clean decorator or just requiring that only floats be passed??
if hasattr(obj, 'value'):
return obj.value
elif isinstance(obj, np.ndarray):
return np.array([o.value for o in obj])
elif hasattr(obj, '__iter__'):
return [_value(o) for o in obj]
return obj | make sure to get a float | Below is the the instruction that describes the task:
### Input:
make sure to get a float
### Response:
def _value(obj):
"""
make sure to get a float
"""
# TODO: this is ugly and makes everything ugly
# can we handle this with a clean decorator or just requiring that only floats be passed??
if hasattr(obj, 'value'):
return obj.value
elif isinstance(obj, np.ndarray):
return np.array([o.value for o in obj])
elif hasattr(obj, '__iter__'):
return [_value(o) for o in obj]
return obj |
def blit_rect(
self,
console: tcod.console.Console,
x: int,
y: int,
width: int,
height: int,
bg_blend: int,
) -> None:
"""Blit onto a Console without scaling or rotation.
Args:
console (Console): Blit destination Console.
x (int): Console tile X position starting from the left at 0.
y (int): Console tile Y position starting from the top at 0.
width (int): Use -1 for Image width.
height (int): Use -1 for Image height.
bg_blend (int): Background blending mode to use.
"""
lib.TCOD_image_blit_rect(
self.image_c, _console(console), x, y, width, height, bg_blend
) | Blit onto a Console without scaling or rotation.
Args:
console (Console): Blit destination Console.
x (int): Console tile X position starting from the left at 0.
y (int): Console tile Y position starting from the top at 0.
width (int): Use -1 for Image width.
height (int): Use -1 for Image height.
bg_blend (int): Background blending mode to use. | Below is the the instruction that describes the task:
### Input:
Blit onto a Console without scaling or rotation.
Args:
console (Console): Blit destination Console.
x (int): Console tile X position starting from the left at 0.
y (int): Console tile Y position starting from the top at 0.
width (int): Use -1 for Image width.
height (int): Use -1 for Image height.
bg_blend (int): Background blending mode to use.
### Response:
def blit_rect(
self,
console: tcod.console.Console,
x: int,
y: int,
width: int,
height: int,
bg_blend: int,
) -> None:
"""Blit onto a Console without scaling or rotation.
Args:
console (Console): Blit destination Console.
x (int): Console tile X position starting from the left at 0.
y (int): Console tile Y position starting from the top at 0.
width (int): Use -1 for Image width.
height (int): Use -1 for Image height.
bg_blend (int): Background blending mode to use.
"""
lib.TCOD_image_blit_rect(
self.image_c, _console(console), x, y, width, height, bg_blend
) |
def timeit(method):
"""
Decorator: Compute the execution time of a function
:param method: the function
:return: the method runtime
"""
def timed(*arguments, **kw):
ts = time.time()
result = method(*arguments, **kw)
te = time.time()
sys.stdout.write('Time: %r %2.2f sec\n' % (method.__name__.strip("_"), te - ts))
sys.stdout.write('------------------------------------\n')
sys.stdout.flush()
return result
return timed | Decorator: Compute the execution time of a function
:param method: the function
:return: the method runtime | Below is the the instruction that describes the task:
### Input:
Decorator: Compute the execution time of a function
:param method: the function
:return: the method runtime
### Response:
def timeit(method):
"""
Decorator: Compute the execution time of a function
:param method: the function
:return: the method runtime
"""
def timed(*arguments, **kw):
ts = time.time()
result = method(*arguments, **kw)
te = time.time()
sys.stdout.write('Time: %r %2.2f sec\n' % (method.__name__.strip("_"), te - ts))
sys.stdout.write('------------------------------------\n')
sys.stdout.flush()
return result
return timed |
def indent_text(*strs, **kwargs):
""" indents text according to an operater string and a global indentation
level. returns a tuple of all passed args, indented according to the
operator string
indent: [defaults to +0]
The operator string, of the form
++n : increments the global indentation level by n and indents
+n : indents with the global indentation level + n
--n : decrements the global indentation level by n
-n : indents with the global indentation level - n
==n : sets the global indentation level to exactly n and indents
=n : indents with an indentation level of exactly n
"""
# python 2.7 workaround
indent = kwargs["indent"] if "indent" in kwargs else"+0"
autobreak = kwargs.get("autobreak", False)
char_limit = kwargs.get("char_limit", 80)
split_char = kwargs.get("split_char", " ")
strs = list(strs)
if autobreak:
for index, s in enumerate(strs):
if len(s) > char_limit:
strs[index] = []
spl = s.split(split_char)
result = []
collect = ""
for current_block in spl:
if len(current_block) + len(collect) > char_limit:
strs[index].append(collect[:-1] + "\n")
collect = " "
collect += current_block + split_char
strs[index].append(collect + "\n")
strs = flatten_list(strs)
global lasting_indent
if indent.startswith("++"):
lasting_indent = lasting_indent + int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("+"):
cur_indent = lasting_indent + int(indent[1:])
elif indent.startswith("--"):
lasting_indent = lasting_indent - int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("-"):
cur_indent = lasting_indent - int(indent[1:])
elif indent.startswith("=="):
lasting_indent = int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("="):
lasting_indent = int(indent[1:])
cur_indent = int(indent[1:])
else:
raise Exception(
"indent command format '%s' unrecognized (see the docstring)")
# mutate indentation level if needed
return tuple([" " * cur_indent] + [elem.replace("\n", "\n" + " " * cur_indent)
for elem in strs]) | indents text according to an operater string and a global indentation
level. returns a tuple of all passed args, indented according to the
operator string
indent: [defaults to +0]
The operator string, of the form
++n : increments the global indentation level by n and indents
+n : indents with the global indentation level + n
--n : decrements the global indentation level by n
-n : indents with the global indentation level - n
==n : sets the global indentation level to exactly n and indents
=n : indents with an indentation level of exactly n | Below is the the instruction that describes the task:
### Input:
indents text according to an operater string and a global indentation
level. returns a tuple of all passed args, indented according to the
operator string
indent: [defaults to +0]
The operator string, of the form
++n : increments the global indentation level by n and indents
+n : indents with the global indentation level + n
--n : decrements the global indentation level by n
-n : indents with the global indentation level - n
==n : sets the global indentation level to exactly n and indents
=n : indents with an indentation level of exactly n
### Response:
def indent_text(*strs, **kwargs):
""" indents text according to an operater string and a global indentation
level. returns a tuple of all passed args, indented according to the
operator string
indent: [defaults to +0]
The operator string, of the form
++n : increments the global indentation level by n and indents
+n : indents with the global indentation level + n
--n : decrements the global indentation level by n
-n : indents with the global indentation level - n
==n : sets the global indentation level to exactly n and indents
=n : indents with an indentation level of exactly n
"""
# python 2.7 workaround
indent = kwargs["indent"] if "indent" in kwargs else"+0"
autobreak = kwargs.get("autobreak", False)
char_limit = kwargs.get("char_limit", 80)
split_char = kwargs.get("split_char", " ")
strs = list(strs)
if autobreak:
for index, s in enumerate(strs):
if len(s) > char_limit:
strs[index] = []
spl = s.split(split_char)
result = []
collect = ""
for current_block in spl:
if len(current_block) + len(collect) > char_limit:
strs[index].append(collect[:-1] + "\n")
collect = " "
collect += current_block + split_char
strs[index].append(collect + "\n")
strs = flatten_list(strs)
global lasting_indent
if indent.startswith("++"):
lasting_indent = lasting_indent + int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("+"):
cur_indent = lasting_indent + int(indent[1:])
elif indent.startswith("--"):
lasting_indent = lasting_indent - int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("-"):
cur_indent = lasting_indent - int(indent[1:])
elif indent.startswith("=="):
lasting_indent = int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("="):
lasting_indent = int(indent[1:])
cur_indent = int(indent[1:])
else:
raise Exception(
"indent command format '%s' unrecognized (see the docstring)")
# mutate indentation level if needed
return tuple([" " * cur_indent] + [elem.replace("\n", "\n" + " " * cur_indent)
for elem in strs]) |
def splitLines0(frags, widths):
"""
given a list of ParaFrags we return a list of ParaLines
each ParaLine has
1) ExtraSpace
2) blankCount
3) [textDefns....]
each text definition is a (ParaFrag, start, limit) triplet
"""
#initialise the algorithm
lineNum = 0
maxW = widths[lineNum]
i = -1
l = len(frags)
lim = start = 0
text = frags[0]
while 1:
#find a non whitespace character
while i < l:
while start < lim and text[start] == ' ': start += 1
if start == lim:
i += 1
if i == l: break
start = 0
f = frags[i]
text = f.text
lim = len(text)
else:
break # we found one
if start == lim: break # if we didn't find one we are done
#start of a line
g = (None, None, None)
line = []
cLen = 0
nSpaces = 0
while cLen < maxW:
j = text.find(' ', start)
if j < 0:
j == lim
w = stringWidth(text[start:j], f.fontName, f.fontSize)
cLen += w
if cLen > maxW and line != []:
cLen = cLen - w
#this is the end of the line
while g.text[lim] == ' ':
lim -= 1
nSpaces -= 1
break
if j < 0:
j = lim
if g[0] is f:
g[2] = j #extend
else:
g = (f, start, j)
line.append(g)
if j == lim:
i += 1 | given a list of ParaFrags we return a list of ParaLines
each ParaLine has
1) ExtraSpace
2) blankCount
3) [textDefns....]
each text definition is a (ParaFrag, start, limit) triplet | Below is the the instruction that describes the task:
### Input:
given a list of ParaFrags we return a list of ParaLines
each ParaLine has
1) ExtraSpace
2) blankCount
3) [textDefns....]
each text definition is a (ParaFrag, start, limit) triplet
### Response:
def splitLines0(frags, widths):
"""
given a list of ParaFrags we return a list of ParaLines
each ParaLine has
1) ExtraSpace
2) blankCount
3) [textDefns....]
each text definition is a (ParaFrag, start, limit) triplet
"""
#initialise the algorithm
lineNum = 0
maxW = widths[lineNum]
i = -1
l = len(frags)
lim = start = 0
text = frags[0]
while 1:
#find a non whitespace character
while i < l:
while start < lim and text[start] == ' ': start += 1
if start == lim:
i += 1
if i == l: break
start = 0
f = frags[i]
text = f.text
lim = len(text)
else:
break # we found one
if start == lim: break # if we didn't find one we are done
#start of a line
g = (None, None, None)
line = []
cLen = 0
nSpaces = 0
while cLen < maxW:
j = text.find(' ', start)
if j < 0:
j == lim
w = stringWidth(text[start:j], f.fontName, f.fontSize)
cLen += w
if cLen > maxW and line != []:
cLen = cLen - w
#this is the end of the line
while g.text[lim] == ' ':
lim -= 1
nSpaces -= 1
break
if j < 0:
j = lim
if g[0] is f:
g[2] = j #extend
else:
g = (f, start, j)
line.append(g)
if j == lim:
i += 1 |
def swap_vertices(self, i, j):
"""
Swap two vertices in the tree structure array.
swap_vertex swaps the location of two vertices in a tree structure array.
:param tree: the tree for which two vertices are to be swapped.
:param i: the index of the first vertex to be swapped.
:param j: the index of the second vertex to be swapped.
:rval tree: the tree structure with the two vertex locations swapped.
"""
store_vertex_i = self.vertices[i]
store_vertex_j = self.vertices[j]
self.vertices[j] = store_vertex_i
self.vertices[i] = store_vertex_j
for k in range(len(self.vertices)):
for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
if i in swap_list:
swap_list[swap_list.index(i)] = -1
if j in swap_list:
swap_list[swap_list.index(j)] = i
if -1 in swap_list:
swap_list[swap_list.index(-1)] = j | Swap two vertices in the tree structure array.
swap_vertex swaps the location of two vertices in a tree structure array.
:param tree: the tree for which two vertices are to be swapped.
:param i: the index of the first vertex to be swapped.
:param j: the index of the second vertex to be swapped.
:rval tree: the tree structure with the two vertex locations swapped. | Below is the the instruction that describes the task:
### Input:
Swap two vertices in the tree structure array.
swap_vertex swaps the location of two vertices in a tree structure array.
:param tree: the tree for which two vertices are to be swapped.
:param i: the index of the first vertex to be swapped.
:param j: the index of the second vertex to be swapped.
:rval tree: the tree structure with the two vertex locations swapped.
### Response:
def swap_vertices(self, i, j):
"""
Swap two vertices in the tree structure array.
swap_vertex swaps the location of two vertices in a tree structure array.
:param tree: the tree for which two vertices are to be swapped.
:param i: the index of the first vertex to be swapped.
:param j: the index of the second vertex to be swapped.
:rval tree: the tree structure with the two vertex locations swapped.
"""
store_vertex_i = self.vertices[i]
store_vertex_j = self.vertices[j]
self.vertices[j] = store_vertex_i
self.vertices[i] = store_vertex_j
for k in range(len(self.vertices)):
for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
if i in swap_list:
swap_list[swap_list.index(i)] = -1
if j in swap_list:
swap_list[swap_list.index(j)] = i
if -1 in swap_list:
swap_list[swap_list.index(-1)] = j |
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue() | Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str | Below is the the instruction that describes the task:
### Input:
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
### Response:
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue() |
def _wait_response(self, ready, timeout_sec):
"""Wait for a response to be ready (the provided ready bits are set).
If the specified timeout (in seconds) is exceeded and error will be
thrown.
"""
# Wait for the measurement to be ready (or a timeout elapses).
start = time.time()
while True:
# Check if the timeout has elapsed.
if (time.time() - start) >= timeout_sec:
raise RuntimeError('Exceeded timeout waiting for VCNL40xx response, check your wiring.')
# Check if result is ready and return it.
result = self._device.readU8(VCNL40xx_COMMAND)
if (result & ready) > 0:
return
# Otherwise delay for a bit and try reading again.
time.sleep(0.001) | Wait for a response to be ready (the provided ready bits are set).
If the specified timeout (in seconds) is exceeded and error will be
thrown. | Below is the the instruction that describes the task:
### Input:
Wait for a response to be ready (the provided ready bits are set).
If the specified timeout (in seconds) is exceeded and error will be
thrown.
### Response:
def _wait_response(self, ready, timeout_sec):
"""Wait for a response to be ready (the provided ready bits are set).
If the specified timeout (in seconds) is exceeded and error will be
thrown.
"""
# Wait for the measurement to be ready (or a timeout elapses).
start = time.time()
while True:
# Check if the timeout has elapsed.
if (time.time() - start) >= timeout_sec:
raise RuntimeError('Exceeded timeout waiting for VCNL40xx response, check your wiring.')
# Check if result is ready and return it.
result = self._device.readU8(VCNL40xx_COMMAND)
if (result & ready) > 0:
return
# Otherwise delay for a bit and try reading again.
time.sleep(0.001) |
def get_video(self, node):
"""
Create a video object from a video embed
"""
video = Video()
video._embed_code = self.get_embed_code(node)
video._embed_type = self.get_embed_type(node)
video._width = self.get_width(node)
video._height = self.get_height(node)
video._src = self.get_src(node)
video._provider = self.get_provider(video.src)
return video | Create a video object from a video embed | Below is the the instruction that describes the task:
### Input:
Create a video object from a video embed
### Response:
def get_video(self, node):
"""
Create a video object from a video embed
"""
video = Video()
video._embed_code = self.get_embed_code(node)
video._embed_type = self.get_embed_type(node)
video._width = self.get_width(node)
video._height = self.get_height(node)
video._src = self.get_src(node)
video._provider = self.get_provider(video.src)
return video |
def option_registrations_iter(self):
"""Returns an iterator over the normalized registration arguments of each option in this parser.
Useful for generating help and other documentation.
Each yielded item is an (args, kwargs) pair, as passed to register(), except that kwargs
will be normalized in the following ways:
- It will always have 'dest' explicitly set.
- It will always have 'default' explicitly set, and the value will be a RankedValue.
- For recursive options, the original registrar will also have 'recursive_root' set.
Note that recursive options we inherit from a parent will also be yielded here, with
the correctly-scoped default value.
"""
def normalize_kwargs(args, orig_kwargs):
nkwargs = copy.copy(orig_kwargs)
dest = self.parse_dest(*args, **nkwargs)
nkwargs['dest'] = dest
if not ('default' in nkwargs and isinstance(nkwargs['default'], RankedValue)):
nkwargs['default'] = self._compute_value(dest, nkwargs, [])
return nkwargs
# First yield any recursive options we inherit from our parent.
if self._parent_parser:
for args, kwargs in self._parent_parser._recursive_option_registration_args():
yield args, normalize_kwargs(args, kwargs)
# Then yield our directly-registered options.
# This must come after yielding inherited recursive options, so we can detect shadowing.
for args, kwargs in self._option_registrations:
normalized_kwargs = normalize_kwargs(args, kwargs)
if 'recursive' in normalized_kwargs:
# If we're the original registrar, make sure we can distinguish that.
normalized_kwargs['recursive_root'] = True
yield args, normalized_kwargs | Returns an iterator over the normalized registration arguments of each option in this parser.
Useful for generating help and other documentation.
Each yielded item is an (args, kwargs) pair, as passed to register(), except that kwargs
will be normalized in the following ways:
- It will always have 'dest' explicitly set.
- It will always have 'default' explicitly set, and the value will be a RankedValue.
- For recursive options, the original registrar will also have 'recursive_root' set.
Note that recursive options we inherit from a parent will also be yielded here, with
the correctly-scoped default value. | Below is the the instruction that describes the task:
### Input:
Returns an iterator over the normalized registration arguments of each option in this parser.
Useful for generating help and other documentation.
Each yielded item is an (args, kwargs) pair, as passed to register(), except that kwargs
will be normalized in the following ways:
- It will always have 'dest' explicitly set.
- It will always have 'default' explicitly set, and the value will be a RankedValue.
- For recursive options, the original registrar will also have 'recursive_root' set.
Note that recursive options we inherit from a parent will also be yielded here, with
the correctly-scoped default value.
### Response:
def option_registrations_iter(self):
"""Returns an iterator over the normalized registration arguments of each option in this parser.
Useful for generating help and other documentation.
Each yielded item is an (args, kwargs) pair, as passed to register(), except that kwargs
will be normalized in the following ways:
- It will always have 'dest' explicitly set.
- It will always have 'default' explicitly set, and the value will be a RankedValue.
- For recursive options, the original registrar will also have 'recursive_root' set.
Note that recursive options we inherit from a parent will also be yielded here, with
the correctly-scoped default value.
"""
def normalize_kwargs(args, orig_kwargs):
nkwargs = copy.copy(orig_kwargs)
dest = self.parse_dest(*args, **nkwargs)
nkwargs['dest'] = dest
if not ('default' in nkwargs and isinstance(nkwargs['default'], RankedValue)):
nkwargs['default'] = self._compute_value(dest, nkwargs, [])
return nkwargs
# First yield any recursive options we inherit from our parent.
if self._parent_parser:
for args, kwargs in self._parent_parser._recursive_option_registration_args():
yield args, normalize_kwargs(args, kwargs)
# Then yield our directly-registered options.
# This must come after yielding inherited recursive options, so we can detect shadowing.
for args, kwargs in self._option_registrations:
normalized_kwargs = normalize_kwargs(args, kwargs)
if 'recursive' in normalized_kwargs:
# If we're the original registrar, make sure we can distinguish that.
normalized_kwargs['recursive_root'] = True
yield args, normalized_kwargs |
def _request(self, method, path, server=None, **kwargs):
"""Execute a request to the cluster
A server is selected from the server pool.
"""
while True:
next_server = server or self._get_server()
try:
response = self.server_pool[next_server].request(
method, path, username=self.username, password=self.password, schema=self.schema, **kwargs)
redirect_location = response.get_redirect_location()
if redirect_location and 300 <= response.status <= 308:
redirect_server = _server_url(redirect_location)
self._add_server(redirect_server)
return self._request(
method, path, server=redirect_server, **kwargs)
if not server and response.status in SRV_UNAVAILABLE_STATUSES:
with self._lock:
# drop server from active ones
self._drop_server(next_server, response.reason)
else:
return response
except (urllib3.exceptions.MaxRetryError,
urllib3.exceptions.ReadTimeoutError,
urllib3.exceptions.SSLError,
urllib3.exceptions.HTTPError,
urllib3.exceptions.ProxyError,) as ex:
ex_message = _ex_to_message(ex)
if server:
raise ConnectionError(
"Server not available, exception: %s" % ex_message
)
preserve_server = False
if isinstance(ex, urllib3.exceptions.ProtocolError):
preserve_server = any(
t in [type(arg) for arg in ex.args]
for t in PRESERVE_ACTIVE_SERVER_EXCEPTIONS
)
if (not preserve_server):
with self._lock:
# drop server from active ones
self._drop_server(next_server, ex_message)
except Exception as e:
raise ProgrammingError(_ex_to_message(e)) | Execute a request to the cluster
A server is selected from the server pool. | Below is the the instruction that describes the task:
### Input:
Execute a request to the cluster
A server is selected from the server pool.
### Response:
def _request(self, method, path, server=None, **kwargs):
"""Execute a request to the cluster
A server is selected from the server pool.
"""
while True:
next_server = server or self._get_server()
try:
response = self.server_pool[next_server].request(
method, path, username=self.username, password=self.password, schema=self.schema, **kwargs)
redirect_location = response.get_redirect_location()
if redirect_location and 300 <= response.status <= 308:
redirect_server = _server_url(redirect_location)
self._add_server(redirect_server)
return self._request(
method, path, server=redirect_server, **kwargs)
if not server and response.status in SRV_UNAVAILABLE_STATUSES:
with self._lock:
# drop server from active ones
self._drop_server(next_server, response.reason)
else:
return response
except (urllib3.exceptions.MaxRetryError,
urllib3.exceptions.ReadTimeoutError,
urllib3.exceptions.SSLError,
urllib3.exceptions.HTTPError,
urllib3.exceptions.ProxyError,) as ex:
ex_message = _ex_to_message(ex)
if server:
raise ConnectionError(
"Server not available, exception: %s" % ex_message
)
preserve_server = False
if isinstance(ex, urllib3.exceptions.ProtocolError):
preserve_server = any(
t in [type(arg) for arg in ex.args]
for t in PRESERVE_ACTIVE_SERVER_EXCEPTIONS
)
if (not preserve_server):
with self._lock:
# drop server from active ones
self._drop_server(next_server, ex_message)
except Exception as e:
raise ProgrammingError(_ex_to_message(e)) |
def corners(self):
"""
Iterate the vector of all corners of the hyperrectangles
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
[3, 0],
[3, 3]])
"""
corners = []
for ind in itertools.product(*((0,1),)*self.dim):
ind = np.array(ind)
corners.append(self.l + ind*self.r)
return np.array(corners) | Iterate the vector of all corners of the hyperrectangles
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
[3, 0],
[3, 3]]) | Below is the the instruction that describes the task:
### Input:
Iterate the vector of all corners of the hyperrectangles
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
[3, 0],
[3, 3]])
### Response:
def corners(self):
"""
Iterate the vector of all corners of the hyperrectangles
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
[3, 0],
[3, 3]])
"""
corners = []
for ind in itertools.product(*((0,1),)*self.dim):
ind = np.array(ind)
corners.append(self.l + ind*self.r)
return np.array(corners) |
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an Android usage-history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
data = file_object.read(self._HEADER_READ_SIZE)
if not data.startswith(b'<?xml'):
raise errors.UnableToParseFile(
'Not an Android usage history file [not XML]')
_, _, data = data.partition(b'\n')
if not data.startswith(b'<usage-history'):
raise errors.UnableToParseFile(
'Not an Android usage history file [wrong XML root key]')
# The current offset of the file-like object needs to point at
# the start of the file for ElementTree to parse the XML data correctly.
file_object.seek(0, os.SEEK_SET)
xml = ElementTree.parse(file_object)
root_node = xml.getroot()
for application_node in root_node:
package_name = application_node.get('name', None)
for part_node in application_node.iter():
if part_node.tag != 'comp':
continue
last_resume_time = part_node.get('lrt', None)
if last_resume_time is None:
parser_mediator.ProduceExtractionWarning('missing last resume time.')
continue
try:
last_resume_time = int(last_resume_time, 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unsupported last resume time: {0:s}.'.format(last_resume_time))
continue
event_data = AndroidAppUsageEventData()
event_data.component = part_node.get('name', None)
event_data.package = package_name
date_time = dfdatetime_java_time.JavaTime(timestamp=last_resume_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RESUME)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses an Android usage-history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | Below is the the instruction that describes the task:
### Input:
Parses an Android usage-history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
### Response:
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an Android usage-history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
data = file_object.read(self._HEADER_READ_SIZE)
if not data.startswith(b'<?xml'):
raise errors.UnableToParseFile(
'Not an Android usage history file [not XML]')
_, _, data = data.partition(b'\n')
if not data.startswith(b'<usage-history'):
raise errors.UnableToParseFile(
'Not an Android usage history file [wrong XML root key]')
# The current offset of the file-like object needs to point at
# the start of the file for ElementTree to parse the XML data correctly.
file_object.seek(0, os.SEEK_SET)
xml = ElementTree.parse(file_object)
root_node = xml.getroot()
for application_node in root_node:
package_name = application_node.get('name', None)
for part_node in application_node.iter():
if part_node.tag != 'comp':
continue
last_resume_time = part_node.get('lrt', None)
if last_resume_time is None:
parser_mediator.ProduceExtractionWarning('missing last resume time.')
continue
try:
last_resume_time = int(last_resume_time, 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unsupported last resume time: {0:s}.'.format(last_resume_time))
continue
event_data = AndroidAppUsageEventData()
event_data.component = part_node.get('name', None)
event_data.package = package_name
date_time = dfdatetime_java_time.JavaTime(timestamp=last_resume_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RESUME)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def iter_all_repos(self, number=-1, since=None, etag=None, per_page=None):
"""Iterate over every repository in the order they were created.
:param int number: (optional), number of repositories to return.
Default: -1, returns all of them
:param int since: (optional), last repository id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of repositories to list per
request
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
url = self._build_url('repositories')
return self._iter(int(number), url, Repository,
params={'since': since, 'per_page': per_page},
etag=etag) | Iterate over every repository in the order they were created.
:param int number: (optional), number of repositories to return.
Default: -1, returns all of them
:param int since: (optional), last repository id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of repositories to list per
request
:returns: generator of :class:`Repository <github3.repos.Repository>` | Below is the the instruction that describes the task:
### Input:
Iterate over every repository in the order they were created.
:param int number: (optional), number of repositories to return.
Default: -1, returns all of them
:param int since: (optional), last repository id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of repositories to list per
request
:returns: generator of :class:`Repository <github3.repos.Repository>`
### Response:
def iter_all_repos(self, number=-1, since=None, etag=None, per_page=None):
"""Iterate over every repository in the order they were created.
:param int number: (optional), number of repositories to return.
Default: -1, returns all of them
:param int since: (optional), last repository id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of repositories to list per
request
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
url = self._build_url('repositories')
return self._iter(int(number), url, Repository,
params={'since': since, 'per_page': per_page},
etag=etag) |
def get_folder(self, title):
"""
Retrieve a folder by its title
Usage: C{engine.get_folder(title)}
Note that if more than one folder has the same title, only the first match will be
returned.
"""
for folder in self.configManager.allFolders:
if folder.title == title:
return folder
return None | Retrieve a folder by its title
Usage: C{engine.get_folder(title)}
Note that if more than one folder has the same title, only the first match will be
returned. | Below is the the instruction that describes the task:
### Input:
Retrieve a folder by its title
Usage: C{engine.get_folder(title)}
Note that if more than one folder has the same title, only the first match will be
returned.
### Response:
def get_folder(self, title):
"""
Retrieve a folder by its title
Usage: C{engine.get_folder(title)}
Note that if more than one folder has the same title, only the first match will be
returned.
"""
for folder in self.configManager.allFolders:
if folder.title == title:
return folder
return None |
def get_instance(self, payload):
"""
Build an instance of AlphaSenderInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
"""
return AlphaSenderInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Build an instance of AlphaSenderInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of AlphaSenderInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of AlphaSenderInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
"""
return AlphaSenderInstance(self._version, payload, service_sid=self._solution['service_sid'], ) |
def alter_db_table(self, model, old_db_table, new_db_table):
"""Ran when the name of a model is changed."""
super(SchemaEditor, self).alter_db_table(
model, old_db_table, new_db_table
)
for mixin in self.post_processing_mixins:
mixin.alter_db_table(
model,
old_db_table,
new_db_table
) | Ran when the name of a model is changed. | Below is the the instruction that describes the task:
### Input:
Ran when the name of a model is changed.
### Response:
def alter_db_table(self, model, old_db_table, new_db_table):
"""Ran when the name of a model is changed."""
super(SchemaEditor, self).alter_db_table(
model, old_db_table, new_db_table
)
for mixin in self.post_processing_mixins:
mixin.alter_db_table(
model,
old_db_table,
new_db_table
) |
def irfs(self, **kwargs):
""" Get the name of IFRs associted with a particular dataset
"""
dsval = kwargs.get('dataset', self.dataset(**kwargs))
tokens = dsval.split('_')
irf_name = "%s_%s_%s" % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])],
EVCLASS_NAME_DICTIONARY[tokens[3]],
kwargs.get('irf_ver'))
return irf_name | Get the name of IFRs associted with a particular dataset | Below is the the instruction that describes the task:
### Input:
Get the name of IFRs associted with a particular dataset
### Response:
def irfs(self, **kwargs):
""" Get the name of IFRs associted with a particular dataset
"""
dsval = kwargs.get('dataset', self.dataset(**kwargs))
tokens = dsval.split('_')
irf_name = "%s_%s_%s" % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])],
EVCLASS_NAME_DICTIONARY[tokens[3]],
kwargs.get('irf_ver'))
return irf_name |
def default_field_to_parameter(self, default_field):
"""Obtain parameter from default field.
:param default_field: A default field definition.
:type default_field: dict
:returns: A parameter object.
:rtype: FloatParameter, IntegerParameter
"""
if default_field.get('type') == QVariant.Double:
parameter = FloatParameter()
elif default_field.get('type') in qvariant_whole_numbers:
parameter = IntegerParameter()
else:
return
default_value = default_field.get('default_value')
if not default_value:
message = (
'InaSAFE default field %s does not have default value'
% default_field.get('name'))
LOGGER.exception(message)
return
parameter.guid = default_field.get('key')
parameter.name = default_value.get('name')
parameter.is_required = True
parameter.precision = default_field.get('precision')
parameter.minimum_allowed_value = default_value.get(
'min_value', 0)
parameter.maximum_allowed_value = default_value.get(
'max_value', 100000000)
parameter.help_text = default_value.get('help_text')
parameter.description = default_value.get('description')
# Check if user ask to restore to the most default value.
if self.is_restore_default:
parameter._value = default_value.get('default_value')
else:
# Current value
qsetting_default_value = get_inasafe_default_value_qsetting(
self.settings, GLOBAL, default_field['key'])
# To avoid python error
if qsetting_default_value > parameter.maximum_allowed_value:
qsetting_default_value = parameter.maximum_allowed_value
if qsetting_default_value < parameter.minimum_allowed_value:
qsetting_default_value = parameter.minimum_allowed_value
parameter.value = qsetting_default_value
return parameter | Obtain parameter from default field.
:param default_field: A default field definition.
:type default_field: dict
:returns: A parameter object.
:rtype: FloatParameter, IntegerParameter | Below is the the instruction that describes the task:
### Input:
Obtain parameter from default field.
:param default_field: A default field definition.
:type default_field: dict
:returns: A parameter object.
:rtype: FloatParameter, IntegerParameter
### Response:
def default_field_to_parameter(self, default_field):
"""Obtain parameter from default field.
:param default_field: A default field definition.
:type default_field: dict
:returns: A parameter object.
:rtype: FloatParameter, IntegerParameter
"""
if default_field.get('type') == QVariant.Double:
parameter = FloatParameter()
elif default_field.get('type') in qvariant_whole_numbers:
parameter = IntegerParameter()
else:
return
default_value = default_field.get('default_value')
if not default_value:
message = (
'InaSAFE default field %s does not have default value'
% default_field.get('name'))
LOGGER.exception(message)
return
parameter.guid = default_field.get('key')
parameter.name = default_value.get('name')
parameter.is_required = True
parameter.precision = default_field.get('precision')
parameter.minimum_allowed_value = default_value.get(
'min_value', 0)
parameter.maximum_allowed_value = default_value.get(
'max_value', 100000000)
parameter.help_text = default_value.get('help_text')
parameter.description = default_value.get('description')
# Check if user ask to restore to the most default value.
if self.is_restore_default:
parameter._value = default_value.get('default_value')
else:
# Current value
qsetting_default_value = get_inasafe_default_value_qsetting(
self.settings, GLOBAL, default_field['key'])
# To avoid python error
if qsetting_default_value > parameter.maximum_allowed_value:
qsetting_default_value = parameter.maximum_allowed_value
if qsetting_default_value < parameter.minimum_allowed_value:
qsetting_default_value = parameter.minimum_allowed_value
parameter.value = qsetting_default_value
return parameter |
def _locate(path):
"""Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found. None otherwise.
"""
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while True:
if os.path.exists(possible_result):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if next_tmp_dir == tmp_dir:
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path) | Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found. None otherwise. | Below is the the instruction that describes the task:
### Input:
Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found. None otherwise.
### Response:
def _locate(path):
"""Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found. None otherwise.
"""
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while True:
if os.path.exists(possible_result):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if next_tmp_dir == tmp_dir:
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path) |
def _display_colab(port, height, display_handle):
"""Display a TensorBoard instance in a Colab output frame.
The Colab VM is not directly exposed to the network, so the Colab
runtime provides a service worker tunnel to proxy requests from the
end user's browser through to servers running on the Colab VM: the
output frame may issue requests to https://localhost:<port> (HTTPS
only), which will be forwarded to the specified port on the VM.
It does not suffice to create an `iframe` and let the service worker
redirect its traffic (`<iframe src="https://localhost:6006">`),
because for security reasons service workers cannot intercept iframe
traffic. Instead, we manually fetch the TensorBoard index page with an
XHR in the output frame, and inject the raw HTML into `document.body`.
By default, the TensorBoard web app requests resources against
relative paths, like `./data/logdir`. Within the output frame, these
requests must instead hit `https://localhost:<port>/data/logdir`. To
redirect them, we change the document base URI, which transparently
affects all requests (XHRs and resources alike).
"""
import IPython.display
shell = """
<div id="root"></div>
<script>
(function() {
window.TENSORBOARD_ENV = window.TENSORBOARD_ENV || {};
window.TENSORBOARD_ENV["IN_COLAB"] = true;
document.querySelector("base").href = "https://localhost:%PORT%";
function fixUpTensorboard(root) {
const tftb = root.querySelector("tf-tensorboard");
// Disable the fragment manipulation behavior in Colab. Not
// only is the behavior not useful (as the iframe's location
// is not visible to the user), it causes TensorBoard's usage
// of `window.replace` to navigate away from the page and to
// the `localhost:<port>` URL specified by the base URI, which
// in turn causes the frame to (likely) crash.
tftb.removeAttribute("use-hash");
}
function executeAllScripts(root) {
// When `script` elements are inserted into the DOM by
// assigning to an element's `innerHTML`, the scripts are not
// executed. Thus, we manually re-insert these scripts so that
// TensorBoard can initialize itself.
for (const script of root.querySelectorAll("script")) {
const newScript = document.createElement("script");
newScript.type = script.type;
newScript.textContent = script.textContent;
root.appendChild(newScript);
script.remove();
}
}
function setHeight(root, height) {
// We set the height dynamically after the TensorBoard UI has
// been initialized. This avoids an intermediate state in
// which the container plus the UI become taller than the
// final width and cause the Colab output frame to be
// permanently resized, eventually leading to an empty
// vertical gap below the TensorBoard UI. It's not clear
// exactly what causes this problematic intermediate state,
// but setting the height late seems to fix it.
root.style.height = `${height}px`;
}
const root = document.getElementById("root");
fetch(".")
.then((x) => x.text())
.then((html) => void (root.innerHTML = html))
.then(() => fixUpTensorboard(root))
.then(() => executeAllScripts(root))
.then(() => setHeight(root, %HEIGHT%));
})();
</script>
""".replace("%PORT%", "%d" % port).replace("%HEIGHT%", "%d" % height)
html = IPython.display.HTML(shell)
if display_handle:
display_handle.update(html)
else:
IPython.display.display(html) | Display a TensorBoard instance in a Colab output frame.
The Colab VM is not directly exposed to the network, so the Colab
runtime provides a service worker tunnel to proxy requests from the
end user's browser through to servers running on the Colab VM: the
output frame may issue requests to https://localhost:<port> (HTTPS
only), which will be forwarded to the specified port on the VM.
It does not suffice to create an `iframe` and let the service worker
redirect its traffic (`<iframe src="https://localhost:6006">`),
because for security reasons service workers cannot intercept iframe
traffic. Instead, we manually fetch the TensorBoard index page with an
XHR in the output frame, and inject the raw HTML into `document.body`.
By default, the TensorBoard web app requests resources against
relative paths, like `./data/logdir`. Within the output frame, these
requests must instead hit `https://localhost:<port>/data/logdir`. To
redirect them, we change the document base URI, which transparently
affects all requests (XHRs and resources alike). | Below is the the instruction that describes the task:
### Input:
Display a TensorBoard instance in a Colab output frame.
The Colab VM is not directly exposed to the network, so the Colab
runtime provides a service worker tunnel to proxy requests from the
end user's browser through to servers running on the Colab VM: the
output frame may issue requests to https://localhost:<port> (HTTPS
only), which will be forwarded to the specified port on the VM.
It does not suffice to create an `iframe` and let the service worker
redirect its traffic (`<iframe src="https://localhost:6006">`),
because for security reasons service workers cannot intercept iframe
traffic. Instead, we manually fetch the TensorBoard index page with an
XHR in the output frame, and inject the raw HTML into `document.body`.
By default, the TensorBoard web app requests resources against
relative paths, like `./data/logdir`. Within the output frame, these
requests must instead hit `https://localhost:<port>/data/logdir`. To
redirect them, we change the document base URI, which transparently
affects all requests (XHRs and resources alike).
### Response:
def _display_colab(port, height, display_handle):
"""Display a TensorBoard instance in a Colab output frame.
The Colab VM is not directly exposed to the network, so the Colab
runtime provides a service worker tunnel to proxy requests from the
end user's browser through to servers running on the Colab VM: the
output frame may issue requests to https://localhost:<port> (HTTPS
only), which will be forwarded to the specified port on the VM.
It does not suffice to create an `iframe` and let the service worker
redirect its traffic (`<iframe src="https://localhost:6006">`),
because for security reasons service workers cannot intercept iframe
traffic. Instead, we manually fetch the TensorBoard index page with an
XHR in the output frame, and inject the raw HTML into `document.body`.
By default, the TensorBoard web app requests resources against
relative paths, like `./data/logdir`. Within the output frame, these
requests must instead hit `https://localhost:<port>/data/logdir`. To
redirect them, we change the document base URI, which transparently
affects all requests (XHRs and resources alike).
"""
import IPython.display
shell = """
<div id="root"></div>
<script>
(function() {
window.TENSORBOARD_ENV = window.TENSORBOARD_ENV || {};
window.TENSORBOARD_ENV["IN_COLAB"] = true;
document.querySelector("base").href = "https://localhost:%PORT%";
function fixUpTensorboard(root) {
const tftb = root.querySelector("tf-tensorboard");
// Disable the fragment manipulation behavior in Colab. Not
// only is the behavior not useful (as the iframe's location
// is not visible to the user), it causes TensorBoard's usage
// of `window.replace` to navigate away from the page and to
// the `localhost:<port>` URL specified by the base URI, which
// in turn causes the frame to (likely) crash.
tftb.removeAttribute("use-hash");
}
function executeAllScripts(root) {
// When `script` elements are inserted into the DOM by
// assigning to an element's `innerHTML`, the scripts are not
// executed. Thus, we manually re-insert these scripts so that
// TensorBoard can initialize itself.
for (const script of root.querySelectorAll("script")) {
const newScript = document.createElement("script");
newScript.type = script.type;
newScript.textContent = script.textContent;
root.appendChild(newScript);
script.remove();
}
}
function setHeight(root, height) {
// We set the height dynamically after the TensorBoard UI has
// been initialized. This avoids an intermediate state in
// which the container plus the UI become taller than the
// final width and cause the Colab output frame to be
// permanently resized, eventually leading to an empty
// vertical gap below the TensorBoard UI. It's not clear
// exactly what causes this problematic intermediate state,
// but setting the height late seems to fix it.
root.style.height = `${height}px`;
}
const root = document.getElementById("root");
fetch(".")
.then((x) => x.text())
.then((html) => void (root.innerHTML = html))
.then(() => fixUpTensorboard(root))
.then(() => executeAllScripts(root))
.then(() => setHeight(root, %HEIGHT%));
})();
</script>
""".replace("%PORT%", "%d" % port).replace("%HEIGHT%", "%d" % height)
html = IPython.display.HTML(shell)
if display_handle:
display_handle.update(html)
else:
IPython.display.display(html) |
def set_circulating(self, param):
"""
Sets whether to circulate - in effect whether the heater is on.
:param param: The mode to set, must be 0 or 1.
:return: Empty string.
"""
if param == 0:
self.is_circulating = param
self.circulate_commanded = False
elif param == 1:
self.is_circulating = param
self.circulate_commanded = True
return "" | Sets whether to circulate - in effect whether the heater is on.
:param param: The mode to set, must be 0 or 1.
:return: Empty string. | Below is the the instruction that describes the task:
### Input:
Sets whether to circulate - in effect whether the heater is on.
:param param: The mode to set, must be 0 or 1.
:return: Empty string.
### Response:
def set_circulating(self, param):
"""
Sets whether to circulate - in effect whether the heater is on.
:param param: The mode to set, must be 0 or 1.
:return: Empty string.
"""
if param == 0:
self.is_circulating = param
self.circulate_commanded = False
elif param == 1:
self.is_circulating = param
self.circulate_commanded = True
return "" |
def _propagate_glyph_anchors(self, ufo, parent, processed):
"""Propagate anchors for a single parent glyph."""
if parent.name in processed:
return
processed.add(parent.name)
base_components = []
mark_components = []
anchor_names = set()
to_add = {}
for component in parent.components:
try:
glyph = ufo[component.baseGlyph]
except KeyError:
self.logger.warning(
"Anchors not propagated for inexistent component {} in glyph {}".format(
component.baseGlyph, parent.name
)
)
else:
_propagate_glyph_anchors(self, ufo, glyph, processed)
if any(a.name.startswith("_") for a in glyph.anchors):
mark_components.append(component)
else:
base_components.append(component)
anchor_names |= {a.name for a in glyph.anchors}
for anchor_name in anchor_names:
# don't add if parent already contains this anchor OR any associated
# ligature anchors (e.g. "top_1, top_2" for "top")
if not any(a.name.startswith(anchor_name) for a in parent.anchors):
_get_anchor_data(to_add, ufo, base_components, anchor_name)
for component in mark_components:
_adjust_anchors(to_add, ufo, component)
# we sort propagated anchors to append in a deterministic order
for name, (x, y) in sorted(to_add.items()):
anchor_dict = {"name": name, "x": x, "y": y}
parent.appendAnchor(glyph.anchorClass(anchorDict=anchor_dict)) | Propagate anchors for a single parent glyph. | Below is the the instruction that describes the task:
### Input:
Propagate anchors for a single parent glyph.
### Response:
def _propagate_glyph_anchors(self, ufo, parent, processed):
"""Propagate anchors for a single parent glyph."""
if parent.name in processed:
return
processed.add(parent.name)
base_components = []
mark_components = []
anchor_names = set()
to_add = {}
for component in parent.components:
try:
glyph = ufo[component.baseGlyph]
except KeyError:
self.logger.warning(
"Anchors not propagated for inexistent component {} in glyph {}".format(
component.baseGlyph, parent.name
)
)
else:
_propagate_glyph_anchors(self, ufo, glyph, processed)
if any(a.name.startswith("_") for a in glyph.anchors):
mark_components.append(component)
else:
base_components.append(component)
anchor_names |= {a.name for a in glyph.anchors}
for anchor_name in anchor_names:
# don't add if parent already contains this anchor OR any associated
# ligature anchors (e.g. "top_1, top_2" for "top")
if not any(a.name.startswith(anchor_name) for a in parent.anchors):
_get_anchor_data(to_add, ufo, base_components, anchor_name)
for component in mark_components:
_adjust_anchors(to_add, ufo, component)
# we sort propagated anchors to append in a deterministic order
for name, (x, y) in sorted(to_add.items()):
anchor_dict = {"name": name, "x": x, "y": y}
parent.appendAnchor(glyph.anchorClass(anchorDict=anchor_dict)) |
def makeringlatticeCIJ(n, k, seed=None):
'''
This function generates a directed lattice network with toroidal
boundary counditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
'''
rng = get_rng(seed)
# initialize
CIJ = np.zeros((n, n))
CIJ1 = np.ones((n, n))
kk = 0
count = 0
seq = range(1, n)
seq2 = range(n - 1, 0, -1)
# fill in
while kk < k:
count += 1
dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
CIJ += dCIJ
kk = int(np.sum(CIJ))
# remove excess connections
overby = kk - k
if overby:
i, j = np.where(dCIJ)
rp = rng.permutation(np.size(i))
for ii in range(overby):
CIJ[i[rp[ii]], j[rp[ii]]] = 0
return CIJ | This function generates a directed lattice network with toroidal
boundary counditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N. | Below is the the instruction that describes the task:
### Input:
This function generates a directed lattice network with toroidal
boundary counditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
### Response:
def makeringlatticeCIJ(n, k, seed=None):
'''
This function generates a directed lattice network with toroidal
boundary counditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
'''
rng = get_rng(seed)
# initialize
CIJ = np.zeros((n, n))
CIJ1 = np.ones((n, n))
kk = 0
count = 0
seq = range(1, n)
seq2 = range(n - 1, 0, -1)
# fill in
while kk < k:
count += 1
dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
CIJ += dCIJ
kk = int(np.sum(CIJ))
# remove excess connections
overby = kk - k
if overby:
i, j = np.where(dCIJ)
rp = rng.permutation(np.size(i))
for ii in range(overby):
CIJ[i[rp[ii]], j[rp[ii]]] = 0
return CIJ |
def s(command, verbose=False, fail_fast=True, interactive=False):
"""
Run a shell command.
"""
completed_process = None
output = None
if interactive:
completed_process = subprocess.run(
command,
shell=True,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
)
output = {
"exit_code": completed_process.returncode,
}
else:
completed_process = subprocess.run(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = {
"exit_code": completed_process.returncode,
"out": completed_process.stdout.decode('utf-8'),
"err": completed_process.stderr.decode('utf-8'),
}
result = None
if completed_process.returncode == 0:
result = cmn.OkResult(output)
else:
result = cmn.ErrorResult(output)
fail = fail_fast and not result.ok
if verbose or fail:
cmn.logger.log(
"Executed shell command '{command}'\n"
"exit code was: {exit_code}\n".format(
command=command,
**result.value
)
)
if (verbose or fail) and not interactive:
cmn.logger.log(
"stdout was:\n{out}\n"
"stderr was:\n{err}".format(
**result.value
)
)
if fail:
cmn.fail("Failed executing shell command '{command}'".format(command=command))
return result | Run a shell command. | Below is the the instruction that describes the task:
### Input:
Run a shell command.
### Response:
def s(command, verbose=False, fail_fast=True, interactive=False):
"""
Run a shell command.
"""
completed_process = None
output = None
if interactive:
completed_process = subprocess.run(
command,
shell=True,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
)
output = {
"exit_code": completed_process.returncode,
}
else:
completed_process = subprocess.run(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = {
"exit_code": completed_process.returncode,
"out": completed_process.stdout.decode('utf-8'),
"err": completed_process.stderr.decode('utf-8'),
}
result = None
if completed_process.returncode == 0:
result = cmn.OkResult(output)
else:
result = cmn.ErrorResult(output)
fail = fail_fast and not result.ok
if verbose or fail:
cmn.logger.log(
"Executed shell command '{command}'\n"
"exit code was: {exit_code}\n".format(
command=command,
**result.value
)
)
if (verbose or fail) and not interactive:
cmn.logger.log(
"stdout was:\n{out}\n"
"stderr was:\n{err}".format(
**result.value
)
)
if fail:
cmn.fail("Failed executing shell command '{command}'".format(command=command))
return result |
def get_unique_repositories(repo_list):
"""Method to create unique list of repositories from the list of
repositories given.
:param repo_list: List of repositories which might contain duplicates.
:return: List of repositories with no duplicate in them.
"""
unique_list = list()
included = defaultdict(lambda: False)
for repo in repo_list:
if not included[repo.full_name]:
unique_list.append(repo)
included[repo.full_name] = True
return unique_list | Method to create unique list of repositories from the list of
repositories given.
:param repo_list: List of repositories which might contain duplicates.
:return: List of repositories with no duplicate in them. | Below is the the instruction that describes the task:
### Input:
Method to create unique list of repositories from the list of
repositories given.
:param repo_list: List of repositories which might contain duplicates.
:return: List of repositories with no duplicate in them.
### Response:
def get_unique_repositories(repo_list):
"""Method to create unique list of repositories from the list of
repositories given.
:param repo_list: List of repositories which might contain duplicates.
:return: List of repositories with no duplicate in them.
"""
unique_list = list()
included = defaultdict(lambda: False)
for repo in repo_list:
if not included[repo.full_name]:
unique_list.append(repo)
included[repo.full_name] = True
return unique_list |
def plot_nodes_pcolor_to_ax(self, ax, nid, **kwargs):
"""Plot node data to an axes object
Parameters
----------
ax : axes object
axes to plot to
nid : int
node id pointing to the respective data set
cmap : string, optional
color map to use. Default: jet
vmin : float, optional
Minimum colorbar value
vmax : float, optional
Maximum colorbar value
Returns
-------
"""
fig = ax.get_figure()
x = self.grid.nodes['presort'][:, 1]
z = self.grid.nodes['presort'][:, 2]
ax.scatter(x, z)
xz = np.vstack((x, z)).T
# generate grid
X, Z = np.meshgrid(
np.linspace(x.min(), x.max(), 100),
np.linspace(z.min(), z.max(), 100),
)
values = np.array(self.nodeman.nodevals[nid])
# linear
# cubic
cint = scipy.interpolate.griddata(
xz,
values,
(X, Z),
method='linear',
# method='linear',
# method='nearest',
fill_value=np.nan,
)
cint_ma = np.ma.masked_invalid(cint)
pc = ax.pcolormesh(
X, Z,
cint_ma,
cmap=kwargs.get('cmap', 'jet'),
vmin=kwargs.get('vmin', None),
vmax=kwargs.get('vmax', None),
)
if kwargs.get('plot_colorbar', False):
divider = make_axes_locatable(ax)
cbposition = kwargs.get('cbposition', 'vertical')
if cbposition == 'horizontal':
ax_cb = divider.new_vertical(
size=0.1, pad=0.4, pack_start=True
)
elif cbposition == 'vertical':
ax_cb = divider.new_horizontal(
size=0.1, pad=0.4,
)
else:
raise Exception('cbposition not recognized')
ax.get_figure().add_axes(ax_cb)
cb = fig.colorbar(
pc,
cax=ax_cb,
orientation=cbposition,
label=kwargs.get('cblabel', ''),
ticks=mpl.ticker.MaxNLocator(kwargs.get('cbnrticks', 3)),
format=kwargs.get('cbformat', None),
extend='both',
)
no_elecs = kwargs.get('no_elecs', False)
if self.grid.electrodes is not None and no_elecs is not True:
ax.scatter(
self.grid.electrodes[:, 1],
self.grid.electrodes[:, 2],
color=self.grid.props['electrode_color'],
# clip_on=False,
)
return fig, ax, pc, cb
return fig, ax, pc | Plot node data to an axes object
Parameters
----------
ax : axes object
axes to plot to
nid : int
node id pointing to the respective data set
cmap : string, optional
color map to use. Default: jet
vmin : float, optional
Minimum colorbar value
vmax : float, optional
Maximum colorbar value
Returns
------- | Below is the the instruction that describes the task:
### Input:
Plot node data to an axes object
Parameters
----------
ax : axes object
axes to plot to
nid : int
node id pointing to the respective data set
cmap : string, optional
color map to use. Default: jet
vmin : float, optional
Minimum colorbar value
vmax : float, optional
Maximum colorbar value
Returns
-------
### Response:
def plot_nodes_pcolor_to_ax(self, ax, nid, **kwargs):
"""Plot node data to an axes object
Parameters
----------
ax : axes object
axes to plot to
nid : int
node id pointing to the respective data set
cmap : string, optional
color map to use. Default: jet
vmin : float, optional
Minimum colorbar value
vmax : float, optional
Maximum colorbar value
Returns
-------
"""
fig = ax.get_figure()
x = self.grid.nodes['presort'][:, 1]
z = self.grid.nodes['presort'][:, 2]
ax.scatter(x, z)
xz = np.vstack((x, z)).T
# generate grid
X, Z = np.meshgrid(
np.linspace(x.min(), x.max(), 100),
np.linspace(z.min(), z.max(), 100),
)
values = np.array(self.nodeman.nodevals[nid])
# linear
# cubic
cint = scipy.interpolate.griddata(
xz,
values,
(X, Z),
method='linear',
# method='linear',
# method='nearest',
fill_value=np.nan,
)
cint_ma = np.ma.masked_invalid(cint)
pc = ax.pcolormesh(
X, Z,
cint_ma,
cmap=kwargs.get('cmap', 'jet'),
vmin=kwargs.get('vmin', None),
vmax=kwargs.get('vmax', None),
)
if kwargs.get('plot_colorbar', False):
divider = make_axes_locatable(ax)
cbposition = kwargs.get('cbposition', 'vertical')
if cbposition == 'horizontal':
ax_cb = divider.new_vertical(
size=0.1, pad=0.4, pack_start=True
)
elif cbposition == 'vertical':
ax_cb = divider.new_horizontal(
size=0.1, pad=0.4,
)
else:
raise Exception('cbposition not recognized')
ax.get_figure().add_axes(ax_cb)
cb = fig.colorbar(
pc,
cax=ax_cb,
orientation=cbposition,
label=kwargs.get('cblabel', ''),
ticks=mpl.ticker.MaxNLocator(kwargs.get('cbnrticks', 3)),
format=kwargs.get('cbformat', None),
extend='both',
)
no_elecs = kwargs.get('no_elecs', False)
if self.grid.electrodes is not None and no_elecs is not True:
ax.scatter(
self.grid.electrodes[:, 1],
self.grid.electrodes[:, 2],
color=self.grid.props['electrode_color'],
# clip_on=False,
)
return fig, ax, pc, cb
return fig, ax, pc |
def setTypingStatus(self, status, thread_id=None, thread_type=None):
"""
Sets users typing status in a thread
:param status: Specify the typing status
:param thread_id: User/Group ID to change status in. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type status: models.TypingStatus
:type thread_type: models.ThreadType
:raises: FBchatException if request failed
"""
thread_id, thread_type = self._getThread(thread_id, thread_type)
data = {
"typ": status.value,
"thread": thread_id,
"to": thread_id if thread_type == ThreadType.USER else "",
"source": "mercury-chat",
}
j = self._post(self.req_url.TYPING, data, fix_request=True, as_json=True) | Sets users typing status in a thread
:param status: Specify the typing status
:param thread_id: User/Group ID to change status in. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type status: models.TypingStatus
:type thread_type: models.ThreadType
:raises: FBchatException if request failed | Below is the the instruction that describes the task:
### Input:
Sets users typing status in a thread
:param status: Specify the typing status
:param thread_id: User/Group ID to change status in. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type status: models.TypingStatus
:type thread_type: models.ThreadType
:raises: FBchatException if request failed
### Response:
def setTypingStatus(self, status, thread_id=None, thread_type=None):
"""
Sets users typing status in a thread
:param status: Specify the typing status
:param thread_id: User/Group ID to change status in. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type status: models.TypingStatus
:type thread_type: models.ThreadType
:raises: FBchatException if request failed
"""
thread_id, thread_type = self._getThread(thread_id, thread_type)
data = {
"typ": status.value,
"thread": thread_id,
"to": thread_id if thread_type == ThreadType.USER else "",
"source": "mercury-chat",
}
j = self._post(self.req_url.TYPING, data, fix_request=True, as_json=True) |
def push(self, document=None):
''' Push the given document to the server and record it as session.document.
If this is called more than once, the Document has to be the same (or None
to mean "session.document").
.. note::
Automatically calls :func:`~connect` before pushing.
Args:
document (:class:`~bokeh.document.Document`, optional) :
The document which will be kept in sync with the server document.
None to use session.document or create a new document.
'''
if self.document is None:
if document is None:
doc = Document()
else:
doc = document
else:
if document is None:
doc = self.document
else:
raise ValueError("Cannot push() a different document from existing session.document")
self.connect()
if not self.connected:
raise IOError("Cannot push session document because we failed to connect to the server (to start the server, try the 'bokeh serve' command)")
self._connection.push_doc(doc)
if self._document is None:
self._attach_document(doc) | Push the given document to the server and record it as session.document.
If this is called more than once, the Document has to be the same (or None
to mean "session.document").
.. note::
Automatically calls :func:`~connect` before pushing.
Args:
document (:class:`~bokeh.document.Document`, optional) :
The document which will be kept in sync with the server document.
None to use session.document or create a new document. | Below is the the instruction that describes the task:
### Input:
Push the given document to the server and record it as session.document.
If this is called more than once, the Document has to be the same (or None
to mean "session.document").
.. note::
Automatically calls :func:`~connect` before pushing.
Args:
document (:class:`~bokeh.document.Document`, optional) :
The document which will be kept in sync with the server document.
None to use session.document or create a new document.
### Response:
def push(self, document=None):
''' Push the given document to the server and record it as session.document.
If this is called more than once, the Document has to be the same (or None
to mean "session.document").
.. note::
Automatically calls :func:`~connect` before pushing.
Args:
document (:class:`~bokeh.document.Document`, optional) :
The document which will be kept in sync with the server document.
None to use session.document or create a new document.
'''
if self.document is None:
if document is None:
doc = Document()
else:
doc = document
else:
if document is None:
doc = self.document
else:
raise ValueError("Cannot push() a different document from existing session.document")
self.connect()
if not self.connected:
raise IOError("Cannot push session document because we failed to connect to the server (to start the server, try the 'bokeh serve' command)")
self._connection.push_doc(doc)
if self._document is None:
self._attach_document(doc) |
def _failed(self, msg):
"""
Log a validation failure.
:param string msg: the error message
"""
self.log(msg)
self.result.passed = False
self.result.add_error(msg)
self.log(u"Failed") | Log a validation failure.
:param string msg: the error message | Below is the the instruction that describes the task:
### Input:
Log a validation failure.
:param string msg: the error message
### Response:
def _failed(self, msg):
"""
Log a validation failure.
:param string msg: the error message
"""
self.log(msg)
self.result.passed = False
self.result.add_error(msg)
self.log(u"Failed") |
def write(self, filename='same'):
"""
write object to an ENVI header file
"""
if filename == 'same':
filename = self.filename
if not filename.endswith('.hdr'):
filename += '.hdr'
with open(filename, 'w') as out:
out.write(self.__str__()) | write object to an ENVI header file | Below is the the instruction that describes the task:
### Input:
write object to an ENVI header file
### Response:
def write(self, filename='same'):
"""
write object to an ENVI header file
"""
if filename == 'same':
filename = self.filename
if not filename.endswith('.hdr'):
filename += '.hdr'
with open(filename, 'w') as out:
out.write(self.__str__()) |
def attach(self, gui):
"""Attach the view to the GUI."""
super(TraceView, self).attach(gui)
self.actions.add(self.go_to, alias='tg')
self.actions.separator()
self.actions.add(self.shift, alias='ts')
self.actions.add(self.go_right)
self.actions.add(self.go_left)
self.actions.separator()
self.actions.add(self.increase)
self.actions.add(self.decrease)
self.actions.separator()
self.actions.add(self.widen)
self.actions.add(self.narrow)
self.actions.separator()
self.actions.add(self.toggle_show_labels)
# We forward the event from VisPy to the phy GUI.
@self.connect
def on_spike_click(e):
logger.log(5, "Spike click on channel %s, spike %s, cluster %s.",
e.channel_id, e.spike_id, e.cluster_id)
gui.emit('spike_click',
channel_id=e.channel_id,
spike_id=e.spike_id,
cluster_id=e.cluster_id,
) | Attach the view to the GUI. | Below is the the instruction that describes the task:
### Input:
Attach the view to the GUI.
### Response:
def attach(self, gui):
"""Attach the view to the GUI."""
super(TraceView, self).attach(gui)
self.actions.add(self.go_to, alias='tg')
self.actions.separator()
self.actions.add(self.shift, alias='ts')
self.actions.add(self.go_right)
self.actions.add(self.go_left)
self.actions.separator()
self.actions.add(self.increase)
self.actions.add(self.decrease)
self.actions.separator()
self.actions.add(self.widen)
self.actions.add(self.narrow)
self.actions.separator()
self.actions.add(self.toggle_show_labels)
# We forward the event from VisPy to the phy GUI.
@self.connect
def on_spike_click(e):
logger.log(5, "Spike click on channel %s, spike %s, cluster %s.",
e.channel_id, e.spike_id, e.cluster_id)
gui.emit('spike_click',
channel_id=e.channel_id,
spike_id=e.spike_id,
cluster_id=e.cluster_id,
) |
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
use_grain=False,
minion_ids=None,
tag_match_key=None,
tag_match_value='asis',
tag_list_key=None,
tag_list_sep=';'):
'''
Execute a command and read the output as YAML
'''
valid_tag_match_value = ['uqdn', 'asis']
# meta-data:instance-id
grain_instance_id = __grains__.get('meta-data', {}).get('instance-id', None)
if not grain_instance_id:
# dynamic:instance-identity:document:instanceId
grain_instance_id = \
__grains__.get('dynamic', {}).get('instance-identity', {}).get('document', {}).get('instance-id', None)
if grain_instance_id and re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', grain_instance_id) is None:
log.error('External pillar %s, instance-id \'%s\' is not valid for '
'\'%s\'', __name__, grain_instance_id, minion_id)
grain_instance_id = None # invalid instance id found, remove it from use.
# Check AWS Tag restrictions .i.e. letters, spaces, and numbers and + - = . _ : / @
if tag_match_key and re.match(r'[\w=.:/@-]+$', tag_match_key) is None:
log.error('External pillar %s, tag_match_key \'%s\' is not valid ',
__name__, tag_match_key if isinstance(tag_match_key, six.text_type) else 'non-string')
return {}
if tag_match_key and tag_match_value not in valid_tag_match_value:
log.error('External pillar %s, tag_value \'%s\' is not valid must be one '
'of %s', __name__, tag_match_value, ' '.join(valid_tag_match_value))
return {}
if not tag_match_key:
base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' '
'against instance-id', __name__, minion_id)
else:
base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' '
'against instance-id or \'%s\' against \'%s\'', __name__, minion_id, tag_match_key, tag_match_value)
log.debug(base_msg)
find_filter = None
find_id = None
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is not None:
find_filter = None
find_id = minion_id
elif tag_match_key:
if tag_match_value == 'uqdn':
find_filter = {'tag:{0}'.format(tag_match_key): minion_id.split('.', 1)[0]}
else:
find_filter = {'tag:{0}'.format(tag_match_key): minion_id}
if grain_instance_id:
# we have an untrusted grain_instance_id, use it to narrow the search
# even more. Combination will be unique even if uqdn is set.
find_filter.update({'instance-id': grain_instance_id})
# Add this if running state is not dependant on EC2Config
# find_filter.update('instance-state-name': 'running')
# no minion-id is instance-id and no suitable filter, try use_grain if enabled
if not find_filter and not find_id and use_grain:
if not grain_instance_id:
log.debug('Minion-id is not in AWS instance-id formation, and there '
'is no instance-id grain for minion %s', minion_id)
return {}
if minion_ids is not None and minion_id not in minion_ids:
log.debug('Minion-id is not in AWS instance ID format, and minion_ids '
'is set in the ec2_pillar configuration, but minion %s is '
'not in the list of allowed minions %s', minion_id, minion_ids)
return {}
find_id = grain_instance_id
if not (find_filter or find_id):
log.debug('External pillar %s, querying EC2 tags for minion id \'%s\' against '
'instance-id or \'%s\' against \'%s\' noughthing to match against',
__name__, minion_id, tag_match_key, tag_match_value)
return {}
myself = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
if not myself:
log.info("%s: salt master not an EC2 instance, skipping", __name__)
return {}
# Get the Master's instance info, primarily the region
(_, region) = _get_instance_info()
# If the Minion's region is available, use it instead
if use_grain:
region = __grains__.get('ec2', {}).get('region', region)
try:
conn = boto.ec2.connect_to_region(region)
except boto.exception.AWSConnectionError as exc:
log.error('%s: invalid AWS credentials, %s', __name__, exc)
return {}
if conn is None:
log.error('%s: Could not connect to region %s', __name__, region)
return {}
try:
if find_id:
instance_data = conn.get_only_instances(instance_ids=[find_id], dry_run=False)
else:
# filters and max_results can not be used togther.
instance_data = conn.get_only_instances(filters=find_filter, dry_run=False)
except boto.exception.EC2ResponseError as exc:
log.error('%s failed with \'%s\'', base_msg, exc)
return {}
if not instance_data:
log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
# Find a active instance, i.e. ignore terminated and stopped instances
active_inst = []
for inst in range(0, len(instance_data)):
if instance_data[inst].state not in ['terminated', 'stopped']:
active_inst.append(inst)
valid_inst = len(active_inst)
if not valid_inst:
log.debug('%s match found but not active \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
if valid_inst > 1:
log.error('%s multiple matches, ignored, using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
instance = instance_data[active_inst[0]]
if instance.tags:
ec2_tags = instance.tags
ec2_tags_list = {}
log.debug('External pillar %s, for minion id \'%s\', tags: %s', __name__, minion_id, instance.tags)
if tag_list_key and isinstance(tag_list_key, list):
for item in tag_list_key:
if item in ec2_tags:
ec2_tags_list[item] = ec2_tags[item].split(tag_list_sep)
del ec2_tags[item] # make sure its only in ec2_tags_list
else:
ec2_tags_list[item] = [] # always return a result
return {'ec2_tags': ec2_tags, 'ec2_tags_list': ec2_tags_list}
return {} | Execute a command and read the output as YAML | Below is the the instruction that describes the task:
### Input:
Execute a command and read the output as YAML
### Response:
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
use_grain=False,
minion_ids=None,
tag_match_key=None,
tag_match_value='asis',
tag_list_key=None,
tag_list_sep=';'):
'''
Execute a command and read the output as YAML
'''
valid_tag_match_value = ['uqdn', 'asis']
# meta-data:instance-id
grain_instance_id = __grains__.get('meta-data', {}).get('instance-id', None)
if not grain_instance_id:
# dynamic:instance-identity:document:instanceId
grain_instance_id = \
__grains__.get('dynamic', {}).get('instance-identity', {}).get('document', {}).get('instance-id', None)
if grain_instance_id and re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', grain_instance_id) is None:
log.error('External pillar %s, instance-id \'%s\' is not valid for '
'\'%s\'', __name__, grain_instance_id, minion_id)
grain_instance_id = None # invalid instance id found, remove it from use.
# Check AWS Tag restrictions .i.e. letters, spaces, and numbers and + - = . _ : / @
if tag_match_key and re.match(r'[\w=.:/@-]+$', tag_match_key) is None:
log.error('External pillar %s, tag_match_key \'%s\' is not valid ',
__name__, tag_match_key if isinstance(tag_match_key, six.text_type) else 'non-string')
return {}
if tag_match_key and tag_match_value not in valid_tag_match_value:
log.error('External pillar %s, tag_value \'%s\' is not valid must be one '
'of %s', __name__, tag_match_value, ' '.join(valid_tag_match_value))
return {}
if not tag_match_key:
base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' '
'against instance-id', __name__, minion_id)
else:
base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' '
'against instance-id or \'%s\' against \'%s\'', __name__, minion_id, tag_match_key, tag_match_value)
log.debug(base_msg)
find_filter = None
find_id = None
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is not None:
find_filter = None
find_id = minion_id
elif tag_match_key:
if tag_match_value == 'uqdn':
find_filter = {'tag:{0}'.format(tag_match_key): minion_id.split('.', 1)[0]}
else:
find_filter = {'tag:{0}'.format(tag_match_key): minion_id}
if grain_instance_id:
# we have an untrusted grain_instance_id, use it to narrow the search
# even more. Combination will be unique even if uqdn is set.
find_filter.update({'instance-id': grain_instance_id})
# Add this if running state is not dependant on EC2Config
# find_filter.update('instance-state-name': 'running')
# no minion-id is instance-id and no suitable filter, try use_grain if enabled
if not find_filter and not find_id and use_grain:
if not grain_instance_id:
log.debug('Minion-id is not in AWS instance-id formation, and there '
'is no instance-id grain for minion %s', minion_id)
return {}
if minion_ids is not None and minion_id not in minion_ids:
log.debug('Minion-id is not in AWS instance ID format, and minion_ids '
'is set in the ec2_pillar configuration, but minion %s is '
'not in the list of allowed minions %s', minion_id, minion_ids)
return {}
find_id = grain_instance_id
if not (find_filter or find_id):
log.debug('External pillar %s, querying EC2 tags for minion id \'%s\' against '
'instance-id or \'%s\' against \'%s\' noughthing to match against',
__name__, minion_id, tag_match_key, tag_match_value)
return {}
myself = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
if not myself:
log.info("%s: salt master not an EC2 instance, skipping", __name__)
return {}
# Get the Master's instance info, primarily the region
(_, region) = _get_instance_info()
# If the Minion's region is available, use it instead
if use_grain:
region = __grains__.get('ec2', {}).get('region', region)
try:
conn = boto.ec2.connect_to_region(region)
except boto.exception.AWSConnectionError as exc:
log.error('%s: invalid AWS credentials, %s', __name__, exc)
return {}
if conn is None:
log.error('%s: Could not connect to region %s', __name__, region)
return {}
try:
if find_id:
instance_data = conn.get_only_instances(instance_ids=[find_id], dry_run=False)
else:
# filters and max_results can not be used togther.
instance_data = conn.get_only_instances(filters=find_filter, dry_run=False)
except boto.exception.EC2ResponseError as exc:
log.error('%s failed with \'%s\'', base_msg, exc)
return {}
if not instance_data:
log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
# Find a active instance, i.e. ignore terminated and stopped instances
active_inst = []
for inst in range(0, len(instance_data)):
if instance_data[inst].state not in ['terminated', 'stopped']:
active_inst.append(inst)
valid_inst = len(active_inst)
if not valid_inst:
log.debug('%s match found but not active \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
if valid_inst > 1:
log.error('%s multiple matches, ignored, using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
instance = instance_data[active_inst[0]]
if instance.tags:
ec2_tags = instance.tags
ec2_tags_list = {}
log.debug('External pillar %s, for minion id \'%s\', tags: %s', __name__, minion_id, instance.tags)
if tag_list_key and isinstance(tag_list_key, list):
for item in tag_list_key:
if item in ec2_tags:
ec2_tags_list[item] = ec2_tags[item].split(tag_list_sep)
del ec2_tags[item] # make sure its only in ec2_tags_list
else:
ec2_tags_list[item] = [] # always return a result
return {'ec2_tags': ec2_tags, 'ec2_tags_list': ec2_tags_list}
return {} |
def syspath(path, prefix=True):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
"""
# Don't do anything if we're not on windows
if os.path.__name__ != 'ntpath':
return path
if not isinstance(path, six.text_type):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
try:
path = path.decode('utf8')
except UnicodeError:
# The encoding should always be MBCS, Windows' broken
# Unicode representation.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
path = path.decode(encoding, 'replace')
# Add the magic prefix if it isn't already there.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith(u'\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = u'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path
return path | Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing. | Below is the the instruction that describes the task:
### Input:
Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
### Response:
def syspath(path, prefix=True):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
"""
# Don't do anything if we're not on windows
if os.path.__name__ != 'ntpath':
return path
if not isinstance(path, six.text_type):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
try:
path = path.decode('utf8')
except UnicodeError:
# The encoding should always be MBCS, Windows' broken
# Unicode representation.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
path = path.decode(encoding, 'replace')
# Add the magic prefix if it isn't already there.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith(u'\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = u'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path
return path |
def _return_result(self, result, comparison_vectors=None):
"""Return different formatted classification results.
"""
return_type = cf.get_option('classification.return_type')
if type(result) != np.ndarray:
raise ValueError("numpy.ndarray expected.")
# return the pandas.MultiIndex
if return_type == 'index':
return comparison_vectors.index[result.astype(bool)]
# return a pandas.Series
elif return_type == 'series':
return pandas.Series(
result,
index=comparison_vectors.index,
name='classification')
# return a numpy.ndarray
elif return_type == 'array':
return result
# return_type not known
else:
raise ValueError(
"return_type {} unknown. Choose 'index', 'series' or "
"'array'".format(return_type)) | Return different formatted classification results. | Below is the the instruction that describes the task:
### Input:
Return different formatted classification results.
### Response:
def _return_result(self, result, comparison_vectors=None):
"""Return different formatted classification results.
"""
return_type = cf.get_option('classification.return_type')
if type(result) != np.ndarray:
raise ValueError("numpy.ndarray expected.")
# return the pandas.MultiIndex
if return_type == 'index':
return comparison_vectors.index[result.astype(bool)]
# return a pandas.Series
elif return_type == 'series':
return pandas.Series(
result,
index=comparison_vectors.index,
name='classification')
# return a numpy.ndarray
elif return_type == 'array':
return result
# return_type not known
else:
raise ValueError(
"return_type {} unknown. Choose 'index', 'series' or "
"'array'".format(return_type)) |
def decompressBWT(inputDir, outputDir, numProcs, logger):
'''
This is called for taking a BWT and decompressing it back out to it's original form. While unusual to do,
it's included in this package for completion purposes.
@param inputDir - the directory of the compressed BWT we plan on decompressing
@param outputFN - the directory for the output decompressed BWT, it can be the same, we don't care
@param numProcs - number of processes we're allowed to use
@param logger - log all the things!
'''
#load it, force it to be a compressed bwt also
msbwt = MultiStringBWT.CompressedMSBWT()
msbwt.loadMsbwt(inputDir, logger)
#make the output file
outputFile = np.lib.format.open_memmap(outputDir+'/msbwt.npy', 'w+', '<u1', (msbwt.getTotalSize(),))
del outputFile
worksize = 1000000
tups = [None]*(msbwt.getTotalSize()/worksize+1)
x = 0
if msbwt.getTotalSize() > worksize:
for x in xrange(0, msbwt.getTotalSize()/worksize):
tups[x] = (inputDir, outputDir, x*worksize, (x+1)*worksize)
tups[-1] = (inputDir, outputDir, (x+1)*worksize, msbwt.getTotalSize())
else:
tups[0] = (inputDir, outputDir, 0, msbwt.getTotalSize())
if numProcs > 1:
myPool = multiprocessing.Pool(numProcs)
rets = myPool.map(decompressBWTPoolProcess, tups)
else:
rets = []
for tup in tups:
rets.append(decompressBWTPoolProcess(tup)) | This is called for taking a BWT and decompressing it back out to it's original form. While unusual to do,
it's included in this package for completion purposes.
@param inputDir - the directory of the compressed BWT we plan on decompressing
@param outputFN - the directory for the output decompressed BWT, it can be the same, we don't care
@param numProcs - number of processes we're allowed to use
@param logger - log all the things! | Below is the the instruction that describes the task:
### Input:
This is called for taking a BWT and decompressing it back out to it's original form. While unusual to do,
it's included in this package for completion purposes.
@param inputDir - the directory of the compressed BWT we plan on decompressing
@param outputFN - the directory for the output decompressed BWT, it can be the same, we don't care
@param numProcs - number of processes we're allowed to use
@param logger - log all the things!
### Response:
def decompressBWT(inputDir, outputDir, numProcs, logger):
'''
This is called for taking a BWT and decompressing it back out to it's original form. While unusual to do,
it's included in this package for completion purposes.
@param inputDir - the directory of the compressed BWT we plan on decompressing
@param outputFN - the directory for the output decompressed BWT, it can be the same, we don't care
@param numProcs - number of processes we're allowed to use
@param logger - log all the things!
'''
#load it, force it to be a compressed bwt also
msbwt = MultiStringBWT.CompressedMSBWT()
msbwt.loadMsbwt(inputDir, logger)
#make the output file
outputFile = np.lib.format.open_memmap(outputDir+'/msbwt.npy', 'w+', '<u1', (msbwt.getTotalSize(),))
del outputFile
worksize = 1000000
tups = [None]*(msbwt.getTotalSize()/worksize+1)
x = 0
if msbwt.getTotalSize() > worksize:
for x in xrange(0, msbwt.getTotalSize()/worksize):
tups[x] = (inputDir, outputDir, x*worksize, (x+1)*worksize)
tups[-1] = (inputDir, outputDir, (x+1)*worksize, msbwt.getTotalSize())
else:
tups[0] = (inputDir, outputDir, 0, msbwt.getTotalSize())
if numProcs > 1:
myPool = multiprocessing.Pool(numProcs)
rets = myPool.map(decompressBWTPoolProcess, tups)
else:
rets = []
for tup in tups:
rets.append(decompressBWTPoolProcess(tup)) |
def delete(self):
# type: () -> None
"""Delete this service.
:raises APIError: if delete was not succesfull.
"""
response = self._client._request('DELETE', self._client._build_url('service', service_id=self.id))
if response.status_code != requests.codes.no_content: # pragma: no cover
raise APIError("Could not delete service: {} with id {}".format(self.name, self.id)) | Delete this service.
:raises APIError: if delete was not succesfull. | Below is the the instruction that describes the task:
### Input:
Delete this service.
:raises APIError: if delete was not succesfull.
### Response:
def delete(self):
# type: () -> None
"""Delete this service.
:raises APIError: if delete was not succesfull.
"""
response = self._client._request('DELETE', self._client._build_url('service', service_id=self.id))
if response.status_code != requests.codes.no_content: # pragma: no cover
raise APIError("Could not delete service: {} with id {}".format(self.name, self.id)) |
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
if self.limits != {}:
return self.limits
limits = {}
limits.update(self._get_limits_ebs())
self.limits = limits
return limits | Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
### Response:
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
if self.limits != {}:
return self.limits
limits = {}
limits.update(self._get_limits_ebs())
self.limits = limits
return limits |
def _load_img(handle, target_dtype=np.float32, size=None, **kwargs):
"""Load image file as numpy array."""
image_pil = PIL.Image.open(handle, **kwargs)
# resize the image to the requested size, if one was specified
if size is not None:
if len(size) > 2:
size = size[:2]
log.warning("`_load_img()` received size: {}, trimming to first two dims!".format(size))
image_pil = image_pil.resize(size, resample=PIL.Image.LANCZOS)
image_array = np.asarray(image_pil)
# remove alpha channel if it contains no information
# if image_array.shape[-1] > 3 and 'A' not in image_pil.mode:
# image_array = image_array[..., :-1]
image_dtype = image_array.dtype
image_max_value = np.iinfo(image_dtype).max # ...for uint8 that's 255, etc.
# using np.divide should avoid an extra copy compared to doing division first
ndimage = np.divide(image_array, image_max_value, dtype=target_dtype)
rank = len(ndimage.shape)
if rank == 3:
return ndimage
elif rank == 2:
return np.repeat(np.expand_dims(ndimage, axis=2), 3, axis=2)
else:
message = "Loaded image has more dimensions than expected: {}".format(rank)
raise NotImplementedError(message) | Load image file as numpy array. | Below is the the instruction that describes the task:
### Input:
Load image file as numpy array.
### Response:
def _load_img(handle, target_dtype=np.float32, size=None, **kwargs):
"""Load image file as numpy array."""
image_pil = PIL.Image.open(handle, **kwargs)
# resize the image to the requested size, if one was specified
if size is not None:
if len(size) > 2:
size = size[:2]
log.warning("`_load_img()` received size: {}, trimming to first two dims!".format(size))
image_pil = image_pil.resize(size, resample=PIL.Image.LANCZOS)
image_array = np.asarray(image_pil)
# remove alpha channel if it contains no information
# if image_array.shape[-1] > 3 and 'A' not in image_pil.mode:
# image_array = image_array[..., :-1]
image_dtype = image_array.dtype
image_max_value = np.iinfo(image_dtype).max # ...for uint8 that's 255, etc.
# using np.divide should avoid an extra copy compared to doing division first
ndimage = np.divide(image_array, image_max_value, dtype=target_dtype)
rank = len(ndimage.shape)
if rank == 3:
return ndimage
elif rank == 2:
return np.repeat(np.expand_dims(ndimage, axis=2), 3, axis=2)
else:
message = "Loaded image has more dimensions than expected: {}".format(rank)
raise NotImplementedError(message) |
def create_superuser(
self, username, email, short_name, full_name,
institute, password, **extra_fields):
""" Creates a new person with super powers. """
return self._create_user(
username=username, email=email,
institute=institute, password=password,
short_name=short_name, full_name=full_name,
is_admin=True, **extra_fields) | Creates a new person with super powers. | Below is the the instruction that describes the task:
### Input:
Creates a new person with super powers.
### Response:
def create_superuser(
self, username, email, short_name, full_name,
institute, password, **extra_fields):
""" Creates a new person with super powers. """
return self._create_user(
username=username, email=email,
institute=institute, password=password,
short_name=short_name, full_name=full_name,
is_admin=True, **extra_fields) |
def _save_results(self, output_dir, label, results, ngrams, type_label):
"""Saves `results` filtered by `label` and `ngram` to `output_dir`.
:param output_dir: directory to save results to
:type output_dir: `str`
:param label: catalogue label of results, used in saved filename
:type label: `str`
:param results: results to filter and save
:type results: `pandas.DataFrame`
:param ngrams: n-grams to save from results
:type ngrams: `list` of `str`
:param type_label: name of type of results, used in saved filename
:type type_label: `str`
"""
path = os.path.join(output_dir, '{}-{}.csv'.format(label, type_label))
results[results[constants.NGRAM_FIELDNAME].isin(
ngrams)].to_csv(path, encoding='utf-8', float_format='%d',
index=False) | Saves `results` filtered by `label` and `ngram` to `output_dir`.
:param output_dir: directory to save results to
:type output_dir: `str`
:param label: catalogue label of results, used in saved filename
:type label: `str`
:param results: results to filter and save
:type results: `pandas.DataFrame`
:param ngrams: n-grams to save from results
:type ngrams: `list` of `str`
:param type_label: name of type of results, used in saved filename
:type type_label: `str` | Below is the the instruction that describes the task:
### Input:
Saves `results` filtered by `label` and `ngram` to `output_dir`.
:param output_dir: directory to save results to
:type output_dir: `str`
:param label: catalogue label of results, used in saved filename
:type label: `str`
:param results: results to filter and save
:type results: `pandas.DataFrame`
:param ngrams: n-grams to save from results
:type ngrams: `list` of `str`
:param type_label: name of type of results, used in saved filename
:type type_label: `str`
### Response:
def _save_results(self, output_dir, label, results, ngrams, type_label):
"""Saves `results` filtered by `label` and `ngram` to `output_dir`.
:param output_dir: directory to save results to
:type output_dir: `str`
:param label: catalogue label of results, used in saved filename
:type label: `str`
:param results: results to filter and save
:type results: `pandas.DataFrame`
:param ngrams: n-grams to save from results
:type ngrams: `list` of `str`
:param type_label: name of type of results, used in saved filename
:type type_label: `str`
"""
path = os.path.join(output_dir, '{}-{}.csv'.format(label, type_label))
results[results[constants.NGRAM_FIELDNAME].isin(
ngrams)].to_csv(path, encoding='utf-8', float_format='%d',
index=False) |
def describe_table(self, tablename):
"""
Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table`
"""
try:
response = self.call(
'describe_table', TableName=tablename)['Table']
return Table.from_response(response)
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceNotFoundException':
return None
else: # pragma: no cover
raise | Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table` | Below is the the instruction that describes the task:
### Input:
Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table`
### Response:
def describe_table(self, tablename):
"""
Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table`
"""
try:
response = self.call(
'describe_table', TableName=tablename)['Table']
return Table.from_response(response)
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceNotFoundException':
return None
else: # pragma: no cover
raise |
def _get_bookmark(repo, name):
'''
Find the requested bookmark in the specified repo
'''
try:
return [x for x in _all_bookmarks(repo) if x[0] == name][0]
except IndexError:
return False | Find the requested bookmark in the specified repo | Below is the the instruction that describes the task:
### Input:
Find the requested bookmark in the specified repo
### Response:
def _get_bookmark(repo, name):
'''
Find the requested bookmark in the specified repo
'''
try:
return [x for x in _all_bookmarks(repo) if x[0] == name][0]
except IndexError:
return False |
def dump(new_data):
'''
Replace the entire datastore with a passed data structure
CLI Example:
.. code-block:: bash
salt '*' data.dump '{'eggs': 'spam'}'
'''
if not isinstance(new_data, dict):
if isinstance(ast.literal_eval(new_data), dict):
new_data = ast.literal_eval(new_data)
else:
return False
try:
datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
with salt.utils.files.fopen(datastore_path, 'w+b') as fn_:
serial = salt.payload.Serial(__opts__)
serial.dump(new_data, fn_)
return True
except (IOError, OSError, NameError):
return False | Replace the entire datastore with a passed data structure
CLI Example:
.. code-block:: bash
salt '*' data.dump '{'eggs': 'spam'}' | Below is the the instruction that describes the task:
### Input:
Replace the entire datastore with a passed data structure
CLI Example:
.. code-block:: bash
salt '*' data.dump '{'eggs': 'spam'}'
### Response:
def dump(new_data):
'''
Replace the entire datastore with a passed data structure
CLI Example:
.. code-block:: bash
salt '*' data.dump '{'eggs': 'spam'}'
'''
if not isinstance(new_data, dict):
if isinstance(ast.literal_eval(new_data), dict):
new_data = ast.literal_eval(new_data)
else:
return False
try:
datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
with salt.utils.files.fopen(datastore_path, 'w+b') as fn_:
serial = salt.payload.Serial(__opts__)
serial.dump(new_data, fn_)
return True
except (IOError, OSError, NameError):
return False |
def getTypedValueNoExceptions(self, row):
'''Returns the properly-typed value for the given row at this column.
Returns the type's default value if either the getter or the type conversion fails.'''
return wrapply(self.type, wrapply(self.getValue, row)) | Returns the properly-typed value for the given row at this column.
Returns the type's default value if either the getter or the type conversion fails. | Below is the the instruction that describes the task:
### Input:
Returns the properly-typed value for the given row at this column.
Returns the type's default value if either the getter or the type conversion fails.
### Response:
def getTypedValueNoExceptions(self, row):
'''Returns the properly-typed value for the given row at this column.
Returns the type's default value if either the getter or the type conversion fails.'''
return wrapply(self.type, wrapply(self.getValue, row)) |
def silence_logging(method):
"""Disables logging for the duration of what is being wrapped. This is
particularly useful when testing if a test method is supposed to issue an
error message which is confusing that the error shows for a successful
test.
"""
@wraps(method)
def wrapper(*args, **kwargs):
logging.disable(logging.ERROR)
result = method(*args, **kwargs)
logging.disable(logging.NOTSET)
return result
return wrapper | Disables logging for the duration of what is being wrapped. This is
particularly useful when testing if a test method is supposed to issue an
error message which is confusing that the error shows for a successful
test. | Below is the the instruction that describes the task:
### Input:
Disables logging for the duration of what is being wrapped. This is
particularly useful when testing if a test method is supposed to issue an
error message which is confusing that the error shows for a successful
test.
### Response:
def silence_logging(method):
"""Disables logging for the duration of what is being wrapped. This is
particularly useful when testing if a test method is supposed to issue an
error message which is confusing that the error shows for a successful
test.
"""
@wraps(method)
def wrapper(*args, **kwargs):
logging.disable(logging.ERROR)
result = method(*args, **kwargs)
logging.disable(logging.NOTSET)
return result
return wrapper |
def ParseCategorizedPath(path):
"""Parses a categorized path string into type and list of components."""
components = tuple(component for component in path.split("/") if component)
if components[0:2] == ("fs", "os"):
return PathInfo.PathType.OS, components[2:]
elif components[0:2] == ("fs", "tsk"):
return PathInfo.PathType.TSK, components[2:]
elif components[0:1] == ("registry",):
return PathInfo.PathType.REGISTRY, components[1:]
elif components[0:1] == ("temp",):
return PathInfo.PathType.TEMP, components[1:]
else:
raise ValueError("Incorrect path: '%s'" % path) | Parses a categorized path string into type and list of components. | Below is the the instruction that describes the task:
### Input:
Parses a categorized path string into type and list of components.
### Response:
def ParseCategorizedPath(path):
"""Parses a categorized path string into type and list of components."""
components = tuple(component for component in path.split("/") if component)
if components[0:2] == ("fs", "os"):
return PathInfo.PathType.OS, components[2:]
elif components[0:2] == ("fs", "tsk"):
return PathInfo.PathType.TSK, components[2:]
elif components[0:1] == ("registry",):
return PathInfo.PathType.REGISTRY, components[1:]
elif components[0:1] == ("temp",):
return PathInfo.PathType.TEMP, components[1:]
else:
raise ValueError("Incorrect path: '%s'" % path) |
def lgauss(x, mu, sigma=1.0, logpdf=False):
""" Log10 normal distribution...
x : Parameter of interest for scanning the pdf
mu : Peak of the lognormal distribution (mean of the underlying
normal distribution is log10(mu)
sigma : Standard deviation of the underlying normal distribution
"""
x = np.array(x, ndmin=1)
lmu = np.log10(mu)
s2 = sigma * sigma
lx = np.zeros(x.shape)
v = np.zeros(x.shape)
lx[x > 0] = np.log10(x[x > 0])
v = 1. / np.sqrt(2 * s2 * np.pi) * np.exp(-(lx - lmu)**2 / (2 * s2))
if not logpdf:
v /= (x * np.log(10.))
v[x <= 0] = -np.inf
return v | Log10 normal distribution...
x : Parameter of interest for scanning the pdf
mu : Peak of the lognormal distribution (mean of the underlying
normal distribution is log10(mu)
sigma : Standard deviation of the underlying normal distribution | Below is the the instruction that describes the task:
### Input:
Log10 normal distribution...
x : Parameter of interest for scanning the pdf
mu : Peak of the lognormal distribution (mean of the underlying
normal distribution is log10(mu)
sigma : Standard deviation of the underlying normal distribution
### Response:
def lgauss(x, mu, sigma=1.0, logpdf=False):
""" Log10 normal distribution...
x : Parameter of interest for scanning the pdf
mu : Peak of the lognormal distribution (mean of the underlying
normal distribution is log10(mu)
sigma : Standard deviation of the underlying normal distribution
"""
x = np.array(x, ndmin=1)
lmu = np.log10(mu)
s2 = sigma * sigma
lx = np.zeros(x.shape)
v = np.zeros(x.shape)
lx[x > 0] = np.log10(x[x > 0])
v = 1. / np.sqrt(2 * s2 * np.pi) * np.exp(-(lx - lmu)**2 / (2 * s2))
if not logpdf:
v /= (x * np.log(10.))
v[x <= 0] = -np.inf
return v |
def export_node_data(bpmn_diagram, process_id, params, process):
"""
Creates a new XML element (depends on node type) for given node parameters and adds it to 'process' element.
:param bpmn_diagram: BPMNDiagramGraph class instantion representing a BPMN process diagram,
:param process_id: string representing ID of given flow node,
:param params: dictionary with node parameters,
:param process: object of Element class, representing BPMN XML 'process' element (root for nodes).
"""
node_type = params[consts.Consts.type]
output_element = eTree.SubElement(process, node_type)
output_element.set(consts.Consts.id, process_id)
output_element.set(consts.Consts.name, params[consts.Consts.node_name])
for incoming in params[consts.Consts.incoming_flow]:
incoming_element = eTree.SubElement(output_element, consts.Consts.incoming_flow)
incoming_element.text = incoming
for outgoing in params[consts.Consts.outgoing_flow]:
outgoing_element = eTree.SubElement(output_element, consts.Consts.outgoing_flow)
outgoing_element.text = outgoing
if node_type == consts.Consts.task \
or node_type == consts.Consts.user_task \
or node_type == consts.Consts.service_task \
or node_type == consts.Consts.manual_task:
BpmnDiagramGraphExport.export_task_info(params, output_element)
elif node_type == consts.Consts.subprocess:
BpmnDiagramGraphExport.export_subprocess_info(bpmn_diagram, params, output_element)
elif node_type == consts.Consts.data_object:
BpmnDiagramGraphExport.export_data_object_info(bpmn_diagram, params, output_element)
elif node_type == consts.Consts.complex_gateway:
BpmnDiagramGraphExport.export_complex_gateway_info(params, output_element)
elif node_type == consts.Consts.event_based_gateway:
BpmnDiagramGraphExport.export_event_based_gateway_info(params, output_element)
elif node_type == consts.Consts.inclusive_gateway or node_type == consts.Consts.exclusive_gateway:
BpmnDiagramGraphExport.export_inclusive_exclusive_gateway_info(params, output_element)
elif node_type == consts.Consts.parallel_gateway:
BpmnDiagramGraphExport.export_parallel_gateway_info(params, output_element)
elif node_type == consts.Consts.start_event:
BpmnDiagramGraphExport.export_start_event_info(params, output_element)
elif node_type == consts.Consts.intermediate_catch_event:
BpmnDiagramGraphExport.export_catch_event_info(params, output_element)
elif node_type == consts.Consts.end_event or node_type == consts.Consts.intermediate_throw_event:
BpmnDiagramGraphExport.export_throw_event_info(params, output_element)
elif node_type == consts.Consts.boundary_event:
BpmnDiagramGraphExport.export_boundary_event_info(params, output_element) | Creates a new XML element (depends on node type) for given node parameters and adds it to 'process' element.
:param bpmn_diagram: BPMNDiagramGraph class instantion representing a BPMN process diagram,
:param process_id: string representing ID of given flow node,
:param params: dictionary with node parameters,
:param process: object of Element class, representing BPMN XML 'process' element (root for nodes). | Below is the the instruction that describes the task:
### Input:
Creates a new XML element (depends on node type) for given node parameters and adds it to 'process' element.
:param bpmn_diagram: BPMNDiagramGraph class instantion representing a BPMN process diagram,
:param process_id: string representing ID of given flow node,
:param params: dictionary with node parameters,
:param process: object of Element class, representing BPMN XML 'process' element (root for nodes).
### Response:
def export_node_data(bpmn_diagram, process_id, params, process):
"""
Creates a new XML element (depends on node type) for given node parameters and adds it to 'process' element.
:param bpmn_diagram: BPMNDiagramGraph class instantion representing a BPMN process diagram,
:param process_id: string representing ID of given flow node,
:param params: dictionary with node parameters,
:param process: object of Element class, representing BPMN XML 'process' element (root for nodes).
"""
node_type = params[consts.Consts.type]
output_element = eTree.SubElement(process, node_type)
output_element.set(consts.Consts.id, process_id)
output_element.set(consts.Consts.name, params[consts.Consts.node_name])
for incoming in params[consts.Consts.incoming_flow]:
incoming_element = eTree.SubElement(output_element, consts.Consts.incoming_flow)
incoming_element.text = incoming
for outgoing in params[consts.Consts.outgoing_flow]:
outgoing_element = eTree.SubElement(output_element, consts.Consts.outgoing_flow)
outgoing_element.text = outgoing
if node_type == consts.Consts.task \
or node_type == consts.Consts.user_task \
or node_type == consts.Consts.service_task \
or node_type == consts.Consts.manual_task:
BpmnDiagramGraphExport.export_task_info(params, output_element)
elif node_type == consts.Consts.subprocess:
BpmnDiagramGraphExport.export_subprocess_info(bpmn_diagram, params, output_element)
elif node_type == consts.Consts.data_object:
BpmnDiagramGraphExport.export_data_object_info(bpmn_diagram, params, output_element)
elif node_type == consts.Consts.complex_gateway:
BpmnDiagramGraphExport.export_complex_gateway_info(params, output_element)
elif node_type == consts.Consts.event_based_gateway:
BpmnDiagramGraphExport.export_event_based_gateway_info(params, output_element)
elif node_type == consts.Consts.inclusive_gateway or node_type == consts.Consts.exclusive_gateway:
BpmnDiagramGraphExport.export_inclusive_exclusive_gateway_info(params, output_element)
elif node_type == consts.Consts.parallel_gateway:
BpmnDiagramGraphExport.export_parallel_gateway_info(params, output_element)
elif node_type == consts.Consts.start_event:
BpmnDiagramGraphExport.export_start_event_info(params, output_element)
elif node_type == consts.Consts.intermediate_catch_event:
BpmnDiagramGraphExport.export_catch_event_info(params, output_element)
elif node_type == consts.Consts.end_event or node_type == consts.Consts.intermediate_throw_event:
BpmnDiagramGraphExport.export_throw_event_info(params, output_element)
elif node_type == consts.Consts.boundary_event:
BpmnDiagramGraphExport.export_boundary_event_info(params, output_element) |
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name | Given a fully qualified name, find what module contains it. | Below is the the instruction that describes the task:
### Input:
Given a fully qualified name, find what module contains it.
### Response:
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name |
def fetch_object(self, container, obj, include_meta=False,
chunk_size=None, size=None, extra_info=None):
"""
Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
be returned. If the object if smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.fetch_object(container, obj,
include_meta=include_meta, chunk_size=chunk_size, size=size) | Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
be returned. If the object if smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more. | Below is the the instruction that describes the task:
### Input:
Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
be returned. If the object if smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
### Response:
def fetch_object(self, container, obj, include_meta=False,
chunk_size=None, size=None, extra_info=None):
"""
Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
be returned. If the object if smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.fetch_object(container, obj,
include_meta=include_meta, chunk_size=chunk_size, size=size) |
def time_description(self):
"""String description of the year or year range"""
tc = [t for t in self._p.time_coverage if t]
if not tc:
return ''
mn = min(tc)
mx = max(tc)
if not mn and not mx:
return ''
elif mn == mx:
return mn
else:
return "{} to {}".format(mn, mx) | String description of the year or year range | Below is the the instruction that describes the task:
### Input:
String description of the year or year range
### Response:
def time_description(self):
"""String description of the year or year range"""
tc = [t for t in self._p.time_coverage if t]
if not tc:
return ''
mn = min(tc)
mx = max(tc)
if not mn and not mx:
return ''
elif mn == mx:
return mn
else:
return "{} to {}".format(mn, mx) |
def fmtd_sub_expr_str(self):
"""Get the entire sub expressions as a string with spaces."""
result = u"{}({}".format(self.normalized_func,
self._metric_name)
if self._dimensions is not None:
result += "{" + self.dimensions_str + "}"
if self._period != _DEFAULT_PERIOD:
result += ", {}".format(self._period)
result += ")"
result += " {} {}".format(self._operator,
self._threshold)
if self._periods != _DEFAULT_PERIODS:
result += " times {}".format(self._periods)
return result | Get the entire sub expressions as a string with spaces. | Below is the the instruction that describes the task:
### Input:
Get the entire sub expressions as a string with spaces.
### Response:
def fmtd_sub_expr_str(self):
"""Get the entire sub expressions as a string with spaces."""
result = u"{}({}".format(self.normalized_func,
self._metric_name)
if self._dimensions is not None:
result += "{" + self.dimensions_str + "}"
if self._period != _DEFAULT_PERIOD:
result += ", {}".format(self._period)
result += ")"
result += " {} {}".format(self._operator,
self._threshold)
if self._periods != _DEFAULT_PERIODS:
result += " times {}".format(self._periods)
return result |
def _learnPhase2(self, readOnly=False):
"""
Compute the predicted segments given the current set of active cells.
:param readOnly True if being called from backtracking logic.
This tells us not to increment any segment
duty cycles or queue up any updates.
This computes the lrnPredictedState['t'] and queues up any segments that
became active (and the list of active synapses for each segment) into
the segmentUpdates queue
This looks at:
- @ref lrnActiveState['t']
This modifies:
- @ref lrnPredictedState['t']
- @ref segmentUpdates
"""
# Clear out predicted state to start with
self.lrnPredictedState['t'].fill(0)
# Compute new predicted state. When computing predictions for
# phase 2, we predict at most one cell per column (the one with the best
# matching segment).
for c in xrange(self.numberOfCols):
# Is there a cell predicted to turn on in this column?
i, s, numActive = self._getBestMatchingCell(
c, self.lrnActiveState['t'], minThreshold = self.activationThreshold)
if i is None:
continue
# Turn on the predicted state for the best matching cell and queue
# the pertinent segment up for an update, which will get processed if
# the cell receives bottom up in the future.
self.lrnPredictedState['t'][c, i] = 1
if readOnly:
continue
# Queue up this segment for updating
segUpdate = self._getSegmentActiveSynapses(
c, i, s, activeState=self.lrnActiveState['t'],
newSynapses=(numActive < self.newSynapseCount))
s.totalActivations += 1 # increment totalActivations
self._addToSegmentUpdates(c, i, segUpdate)
if self.doPooling:
# creates a new pooling segment if no best matching segment found
# sum(all synapses) >= minThreshold, "weak" activation
predSegment = self._getBestMatchingSegment(c, i,
self.lrnActiveState['t-1'])
segUpdate = self._getSegmentActiveSynapses(c, i, predSegment,
self.lrnActiveState['t-1'], newSynapses=True)
self._addToSegmentUpdates(c, i, segUpdate) | Compute the predicted segments given the current set of active cells.
:param readOnly True if being called from backtracking logic.
This tells us not to increment any segment
duty cycles or queue up any updates.
This computes the lrnPredictedState['t'] and queues up any segments that
became active (and the list of active synapses for each segment) into
the segmentUpdates queue
This looks at:
- @ref lrnActiveState['t']
This modifies:
- @ref lrnPredictedState['t']
- @ref segmentUpdates | Below is the the instruction that describes the task:
### Input:
Compute the predicted segments given the current set of active cells.
:param readOnly True if being called from backtracking logic.
This tells us not to increment any segment
duty cycles or queue up any updates.
This computes the lrnPredictedState['t'] and queues up any segments that
became active (and the list of active synapses for each segment) into
the segmentUpdates queue
This looks at:
- @ref lrnActiveState['t']
This modifies:
- @ref lrnPredictedState['t']
- @ref segmentUpdates
### Response:
def _learnPhase2(self, readOnly=False):
"""
Compute the predicted segments given the current set of active cells.
:param readOnly True if being called from backtracking logic.
This tells us not to increment any segment
duty cycles or queue up any updates.
This computes the lrnPredictedState['t'] and queues up any segments that
became active (and the list of active synapses for each segment) into
the segmentUpdates queue
This looks at:
- @ref lrnActiveState['t']
This modifies:
- @ref lrnPredictedState['t']
- @ref segmentUpdates
"""
# Clear out predicted state to start with
self.lrnPredictedState['t'].fill(0)
# Compute new predicted state. When computing predictions for
# phase 2, we predict at most one cell per column (the one with the best
# matching segment).
for c in xrange(self.numberOfCols):
# Is there a cell predicted to turn on in this column?
i, s, numActive = self._getBestMatchingCell(
c, self.lrnActiveState['t'], minThreshold = self.activationThreshold)
if i is None:
continue
# Turn on the predicted state for the best matching cell and queue
# the pertinent segment up for an update, which will get processed if
# the cell receives bottom up in the future.
self.lrnPredictedState['t'][c, i] = 1
if readOnly:
continue
# Queue up this segment for updating
segUpdate = self._getSegmentActiveSynapses(
c, i, s, activeState=self.lrnActiveState['t'],
newSynapses=(numActive < self.newSynapseCount))
s.totalActivations += 1 # increment totalActivations
self._addToSegmentUpdates(c, i, segUpdate)
if self.doPooling:
# creates a new pooling segment if no best matching segment found
# sum(all synapses) >= minThreshold, "weak" activation
predSegment = self._getBestMatchingSegment(c, i,
self.lrnActiveState['t-1'])
segUpdate = self._getSegmentActiveSynapses(c, i, predSegment,
self.lrnActiveState['t-1'], newSynapses=True)
self._addToSegmentUpdates(c, i, segUpdate) |
def random_combinations(items, size, num=None, rng=None):
"""
Yields `num` combinations of length `size` from items in random order
Args:
items (?):
size (?):
num (None): (default = None)
rng (RandomState): random number generator(default = None)
Yields:
tuple: combo
CommandLine:
python -m utool.util_iter random_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(range(10))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(zip(range(10), range(10)))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result)
"""
import scipy.misc
import numpy as np
import utool as ut
rng = ut.ensure_rng(rng, impl='python')
num_ = np.inf if num is None else num
# Ensure we dont request more than is possible
n_max = int(scipy.misc.comb(len(items), size))
num_ = min(n_max, num_)
if num is not None and num_ > n_max // 2:
# If num is too big just generate all combinations and shuffle them
combos = list(it.combinations(items, size))
rng.shuffle(combos)
for combo in combos[:num]:
yield combo
else:
# Otherwise yield randomly until we get something we havent seen
items = list(items)
combos = set()
while len(combos) < num_:
# combo = tuple(sorted(rng.choice(items, size, replace=False)))
combo = tuple(sorted(rng.sample(items, size)))
if combo not in combos:
# TODO: store indices instead of combo values
combos.add(combo)
yield combo | Yields `num` combinations of length `size` from items in random order
Args:
items (?):
size (?):
num (None): (default = None)
rng (RandomState): random number generator(default = None)
Yields:
tuple: combo
CommandLine:
python -m utool.util_iter random_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(range(10))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(zip(range(10), range(10)))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result) | Below is the the instruction that describes the task:
### Input:
Yields `num` combinations of length `size` from items in random order
Args:
items (?):
size (?):
num (None): (default = None)
rng (RandomState): random number generator(default = None)
Yields:
tuple: combo
CommandLine:
python -m utool.util_iter random_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(range(10))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(zip(range(10), range(10)))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result)
### Response:
def random_combinations(items, size, num=None, rng=None):
"""
Yields `num` combinations of length `size` from items in random order
Args:
items (?):
size (?):
num (None): (default = None)
rng (RandomState): random number generator(default = None)
Yields:
tuple: combo
CommandLine:
python -m utool.util_iter random_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(range(10))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> import utool as ut
>>> items = list(zip(range(10), range(10)))
>>> size = 3
>>> num = 5
>>> rng = 0
>>> combos = list(random_combinations(items, size, num, rng))
>>> result = ('combos = %s' % (ut.repr2(combos),))
>>> print(result)
"""
import scipy.misc
import numpy as np
import utool as ut
rng = ut.ensure_rng(rng, impl='python')
num_ = np.inf if num is None else num
# Ensure we dont request more than is possible
n_max = int(scipy.misc.comb(len(items), size))
num_ = min(n_max, num_)
if num is not None and num_ > n_max // 2:
# If num is too big just generate all combinations and shuffle them
combos = list(it.combinations(items, size))
rng.shuffle(combos)
for combo in combos[:num]:
yield combo
else:
# Otherwise yield randomly until we get something we havent seen
items = list(items)
combos = set()
while len(combos) < num_:
# combo = tuple(sorted(rng.choice(items, size, replace=False)))
combo = tuple(sorted(rng.sample(items, size)))
if combo not in combos:
# TODO: store indices instead of combo values
combos.add(combo)
yield combo |
def nbviewer_link(url):
"""Return the link to the Jupyter nbviewer for the given notebook url"""
if six.PY2:
from urlparse import urlparse as urlsplit
else:
from urllib.parse import urlsplit
info = urlsplit(url)
domain = info.netloc
url_type = 'github' if domain == 'github.com' else 'url'
return 'https://nbviewer.jupyter.org/%s%s' % (url_type, info.path) | Return the link to the Jupyter nbviewer for the given notebook url | Below is the the instruction that describes the task:
### Input:
Return the link to the Jupyter nbviewer for the given notebook url
### Response:
def nbviewer_link(url):
"""Return the link to the Jupyter nbviewer for the given notebook url"""
if six.PY2:
from urlparse import urlparse as urlsplit
else:
from urllib.parse import urlsplit
info = urlsplit(url)
domain = info.netloc
url_type = 'github' if domain == 'github.com' else 'url'
return 'https://nbviewer.jupyter.org/%s%s' % (url_type, info.path) |
def IsIconic(handle: int) -> bool:
"""
IsIconic from Win32.
Determine whether a native window is minimized.
handle: int, the handle of a native window.
Return bool.
"""
return bool(ctypes.windll.user32.IsIconic(ctypes.c_void_p(handle))) | IsIconic from Win32.
Determine whether a native window is minimized.
handle: int, the handle of a native window.
Return bool. | Below is the the instruction that describes the task:
### Input:
IsIconic from Win32.
Determine whether a native window is minimized.
handle: int, the handle of a native window.
Return bool.
### Response:
def IsIconic(handle: int) -> bool:
"""
IsIconic from Win32.
Determine whether a native window is minimized.
handle: int, the handle of a native window.
Return bool.
"""
return bool(ctypes.windll.user32.IsIconic(ctypes.c_void_p(handle))) |
def init():
'''Initialise a WSGI application to be loaded by uWSGI.'''
# Load values from config file
config_file = os.path.realpath(os.path.join(os.getcwd(), 'swaggery.ini'))
config = configparser.RawConfigParser(allow_no_value=True)
config.read(config_file)
log_level = config.get('application', 'logging_level').upper()
api_dirs = list(config['apis'])
do_checks = config.get('application',
'disable_boot_checks').lower() == 'false'
# Set logging level
log.setLevel(getattr(logging, log_level))
log.debug('Log level set to {}'.format(log_level))
# Bootstrap application
log.debug('Exploring directories: {}'.format(api_dirs))
application = Swaggery(api_dirs=api_dirs, do_checks=do_checks)
return application | Initialise a WSGI application to be loaded by uWSGI. | Below is the the instruction that describes the task:
### Input:
Initialise a WSGI application to be loaded by uWSGI.
### Response:
def init():
'''Initialise a WSGI application to be loaded by uWSGI.'''
# Load values from config file
config_file = os.path.realpath(os.path.join(os.getcwd(), 'swaggery.ini'))
config = configparser.RawConfigParser(allow_no_value=True)
config.read(config_file)
log_level = config.get('application', 'logging_level').upper()
api_dirs = list(config['apis'])
do_checks = config.get('application',
'disable_boot_checks').lower() == 'false'
# Set logging level
log.setLevel(getattr(logging, log_level))
log.debug('Log level set to {}'.format(log_level))
# Bootstrap application
log.debug('Exploring directories: {}'.format(api_dirs))
application = Swaggery(api_dirs=api_dirs, do_checks=do_checks)
return application |
def _capture(f, t, t0, factor):
'''
capture signal and return its standard deviation
#TODO: more detail
'''
n_per_sec = len(t) / t[-1]
# len of one split:
n = int(t0 * factor * n_per_sec)
s = len(f) // n
m = s * n
f = f[:m]
ff = np.split(f, s)
m = np.mean(ff, axis=1)
return np.std(m) | capture signal and return its standard deviation
#TODO: more detail | Below is the the instruction that describes the task:
### Input:
capture signal and return its standard deviation
#TODO: more detail
### Response:
def _capture(f, t, t0, factor):
'''
capture signal and return its standard deviation
#TODO: more detail
'''
n_per_sec = len(t) / t[-1]
# len of one split:
n = int(t0 * factor * n_per_sec)
s = len(f) // n
m = s * n
f = f[:m]
ff = np.split(f, s)
m = np.mean(ff, axis=1)
return np.std(m) |
def gen_key_input(self, key_params={}):
'''
Generate --gen-key input per gpg doc/DETAILS.
:param key_params: Key parameters
:rtype: str
:return: Control input for :func:`regnupg.gen_key`
'''
params = self.default_key_params.copy()
params.update(key_params)
result = ['Key-Type: %s' % params.pop('Key-Type')]
result += ('%s: %s' % (param, value) for param, value in params.items())
result.append('%commit\n')
return '\n'.join(result) | Generate --gen-key input per gpg doc/DETAILS.
:param key_params: Key parameters
:rtype: str
:return: Control input for :func:`regnupg.gen_key` | Below is the the instruction that describes the task:
### Input:
Generate --gen-key input per gpg doc/DETAILS.
:param key_params: Key parameters
:rtype: str
:return: Control input for :func:`regnupg.gen_key`
### Response:
def gen_key_input(self, key_params={}):
'''
Generate --gen-key input per gpg doc/DETAILS.
:param key_params: Key parameters
:rtype: str
:return: Control input for :func:`regnupg.gen_key`
'''
params = self.default_key_params.copy()
params.update(key_params)
result = ['Key-Type: %s' % params.pop('Key-Type')]
result += ('%s: %s' % (param, value) for param, value in params.items())
result.append('%commit\n')
return '\n'.join(result) |
def engine_file(self):
"""Specify path to the ipcontroller-engine.json file.
This file is stored in in the ipython_dir/profile folders.
Returns :
- str, File path to engine file
"""
return os.path.join(self.ipython_dir,
'profile_{0}'.format(self.profile),
'security/ipcontroller-engine.json') | Specify path to the ipcontroller-engine.json file.
This file is stored in in the ipython_dir/profile folders.
Returns :
- str, File path to engine file | Below is the the instruction that describes the task:
### Input:
Specify path to the ipcontroller-engine.json file.
This file is stored in in the ipython_dir/profile folders.
Returns :
- str, File path to engine file
### Response:
def engine_file(self):
"""Specify path to the ipcontroller-engine.json file.
This file is stored in in the ipython_dir/profile folders.
Returns :
- str, File path to engine file
"""
return os.path.join(self.ipython_dir,
'profile_{0}'.format(self.profile),
'security/ipcontroller-engine.json') |
def read_long(self, registeraddress, functioncode=3, signed=False):
"""Read a long integer (32 bits) from the slave.
Long integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 3 or 4.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
============== ================== ================ ==========================
``signed`` Data type in slave Alternative name Range
============== ================== ================ ==========================
:const:`False` Unsigned INT32 Unsigned long 0 to 4294967295
:const:`True` INT32 Long -2147483648 to 2147483647
============== ================== ================ ==========================
Returns:
The numerical value (int).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkBool(signed, description='signed')
return self._genericCommand(functioncode, registeraddress, numberOfRegisters=2, signed=signed, payloadformat='long') | Read a long integer (32 bits) from the slave.
Long integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 3 or 4.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
============== ================== ================ ==========================
``signed`` Data type in slave Alternative name Range
============== ================== ================ ==========================
:const:`False` Unsigned INT32 Unsigned long 0 to 4294967295
:const:`True` INT32 Long -2147483648 to 2147483647
============== ================== ================ ==========================
Returns:
The numerical value (int).
Raises:
ValueError, TypeError, IOError | Below is the the instruction that describes the task:
### Input:
Read a long integer (32 bits) from the slave.
Long integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 3 or 4.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
============== ================== ================ ==========================
``signed`` Data type in slave Alternative name Range
============== ================== ================ ==========================
:const:`False` Unsigned INT32 Unsigned long 0 to 4294967295
:const:`True` INT32 Long -2147483648 to 2147483647
============== ================== ================ ==========================
Returns:
The numerical value (int).
Raises:
ValueError, TypeError, IOError
### Response:
def read_long(self, registeraddress, functioncode=3, signed=False):
"""Read a long integer (32 bits) from the slave.
Long integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 3 or 4.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
============== ================== ================ ==========================
``signed`` Data type in slave Alternative name Range
============== ================== ================ ==========================
:const:`False` Unsigned INT32 Unsigned long 0 to 4294967295
:const:`True` INT32 Long -2147483648 to 2147483647
============== ================== ================ ==========================
Returns:
The numerical value (int).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkBool(signed, description='signed')
return self._genericCommand(functioncode, registeraddress, numberOfRegisters=2, signed=signed, payloadformat='long') |
def license_from_trove(trove):
"""Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing
information is found in trove classifiers.
"""
license = []
for classifier in trove:
if 'License' in classifier:
stripped = classifier.strip()
# if taken from EGG-INFO, begins with Classifier:
stripped = stripped[stripped.find('License'):]
if stripped in settings.TROVE_LICENSES:
license.append(settings.TROVE_LICENSES[stripped])
return ' and '.join(license) | Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing
information is found in trove classifiers. | Below is the the instruction that describes the task:
### Input:
Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing
information is found in trove classifiers.
### Response:
def license_from_trove(trove):
"""Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing
information is found in trove classifiers.
"""
license = []
for classifier in trove:
if 'License' in classifier:
stripped = classifier.strip()
# if taken from EGG-INFO, begins with Classifier:
stripped = stripped[stripped.find('License'):]
if stripped in settings.TROVE_LICENSES:
license.append(settings.TROVE_LICENSES[stripped])
return ' and '.join(license) |
def get_tags(self, field):
"""Get the set of tags for a given field.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field whose tag should be read.
Returns
-------
set([tag, ...])
Raises
------
UnavailableFieldError
If the field does not exist or is not available.
"""
return self.fields.get_field(field, self.field_values).tags.copy() | Get the set of tags for a given field.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field whose tag should be read.
Returns
-------
set([tag, ...])
Raises
------
UnavailableFieldError
If the field does not exist or is not available. | Below is the the instruction that describes the task:
### Input:
Get the set of tags for a given field.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field whose tag should be read.
Returns
-------
set([tag, ...])
Raises
------
UnavailableFieldError
If the field does not exist or is not available.
### Response:
def get_tags(self, field):
"""Get the set of tags for a given field.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field whose tag should be read.
Returns
-------
set([tag, ...])
Raises
------
UnavailableFieldError
If the field does not exist or is not available.
"""
return self.fields.get_field(field, self.field_values).tags.copy() |
def setup_cmd_parser(cls):
"""Returns the Git argument parser."""
parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
from_date=True,
to_date=True)
# Optional arguments
group = parser.parser.add_argument_group('Git arguments')
group.add_argument('--branches', dest='branches',
nargs='+', type=str, default=None,
help="Fetch commits only from these branches")
# Mutual exclusive parameters
exgroup = group.add_mutually_exclusive_group()
exgroup.add_argument('--git-path', dest='git_path',
help="Path where the Git repository will be cloned")
exgroup.add_argument('--git-log', dest='git_log',
help="Path to the Git log file")
exgroup_fetch = group.add_mutually_exclusive_group()
exgroup_fetch.add_argument('--latest-items', dest='latest_items',
action='store_true',
help="Fetch latest commits added to the repository")
exgroup_fetch.add_argument('--no-update', dest='no_update',
action='store_true',
help="Fetch all commits without updating the repository")
# Required arguments
parser.parser.add_argument('uri',
help="URI of the Git log repository")
return parser | Returns the Git argument parser. | Below is the the instruction that describes the task:
### Input:
Returns the Git argument parser.
### Response:
def setup_cmd_parser(cls):
"""Returns the Git argument parser."""
parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
from_date=True,
to_date=True)
# Optional arguments
group = parser.parser.add_argument_group('Git arguments')
group.add_argument('--branches', dest='branches',
nargs='+', type=str, default=None,
help="Fetch commits only from these branches")
# Mutual exclusive parameters
exgroup = group.add_mutually_exclusive_group()
exgroup.add_argument('--git-path', dest='git_path',
help="Path where the Git repository will be cloned")
exgroup.add_argument('--git-log', dest='git_log',
help="Path to the Git log file")
exgroup_fetch = group.add_mutually_exclusive_group()
exgroup_fetch.add_argument('--latest-items', dest='latest_items',
action='store_true',
help="Fetch latest commits added to the repository")
exgroup_fetch.add_argument('--no-update', dest='no_update',
action='store_true',
help="Fetch all commits without updating the repository")
# Required arguments
parser.parser.add_argument('uri',
help="URI of the Git log repository")
return parser |
def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]:
"""Convert attribute values from numpy objects to native Python objects,
for use in to_dict
"""
attrs = dict(attrs)
for k, v in attrs.items():
if isinstance(v, np.ndarray):
attrs[k] = v.tolist()
elif isinstance(v, np.generic):
attrs[k] = v.item()
return attrs | Convert attribute values from numpy objects to native Python objects,
for use in to_dict | Below is the the instruction that describes the task:
### Input:
Convert attribute values from numpy objects to native Python objects,
for use in to_dict
### Response:
def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]:
"""Convert attribute values from numpy objects to native Python objects,
for use in to_dict
"""
attrs = dict(attrs)
for k, v in attrs.items():
if isinstance(v, np.ndarray):
attrs[k] = v.tolist()
elif isinstance(v, np.generic):
attrs[k] = v.item()
return attrs |
def get_alternative_short_names(cls, entry):
"""
get list of models.AlternativeShortName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeShortName` objects
"""
names = []
query = "./protein/alternativeName/shortName"
for name in entry.iterfind(query):
names.append(models.AlternativeShortName(name=name.text))
return names | get list of models.AlternativeShortName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeShortName` objects | Below is the the instruction that describes the task:
### Input:
get list of models.AlternativeShortName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeShortName` objects
### Response:
def get_alternative_short_names(cls, entry):
"""
get list of models.AlternativeShortName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeShortName` objects
"""
names = []
query = "./protein/alternativeName/shortName"
for name in entry.iterfind(query):
names.append(models.AlternativeShortName(name=name.text))
return names |
def export(self):
"""
Exports a network as a networkx MultiDiGraph intermediate representation
suitable for visualization.
:return: networkx MultiDiGraph
"""
graph = nx.MultiDiGraph()
# Add regions to graph as nodes, annotated by name
regions = self.network.getRegions()
for idx in xrange(regions.getCount()):
regionPair = regions.getByIndex(idx)
regionName = regionPair[0]
graph.add_node(regionName, label=regionName)
# Add links between regions to graph as edges, annotate by input-output
# name pairs
for linkName, link in self.network.getLinks():
graph.add_edge(link.getSrcRegionName(),
link.getDestRegionName(),
src=link.getSrcOutputName(),
dest=link.getDestInputName())
return graph | Exports a network as a networkx MultiDiGraph intermediate representation
suitable for visualization.
:return: networkx MultiDiGraph | Below is the the instruction that describes the task:
### Input:
Exports a network as a networkx MultiDiGraph intermediate representation
suitable for visualization.
:return: networkx MultiDiGraph
### Response:
def export(self):
"""
Exports a network as a networkx MultiDiGraph intermediate representation
suitable for visualization.
:return: networkx MultiDiGraph
"""
graph = nx.MultiDiGraph()
# Add regions to graph as nodes, annotated by name
regions = self.network.getRegions()
for idx in xrange(regions.getCount()):
regionPair = regions.getByIndex(idx)
regionName = regionPair[0]
graph.add_node(regionName, label=regionName)
# Add links between regions to graph as edges, annotate by input-output
# name pairs
for linkName, link in self.network.getLinks():
graph.add_edge(link.getSrcRegionName(),
link.getDestRegionName(),
src=link.getSrcOutputName(),
dest=link.getDestInputName())
return graph |
def chown(self, paths, owner, recurse=False):
''' Change the owner for paths. The owner can be specified as `user` or `user:group`
:param paths: List of paths to chmod
:type paths: list
:param owner: New owner
:type owner: string
:param recurse: Recursive chown
:type recurse: boolean
:returns: a generator that yields dictionaries
This always include the toplevel when recursing.'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chown: no path given")
if not owner:
raise InvalidInputException("chown: no owner given")
processor = lambda path, node, owner=owner: self._handle_chown(path, node, owner)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item | Change the owner for paths. The owner can be specified as `user` or `user:group`
:param paths: List of paths to chmod
:type paths: list
:param owner: New owner
:type owner: string
:param recurse: Recursive chown
:type recurse: boolean
:returns: a generator that yields dictionaries
This always include the toplevel when recursing. | Below is the the instruction that describes the task:
### Input:
Change the owner for paths. The owner can be specified as `user` or `user:group`
:param paths: List of paths to chmod
:type paths: list
:param owner: New owner
:type owner: string
:param recurse: Recursive chown
:type recurse: boolean
:returns: a generator that yields dictionaries
This always include the toplevel when recursing.
### Response:
def chown(self, paths, owner, recurse=False):
''' Change the owner for paths. The owner can be specified as `user` or `user:group`
:param paths: List of paths to chmod
:type paths: list
:param owner: New owner
:type owner: string
:param recurse: Recursive chown
:type recurse: boolean
:returns: a generator that yields dictionaries
This always include the toplevel when recursing.'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("chown: no path given")
if not owner:
raise InvalidInputException("chown: no owner given")
processor = lambda path, node, owner=owner: self._handle_chown(path, node, owner)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=recurse):
if item:
yield item |
def save_resource(plugin_name, resource_name, resource_data):
"""
Save a resource in local cache
:param plugin_name: Name of plugin this resource belongs to
:type plugin_name: str
:param resource_name: Name of resource
:type resource_name: str
:param resource_data: Resource content - base64 encoded
:type resource_data: str
:rtype: None
"""
path = os.path.join(resource_dir_path, plugin_name)
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, resource_name)
logger.debug("Saving {}".format(path))
with open(path, 'wb') as f:
f.write(base64.b64decode(resource_data)) | Save a resource in local cache
:param plugin_name: Name of plugin this resource belongs to
:type plugin_name: str
:param resource_name: Name of resource
:type resource_name: str
:param resource_data: Resource content - base64 encoded
:type resource_data: str
:rtype: None | Below is the the instruction that describes the task:
### Input:
Save a resource in local cache
:param plugin_name: Name of plugin this resource belongs to
:type plugin_name: str
:param resource_name: Name of resource
:type resource_name: str
:param resource_data: Resource content - base64 encoded
:type resource_data: str
:rtype: None
### Response:
def save_resource(plugin_name, resource_name, resource_data):
"""
Save a resource in local cache
:param plugin_name: Name of plugin this resource belongs to
:type plugin_name: str
:param resource_name: Name of resource
:type resource_name: str
:param resource_data: Resource content - base64 encoded
:type resource_data: str
:rtype: None
"""
path = os.path.join(resource_dir_path, plugin_name)
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, resource_name)
logger.debug("Saving {}".format(path))
with open(path, 'wb') as f:
f.write(base64.b64decode(resource_data)) |
def get_sea_names():
'''
Returns a list of NODC sea names
source of list: http://www.nodc.noaa.gov/General/NODC-Archive/seanames.xml
'''
global _SEA_NAMES
if _SEA_NAMES is None:
resource_text = get_data("cc_plugin_ncei", "data/seanames.xml")
parser = etree.XMLParser(remove_blank_text=True)
root = etree.fromstring(resource_text, parser)
buf = {}
for seaname in root.findall('seaname'):
name = seaname.find('seaname').text
buf[name] = seaname.find('seacode').text if seaname.find('seacode') is not None else 'N/A'
_SEA_NAMES = buf
return _SEA_NAMES | Returns a list of NODC sea names
source of list: http://www.nodc.noaa.gov/General/NODC-Archive/seanames.xml | Below is the the instruction that describes the task:
### Input:
Returns a list of NODC sea names
source of list: http://www.nodc.noaa.gov/General/NODC-Archive/seanames.xml
### Response:
def get_sea_names():
'''
Returns a list of NODC sea names
source of list: http://www.nodc.noaa.gov/General/NODC-Archive/seanames.xml
'''
global _SEA_NAMES
if _SEA_NAMES is None:
resource_text = get_data("cc_plugin_ncei", "data/seanames.xml")
parser = etree.XMLParser(remove_blank_text=True)
root = etree.fromstring(resource_text, parser)
buf = {}
for seaname in root.findall('seaname'):
name = seaname.find('seaname').text
buf[name] = seaname.find('seacode').text if seaname.find('seacode') is not None else 'N/A'
_SEA_NAMES = buf
return _SEA_NAMES |
def skipping_window(sequence, target, n=3):
"""
Return a sliding window with a constraint to check that
target is inside the window.
From http://stackoverflow.com/q/43626525/610569
>>> list(skipping_window([1,2,3,4,5], 2, 3))
[(1, 2, 3), (2, 3, 4)]
"""
start, stop = 0, n
seq = list(sequence)
while stop <= len(seq):
subseq = seq[start:stop]
if target in subseq:
yield tuple(seq[start:stop])
start += 1
stop += 1
# Fast forwarding the start.
# Find the next window which contains the target.
try:
# `seq.index(target, start) - (n-1)` would be the next
# window where the constraint is met.
start = max(seq.index(target, start) - (n-1), start)
stop = start + n
except ValueError:
break | Return a sliding window with a constraint to check that
target is inside the window.
From http://stackoverflow.com/q/43626525/610569
>>> list(skipping_window([1,2,3,4,5], 2, 3))
[(1, 2, 3), (2, 3, 4)] | Below is the the instruction that describes the task:
### Input:
Return a sliding window with a constraint to check that
target is inside the window.
From http://stackoverflow.com/q/43626525/610569
>>> list(skipping_window([1,2,3,4,5], 2, 3))
[(1, 2, 3), (2, 3, 4)]
### Response:
def skipping_window(sequence, target, n=3):
"""
Return a sliding window with a constraint to check that
target is inside the window.
From http://stackoverflow.com/q/43626525/610569
>>> list(skipping_window([1,2,3,4,5], 2, 3))
[(1, 2, 3), (2, 3, 4)]
"""
start, stop = 0, n
seq = list(sequence)
while stop <= len(seq):
subseq = seq[start:stop]
if target in subseq:
yield tuple(seq[start:stop])
start += 1
stop += 1
# Fast forwarding the start.
# Find the next window which contains the target.
try:
# `seq.index(target, start) - (n-1)` would be the next
# window where the constraint is met.
start = max(seq.index(target, start) - (n-1), start)
stop = start + n
except ValueError:
break |
def _request_one_trial_job(self):
"""get one trial job, i.e., one hyperparameter configuration.
If this function is called, Command will be sent by BOHB:
a. If there is a parameter need to run, will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB don't have parameter waiting, will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
"""
if not self.generated_hyper_configs:
ret = {
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
send(CommandType.NoMoreTrialJobs, json_tricks.dumps(ret))
return
assert self.generated_hyper_configs
params = self.generated_hyper_configs.pop()
ret = {
'parameter_id': params[0],
'parameter_source': 'algorithm',
'parameters': params[1]
}
self.parameters[params[0]] = params[1]
send(CommandType.NewTrialJob, json_tricks.dumps(ret))
self.credit -= 1 | get one trial job, i.e., one hyperparameter configuration.
If this function is called, Command will be sent by BOHB:
a. If there is a parameter need to run, will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB don't have parameter waiting, will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
} | Below is the the instruction that describes the task:
### Input:
get one trial job, i.e., one hyperparameter configuration.
If this function is called, Command will be sent by BOHB:
a. If there is a parameter need to run, will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB don't have parameter waiting, will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
### Response:
def _request_one_trial_job(self):
"""get one trial job, i.e., one hyperparameter configuration.
If this function is called, Command will be sent by BOHB:
a. If there is a parameter need to run, will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB don't have parameter waiting, will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
"""
if not self.generated_hyper_configs:
ret = {
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
send(CommandType.NoMoreTrialJobs, json_tricks.dumps(ret))
return
assert self.generated_hyper_configs
params = self.generated_hyper_configs.pop()
ret = {
'parameter_id': params[0],
'parameter_source': 'algorithm',
'parameters': params[1]
}
self.parameters[params[0]] = params[1]
send(CommandType.NewTrialJob, json_tricks.dumps(ret))
self.credit -= 1 |
def create_lines(self, draw, n_line, width, height):
'''绘制干扰线'''
line_num = randint(n_line[0], n_line[1]) # 干扰线条数
for i in range(line_num):
# 起始点
begin = (randint(0, width), randint(0, height))
# 结束点
end = (randint(0, width), randint(0, height))
draw.line([begin, end], fill=(0, 0, 0)) | 绘制干扰线 | Below is the the instruction that describes the task:
### Input:
绘制干扰线
### Response:
def create_lines(self, draw, n_line, width, height):
'''绘制干扰线'''
line_num = randint(n_line[0], n_line[1]) # 干扰线条数
for i in range(line_num):
# 起始点
begin = (randint(0, width), randint(0, height))
# 结束点
end = (randint(0, width), randint(0, height))
draw.line([begin, end], fill=(0, 0, 0)) |
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, unicode):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv | decode argv if bytes, using stin.encoding, falling back on default enc | Below is the the instruction that describes the task:
### Input:
decode argv if bytes, using stin.encoding, falling back on default enc
### Response:
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, unicode):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv |
def graceful_ctrlc(func):
"""
Makes the decorated function exit with code 1 on CTRL+C.
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
exit(1)
return wrapper | Makes the decorated function exit with code 1 on CTRL+C. | Below is the the instruction that describes the task:
### Input:
Makes the decorated function exit with code 1 on CTRL+C.
### Response:
def graceful_ctrlc(func):
"""
Makes the decorated function exit with code 1 on CTRL+C.
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
exit(1)
return wrapper |
def logp_gradient_contribution(self, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to self.
Calculation of the log posterior is restricted to the variables in calculation_set.
"""
# NEED some sort of check to see if the log p calculation has recently
# failed, in which case not to continue
return self.logp_partial_gradient(self, calculation_set) + builtins.sum(
[child.logp_partial_gradient(self, calculation_set) for child in self.children]) | Calculates the gradient of the joint log posterior with respect to self.
Calculation of the log posterior is restricted to the variables in calculation_set. | Below is the the instruction that describes the task:
### Input:
Calculates the gradient of the joint log posterior with respect to self.
Calculation of the log posterior is restricted to the variables in calculation_set.
### Response:
def logp_gradient_contribution(self, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to self.
Calculation of the log posterior is restricted to the variables in calculation_set.
"""
# NEED some sort of check to see if the log p calculation has recently
# failed, in which case not to continue
return self.logp_partial_gradient(self, calculation_set) + builtins.sum(
[child.logp_partial_gradient(self, calculation_set) for child in self.children]) |
def lyap_e_len(**kwargs):
"""
Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters
"""
m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1)
# minimum length required to find single orbit vector
min_len = kwargs['emb_dim']
# we need to follow each starting point of an orbit vector for m more steps
min_len += m
# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each
min_len += kwargs['min_tsep'] * 2
# we need at least min_nb neighbors for each orbit vector
min_len += kwargs['min_nb']
return min_len | Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters | Below is the the instruction that describes the task:
### Input:
Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters
### Response:
def lyap_e_len(**kwargs):
"""
Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters
"""
m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1)
# minimum length required to find single orbit vector
min_len = kwargs['emb_dim']
# we need to follow each starting point of an orbit vector for m more steps
min_len += m
# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each
min_len += kwargs['min_tsep'] * 2
# we need at least min_nb neighbors for each orbit vector
min_len += kwargs['min_nb']
return min_len |
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
"""
if self.executable is None or not isfile(self.executable):
raise FileNotFoundError(
"Could not find MAGICC{} executable: {}".format(
self.version, self.executable
)
)
if self.is_temp:
assert (
self.root_dir is None
), "A temp copy for this instance has already been created"
self.root_dir = mkdtemp(prefix="pymagicc-")
if exists(self.run_dir):
raise Exception("A copy of MAGICC has already been created.")
if not exists(self.root_dir):
makedirs(self.root_dir)
exec_dir = basename(self.original_dir)
# Copy a subset of folders from the MAGICC `original_dir`
# Also copy anything which is in the root of the MAGICC distribution
# Assumes that the MAGICC binary is in a folder one level below the root
# of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc
dirs_to_copy = [".", "bin", "run"]
# Check that the executable is in a valid sub directory
assert exec_dir in dirs_to_copy, "binary must be in bin/ or run/ directory"
for d in dirs_to_copy:
source_dir = abspath(join(self.original_dir, "..", d))
if exists(source_dir):
_copy_files(source_dir, join(self.root_dir, d))
# Create an empty out dir
# MAGICC assumes that the 'out' directory already exists
makedirs(join(self.root_dir, "out"))
# Create basic configuration files so magicc can run
self.set_years()
self.set_config() | Initialises a temporary directory structure and copy of MAGICC
configuration files and binary. | Below is the the instruction that describes the task:
### Input:
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
### Response:
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
"""
if self.executable is None or not isfile(self.executable):
raise FileNotFoundError(
"Could not find MAGICC{} executable: {}".format(
self.version, self.executable
)
)
if self.is_temp:
assert (
self.root_dir is None
), "A temp copy for this instance has already been created"
self.root_dir = mkdtemp(prefix="pymagicc-")
if exists(self.run_dir):
raise Exception("A copy of MAGICC has already been created.")
if not exists(self.root_dir):
makedirs(self.root_dir)
exec_dir = basename(self.original_dir)
# Copy a subset of folders from the MAGICC `original_dir`
# Also copy anything which is in the root of the MAGICC distribution
# Assumes that the MAGICC binary is in a folder one level below the root
# of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc
dirs_to_copy = [".", "bin", "run"]
# Check that the executable is in a valid sub directory
assert exec_dir in dirs_to_copy, "binary must be in bin/ or run/ directory"
for d in dirs_to_copy:
source_dir = abspath(join(self.original_dir, "..", d))
if exists(source_dir):
_copy_files(source_dir, join(self.root_dir, d))
# Create an empty out dir
# MAGICC assumes that the 'out' directory already exists
makedirs(join(self.root_dir, "out"))
# Create basic configuration files so magicc can run
self.set_years()
self.set_config() |
def get_alignment_df_from_file(alignment_file, a_seq_id=None, b_seq_id=None):
"""Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment
"""
alignments = list(AlignIO.parse(alignment_file, "emboss"))
alignment_df = pd.DataFrame(columns=['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos'])
for alignment in alignments:
if not a_seq_id:
a_seq_id = list(alignment)[0].id
a_seq = str(list(alignment)[0].seq)
if not b_seq_id:
b_seq_id = list(alignment)[1].id
b_seq = str(list(alignment)[1].seq)
df = get_alignment_df(a_seq, b_seq, a_seq_id, b_seq_id)
alignment_df = alignment_df.append(df).reset_index(drop=True)
return alignment_df | Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment | Below is the the instruction that describes the task:
### Input:
Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment
### Response:
def get_alignment_df_from_file(alignment_file, a_seq_id=None, b_seq_id=None):
"""Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment
"""
alignments = list(AlignIO.parse(alignment_file, "emboss"))
alignment_df = pd.DataFrame(columns=['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos'])
for alignment in alignments:
if not a_seq_id:
a_seq_id = list(alignment)[0].id
a_seq = str(list(alignment)[0].seq)
if not b_seq_id:
b_seq_id = list(alignment)[1].id
b_seq = str(list(alignment)[1].seq)
df = get_alignment_df(a_seq, b_seq, a_seq_id, b_seq_id)
alignment_df = alignment_df.append(df).reset_index(drop=True)
return alignment_df |
def conics(elts, et):
"""
Determine the state (position, velocity) of an orbiting body
from a set of elliptic, hyperbolic, or parabolic orbital
elements.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/conics_c.html
:param elts: Conic elements.
:type elts: 8-Element Array of floats
:param et: Input time.
:type et: float
:return: State of orbiting body at et.
:rtype: 6-Element Array of floats
"""
elts = stypes.toDoubleVector(elts)
et = ctypes.c_double(et)
state = stypes.emptyDoubleVector(6)
libspice.conics_c(elts, et, state)
return stypes.cVectorToPython(state) | Determine the state (position, velocity) of an orbiting body
from a set of elliptic, hyperbolic, or parabolic orbital
elements.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/conics_c.html
:param elts: Conic elements.
:type elts: 8-Element Array of floats
:param et: Input time.
:type et: float
:return: State of orbiting body at et.
:rtype: 6-Element Array of floats | Below is the the instruction that describes the task:
### Input:
Determine the state (position, velocity) of an orbiting body
from a set of elliptic, hyperbolic, or parabolic orbital
elements.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/conics_c.html
:param elts: Conic elements.
:type elts: 8-Element Array of floats
:param et: Input time.
:type et: float
:return: State of orbiting body at et.
:rtype: 6-Element Array of floats
### Response:
def conics(elts, et):
"""
Determine the state (position, velocity) of an orbiting body
from a set of elliptic, hyperbolic, or parabolic orbital
elements.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/conics_c.html
:param elts: Conic elements.
:type elts: 8-Element Array of floats
:param et: Input time.
:type et: float
:return: State of orbiting body at et.
:rtype: 6-Element Array of floats
"""
elts = stypes.toDoubleVector(elts)
et = ctypes.c_double(et)
state = stypes.emptyDoubleVector(6)
libspice.conics_c(elts, et, state)
return stypes.cVectorToPython(state) |
def _processDDL(self):
"""Generate and process table SQL, SQLLite version"""
sql_statements = self._generateDDL()
logging.info('Generating sqllite tables')
for stmt in sql_statements:
c = self.conn.cursor()
c.execute(stmt)
self.conn.commit() | Generate and process table SQL, SQLLite version | Below is the the instruction that describes the task:
### Input:
Generate and process table SQL, SQLLite version
### Response:
def _processDDL(self):
"""Generate and process table SQL, SQLLite version"""
sql_statements = self._generateDDL()
logging.info('Generating sqllite tables')
for stmt in sql_statements:
c = self.conn.cursor()
c.execute(stmt)
self.conn.commit() |
def layout(self, value):
'Overloaded layout function to fix component names as needed'
if self._adjust_id:
self._fix_component_id(value)
return Dash.layout.fset(self, value) | Overloaded layout function to fix component names as needed | Below is the the instruction that describes the task:
### Input:
Overloaded layout function to fix component names as needed
### Response:
def layout(self, value):
'Overloaded layout function to fix component names as needed'
if self._adjust_id:
self._fix_component_id(value)
return Dash.layout.fset(self, value) |
def decode_door(packet, channel=1):
"""Decode a door sensor."""
val = str(packet.get(QSDATA, ''))
if len(val) == 6 and val.startswith('46') and channel == 1:
return val[-1] == '0'
return None | Decode a door sensor. | Below is the the instruction that describes the task:
### Input:
Decode a door sensor.
### Response:
def decode_door(packet, channel=1):
"""Decode a door sensor."""
val = str(packet.get(QSDATA, ''))
if len(val) == 6 and val.startswith('46') and channel == 1:
return val[-1] == '0'
return None |
def size(self, source):
'''Get the size component of the given s3url. If it is a
directory, combine the sizes of all the files under
that directory. Subdirectories will not be counted unless
--recursive option is set.
'''
result = []
for src in self.source_expand(source):
size = 0
for f in self.s3walk(src):
size += f['size']
result.append((src, size))
return result | Get the size component of the given s3url. If it is a
directory, combine the sizes of all the files under
that directory. Subdirectories will not be counted unless
--recursive option is set. | Below is the the instruction that describes the task:
### Input:
Get the size component of the given s3url. If it is a
directory, combine the sizes of all the files under
that directory. Subdirectories will not be counted unless
--recursive option is set.
### Response:
def size(self, source):
'''Get the size component of the given s3url. If it is a
directory, combine the sizes of all the files under
that directory. Subdirectories will not be counted unless
--recursive option is set.
'''
result = []
for src in self.source_expand(source):
size = 0
for f in self.s3walk(src):
size += f['size']
result.append((src, size))
return result |
def map_permissions_check(view_func):
"""
Used for URLs dealing with the map.
"""
@wraps(view_func)
def wrapper(request, *args, **kwargs):
map_inst = get_object_or_404(Map, pk=kwargs['map_id'])
user = request.user
kwargs['map_inst'] = map_inst # Avoid rerequesting the map in the view
if map_inst.edit_status >= map_inst.EDITORS:
can_edit = map_inst.can_edit(user=user, request=request)
if not can_edit:
if map_inst.owner and not user.is_authenticated:
return simple_json_response(login_required=str(LOGIN_URL))
return HttpResponseForbidden()
return view_func(request, *args, **kwargs)
return wrapper | Used for URLs dealing with the map. | Below is the the instruction that describes the task:
### Input:
Used for URLs dealing with the map.
### Response:
def map_permissions_check(view_func):
"""
Used for URLs dealing with the map.
"""
@wraps(view_func)
def wrapper(request, *args, **kwargs):
map_inst = get_object_or_404(Map, pk=kwargs['map_id'])
user = request.user
kwargs['map_inst'] = map_inst # Avoid rerequesting the map in the view
if map_inst.edit_status >= map_inst.EDITORS:
can_edit = map_inst.can_edit(user=user, request=request)
if not can_edit:
if map_inst.owner and not user.is_authenticated:
return simple_json_response(login_required=str(LOGIN_URL))
return HttpResponseForbidden()
return view_func(request, *args, **kwargs)
return wrapper |
def _prelu(attrs, inputs, proto_obj):
"""PRelu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
return 'LeakyReLU', new_attrs, inputs | PRelu function | Below is the the instruction that describes the task:
### Input:
PRelu function
### Response:
def _prelu(attrs, inputs, proto_obj):
"""PRelu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
return 'LeakyReLU', new_attrs, inputs |
def update_warning_box(self):
"""
updates the warning box with whatever the warning_text variable
contains for this specimen
"""
self.warning_box.Clear()
if self.warning_text == "":
self.warning_box.AppendText("No Problems")
else:
self.warning_box.AppendText(self.warning_text) | updates the warning box with whatever the warning_text variable
contains for this specimen | Below is the the instruction that describes the task:
### Input:
updates the warning box with whatever the warning_text variable
contains for this specimen
### Response:
def update_warning_box(self):
"""
updates the warning box with whatever the warning_text variable
contains for this specimen
"""
self.warning_box.Clear()
if self.warning_text == "":
self.warning_box.AppendText("No Problems")
else:
self.warning_box.AppendText(self.warning_text) |
def remove_resource(self, name):
"""https://github.com/frictionlessdata/datapackage-py#package
"""
resource = self.get_resource(name)
if resource:
predicat = lambda resource: resource.get('name') != name
self.__current_descriptor['resources'] = list(filter(
predicat, self.__current_descriptor['resources']))
self.__build()
return resource | https://github.com/frictionlessdata/datapackage-py#package | Below is the the instruction that describes the task:
### Input:
https://github.com/frictionlessdata/datapackage-py#package
### Response:
def remove_resource(self, name):
"""https://github.com/frictionlessdata/datapackage-py#package
"""
resource = self.get_resource(name)
if resource:
predicat = lambda resource: resource.get('name') != name
self.__current_descriptor['resources'] = list(filter(
predicat, self.__current_descriptor['resources']))
self.__build()
return resource |
def check_pypi_name(pypi_package_name, pypi_registry_host=None):
"""
Check if a package name exists on pypi.
TODO: Document the Registry URL construction.
It may not be obvious how pypi_package_name and pypi_registry_host are used
I'm appending the simple HTTP API parts of the registry standard specification.
It will return True if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is registered in the PyPI registry.
>>> check_pypi_name('pip')
True
>>> check_pypi_name('Pip')
True
It will return False if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is not registered in the PyPI registry.
>>> check_pypi_name('testy_mc-test_case-has.a.cousin_who_should_never_write_a_package')
False
:param pypi_package_name:
:param pypi_registry_host:
:return:
"""
if pypi_registry_host is None:
pypi_registry_host = 'pypi.python.org'
# Just a helpful reminder why this bytearray size was chosen.
# HTTP/1.1 200 OK
# HTTP/1.1 404 Not Found
receive_buffer = bytearray(b'------------')
context = ssl.create_default_context()
ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=pypi_registry_host)
ssl_http_socket.connect((pypi_registry_host, 443))
ssl_http_socket.send(b''.join([
b"HEAD /simple/", pypi_package_name.encode('ascii'), b"/ HTTP/1.0", b"\r\n",
b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
b"\r\n\r\n"
]))
ssl_http_socket.recv_into(receive_buffer)
# Early return when possible.
if b'HTTP/1.1 200' in receive_buffer:
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
return True
elif b'HTTP/1.1 404' in receive_buffer:
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
return False
remaining_bytes = ssl_http_socket.recv(2048)
redirect_path_location_start = remaining_bytes.find(b'Location:') + 10
redirect_path_location_end = remaining_bytes.find(b'\r\n', redirect_path_location_start)
# Append the trailing slash to avoid a needless extra redirect.
redirect_path = remaining_bytes[redirect_path_location_start:redirect_path_location_end] + b'/'
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
# Reset the bytearray to empty
# receive_buffer = bytearray(b'------------')
ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=pypi_registry_host)
ssl_http_socket.connect((pypi_registry_host, 443))
ssl_http_socket.send(b''.join([
b"HEAD ", redirect_path, b" HTTP/1.0", b"\r\n",
b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
b"\r\n\r\n"]))
ssl_http_socket.recv_into(receive_buffer)
if b'HTTP/1.1 200' in receive_buffer:
return True
elif b'HTTP/1.1 404' in receive_buffer:
return False
else:
NotImplementedError('A definitive answer was not found by primary or secondary lookups.') | Check if a package name exists on pypi.
TODO: Document the Registry URL construction.
It may not be obvious how pypi_package_name and pypi_registry_host are used
I'm appending the simple HTTP API parts of the registry standard specification.
It will return True if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is registered in the PyPI registry.
>>> check_pypi_name('pip')
True
>>> check_pypi_name('Pip')
True
It will return False if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is not registered in the PyPI registry.
>>> check_pypi_name('testy_mc-test_case-has.a.cousin_who_should_never_write_a_package')
False
:param pypi_package_name:
:param pypi_registry_host:
:return: | Below is the the instruction that describes the task:
### Input:
Check if a package name exists on pypi.
TODO: Document the Registry URL construction.
It may not be obvious how pypi_package_name and pypi_registry_host are used
I'm appending the simple HTTP API parts of the registry standard specification.
It will return True if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is registered in the PyPI registry.
>>> check_pypi_name('pip')
True
>>> check_pypi_name('Pip')
True
It will return False if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is not registered in the PyPI registry.
>>> check_pypi_name('testy_mc-test_case-has.a.cousin_who_should_never_write_a_package')
False
:param pypi_package_name:
:param pypi_registry_host:
:return:
### Response:
def check_pypi_name(pypi_package_name, pypi_registry_host=None):
"""
Check if a package name exists on pypi.
TODO: Document the Registry URL construction.
It may not be obvious how pypi_package_name and pypi_registry_host are used
I'm appending the simple HTTP API parts of the registry standard specification.
It will return True if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is registered in the PyPI registry.
>>> check_pypi_name('pip')
True
>>> check_pypi_name('Pip')
True
It will return False if the package name, or any equivalent variation as defined by PEP 503 normalisation
rules (https://www.python.org/dev/peps/pep-0503/#normalized-names) is not registered in the PyPI registry.
>>> check_pypi_name('testy_mc-test_case-has.a.cousin_who_should_never_write_a_package')
False
:param pypi_package_name:
:param pypi_registry_host:
:return:
"""
if pypi_registry_host is None:
pypi_registry_host = 'pypi.python.org'
# Just a helpful reminder why this bytearray size was chosen.
# HTTP/1.1 200 OK
# HTTP/1.1 404 Not Found
receive_buffer = bytearray(b'------------')
context = ssl.create_default_context()
ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=pypi_registry_host)
ssl_http_socket.connect((pypi_registry_host, 443))
ssl_http_socket.send(b''.join([
b"HEAD /simple/", pypi_package_name.encode('ascii'), b"/ HTTP/1.0", b"\r\n",
b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
b"\r\n\r\n"
]))
ssl_http_socket.recv_into(receive_buffer)
# Early return when possible.
if b'HTTP/1.1 200' in receive_buffer:
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
return True
elif b'HTTP/1.1 404' in receive_buffer:
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
return False
remaining_bytes = ssl_http_socket.recv(2048)
redirect_path_location_start = remaining_bytes.find(b'Location:') + 10
redirect_path_location_end = remaining_bytes.find(b'\r\n', redirect_path_location_start)
# Append the trailing slash to avoid a needless extra redirect.
redirect_path = remaining_bytes[redirect_path_location_start:redirect_path_location_end] + b'/'
ssl_http_socket.shutdown(1)
ssl_http_socket.close()
# Reset the bytearray to empty
# receive_buffer = bytearray(b'------------')
ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=pypi_registry_host)
ssl_http_socket.connect((pypi_registry_host, 443))
ssl_http_socket.send(b''.join([
b"HEAD ", redirect_path, b" HTTP/1.0", b"\r\n",
b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
b"\r\n\r\n"]))
ssl_http_socket.recv_into(receive_buffer)
if b'HTTP/1.1 200' in receive_buffer:
return True
elif b'HTTP/1.1 404' in receive_buffer:
return False
else:
NotImplementedError('A definitive answer was not found by primary or secondary lookups.') |
def _finalize_axis(self, key, **kwargs):
"""
Extends the ElementPlot _finalize_axis method to set appropriate
labels, and axes options for 3D Plots.
"""
axis = self.handles['axis']
self.handles['fig'].set_frameon(False)
axis.grid(self.show_grid)
axis.view_init(elev=self.elevation, azim=self.azimuth)
axis.dist = self.distance
if self.xaxis is None:
axis.w_xaxis.line.set_lw(0.)
axis.w_xaxis.label.set_text('')
if self.yaxis is None:
axis.w_yaxis.line.set_lw(0.)
axis.w_yaxis.label.set_text('')
if self.zaxis is None:
axis.w_zaxis.line.set_lw(0.)
axis.w_zaxis.label.set_text('')
if self.disable_axes:
axis.set_axis_off()
if mpl_version <= '1.5.9':
axis.set_axis_bgcolor(self.bgcolor)
else:
axis.set_facecolor(self.bgcolor)
return super(Plot3D, self)._finalize_axis(key, **kwargs) | Extends the ElementPlot _finalize_axis method to set appropriate
labels, and axes options for 3D Plots. | Below is the the instruction that describes the task:
### Input:
Extends the ElementPlot _finalize_axis method to set appropriate
labels, and axes options for 3D Plots.
### Response:
def _finalize_axis(self, key, **kwargs):
"""
Extends the ElementPlot _finalize_axis method to set appropriate
labels, and axes options for 3D Plots.
"""
axis = self.handles['axis']
self.handles['fig'].set_frameon(False)
axis.grid(self.show_grid)
axis.view_init(elev=self.elevation, azim=self.azimuth)
axis.dist = self.distance
if self.xaxis is None:
axis.w_xaxis.line.set_lw(0.)
axis.w_xaxis.label.set_text('')
if self.yaxis is None:
axis.w_yaxis.line.set_lw(0.)
axis.w_yaxis.label.set_text('')
if self.zaxis is None:
axis.w_zaxis.line.set_lw(0.)
axis.w_zaxis.label.set_text('')
if self.disable_axes:
axis.set_axis_off()
if mpl_version <= '1.5.9':
axis.set_axis_bgcolor(self.bgcolor)
else:
axis.set_facecolor(self.bgcolor)
return super(Plot3D, self)._finalize_axis(key, **kwargs) |
def make_copy(cls, generator):
"""
Creates a copy of the generator.
:param generator: the generator to copy
:type generator: DataGenerator
:return: the copy of the generator
:rtype: DataGenerator
"""
return from_commandline(
to_commandline(generator), classname=classes.get_classname(DataGenerator())) | Creates a copy of the generator.
:param generator: the generator to copy
:type generator: DataGenerator
:return: the copy of the generator
:rtype: DataGenerator | Below is the the instruction that describes the task:
### Input:
Creates a copy of the generator.
:param generator: the generator to copy
:type generator: DataGenerator
:return: the copy of the generator
:rtype: DataGenerator
### Response:
def make_copy(cls, generator):
"""
Creates a copy of the generator.
:param generator: the generator to copy
:type generator: DataGenerator
:return: the copy of the generator
:rtype: DataGenerator
"""
return from_commandline(
to_commandline(generator), classname=classes.get_classname(DataGenerator())) |
def of_definition(service_def):
"""Create a connection to a Streaming Analytics service.
The single service is defined by `service_def` which can be one of
* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console). Credentials are provided in JSON format. They contain such as the API key and secret, as well as connection information for the service.
* A JSON object (`dict`) of the form: ``{ "type": "streaming-analytics", "name": "service name", "credentials": {...} }`` with the `service credentials` as the value of the ``credentials`` key.
Args:
service_def(dict): Definition of the service to connect to.
Returns:
StreamingAnalyticsConnection: Connection to defined service.
"""
vcap_services = streamsx.topology.context._vcap_from_service_definition(service_def)
service_name = streamsx.topology.context._name_from_service_definition(service_def)
return StreamingAnalyticsConnection(vcap_services, service_name) | Create a connection to a Streaming Analytics service.
The single service is defined by `service_def` which can be one of
* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console). Credentials are provided in JSON format. They contain such as the API key and secret, as well as connection information for the service.
* A JSON object (`dict`) of the form: ``{ "type": "streaming-analytics", "name": "service name", "credentials": {...} }`` with the `service credentials` as the value of the ``credentials`` key.
Args:
service_def(dict): Definition of the service to connect to.
Returns:
StreamingAnalyticsConnection: Connection to defined service. | Below is the the instruction that describes the task:
### Input:
Create a connection to a Streaming Analytics service.
The single service is defined by `service_def` which can be one of
* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console). Credentials are provided in JSON format. They contain such as the API key and secret, as well as connection information for the service.
* A JSON object (`dict`) of the form: ``{ "type": "streaming-analytics", "name": "service name", "credentials": {...} }`` with the `service credentials` as the value of the ``credentials`` key.
Args:
service_def(dict): Definition of the service to connect to.
Returns:
StreamingAnalyticsConnection: Connection to defined service.
### Response:
def of_definition(service_def):
"""Create a connection to a Streaming Analytics service.
The single service is defined by `service_def` which can be one of
* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console). Credentials are provided in JSON format. They contain such as the API key and secret, as well as connection information for the service.
* A JSON object (`dict`) of the form: ``{ "type": "streaming-analytics", "name": "service name", "credentials": {...} }`` with the `service credentials` as the value of the ``credentials`` key.
Args:
service_def(dict): Definition of the service to connect to.
Returns:
StreamingAnalyticsConnection: Connection to defined service.
"""
vcap_services = streamsx.topology.context._vcap_from_service_definition(service_def)
service_name = streamsx.topology.context._name_from_service_definition(service_def)
return StreamingAnalyticsConnection(vcap_services, service_name) |
def clearLocalServices(self):
'send Bye messages for the services and remove them'
for service in list(self._localServices.values()):
self._sendBye(service)
self._localServices.clear() | send Bye messages for the services and remove them | Below is the the instruction that describes the task:
### Input:
send Bye messages for the services and remove them
### Response:
def clearLocalServices(self):
'send Bye messages for the services and remove them'
for service in list(self._localServices.values()):
self._sendBye(service)
self._localServices.clear() |
def delete_vpc_peering_connection(conn_id=None, conn_name=None, region=None,
key=None, keyid=None, profile=None, dry_run=False):
'''
Delete a VPC peering connection.
.. versionadded:: 2016.11.0
conn_id
The connection ID to check. Exclusive with conn_name.
conn_name
The connection name to check. Exclusive with conn_id.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
dry_run
If True, skip application and simply return projected status.
CLI Example:
.. code-block:: bash
# Create a named VPC peering connection
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc
# Specify a region
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc region=us-west-2
# specify an id
salt myminion boto_vpc.delete_vpc_peering_connection conn_id=pcx-8a8939e3
'''
if not _exactly_one((conn_id, conn_name)):
raise SaltInvocationError('Exactly one of conn_id or '
'conn_name must be provided.')
conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
if conn_name:
conn_id = _vpc_peering_conn_id_for_name(conn_name, conn)
if not conn_id:
raise SaltInvocationError("Couldn't resolve VPC peering connection "
"{0} to an ID".format(conn_name))
try:
log.debug('Trying to delete vpc peering connection')
conn.delete_vpc_peering_connection(DryRun=dry_run, VpcPeeringConnectionId=conn_id)
return {'msg': 'VPC peering connection deleted.'}
except botocore.exceptions.ClientError as err:
e = __utils__['boto.get_error'](err)
log.error('Failed to delete VPC peering %s: %s', conn_name or conn_id, e)
return {'error': e} | Delete a VPC peering connection.
.. versionadded:: 2016.11.0
conn_id
The connection ID to check. Exclusive with conn_name.
conn_name
The connection name to check. Exclusive with conn_id.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
dry_run
If True, skip application and simply return projected status.
CLI Example:
.. code-block:: bash
# Create a named VPC peering connection
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc
# Specify a region
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc region=us-west-2
# specify an id
salt myminion boto_vpc.delete_vpc_peering_connection conn_id=pcx-8a8939e3 | Below is the the instruction that describes the task:
### Input:
Delete a VPC peering connection.
.. versionadded:: 2016.11.0
conn_id
The connection ID to check. Exclusive with conn_name.
conn_name
The connection name to check. Exclusive with conn_id.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
dry_run
If True, skip application and simply return projected status.
CLI Example:
.. code-block:: bash
# Create a named VPC peering connection
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc
# Specify a region
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc region=us-west-2
# specify an id
salt myminion boto_vpc.delete_vpc_peering_connection conn_id=pcx-8a8939e3
### Response:
def delete_vpc_peering_connection(conn_id=None, conn_name=None, region=None,
key=None, keyid=None, profile=None, dry_run=False):
'''
Delete a VPC peering connection.
.. versionadded:: 2016.11.0
conn_id
The connection ID to check. Exclusive with conn_name.
conn_name
The connection name to check. Exclusive with conn_id.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
dry_run
If True, skip application and simply return projected status.
CLI Example:
.. code-block:: bash
# Create a named VPC peering connection
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc
# Specify a region
salt myminion boto_vpc.delete_vpc_peering_connection conn_name=salt-vpc region=us-west-2
# specify an id
salt myminion boto_vpc.delete_vpc_peering_connection conn_id=pcx-8a8939e3
'''
if not _exactly_one((conn_id, conn_name)):
raise SaltInvocationError('Exactly one of conn_id or '
'conn_name must be provided.')
conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
if conn_name:
conn_id = _vpc_peering_conn_id_for_name(conn_name, conn)
if not conn_id:
raise SaltInvocationError("Couldn't resolve VPC peering connection "
"{0} to an ID".format(conn_name))
try:
log.debug('Trying to delete vpc peering connection')
conn.delete_vpc_peering_connection(DryRun=dry_run, VpcPeeringConnectionId=conn_id)
return {'msg': 'VPC peering connection deleted.'}
except botocore.exceptions.ClientError as err:
e = __utils__['boto.get_error'](err)
log.error('Failed to delete VPC peering %s: %s', conn_name or conn_id, e)
return {'error': e} |
def todjango(table, model, update=True, create=True, use_bulk_create=True, *args, **kwargs):
'''
Given a table with appropriate headings create Django models.
'''
assert issubclass(model, Model), 'Must be supplied a valid Django model class'
table_iterator = iter(table)
table_headers = table_iterator.next()
model_pk_field_name = model._meta.pk.name
model_field_names = _get_model_field_names(model)
model_name = model.__name__
existing_models = _get_django_objects(model)
existing_model_map = dict([(m.pk, m) for m in existing_models])
if update:
# if we are going to update existing models we need to have a table field that
# corresponds to the model 'pk' field.
assert model_pk_field_name in set(model_field_names), 'To be able to update existing models the data must have a field corresponding to the Django Primary Key field {}'.format(model_pk_field_name)
# existing_model_pks = model.objects.all().values_list('pk', flat=True) # TODO: this could be a performance bottleneck if lots of models are being updated
updated_model_count = 0
unsaved_models = []
for row in table_iterator:
value_map = dict(zip(table_headers, row))
pk = value_map.get(model_pk_field_name, None)
try:
django_object = existing_model_map[pk]
if _will_model_change(value_map, django_object):
_apply_value_map(value_map, django_object)
try:
django_object.save()
except Exception as e:
# Add the data that cause the exception to the exception as reraise
e.petl_data = value_map
raise e
updated_model_count += 1
except KeyError:
django_object = model(**value_map)
if use_bulk_create:
unsaved_models.append(django_object)
else:
try:
django_object.save()
except Exception as e:
e.petl_data = value_map
raise e
logger.debug('Bulk creating unsaved {}'.format(model_name))
if use_bulk_create:
_chunked_bulk_create(model, unsaved_models)
msg = 'Updated {} existing {}'.format(updated_model_count, model_name)
logger.info(msg)
msg = 'Created {} new {}'.format(len(unsaved_models), model_name)
logger.info(msg) | Given a table with appropriate headings create Django models. | Below is the the instruction that describes the task:
### Input:
Given a table with appropriate headings create Django models.
### Response:
def todjango(table, model, update=True, create=True, use_bulk_create=True, *args, **kwargs):
'''
Given a table with appropriate headings create Django models.
'''
assert issubclass(model, Model), 'Must be supplied a valid Django model class'
table_iterator = iter(table)
table_headers = table_iterator.next()
model_pk_field_name = model._meta.pk.name
model_field_names = _get_model_field_names(model)
model_name = model.__name__
existing_models = _get_django_objects(model)
existing_model_map = dict([(m.pk, m) for m in existing_models])
if update:
# if we are going to update existing models we need to have a table field that
# corresponds to the model 'pk' field.
assert model_pk_field_name in set(model_field_names), 'To be able to update existing models the data must have a field corresponding to the Django Primary Key field {}'.format(model_pk_field_name)
# existing_model_pks = model.objects.all().values_list('pk', flat=True) # TODO: this could be a performance bottleneck if lots of models are being updated
updated_model_count = 0
unsaved_models = []
for row in table_iterator:
value_map = dict(zip(table_headers, row))
pk = value_map.get(model_pk_field_name, None)
try:
django_object = existing_model_map[pk]
if _will_model_change(value_map, django_object):
_apply_value_map(value_map, django_object)
try:
django_object.save()
except Exception as e:
# Add the data that cause the exception to the exception as reraise
e.petl_data = value_map
raise e
updated_model_count += 1
except KeyError:
django_object = model(**value_map)
if use_bulk_create:
unsaved_models.append(django_object)
else:
try:
django_object.save()
except Exception as e:
e.petl_data = value_map
raise e
logger.debug('Bulk creating unsaved {}'.format(model_name))
if use_bulk_create:
_chunked_bulk_create(model, unsaved_models)
msg = 'Updated {} existing {}'.format(updated_model_count, model_name)
logger.info(msg)
msg = 'Created {} new {}'.format(len(unsaved_models), model_name)
logger.info(msg) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.