repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
scour-project/scour
scour/scour.py
controlPoints
def controlPoints(cmd, data): """ Checks if there are control points in the path data Returns the indices of all values in the path data which are control points """ cmd = cmd.lower() if cmd in ['c', 's', 'q']: indices = range(len(data)) if cmd == 'c': # c: (x1 y1 x2 y2 x y)+ return [index for index in indices if (index % 6) < 4] elif cmd in ['s', 'q']: # s: (x2 y2 x y)+ q: (x1 y1 x y)+ return [index for index in indices if (index % 4) < 2] return []
python
def controlPoints(cmd, data):
    """
    Checks if there are control points in the path data

    Returns the indices of all values in the path data which are control points
    """
    command = cmd.lower()
    if command == 'c':
        # c: (x1 y1 x2 y2 x y)+  -> the first four of every six values
        return [i for i in range(len(data)) if i % 6 < 4]
    if command in ('s', 'q'):
        # s: (x2 y2 x y)+   q: (x1 y1 x y)+  -> the first two of every four values
        return [i for i in range(len(data)) if i % 4 < 2]
    return []
[ "def", "controlPoints", "(", "cmd", ",", "data", ")", ":", "cmd", "=", "cmd", ".", "lower", "(", ")", "if", "cmd", "in", "[", "'c'", ",", "'s'", ",", "'q'", "]", ":", "indices", "=", "range", "(", "len", "(", "data", ")", ")", "if", "cmd", "=...
Checks if there are control points in the path data Returns the indices of all values in the path data which are control points
[ "Checks", "if", "there", "are", "control", "points", "in", "the", "path", "data" ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2643-L2657
train
51,600
scour-project/scour
scour/scour.py
flags
def flags(cmd, data): """ Checks if there are flags in the path data Returns the indices of all values in the path data which are flags """ if cmd.lower() == 'a': # a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+ indices = range(len(data)) return [index for index in indices if (index % 7) in [3, 4]] return []
python
def flags(cmd, data):
    """
    Checks if there are flags in the path data

    Returns the indices of all values in the path data which are flags
    """
    if cmd.lower() != 'a':
        return []
    # a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
    # positions 3 and 4 within each group of seven are the two flags
    return [i for i, _ in enumerate(data) if i % 7 in (3, 4)]
[ "def", "flags", "(", "cmd", ",", "data", ")", ":", "if", "cmd", ".", "lower", "(", ")", "==", "'a'", ":", "# a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+", "indices", "=", "range", "(", "len", "(", "data", ")", ")", "return", "[", "index", "fo...
Checks if there are flags in the path data Returns the indices of all values in the path data which are flags
[ "Checks", "if", "there", "are", "flags", "in", "the", "path", "data" ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2660-L2670
train
51,601
scour-project/scour
scour/scour.py
serializePath
def serializePath(pathObj, options): """ Reserializes the path data with some cleanups. """ # elliptical arc commands must have comma/wsp separating the coordinates # this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754 return ''.join([cmd + scourCoordinates(data, options, control_points=controlPoints(cmd, data), flags=flags(cmd, data)) for cmd, data in pathObj])
python
def serializePath(pathObj, options):
    """
    Reserializes the path data with some cleanups.
    """
    # elliptical arc commands must have comma/wsp separating the coordinates
    # this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754
    segments = []
    for cmd, data in pathObj:
        coords = scourCoordinates(data, options,
                                  control_points=controlPoints(cmd, data),
                                  flags=flags(cmd, data))
        segments.append(cmd + coords)
    return ''.join(segments)
[ "def", "serializePath", "(", "pathObj", ",", "options", ")", ":", "# elliptical arc commands must have comma/wsp separating the coordinates", "# this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754", "return", "''", ".", "join", "(", "[", "cmd", "+", "...
Reserializes the path data with some cleanups.
[ "Reserializes", "the", "path", "data", "with", "some", "cleanups", "." ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2673-L2682
train
51,602
scour-project/scour
scour/scour.py
serializeTransform
def serializeTransform(transformObj): """ Reserializes the transform data with some cleanups. """ return ' '.join([command + '(' + ' '.join([scourUnitlessLength(number) for number in numbers]) + ')' for command, numbers in transformObj])
python
def serializeTransform(transformObj):
    """
    Reserializes the transform data with some cleanups.
    """
    parts = []
    for command, numbers in transformObj:
        args = ' '.join(scourUnitlessLength(number) for number in numbers)
        parts.append(command + '(' + args + ')')
    return ' '.join(parts)
[ "def", "serializeTransform", "(", "transformObj", ")", ":", "return", "' '", ".", "join", "(", "[", "command", "+", "'('", "+", "' '", ".", "join", "(", "[", "scourUnitlessLength", "(", "number", ")", "for", "number", "in", "numbers", "]", ")", "+", "'...
Reserializes the transform data with some cleanups.
[ "Reserializes", "the", "transform", "data", "with", "some", "cleanups", "." ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2685-L2690
train
51,603
scour-project/scour
scour/scour.py
scourLength
def scourLength(length): """ Scours a length. Accepts units. """ length = SVGLength(length) return scourUnitlessLength(length.value) + Unit.str(length.units)
python
def scourLength(length):
    """
    Scours a length. Accepts units.
    """
    parsed = SVGLength(length)
    # scour the numeric part, then re-attach the (possibly empty) unit suffix
    return scourUnitlessLength(parsed.value) + Unit.str(parsed.units)
[ "def", "scourLength", "(", "length", ")", ":", "length", "=", "SVGLength", "(", "length", ")", "return", "scourUnitlessLength", "(", "length", ".", "value", ")", "+", "Unit", ".", "str", "(", "length", ".", "units", ")" ]
Scours a length. Accepts units.
[ "Scours", "a", "length", ".", "Accepts", "units", "." ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2745-L2751
train
51,604
scour-project/scour
scour/scour.py
scourUnitlessLength
def scourUnitlessLength(length, renderer_workaround=False, is_control_point=False): # length is of a numeric type """ Scours the numeric part of a length only. Does not accept units. This is faster than scourLength on elements guaranteed not to contain units. """ if not isinstance(length, Decimal): length = getcontext().create_decimal(str(length)) initial_length = length # reduce numeric precision # plus() corresponds to the unary prefix plus operator and applies context precision and rounding if is_control_point: length = scouringContextC.plus(length) else: length = scouringContext.plus(length) # remove trailing zeroes as we do not care for significance intLength = length.to_integral_value() if length == intLength: length = Decimal(intLength) else: length = length.normalize() # Gather the non-scientific notation version of the coordinate. # Re-quantize from the initial value to prevent unnecessary loss of precision # (e.g. 123.4 should become 123, not 120 or even 100) nonsci = '{0:f}'.format(length) nonsci = '{0:f}'.format(initial_length.quantize(Decimal(nonsci))) if not renderer_workaround: if len(nonsci) > 2 and nonsci[:2] == '0.': nonsci = nonsci[1:] # remove the 0, leave the dot elif len(nonsci) > 3 and nonsci[:3] == '-0.': nonsci = '-' + nonsci[2:] # remove the 0, leave the minus and dot return_value = nonsci # Gather the scientific notation version of the coordinate which # can only be shorter if the length of the number is at least 4 characters (e.g. 1000 = 1e3). if len(nonsci) > 3: # We have to implement this ourselves since both 'normalize()' and 'to_sci_string()' # don't handle negative exponents in a reasonable way (e.g. 0.000001 remains unchanged) exponent = length.adjusted() # how far do we have to shift the dot? length = length.scaleb(-exponent).normalize() # shift the dot and remove potential trailing zeroes sci = six.text_type(length) + 'e' + six.text_type(exponent) if len(sci) < len(nonsci): return_value = sci return return_value
python
def scourUnitlessLength(length, renderer_workaround=False, is_control_point=False):  # length is of a numeric type
    """
    Scours the numeric part of a length only. Does not accept units.

    This is faster than scourLength on elements guaranteed not to contain units.
    """
    # normalise the input into a Decimal so quantize()/normalize() below behave consistently
    if not isinstance(length, Decimal):
        length = getcontext().create_decimal(str(length))
    initial_length = length

    # reduce numeric precision
    # plus() corresponds to the unary prefix plus operator and applies context precision and rounding
    if is_control_point:
        length = scouringContextC.plus(length)
    else:
        length = scouringContext.plus(length)

    # remove trailing zeroes as we do not care for significance
    intLength = length.to_integral_value()
    if length == intLength:
        length = Decimal(intLength)
    else:
        length = length.normalize()

    # Gather the non-scientific notation version of the coordinate.
    # Re-quantize from the initial value to prevent unnecessary loss of precision
    # (e.g. 123.4 should become 123, not 120 or even 100)
    nonsci = '{0:f}'.format(length)
    nonsci = '{0:f}'.format(initial_length.quantize(Decimal(nonsci)))
    if not renderer_workaround:
        if len(nonsci) > 2 and nonsci[:2] == '0.':
            nonsci = nonsci[1:]  # remove the 0, leave the dot
        elif len(nonsci) > 3 and nonsci[:3] == '-0.':
            nonsci = '-' + nonsci[2:]  # remove the 0, leave the minus and dot
    return_value = nonsci

    # Gather the scientific notation version of the coordinate which
    # can only be shorter if the length of the number is at least 4 characters (e.g. 1000 = 1e3).
    if len(nonsci) > 3:
        # We have to implement this ourselves since both 'normalize()' and 'to_sci_string()'
        # don't handle negative exponents in a reasonable way (e.g. 0.000001 remains unchanged)
        exponent = length.adjusted()  # how far do we have to shift the dot?
        length = length.scaleb(-exponent).normalize()  # shift the dot and remove potential trailing zeroes
        sci = six.text_type(length) + 'e' + six.text_type(exponent)

        # keep whichever rendering is shorter
        if len(sci) < len(nonsci):
            return_value = sci

    return return_value
[ "def", "scourUnitlessLength", "(", "length", ",", "renderer_workaround", "=", "False", ",", "is_control_point", "=", "False", ")", ":", "# length is of a numeric type", "if", "not", "isinstance", "(", "length", ",", "Decimal", ")", ":", "length", "=", "getcontext"...
Scours the numeric part of a length only. Does not accept units. This is faster than scourLength on elements guaranteed not to contain units.
[ "Scours", "the", "numeric", "part", "of", "a", "length", "only", ".", "Does", "not", "accept", "units", "." ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2754-L2804
train
51,605
scour-project/scour
scour/scour.py
reducePrecision
def reducePrecision(element): """ Because opacities, letter spacings, stroke widths and all that don't need to be preserved in SVG files with 9 digits of precision. Takes all of these attributes, in the given element node and its children, and reduces their precision to the current Decimal context's precision. Also checks for the attributes actually being lengths, not 'inherit', 'none' or anything that isn't an SVGLength. Returns the number of bytes saved after performing these reductions. """ num = 0 styles = _getStyle(element) for lengthAttr in ['opacity', 'flood-opacity', 'fill-opacity', 'stroke-opacity', 'stop-opacity', 'stroke-miterlimit', 'stroke-dashoffset', 'letter-spacing', 'word-spacing', 'kerning', 'font-size-adjust', 'font-size', 'stroke-width']: val = element.getAttribute(lengthAttr) if val != '': valLen = SVGLength(val) if valLen.units != Unit.INVALID: # not an absolute/relative size or inherit, can be % though newVal = scourLength(val) if len(newVal) < len(val): num += len(val) - len(newVal) element.setAttribute(lengthAttr, newVal) # repeat for attributes hidden in styles if lengthAttr in styles: val = styles[lengthAttr] valLen = SVGLength(val) if valLen.units != Unit.INVALID: newVal = scourLength(val) if len(newVal) < len(val): num += len(val) - len(newVal) styles[lengthAttr] = newVal _setStyle(element, styles) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: num += reducePrecision(child) return num
python
def reducePrecision(element):
    """
    Because opacities, letter spacings, stroke widths and all that don't need
    to be preserved in SVG files with 9 digits of precision.

    Takes all of these attributes, in the given element node and its children,
    and reduces their precision to the current Decimal context's precision.
    Also checks for the attributes actually being lengths, not 'inherit',
    'none' or anything that isn't an SVGLength.

    Returns the number of bytes saved after performing these reductions.
    """
    saved = 0
    styles = _getStyle(element)
    length_attrs = ['opacity', 'flood-opacity', 'fill-opacity',
                    'stroke-opacity', 'stop-opacity', 'stroke-miterlimit',
                    'stroke-dashoffset', 'letter-spacing', 'word-spacing',
                    'kerning', 'font-size-adjust', 'font-size', 'stroke-width']
    for attr in length_attrs:
        # plain XML attribute
        val = element.getAttribute(attr)
        if val != '':
            if SVGLength(val).units != Unit.INVALID:
                # not an absolute/relative size or inherit, can be % though
                shorter = scourLength(val)
                if len(shorter) < len(val):
                    saved += len(val) - len(shorter)
                    element.setAttribute(attr, shorter)
        # repeat for attributes hidden in styles
        if attr in styles:
            val = styles[attr]
            if SVGLength(val).units != Unit.INVALID:
                shorter = scourLength(val)
                if len(shorter) < len(val):
                    saved += len(val) - len(shorter)
                    styles[attr] = shorter
    _setStyle(element, styles)

    for child in element.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            saved += reducePrecision(child)

    return saved
[ "def", "reducePrecision", "(", "element", ")", ":", "num", "=", "0", "styles", "=", "_getStyle", "(", "element", ")", "for", "lengthAttr", "in", "[", "'opacity'", ",", "'flood-opacity'", ",", "'fill-opacity'", ",", "'stroke-opacity'", ",", "'stop-opacity'", ",...
Because opacities, letter spacings, stroke widths and all that don't need to be preserved in SVG files with 9 digits of precision. Takes all of these attributes, in the given element node and its children, and reduces their precision to the current Decimal context's precision. Also checks for the attributes actually being lengths, not 'inherit', 'none' or anything that isn't an SVGLength. Returns the number of bytes saved after performing these reductions.
[ "Because", "opacities", "letter", "spacings", "stroke", "widths", "and", "all", "that", "don", "t", "need", "to", "be", "preserved", "in", "SVG", "files", "with", "9", "digits", "of", "precision", "." ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2807-L2850
train
51,606
scour-project/scour
scour/scour.py
optimizeAngle
def optimizeAngle(angle): """ Because any rotation can be expressed within 360 degrees of any given number, and since negative angles sometimes are one character longer than corresponding positive angle, we shorten the number to one in the range to [-90, 270[. """ # First, we put the new angle in the range ]-360, 360[. # The modulo operator yields results with the sign of the # divisor, so for negative dividends, we preserve the sign # of the angle. if angle < 0: angle %= -360 else: angle %= 360 # 720 degrees is unnecessary, as 360 covers all angles. # As "-x" is shorter than "35x" and "-xxx" one character # longer than positive angles <= 260, we constrain angle # range to [-90, 270[ (or, equally valid: ]-100, 260]). if angle >= 270: angle -= 360 elif angle < -90: angle += 360 return angle
python
def optimizeAngle(angle):
    """
    Because any rotation can be expressed within 360 degrees of any given
    number, and since negative angles sometimes are one character longer
    than corresponding positive angle, we shorten the number to one in the
    range to [-90, 270[.
    """
    # First fold the angle into ]-360, 360[. Python's modulo takes the
    # sign of the divisor, so a negative divisor preserves the sign of
    # negative inputs.
    sign_mod = -360 if angle < 0 else 360
    angle %= sign_mod

    # "-x" is shorter than "35x" while "-xxx" is one character longer than
    # positive angles <= 260, so constrain the result to [-90, 270[
    # (equally valid would be ]-100, 260]).
    if not (-90 <= angle < 270):
        angle += -360 if angle >= 270 else 360

    return angle
[ "def", "optimizeAngle", "(", "angle", ")", ":", "# First, we put the new angle in the range ]-360, 360[.", "# The modulo operator yields results with the sign of the", "# divisor, so for negative dividends, we preserve the sign", "# of the angle.", "if", "angle", "<", "0", ":", "angle"...
Because any rotation can be expressed within 360 degrees of any given number, and since negative angles sometimes are one character longer than corresponding positive angle, we shorten the number to one in the range to [-90, 270[.
[ "Because", "any", "rotation", "can", "be", "expressed", "within", "360", "degrees", "of", "any", "given", "number", "and", "since", "negative", "angles", "sometimes", "are", "one", "character", "longer", "than", "corresponding", "positive", "angle", "we", "short...
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2853-L2876
train
51,607
scour-project/scour
scour/scour.py
optimizeTransforms
def optimizeTransforms(element, options): """ Attempts to optimise transform specifications on the given node and its children. Returns the number of bytes saved after performing these reductions. """ num = 0 for transformAttr in ['transform', 'patternTransform', 'gradientTransform']: val = element.getAttribute(transformAttr) if val != '': transform = svg_transform_parser.parse(val) optimizeTransform(transform) newVal = serializeTransform(transform) if len(newVal) < len(val): if len(newVal): element.setAttribute(transformAttr, newVal) else: element.removeAttribute(transformAttr) num += len(val) - len(newVal) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: num += optimizeTransforms(child, options) return num
python
def optimizeTransforms(element, options):
    """
    Attempts to optimise transform specifications on the given node and its children.

    Returns the number of bytes saved after performing these reductions.
    """
    saved = 0

    for attr in ('transform', 'patternTransform', 'gradientTransform'):
        old = element.getAttribute(attr)
        if old != '':
            transform = svg_transform_parser.parse(old)
            optimizeTransform(transform)
            new = serializeTransform(transform)

            if len(new) < len(old):
                # a transform that serialized to nothing is removed outright
                # instead of being written back as an empty attribute
                if new:
                    element.setAttribute(attr, new)
                else:
                    element.removeAttribute(attr)
                saved += len(old) - len(new)

    for child in element.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            saved += optimizeTransforms(child, options)

    return saved
[ "def", "optimizeTransforms", "(", "element", ",", "options", ")", ":", "num", "=", "0", "for", "transformAttr", "in", "[", "'transform'", ",", "'patternTransform'", ",", "'gradientTransform'", "]", ":", "val", "=", "element", ".", "getAttribute", "(", "transfo...
Attempts to optimise transform specifications on the given node and its children. Returns the number of bytes saved after performing these reductions.
[ "Attempts", "to", "optimise", "transform", "specifications", "on", "the", "given", "node", "and", "its", "children", "." ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L3041-L3069
train
51,608
scour-project/scour
scour/scour.py
removeComments
def removeComments(element): """ Removes comments from the element and its children. """ global _num_bytes_saved_in_comments num = 0 if isinstance(element, xml.dom.minidom.Comment): _num_bytes_saved_in_comments += len(element.data) element.parentNode.removeChild(element) num += 1 else: for subelement in element.childNodes[:]: num += removeComments(subelement) return num
python
def removeComments(element):
    """
    Removes comments from the element and its children.
    """
    global _num_bytes_saved_in_comments

    if isinstance(element, xml.dom.minidom.Comment):
        # track byte savings for the final report, then detach the node
        _num_bytes_saved_in_comments += len(element.data)
        element.parentNode.removeChild(element)
        return 1

    removed = 0
    # iterate over a copy, since removals mutate childNodes while we recurse
    for child in element.childNodes[:]:
        removed += removeComments(child)
    return removed
[ "def", "removeComments", "(", "element", ")", ":", "global", "_num_bytes_saved_in_comments", "num", "=", "0", "if", "isinstance", "(", "element", ",", "xml", ".", "dom", ".", "minidom", ".", "Comment", ")", ":", "_num_bytes_saved_in_comments", "+=", "len", "("...
Removes comments from the element and its children.
[ "Removes", "comments", "from", "the", "element", "and", "its", "children", "." ]
049264eba6b1a54ae5ba1d6a5077d8e7b80e8835
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L3072-L3087
train
51,609
vedvyas/doxytag2zealdb
doxytag2zealdb/propertylist.py
DoxygenPropertyList.save
def save(self): '''Save current property list representation to the original file.''' with open(self.filename, 'w') as plist_file: plist_file.write(str(self.soup))
python
def save(self):
    '''Save current property list representation to the original file.'''
    serialized = str(self.soup)
    with open(self.filename, 'w') as plist_file:
        plist_file.write(serialized)
[ "def", "save", "(", "self", ")", ":", "with", "open", "(", "self", ".", "filename", ",", "'w'", ")", "as", "plist_file", ":", "plist_file", ".", "write", "(", "str", "(", "self", ".", "soup", ")", ")" ]
Save current property list representation to the original file.
[ "Save", "current", "property", "list", "representation", "to", "the", "original", "file", "." ]
8b07a88af6794248f8cfdabb0fda9dd61c777127
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/propertylist.py#L112-L115
train
51,610
mezz64/pyEight
pyeight/eight.py
EightSleep.fetch_userid
def fetch_userid(self, side): """Return the userid for the specified bed side.""" for user in self.users: obj = self.users[user] if obj.side == side: return user
python
def fetch_userid(self, side):
    """Return the userid for the specified bed side."""
    # None when no user occupies the requested side
    return next(
        (userid for userid, user in self.users.items() if user.side == side),
        None,
    )
[ "def", "fetch_userid", "(", "self", ",", "side", ")", ":", "for", "user", "in", "self", ".", "users", ":", "obj", "=", "self", ".", "users", "[", "user", "]", "if", "obj", ".", "side", "==", "side", ":", "return", "user" ]
Return the userid for the specified bed side.
[ "Return", "the", "userid", "for", "the", "specified", "bed", "side", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L84-L89
train
51,611
mezz64/pyEight
pyeight/eight.py
EightSleep.start
async def start(self): """Start api initialization.""" _LOGGER.debug('Initializing pyEight Version: %s', __version__) await self.fetch_token() if self._token is not None: await self.fetch_device_list() await self.assign_users() return True else: # We couldn't authenticate return False
python
async def start(self):
    """Start api initialization."""
    _LOGGER.debug('Initializing pyEight Version: %s', __version__)
    await self.fetch_token()
    if self._token is None:
        # We couldn't authenticate
        return False
    await self.fetch_device_list()
    await self.assign_users()
    return True
[ "async", "def", "start", "(", "self", ")", ":", "_LOGGER", ".", "debug", "(", "'Initializing pyEight Version: %s'", ",", "__version__", ")", "await", "self", ".", "fetch_token", "(", ")", "if", "self", ".", "_token", "is", "not", "None", ":", "await", "sel...
Start api initialization.
[ "Start", "api", "initialization", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L96-L106
train
51,612
mezz64/pyEight
pyeight/eight.py
EightSleep.fetch_token
async def fetch_token(self): """Fetch new session token from api.""" url = '{}/login'.format(API_URL) payload = 'email={}&password={}'.format(self._email, self._password) reg = await self.api_post(url, None, payload) if reg is None: _LOGGER.error('Unable to authenticate and fetch eight token.') else: self._userid = reg['session']['userId'] self._token = reg['session']['token'] self._expdate = reg['session']['expirationDate'] _LOGGER.debug('UserID: %s, Token: %s', self._userid, self.token)
python
async def fetch_token(self):
    """Fetch new session token from api.

    On success stores the user id, session token and token expiration
    date on the instance; on failure logs an error and leaves them
    unchanged.
    """
    from urllib.parse import urlencode

    url = '{}/login'.format(API_URL)
    # BUG FIX: the credentials were previously interpolated raw into the
    # form-encoded body ('email={}&password={}'), so an email or password
    # containing '&', '=' or '%' corrupted the request. urlencode() escapes
    # reserved characters; the server decodes them back to the same values.
    payload = urlencode({'email': self._email, 'password': self._password})

    reg = await self.api_post(url, None, payload)
    if reg is None:
        _LOGGER.error('Unable to authenticate and fetch eight token.')
    else:
        self._userid = reg['session']['userId']
        self._token = reg['session']['token']
        self._expdate = reg['session']['expirationDate']

        _LOGGER.debug('UserID: %s, Token: %s', self._userid, self.token)
[ "async", "def", "fetch_token", "(", "self", ")", ":", "url", "=", "'{}/login'", ".", "format", "(", "API_URL", ")", "payload", "=", "'email={}&password={}'", ".", "format", "(", "self", ".", "_email", ",", "self", ".", "_password", ")", "reg", "=", "awai...
Fetch new session token from api.
[ "Fetch", "new", "session", "token", "from", "api", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L113-L125
train
51,613
mezz64/pyEight
pyeight/eight.py
EightSleep.fetch_device_list
async def fetch_device_list(self): """Fetch list of devices.""" url = '{}/users/me'.format(API_URL) dlist = await self.api_get(url) if dlist is None: _LOGGER.error('Unable to fetch eight devices.') else: self._devices = dlist['user']['devices'] _LOGGER.debug('Devices: %s', self._devices)
python
async def fetch_device_list(self):
    """Fetch list of devices."""
    url = '{}/users/me'.format(API_URL)

    dlist = await self.api_get(url)
    if dlist is None:
        _LOGGER.error('Unable to fetch eight devices.')
        return
    self._devices = dlist['user']['devices']
    _LOGGER.debug('Devices: %s', self._devices)
[ "async", "def", "fetch_device_list", "(", "self", ")", ":", "url", "=", "'{}/users/me'", ".", "format", "(", "API_URL", ")", "dlist", "=", "await", "self", ".", "api_get", "(", "url", ")", "if", "dlist", "is", "None", ":", "_LOGGER", ".", "error", "(",...
Fetch list of devices.
[ "Fetch", "list", "of", "devices", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L127-L136
train
51,614
mezz64/pyEight
pyeight/eight.py
EightSleep.assign_users
async def assign_users(self): """Update device properties.""" device = self._devices[0] url = '{}/devices/{}?filter=ownerId,leftUserId,rightUserId' \ .format(API_URL, device) data = await self.api_get(url) if data is None: _LOGGER.error('Unable to assign eight device users.') else: # Find the side to the known userid if data['result']['rightUserId'] == self._userid: self.users[data['result']['rightUserId']] = \ EightUser(self, data['result']['rightUserId'], 'right') user_side = 'right' elif data['result']['leftUserId'] == self._userid: self.users[data['result']['leftUserId']] = \ EightUser(self, data['result']['leftUserId'], 'left') user_side = 'left' else: _LOGGER.error('Unable to assign eight device users.') if self._partner: if user_side == 'right': self.users[data['result']['leftUserId']] = \ EightUser(self, data['result']['leftUserId'], 'left') else: self.users[data['result']['rightUserId']] = \ EightUser(self, data['result']['rightUserId'], 'right')
python
async def assign_users(self):
    """Update device properties.

    Looks up which side of the (first) bed belongs to the authenticated
    user, registers an EightUser for it, and — when partner tracking is
    enabled — registers an EightUser for the opposite side too.
    """
    device = self._devices[0]
    url = '{}/devices/{}?filter=ownerId,leftUserId,rightUserId' \
        .format(API_URL, device)

    data = await self.api_get(url)
    if data is None:
        _LOGGER.error('Unable to assign eight device users.')
    else:
        # Find the side to the known userid
        user_side = None  # stays None if we match neither side
        if data['result']['rightUserId'] == self._userid:
            self.users[data['result']['rightUserId']] = \
                EightUser(self, data['result']['rightUserId'], 'right')
            user_side = 'right'
        elif data['result']['leftUserId'] == self._userid:
            self.users[data['result']['leftUserId']] = \
                EightUser(self, data['result']['leftUserId'], 'left')
            user_side = 'left'
        else:
            _LOGGER.error('Unable to assign eight device users.')

        # BUG FIX: 'user_side' was previously unbound when the
        # authenticated user matched neither side, so reaching this
        # branch with self._partner set raised UnboundLocalError.
        # Now the partner assignment is simply skipped in that case.
        if self._partner and user_side is not None:
            if user_side == 'right':
                self.users[data['result']['leftUserId']] = \
                    EightUser(self, data['result']['leftUserId'], 'left')
            else:
                self.users[data['result']['rightUserId']] = \
                    EightUser(self, data['result']['rightUserId'], 'right')
[ "async", "def", "assign_users", "(", "self", ")", ":", "device", "=", "self", ".", "_devices", "[", "0", "]", "url", "=", "'{}/devices/{}?filter=ownerId,leftUserId,rightUserId'", ".", "format", "(", "API_URL", ",", "device", ")", "data", "=", "await", "self", ...
Update device properties.
[ "Update", "device", "properties", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L138-L166
train
51,615
mezz64/pyEight
pyeight/eight.py
EightSleep.room_temperature
def room_temperature(self): """Return room temperature for both sides of bed.""" # Check which side is active, if both are return the average tmp = None tmp2 = None for user in self.users: obj = self.users[user] if obj.current_values['processing']: if tmp is None: tmp = obj.current_values['room_temp'] else: tmp = (tmp + obj.current_values['room_temp']) / 2 else: if tmp2 is None: tmp2 = obj.current_values['room_temp'] else: tmp2 = (tmp2 + obj.current_values['room_temp']) / 2 if tmp is not None: return tmp elif tmp2 is not None: return tmp2
python
def room_temperature(self):
    """Return room temperature for both sides of bed."""
    # Prefer readings from sides that are actively processing; fall back
    # to idle sides. Two readings on the same bucket are averaged.
    active_avg = None
    idle_avg = None
    for user in self.users.values():
        temp = user.current_values['room_temp']
        if user.current_values['processing']:
            active_avg = temp if active_avg is None else (active_avg + temp) / 2
        else:
            idle_avg = temp if idle_avg is None else (idle_avg + temp) / 2

    if active_avg is not None:
        return active_avg
    elif idle_avg is not None:
        return idle_avg
[ "def", "room_temperature", "(", "self", ")", ":", "# Check which side is active, if both are return the average", "tmp", "=", "None", "tmp2", "=", "None", "for", "user", "in", "self", ".", "users", ":", "obj", "=", "self", ".", "users", "[", "user", "]", "if",...
Return room temperature for both sides of bed.
[ "Return", "room", "temperature", "for", "both", "sides", "of", "bed", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L168-L189
train
51,616
mezz64/pyEight
pyeight/eight.py
EightSleep.handle_device_json
def handle_device_json(self, data): """Manage the device json list.""" self._device_json.insert(0, data) self._device_json.pop()
python
def handle_device_json(self, data): """Manage the device json list.""" self._device_json.insert(0, data) self._device_json.pop()
[ "def", "handle_device_json", "(", "self", ",", "data", ")", ":", "self", ".", "_device_json", ".", "insert", "(", "0", ",", "data", ")", "self", ".", "_device_json", ".", "pop", "(", ")" ]
Manage the device json list.
[ "Manage", "the", "device", "json", "list", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L191-L194
train
51,617
mezz64/pyEight
pyeight/eight.py
EightSleep.update_device_data
async def update_device_data(self): """Update device data json.""" url = '{}/devices/{}?offlineView=true'.format(API_URL, self.deviceid) # Check for access token expiration (every 15days) exp_delta = datetime.strptime(self._expdate, '%Y-%m-%dT%H:%M:%S.%fZ') \ - datetime.fromtimestamp(time.time()) # Renew 1hr before expiration if exp_delta.total_seconds() < 3600: _LOGGER.debug('Fetching new access token before expiration.') await self.fetch_token() device_resp = await self.api_get(url) if device_resp is None: _LOGGER.error('Unable to fetch eight device data.') else: # Want to keep last 10 readings so purge the last after we add self.handle_device_json(device_resp['result']) for user in self.users: self.users[user].dynamic_presence()
python
async def update_device_data(self): """Update device data json.""" url = '{}/devices/{}?offlineView=true'.format(API_URL, self.deviceid) # Check for access token expiration (every 15days) exp_delta = datetime.strptime(self._expdate, '%Y-%m-%dT%H:%M:%S.%fZ') \ - datetime.fromtimestamp(time.time()) # Renew 1hr before expiration if exp_delta.total_seconds() < 3600: _LOGGER.debug('Fetching new access token before expiration.') await self.fetch_token() device_resp = await self.api_get(url) if device_resp is None: _LOGGER.error('Unable to fetch eight device data.') else: # Want to keep last 10 readings so purge the last after we add self.handle_device_json(device_resp['result']) for user in self.users: self.users[user].dynamic_presence()
[ "async", "def", "update_device_data", "(", "self", ")", ":", "url", "=", "'{}/devices/{}?offlineView=true'", ".", "format", "(", "API_URL", ",", "self", ".", "deviceid", ")", "# Check for access token expiration (every 15days)", "exp_delta", "=", "datetime", ".", "str...
Update device data json.
[ "Update", "device", "data", "json", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/eight.py#L196-L215
train
51,618
MonashBI/arcana
arcana/environment/static.py
StaticEnv.satisfy
def satisfy(self, *requirements): """ Checks whether the given requirements are satisfiable within the given execution context Parameter --------- requirements : list(Requirement) List of requirements to check whether they are satisfiable """ versions = [] for req_range in requirements: try: version = self._detected_versions[req_range.name] except KeyError: try: version = req_range.requirement.detect_version() except ArcanaRequirementNotFoundError as e: if self._fail_on_missing: raise else: logger.warning(e) except ArcanaVersionNotDetectableError as e: if self._fail_on_undetectable: raise else: logger.warning(e) else: self._detected_versions[req_range.name] = version if not req_range.within(version): raise ArcanaVersionError( "Detected {} version {} is not within requested range {}" .format(req_range.requirement, version, req_range)) versions.append(version) return versions
python
def satisfy(self, *requirements): """ Checks whether the given requirements are satisfiable within the given execution context Parameter --------- requirements : list(Requirement) List of requirements to check whether they are satisfiable """ versions = [] for req_range in requirements: try: version = self._detected_versions[req_range.name] except KeyError: try: version = req_range.requirement.detect_version() except ArcanaRequirementNotFoundError as e: if self._fail_on_missing: raise else: logger.warning(e) except ArcanaVersionNotDetectableError as e: if self._fail_on_undetectable: raise else: logger.warning(e) else: self._detected_versions[req_range.name] = version if not req_range.within(version): raise ArcanaVersionError( "Detected {} version {} is not within requested range {}" .format(req_range.requirement, version, req_range)) versions.append(version) return versions
[ "def", "satisfy", "(", "self", ",", "*", "requirements", ")", ":", "versions", "=", "[", "]", "for", "req_range", "in", "requirements", ":", "try", ":", "version", "=", "self", ".", "_detected_versions", "[", "req_range", ".", "name", "]", "except", "Key...
Checks whether the given requirements are satisfiable within the given execution context Parameter --------- requirements : list(Requirement) List of requirements to check whether they are satisfiable
[ "Checks", "whether", "the", "given", "requirements", "are", "satisfiable", "within", "the", "given", "execution", "context" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/static.py#L30-L64
train
51,619
gwww/elkm1
elkm1_lib/message.py
housecode_to_index
def housecode_to_index(housecode): """Convert a X10 housecode to a zero-based index""" match = re.search(r'^([A-P])(\d{1,2})$', housecode.upper()) if match: house_index = int(match.group(2)) if 1 <= house_index <= 16: return (ord(match.group(1)) - ord('A')) * 16 + house_index - 1 raise ValueError("Invalid X10 housecode: %s" % housecode)
python
def housecode_to_index(housecode): """Convert a X10 housecode to a zero-based index""" match = re.search(r'^([A-P])(\d{1,2})$', housecode.upper()) if match: house_index = int(match.group(2)) if 1 <= house_index <= 16: return (ord(match.group(1)) - ord('A')) * 16 + house_index - 1 raise ValueError("Invalid X10 housecode: %s" % housecode)
[ "def", "housecode_to_index", "(", "housecode", ")", ":", "match", "=", "re", ".", "search", "(", "r'^([A-P])(\\d{1,2})$'", ",", "housecode", ".", "upper", "(", ")", ")", "if", "match", ":", "house_index", "=", "int", "(", "match", ".", "group", "(", "2",...
Convert a X10 housecode to a zero-based index
[ "Convert", "a", "X10", "housecode", "to", "a", "zero", "-", "based", "index" ]
078d0de30840c3fab46f1f8534d98df557931e91
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L236-L243
train
51,620
gwww/elkm1
elkm1_lib/message.py
index_to_housecode
def index_to_housecode(index): """Convert a zero-based index to a X10 housecode.""" if index < 0 or index > 255: raise ValueError quotient, remainder = divmod(index, 16) return chr(quotient+ord('A')) + '{:02d}'.format(remainder+1)
python
def index_to_housecode(index): """Convert a zero-based index to a X10 housecode.""" if index < 0 or index > 255: raise ValueError quotient, remainder = divmod(index, 16) return chr(quotient+ord('A')) + '{:02d}'.format(remainder+1)
[ "def", "index_to_housecode", "(", "index", ")", ":", "if", "index", "<", "0", "or", "index", ">", "255", ":", "raise", "ValueError", "quotient", ",", "remainder", "=", "divmod", "(", "index", ",", "16", ")", "return", "chr", "(", "quotient", "+", "ord"...
Convert a zero-based index to a X10 housecode.
[ "Convert", "a", "zero", "-", "based", "index", "to", "a", "X10", "housecode", "." ]
078d0de30840c3fab46f1f8534d98df557931e91
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L246-L251
train
51,621
gwww/elkm1
elkm1_lib/message.py
_check_checksum
def _check_checksum(msg): """Ensure checksum in message is good.""" checksum = int(msg[-2:], 16) for char in msg[:-2]: checksum += ord(char) if (checksum % 256) != 0: raise ValueError("Elk message checksum invalid")
python
def _check_checksum(msg): """Ensure checksum in message is good.""" checksum = int(msg[-2:], 16) for char in msg[:-2]: checksum += ord(char) if (checksum % 256) != 0: raise ValueError("Elk message checksum invalid")
[ "def", "_check_checksum", "(", "msg", ")", ":", "checksum", "=", "int", "(", "msg", "[", "-", "2", ":", "]", ",", "16", ")", "for", "char", "in", "msg", "[", ":", "-", "2", "]", ":", "checksum", "+=", "ord", "(", "char", ")", "if", "(", "chec...
Ensure checksum in message is good.
[ "Ensure", "checksum", "in", "message", "is", "good", "." ]
078d0de30840c3fab46f1f8534d98df557931e91
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L268-L274
train
51,622
gwww/elkm1
elkm1_lib/message.py
_check_message_valid
def _check_message_valid(msg): """Check packet length valid and that checksum is good.""" try: if int(msg[:2], 16) != (len(msg) - 2): raise ValueError("Elk message length incorrect") _check_checksum(msg) except IndexError: raise ValueError("Elk message length incorrect")
python
def _check_message_valid(msg): """Check packet length valid and that checksum is good.""" try: if int(msg[:2], 16) != (len(msg) - 2): raise ValueError("Elk message length incorrect") _check_checksum(msg) except IndexError: raise ValueError("Elk message length incorrect")
[ "def", "_check_message_valid", "(", "msg", ")", ":", "try", ":", "if", "int", "(", "msg", "[", ":", "2", "]", ",", "16", ")", "!=", "(", "len", "(", "msg", ")", "-", "2", ")", ":", "raise", "ValueError", "(", "\"Elk message length incorrect\"", ")", ...
Check packet length valid and that checksum is good.
[ "Check", "packet", "length", "valid", "and", "that", "checksum", "is", "good", "." ]
078d0de30840c3fab46f1f8534d98df557931e91
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L277-L284
train
51,623
gwww/elkm1
elkm1_lib/message.py
MessageDecode.add_handler
def add_handler(self, message_type, handler): """Manage callbacks for message handlers.""" if message_type not in self._handlers: self._handlers[message_type] = [] if handler not in self._handlers[message_type]: self._handlers[message_type].append(handler)
python
def add_handler(self, message_type, handler): """Manage callbacks for message handlers.""" if message_type not in self._handlers: self._handlers[message_type] = [] if handler not in self._handlers[message_type]: self._handlers[message_type].append(handler)
[ "def", "add_handler", "(", "self", ",", "message_type", ",", "handler", ")", ":", "if", "message_type", "not", "in", "self", ".", "_handlers", ":", "self", ".", "_handlers", "[", "message_type", "]", "=", "[", "]", "if", "handler", "not", "in", "self", ...
Manage callbacks for message handlers.
[ "Manage", "callbacks", "for", "message", "handlers", "." ]
078d0de30840c3fab46f1f8534d98df557931e91
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L33-L39
train
51,624
gwww/elkm1
elkm1_lib/message.py
MessageDecode.decode
def decode(self, msg): """Decode an Elk message by passing to appropriate decoder""" _check_message_valid(msg) cmd = msg[2:4] decoder = getattr(self, '_{}_decode'.format(cmd.lower()), None) if not decoder: cmd = 'unknown' decoder = self._unknown_decode decoded_msg = decoder(msg) for handler in self._handlers.get(cmd, []): handler(**decoded_msg)
python
def decode(self, msg): """Decode an Elk message by passing to appropriate decoder""" _check_message_valid(msg) cmd = msg[2:4] decoder = getattr(self, '_{}_decode'.format(cmd.lower()), None) if not decoder: cmd = 'unknown' decoder = self._unknown_decode decoded_msg = decoder(msg) for handler in self._handlers.get(cmd, []): handler(**decoded_msg)
[ "def", "decode", "(", "self", ",", "msg", ")", ":", "_check_message_valid", "(", "msg", ")", "cmd", "=", "msg", "[", "2", ":", "4", "]", "decoder", "=", "getattr", "(", "self", ",", "'_{}_decode'", ".", "format", "(", "cmd", ".", "lower", "(", ")",...
Decode an Elk message by passing to appropriate decoder
[ "Decode", "an", "Elk", "message", "by", "passing", "to", "appropriate", "decoder" ]
078d0de30840c3fab46f1f8534d98df557931e91
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L41-L51
train
51,625
MonashBI/arcana
arcana/repository/xnat.py
XnatRepo.get_fileset
def get_fileset(self, fileset): """ Caches a single fileset (if the 'path' attribute is accessed and it has not been previously cached for example Parameters ---------- fileset : Fileset The fileset to cache prev_login : xnat.XNATSession An XNATSession object to use for the connection. A new one is created if one isn't provided Returns ------- primary_path : str The path of the primary file once it has been cached aux_paths : dict[str, str] A dictionary containing a mapping of auxiliary file names to paths """ if fileset.format is None: raise ArcanaUsageError( "Attempting to download {}, which has not been assigned a " "file format (see Fileset.formatted)".format(fileset)) self._check_repository(fileset) with self: # Connect to the XNAT repository if haven't already xsession = self.get_xsession(fileset) xscan = xsession.scans[fileset.name] # Set URI so we can retrieve checksums if required fileset.uri = xscan.uri fileset.id = xscan.id cache_path = self._cache_path(fileset) need_to_download = True if op.exists(cache_path): if self._check_md5: md5_path = cache_path + XnatRepo.MD5_SUFFIX try: with open(md5_path, 'r') as f: cached_checksums = json.load(f) if cached_checksums == fileset.checksums: need_to_download = False except IOError: pass else: need_to_download = False if need_to_download: # if fileset._resource_name is not None: xresource = xscan.resources[fileset._resource_name] # else: # xresources = [] # for resource_name in fileset.format.xnat_resource_names: # try: # xresources.append(xscan.resources[resource_name]) # except KeyError: # pass # if not xresources: # raise ArcanaError( # "Could not find matching resource for {} ('{}') " # "in {}, available resources are '{}'" # .format( # self.format, # "', '".join( # fileset.format.xnat_resource_names), # xscan.uri, # "', '".join( # r.label # for r in list(xscan.resources.values())))) # elif len(xresources) > 1: # logger.warning( # "Found multiple acceptable resources for {}: {}" # .format(fileset, # ', 
'.join(str(r) for r in xresources))) # xresource = xresources[0] # The path to the directory which the files will be # downloaded to. tmp_dir = cache_path + '.download' try: # Attempt to make tmp download directory. This will # fail if another process (or previous attempt) has # already created it. In that case this process will # wait to see if that download finishes successfully, # and if so use the cached version. os.mkdir(tmp_dir) except OSError as e: if e.errno == errno.EEXIST: # Another process may be concurrently downloading # the same file to the cache. Wait for # 'race_cond_delay' seconds and then check that it # has been completed or assume interrupted and # redownload. self._delayed_download( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path, delay=self._race_cond_delay) else: raise else: self.download_fileset( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path) shutil.rmtree(tmp_dir) if not fileset.format.directory: (primary_path, aux_paths) = fileset.format.assort_files( op.join(cache_path, f) for f in os.listdir(cache_path)) else: primary_path = cache_path aux_paths = None return primary_path, aux_paths
python
def get_fileset(self, fileset): """ Caches a single fileset (if the 'path' attribute is accessed and it has not been previously cached for example Parameters ---------- fileset : Fileset The fileset to cache prev_login : xnat.XNATSession An XNATSession object to use for the connection. A new one is created if one isn't provided Returns ------- primary_path : str The path of the primary file once it has been cached aux_paths : dict[str, str] A dictionary containing a mapping of auxiliary file names to paths """ if fileset.format is None: raise ArcanaUsageError( "Attempting to download {}, which has not been assigned a " "file format (see Fileset.formatted)".format(fileset)) self._check_repository(fileset) with self: # Connect to the XNAT repository if haven't already xsession = self.get_xsession(fileset) xscan = xsession.scans[fileset.name] # Set URI so we can retrieve checksums if required fileset.uri = xscan.uri fileset.id = xscan.id cache_path = self._cache_path(fileset) need_to_download = True if op.exists(cache_path): if self._check_md5: md5_path = cache_path + XnatRepo.MD5_SUFFIX try: with open(md5_path, 'r') as f: cached_checksums = json.load(f) if cached_checksums == fileset.checksums: need_to_download = False except IOError: pass else: need_to_download = False if need_to_download: # if fileset._resource_name is not None: xresource = xscan.resources[fileset._resource_name] # else: # xresources = [] # for resource_name in fileset.format.xnat_resource_names: # try: # xresources.append(xscan.resources[resource_name]) # except KeyError: # pass # if not xresources: # raise ArcanaError( # "Could not find matching resource for {} ('{}') " # "in {}, available resources are '{}'" # .format( # self.format, # "', '".join( # fileset.format.xnat_resource_names), # xscan.uri, # "', '".join( # r.label # for r in list(xscan.resources.values())))) # elif len(xresources) > 1: # logger.warning( # "Found multiple acceptable resources for {}: {}" # .format(fileset, # ', 
'.join(str(r) for r in xresources))) # xresource = xresources[0] # The path to the directory which the files will be # downloaded to. tmp_dir = cache_path + '.download' try: # Attempt to make tmp download directory. This will # fail if another process (or previous attempt) has # already created it. In that case this process will # wait to see if that download finishes successfully, # and if so use the cached version. os.mkdir(tmp_dir) except OSError as e: if e.errno == errno.EEXIST: # Another process may be concurrently downloading # the same file to the cache. Wait for # 'race_cond_delay' seconds and then check that it # has been completed or assume interrupted and # redownload. self._delayed_download( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path, delay=self._race_cond_delay) else: raise else: self.download_fileset( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path) shutil.rmtree(tmp_dir) if not fileset.format.directory: (primary_path, aux_paths) = fileset.format.assort_files( op.join(cache_path, f) for f in os.listdir(cache_path)) else: primary_path = cache_path aux_paths = None return primary_path, aux_paths
[ "def", "get_fileset", "(", "self", ",", "fileset", ")", ":", "if", "fileset", ".", "format", "is", "None", ":", "raise", "ArcanaUsageError", "(", "\"Attempting to download {}, which has not been assigned a \"", "\"file format (see Fileset.formatted)\"", ".", "format", "("...
Caches a single fileset (if the 'path' attribute is accessed and it has not been previously cached for example Parameters ---------- fileset : Fileset The fileset to cache prev_login : xnat.XNATSession An XNATSession object to use for the connection. A new one is created if one isn't provided Returns ------- primary_path : str The path of the primary file once it has been cached aux_paths : dict[str, str] A dictionary containing a mapping of auxiliary file names to paths
[ "Caches", "a", "single", "fileset", "(", "if", "the", "path", "attribute", "is", "accessed", "and", "it", "has", "not", "been", "previously", "cached", "for", "example" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L165-L273
train
51,626
MonashBI/arcana
arcana/repository/xnat.py
XnatRepo.get_checksums
def get_checksums(self, fileset): """ Downloads the MD5 digests associated with the files in the file-set. These are saved with the downloaded files in the cache and used to check if the files have been updated on the server Parameters ---------- resource : xnat.ResourceCatalog The xnat resource file_format : FileFormat The format of the fileset to get the checksums for. Used to determine the primary file within the resource and change the corresponding key in the checksums dictionary to '.' to match the way it is generated locally by Arcana. """ if fileset.uri is None: raise ArcanaUsageError( "Can't retrieve checksums as URI has not been set for {}" .format(fileset)) with self: checksums = {r['Name']: r['digest'] for r in self._login.get_json(fileset.uri + '/files')[ 'ResultSet']['Result']} if not fileset.format.directory: # Replace the key corresponding to the primary file with '.' to # match the way that checksums are created by Arcana primary = fileset.format.assort_files(checksums.keys())[0] checksums['.'] = checksums.pop(primary) return checksums
python
def get_checksums(self, fileset): """ Downloads the MD5 digests associated with the files in the file-set. These are saved with the downloaded files in the cache and used to check if the files have been updated on the server Parameters ---------- resource : xnat.ResourceCatalog The xnat resource file_format : FileFormat The format of the fileset to get the checksums for. Used to determine the primary file within the resource and change the corresponding key in the checksums dictionary to '.' to match the way it is generated locally by Arcana. """ if fileset.uri is None: raise ArcanaUsageError( "Can't retrieve checksums as URI has not been set for {}" .format(fileset)) with self: checksums = {r['Name']: r['digest'] for r in self._login.get_json(fileset.uri + '/files')[ 'ResultSet']['Result']} if not fileset.format.directory: # Replace the key corresponding to the primary file with '.' to # match the way that checksums are created by Arcana primary = fileset.format.assort_files(checksums.keys())[0] checksums['.'] = checksums.pop(primary) return checksums
[ "def", "get_checksums", "(", "self", ",", "fileset", ")", ":", "if", "fileset", ".", "uri", "is", "None", ":", "raise", "ArcanaUsageError", "(", "\"Can't retrieve checksums as URI has not been set for {}\"", ".", "format", "(", "fileset", ")", ")", "with", "self",...
Downloads the MD5 digests associated with the files in the file-set. These are saved with the downloaded files in the cache and used to check if the files have been updated on the server Parameters ---------- resource : xnat.ResourceCatalog The xnat resource file_format : FileFormat The format of the fileset to get the checksums for. Used to determine the primary file within the resource and change the corresponding key in the checksums dictionary to '.' to match the way it is generated locally by Arcana.
[ "Downloads", "the", "MD5", "digests", "associated", "with", "the", "files", "in", "the", "file", "-", "set", ".", "These", "are", "saved", "with", "the", "downloaded", "files", "in", "the", "cache", "and", "used", "to", "check", "if", "the", "files", "ha...
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L377-L406
train
51,627
MonashBI/arcana
arcana/repository/xnat.py
XnatRepo.convert_subject_ids
def convert_subject_ids(self, subject_ids): """ Convert subject ids to strings if they are integers """ # TODO: need to make this generalisable via a # splitting+mapping function passed to the repository if subject_ids is not None: subject_ids = set( ('{:03d}'.format(s) if isinstance(s, int) else s) for s in subject_ids) return subject_ids
python
def convert_subject_ids(self, subject_ids): """ Convert subject ids to strings if they are integers """ # TODO: need to make this generalisable via a # splitting+mapping function passed to the repository if subject_ids is not None: subject_ids = set( ('{:03d}'.format(s) if isinstance(s, int) else s) for s in subject_ids) return subject_ids
[ "def", "convert_subject_ids", "(", "self", ",", "subject_ids", ")", ":", "# TODO: need to make this generalisable via a", "# splitting+mapping function passed to the repository", "if", "subject_ids", "is", "not", "None", ":", "subject_ids", "=", "set", "(", "(", "'{:0...
Convert subject ids to strings if they are integers
[ "Convert", "subject", "ids", "to", "strings", "if", "they", "are", "integers" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L582-L592
train
51,628
MonashBI/arcana
arcana/repository/xnat.py
XnatRepo.get_xsession
def get_xsession(self, item): """ Returns the XNAT session and cache dir corresponding to the item. """ subj_label, sess_label = self._get_item_labels(item) with self: xproject = self._login.projects[self.project_id] try: xsubject = xproject.subjects[subj_label] except KeyError: xsubject = self._login.classes.SubjectData( label=subj_label, parent=xproject) try: xsession = xsubject.experiments[sess_label] except KeyError: xsession = self._login.classes.MrSessionData( label=sess_label, parent=xsubject) if item.derived: xsession.fields[ self.DERIVED_FROM_FIELD] = self._get_item_labels( item, no_from_study=True)[1] return xsession
python
def get_xsession(self, item): """ Returns the XNAT session and cache dir corresponding to the item. """ subj_label, sess_label = self._get_item_labels(item) with self: xproject = self._login.projects[self.project_id] try: xsubject = xproject.subjects[subj_label] except KeyError: xsubject = self._login.classes.SubjectData( label=subj_label, parent=xproject) try: xsession = xsubject.experiments[sess_label] except KeyError: xsession = self._login.classes.MrSessionData( label=sess_label, parent=xsubject) if item.derived: xsession.fields[ self.DERIVED_FROM_FIELD] = self._get_item_labels( item, no_from_study=True)[1] return xsession
[ "def", "get_xsession", "(", "self", ",", "item", ")", ":", "subj_label", ",", "sess_label", "=", "self", ".", "_get_item_labels", "(", "item", ")", "with", "self", ":", "xproject", "=", "self", ".", "_login", ".", "projects", "[", "self", ".", "project_i...
Returns the XNAT session and cache dir corresponding to the item.
[ "Returns", "the", "XNAT", "session", "and", "cache", "dir", "corresponding", "to", "the", "item", "." ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L690-L712
train
51,629
MonashBI/arcana
arcana/environment/requirement/matlab.py
MatlabPackageRequirement.detect_version_str
def detect_version_str(self): """ Try to detect version of package from command help text. Bit of a long shot as they are typically included """ help_text = run_matlab_cmd("help('{}')".format(self.test_func)) if not help_text: raise ArcanaRequirementNotFoundError( "Did not find test function '{}' for {}" .format(self.test_func, self)) return self.parse_help_text(help_text)
python
def detect_version_str(self): """ Try to detect version of package from command help text. Bit of a long shot as they are typically included """ help_text = run_matlab_cmd("help('{}')".format(self.test_func)) if not help_text: raise ArcanaRequirementNotFoundError( "Did not find test function '{}' for {}" .format(self.test_func, self)) return self.parse_help_text(help_text)
[ "def", "detect_version_str", "(", "self", ")", ":", "help_text", "=", "run_matlab_cmd", "(", "\"help('{}')\"", ".", "format", "(", "self", ".", "test_func", ")", ")", "if", "not", "help_text", ":", "raise", "ArcanaRequirementNotFoundError", "(", "\"Did not find te...
Try to detect version of package from command help text. Bit of a long shot as they are typically included
[ "Try", "to", "detect", "version", "of", "package", "from", "command", "help", "text", ".", "Bit", "of", "a", "long", "shot", "as", "they", "are", "typically", "included" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/requirement/matlab.py#L75-L85
train
51,630
mezz64/pyEight
pyeight/user.py
EightUser.target_heating_level
def target_heating_level(self): """Return target heating level.""" try: if self.side == 'left': level = self.device.device_data['leftTargetHeatingLevel'] elif self.side == 'right': level = self.device.device_data['rightTargetHeatingLevel'] return level except TypeError: return None
python
def target_heating_level(self): """Return target heating level.""" try: if self.side == 'left': level = self.device.device_data['leftTargetHeatingLevel'] elif self.side == 'right': level = self.device.device_data['rightTargetHeatingLevel'] return level except TypeError: return None
[ "def", "target_heating_level", "(", "self", ")", ":", "try", ":", "if", "self", ".", "side", "==", "'left'", ":", "level", "=", "self", ".", "device", ".", "device_data", "[", "'leftTargetHeatingLevel'", "]", "elif", "self", ".", "side", "==", "'right'", ...
Return target heating level.
[ "Return", "target", "heating", "level", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L41-L50
train
51,631
mezz64/pyEight
pyeight/user.py
EightUser.heating_level
def heating_level(self): """Return heating level.""" try: if self.side == 'left': level = self.device.device_data['leftHeatingLevel'] elif self.side == 'right': level = self.device.device_data['rightHeatingLevel'] return level except TypeError: return None
python
def heating_level(self): """Return heating level.""" try: if self.side == 'left': level = self.device.device_data['leftHeatingLevel'] elif self.side == 'right': level = self.device.device_data['rightHeatingLevel'] return level except TypeError: return None
[ "def", "heating_level", "(", "self", ")", ":", "try", ":", "if", "self", ".", "side", "==", "'left'", ":", "level", "=", "self", ".", "device", ".", "device_data", "[", "'leftHeatingLevel'", "]", "elif", "self", ".", "side", "==", "'right'", ":", "leve...
Return heating level.
[ "Return", "heating", "level", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L53-L62
train
51,632
mezz64/pyEight
pyeight/user.py
EightUser.past_heating_level
def past_heating_level(self, num): """Return a heating level from the past.""" if num > 9: return 0 try: if self.side == 'left': level = self.device.device_data_history[ num]['leftHeatingLevel'] elif self.side == 'right': level = self.device.device_data_history[ num]['rightHeatingLevel'] return level except TypeError: return 0
python
def past_heating_level(self, num): """Return a heating level from the past.""" if num > 9: return 0 try: if self.side == 'left': level = self.device.device_data_history[ num]['leftHeatingLevel'] elif self.side == 'right': level = self.device.device_data_history[ num]['rightHeatingLevel'] return level except TypeError: return 0
[ "def", "past_heating_level", "(", "self", ",", "num", ")", ":", "if", "num", ">", "9", ":", "return", "0", "try", ":", "if", "self", ".", "side", "==", "'left'", ":", "level", "=", "self", ".", "device", ".", "device_data_history", "[", "num", "]", ...
Return a heating level from the past.
[ "Return", "a", "heating", "level", "from", "the", "past", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L64-L78
train
51,633
mezz64/pyEight
pyeight/user.py
EightUser.now_heating
def now_heating(self): """Return current heating state.""" try: if self.side == 'left': heat = self.device.device_data['leftNowHeating'] elif self.side == 'right': heat = self.device.device_data['rightNowHeating'] return heat except TypeError: return None
python
def now_heating(self): """Return current heating state.""" try: if self.side == 'left': heat = self.device.device_data['leftNowHeating'] elif self.side == 'right': heat = self.device.device_data['rightNowHeating'] return heat except TypeError: return None
[ "def", "now_heating", "(", "self", ")", ":", "try", ":", "if", "self", ".", "side", "==", "'left'", ":", "heat", "=", "self", ".", "device", ".", "device_data", "[", "'leftNowHeating'", "]", "elif", "self", ".", "side", "==", "'right'", ":", "heat", ...
Return current heating state.
[ "Return", "current", "heating", "state", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L81-L90
train
51,634
mezz64/pyEight
pyeight/user.py
EightUser.heating_remaining
def heating_remaining(self): """Return seconds of heat time remaining.""" try: if self.side == 'left': timerem = self.device.device_data['leftHeatingDuration'] elif self.side == 'right': timerem = self.device.device_data['rightHeatingDuration'] return timerem except TypeError: return None
python
def heating_remaining(self): """Return seconds of heat time remaining.""" try: if self.side == 'left': timerem = self.device.device_data['leftHeatingDuration'] elif self.side == 'right': timerem = self.device.device_data['rightHeatingDuration'] return timerem except TypeError: return None
[ "def", "heating_remaining", "(", "self", ")", ":", "try", ":", "if", "self", ".", "side", "==", "'left'", ":", "timerem", "=", "self", ".", "device", ".", "device_data", "[", "'leftHeatingDuration'", "]", "elif", "self", ".", "side", "==", "'right'", ":"...
Return seconds of heat time remaining.
[ "Return", "seconds", "of", "heat", "time", "remaining", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L93-L102
train
51,635
mezz64/pyEight
pyeight/user.py
EightUser.last_seen
def last_seen(self): """Return mattress last seen time.""" """ These values seem to be rarely updated correctly in the API. Don't expect accurate results from this property. """ try: if self.side == 'left': lastseen = self.device.device_data['leftPresenceEnd'] elif self.side == 'right': lastseen = self.device.device_data['rightPresenceEnd'] date = datetime.fromtimestamp(int(lastseen)) \ .strftime('%Y-%m-%dT%H:%M:%S') return date except TypeError: return None
python
def last_seen(self): """Return mattress last seen time.""" """ These values seem to be rarely updated correctly in the API. Don't expect accurate results from this property. """ try: if self.side == 'left': lastseen = self.device.device_data['leftPresenceEnd'] elif self.side == 'right': lastseen = self.device.device_data['rightPresenceEnd'] date = datetime.fromtimestamp(int(lastseen)) \ .strftime('%Y-%m-%dT%H:%M:%S') return date except TypeError: return None
[ "def", "last_seen", "(", "self", ")", ":", "\"\"\"\n These values seem to be rarely updated correctly in the API.\n Don't expect accurate results from this property.\n \"\"\"", "try", ":", "if", "self", ".", "side", "==", "'left'", ":", "lastseen", "=", "self...
Return mattress last seen time.
[ "Return", "mattress", "last", "seen", "time", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L105-L121
train
51,636
mezz64/pyEight
pyeight/user.py
EightUser.heating_values
def heating_values(self): """Return a dict of all the current heating values.""" heating_dict = { 'level': self.heating_level, 'target': self.target_heating_level, 'active': self.now_heating, 'remaining': self.heating_remaining, 'last_seen': self.last_seen, } return heating_dict
python
def heating_values(self): """Return a dict of all the current heating values.""" heating_dict = { 'level': self.heating_level, 'target': self.target_heating_level, 'active': self.now_heating, 'remaining': self.heating_remaining, 'last_seen': self.last_seen, } return heating_dict
[ "def", "heating_values", "(", "self", ")", ":", "heating_dict", "=", "{", "'level'", ":", "self", ".", "heating_level", ",", "'target'", ":", "self", ".", "target_heating_level", ",", "'active'", ":", "self", ".", "now_heating", ",", "'remaining'", ":", "sel...
Return a dict of all the current heating values.
[ "Return", "a", "dict", "of", "all", "the", "current", "heating", "values", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L124-L133
train
51,637
mezz64/pyEight
pyeight/user.py
EightUser.current_sleep_stage
def current_sleep_stage(self): """Return sleep stage for in-progress session.""" try: stages = self.intervals[0]['stages'] num_stages = len(stages) if num_stages == 0: return None # API now always has an awake state last in the dict # so always pull the second to last stage while we are # in a processing state if self.current_session_processing: stage = stages[num_stages-2]['stage'] else: stage = stages[num_stages-1]['stage'] # UNRELIABLE... Removing for now. # Check sleep stage against last_seen time to make # sure we don't get stuck in a non-awake state. #delta_elap = datetime.fromtimestamp(time.time()) \ # - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S') #_LOGGER.debug('User elap: %s', delta_elap.total_seconds()) #if stage != 'awake' and delta_elap.total_seconds() > 1800: # Bed hasn't seen us for 30min so set awake. # stage = 'awake' except KeyError: stage = None return stage
python
def current_sleep_stage(self): """Return sleep stage for in-progress session.""" try: stages = self.intervals[0]['stages'] num_stages = len(stages) if num_stages == 0: return None # API now always has an awake state last in the dict # so always pull the second to last stage while we are # in a processing state if self.current_session_processing: stage = stages[num_stages-2]['stage'] else: stage = stages[num_stages-1]['stage'] # UNRELIABLE... Removing for now. # Check sleep stage against last_seen time to make # sure we don't get stuck in a non-awake state. #delta_elap = datetime.fromtimestamp(time.time()) \ # - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S') #_LOGGER.debug('User elap: %s', delta_elap.total_seconds()) #if stage != 'awake' and delta_elap.total_seconds() > 1800: # Bed hasn't seen us for 30min so set awake. # stage = 'awake' except KeyError: stage = None return stage
[ "def", "current_sleep_stage", "(", "self", ")", ":", "try", ":", "stages", "=", "self", ".", "intervals", "[", "0", "]", "[", "'stages'", "]", "num_stages", "=", "len", "(", "stages", ")", "if", "num_stages", "==", "0", ":", "return", "None", "# API no...
Return sleep stage for in-progress session.
[ "Return", "sleep", "stage", "for", "in", "-", "progress", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L160-L188
train
51,638
mezz64/pyEight
pyeight/user.py
EightUser.current_sleep_breakdown
def current_sleep_breakdown(self): """Return durations of sleep stages for in-progress session.""" try: stages = self.intervals[0]['stages'] breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0} for stage in stages: if stage['stage'] == 'awake': breakdown['awake'] += stage['duration'] elif stage['stage'] == 'light': breakdown['light'] += stage['duration'] elif stage['stage'] == 'deep': breakdown['deep'] += stage['duration'] elif stage['stage'] == 'rem': breakdown['rem'] += stage['duration'] except KeyError: breakdown = None return breakdown
python
def current_sleep_breakdown(self): """Return durations of sleep stages for in-progress session.""" try: stages = self.intervals[0]['stages'] breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0} for stage in stages: if stage['stage'] == 'awake': breakdown['awake'] += stage['duration'] elif stage['stage'] == 'light': breakdown['light'] += stage['duration'] elif stage['stage'] == 'deep': breakdown['deep'] += stage['duration'] elif stage['stage'] == 'rem': breakdown['rem'] += stage['duration'] except KeyError: breakdown = None return breakdown
[ "def", "current_sleep_breakdown", "(", "self", ")", ":", "try", ":", "stages", "=", "self", ".", "intervals", "[", "0", "]", "[", "'stages'", "]", "breakdown", "=", "{", "'awake'", ":", "0", ",", "'light'", ":", "0", ",", "'deep'", ":", "0", ",", "...
Return durations of sleep stages for in-progress session.
[ "Return", "durations", "of", "sleep", "stages", "for", "in", "-", "progress", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L200-L216
train
51,639
mezz64/pyEight
pyeight/user.py
EightUser.current_bed_temp
def current_bed_temp(self): """Return current bed temperature for in-progress session.""" try: bedtemps = self.intervals[0]['timeseries']['tempBedC'] num_temps = len(bedtemps) if num_temps == 0: return None bedtemp = bedtemps[num_temps-1][1] except KeyError: bedtemp = None return bedtemp
python
def current_bed_temp(self): """Return current bed temperature for in-progress session.""" try: bedtemps = self.intervals[0]['timeseries']['tempBedC'] num_temps = len(bedtemps) if num_temps == 0: return None bedtemp = bedtemps[num_temps-1][1] except KeyError: bedtemp = None return bedtemp
[ "def", "current_bed_temp", "(", "self", ")", ":", "try", ":", "bedtemps", "=", "self", ".", "intervals", "[", "0", "]", "[", "'timeseries'", "]", "[", "'tempBedC'", "]", "num_temps", "=", "len", "(", "bedtemps", ")", "if", "num_temps", "==", "0", ":", ...
Return current bed temperature for in-progress session.
[ "Return", "current", "bed", "temperature", "for", "in", "-", "progress", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L219-L231
train
51,640
mezz64/pyEight
pyeight/user.py
EightUser.current_room_temp
def current_room_temp(self): """Return current room temperature for in-progress session.""" try: rmtemps = self.intervals[0]['timeseries']['tempRoomC'] num_temps = len(rmtemps) if num_temps == 0: return None rmtemp = rmtemps[num_temps-1][1] except KeyError: rmtemp = None return rmtemp
python
def current_room_temp(self): """Return current room temperature for in-progress session.""" try: rmtemps = self.intervals[0]['timeseries']['tempRoomC'] num_temps = len(rmtemps) if num_temps == 0: return None rmtemp = rmtemps[num_temps-1][1] except KeyError: rmtemp = None return rmtemp
[ "def", "current_room_temp", "(", "self", ")", ":", "try", ":", "rmtemps", "=", "self", ".", "intervals", "[", "0", "]", "[", "'timeseries'", "]", "[", "'tempRoomC'", "]", "num_temps", "=", "len", "(", "rmtemps", ")", "if", "num_temps", "==", "0", ":", ...
Return current room temperature for in-progress session.
[ "Return", "current", "room", "temperature", "for", "in", "-", "progress", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L234-L246
train
51,641
mezz64/pyEight
pyeight/user.py
EightUser.current_resp_rate
def current_resp_rate(self): """Return current respiratory rate for in-progress session.""" try: rates = self.intervals[0]['timeseries']['respiratoryRate'] num_rates = len(rates) if num_rates == 0: return None rate = rates[num_rates-1][1] except KeyError: rate = None return rate
python
def current_resp_rate(self): """Return current respiratory rate for in-progress session.""" try: rates = self.intervals[0]['timeseries']['respiratoryRate'] num_rates = len(rates) if num_rates == 0: return None rate = rates[num_rates-1][1] except KeyError: rate = None return rate
[ "def", "current_resp_rate", "(", "self", ")", ":", "try", ":", "rates", "=", "self", ".", "intervals", "[", "0", "]", "[", "'timeseries'", "]", "[", "'respiratoryRate'", "]", "num_rates", "=", "len", "(", "rates", ")", "if", "num_rates", "==", "0", ":"...
Return current respiratory rate for in-progress session.
[ "Return", "current", "respiratory", "rate", "for", "in", "-", "progress", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L258-L270
train
51,642
mezz64/pyEight
pyeight/user.py
EightUser.current_heart_rate
def current_heart_rate(self): """Return current heart rate for in-progress session.""" try: rates = self.intervals[0]['timeseries']['heartRate'] num_rates = len(rates) if num_rates == 0: return None rate = rates[num_rates-1][1] except KeyError: rate = None return rate
python
def current_heart_rate(self): """Return current heart rate for in-progress session.""" try: rates = self.intervals[0]['timeseries']['heartRate'] num_rates = len(rates) if num_rates == 0: return None rate = rates[num_rates-1][1] except KeyError: rate = None return rate
[ "def", "current_heart_rate", "(", "self", ")", ":", "try", ":", "rates", "=", "self", ".", "intervals", "[", "0", "]", "[", "'timeseries'", "]", "[", "'heartRate'", "]", "num_rates", "=", "len", "(", "rates", ")", "if", "num_rates", "==", "0", ":", "...
Return current heart rate for in-progress session.
[ "Return", "current", "heart", "rate", "for", "in", "-", "progress", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L273-L285
train
51,643
mezz64/pyEight
pyeight/user.py
EightUser.current_values
def current_values(self): """Return a dict of all the 'current' parameters.""" current_dict = { 'date': self.current_session_date, 'score': self.current_sleep_score, 'stage': self.current_sleep_stage, 'breakdown': self.current_sleep_breakdown, 'tnt': self.current_tnt, 'bed_temp': self.current_bed_temp, 'room_temp': self.current_room_temp, 'resp_rate': self.current_resp_rate, 'heart_rate': self.current_heart_rate, 'processing': self.current_session_processing, } return current_dict
python
def current_values(self): """Return a dict of all the 'current' parameters.""" current_dict = { 'date': self.current_session_date, 'score': self.current_sleep_score, 'stage': self.current_sleep_stage, 'breakdown': self.current_sleep_breakdown, 'tnt': self.current_tnt, 'bed_temp': self.current_bed_temp, 'room_temp': self.current_room_temp, 'resp_rate': self.current_resp_rate, 'heart_rate': self.current_heart_rate, 'processing': self.current_session_processing, } return current_dict
[ "def", "current_values", "(", "self", ")", ":", "current_dict", "=", "{", "'date'", ":", "self", ".", "current_session_date", ",", "'score'", ":", "self", ".", "current_sleep_score", ",", "'stage'", ":", "self", ".", "current_sleep_stage", ",", "'breakdown'", ...
Return a dict of all the 'current' parameters.
[ "Return", "a", "dict", "of", "all", "the", "current", "parameters", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L288-L302
train
51,644
mezz64/pyEight
pyeight/user.py
EightUser.last_sleep_breakdown
def last_sleep_breakdown(self): """Return durations of sleep stages for last complete session.""" try: stages = self.intervals[1]['stages'] except KeyError: return None breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0} for stage in stages: if stage['stage'] == 'awake': breakdown['awake'] += stage['duration'] elif stage['stage'] == 'light': breakdown['light'] += stage['duration'] elif stage['stage'] == 'deep': breakdown['deep'] += stage['duration'] elif stage['stage'] == 'rem': breakdown['rem'] += stage['duration'] return breakdown
python
def last_sleep_breakdown(self): """Return durations of sleep stages for last complete session.""" try: stages = self.intervals[1]['stages'] except KeyError: return None breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0} for stage in stages: if stage['stage'] == 'awake': breakdown['awake'] += stage['duration'] elif stage['stage'] == 'light': breakdown['light'] += stage['duration'] elif stage['stage'] == 'deep': breakdown['deep'] += stage['duration'] elif stage['stage'] == 'rem': breakdown['rem'] += stage['duration'] return breakdown
[ "def", "last_sleep_breakdown", "(", "self", ")", ":", "try", ":", "stages", "=", "self", ".", "intervals", "[", "1", "]", "[", "'stages'", "]", "except", "KeyError", ":", "return", "None", "breakdown", "=", "{", "'awake'", ":", "0", ",", "'light'", ":"...
Return durations of sleep stages for last complete session.
[ "Return", "durations", "of", "sleep", "stages", "for", "last", "complete", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L336-L353
train
51,645
mezz64/pyEight
pyeight/user.py
EightUser.last_bed_temp
def last_bed_temp(self): """Return avg bed temperature for last session.""" try: bedtemps = self.intervals[1]['timeseries']['tempBedC'] except KeyError: return None tmp = 0 num_temps = len(bedtemps) if num_temps == 0: return None for temp in bedtemps: tmp += temp[1] bedtemp = tmp/num_temps return bedtemp
python
def last_bed_temp(self): """Return avg bed temperature for last session.""" try: bedtemps = self.intervals[1]['timeseries']['tempBedC'] except KeyError: return None tmp = 0 num_temps = len(bedtemps) if num_temps == 0: return None for temp in bedtemps: tmp += temp[1] bedtemp = tmp/num_temps return bedtemp
[ "def", "last_bed_temp", "(", "self", ")", ":", "try", ":", "bedtemps", "=", "self", ".", "intervals", "[", "1", "]", "[", "'timeseries'", "]", "[", "'tempBedC'", "]", "except", "KeyError", ":", "return", "None", "tmp", "=", "0", "num_temps", "=", "len"...
Return avg bed temperature for last session.
[ "Return", "avg", "bed", "temperature", "for", "last", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L356-L371
train
51,646
mezz64/pyEight
pyeight/user.py
EightUser.last_room_temp
def last_room_temp(self): """Return avg room temperature for last session.""" try: rmtemps = self.intervals[1]['timeseries']['tempRoomC'] except KeyError: return None tmp = 0 num_temps = len(rmtemps) if num_temps == 0: return None for temp in rmtemps: tmp += temp[1] rmtemp = tmp/num_temps return rmtemp
python
def last_room_temp(self): """Return avg room temperature for last session.""" try: rmtemps = self.intervals[1]['timeseries']['tempRoomC'] except KeyError: return None tmp = 0 num_temps = len(rmtemps) if num_temps == 0: return None for temp in rmtemps: tmp += temp[1] rmtemp = tmp/num_temps return rmtemp
[ "def", "last_room_temp", "(", "self", ")", ":", "try", ":", "rmtemps", "=", "self", ".", "intervals", "[", "1", "]", "[", "'timeseries'", "]", "[", "'tempRoomC'", "]", "except", "KeyError", ":", "return", "None", "tmp", "=", "0", "num_temps", "=", "len...
Return avg room temperature for last session.
[ "Return", "avg", "room", "temperature", "for", "last", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L374-L389
train
51,647
mezz64/pyEight
pyeight/user.py
EightUser.last_heart_rate
def last_heart_rate(self): """Return avg heart rate for last session.""" try: rates = self.intervals[1]['timeseries']['heartRate'] except KeyError: return None tmp = 0 num_rates = len(rates) if num_rates == 0: return None for rate in rates: tmp += rate[1] rateavg = tmp/num_rates return rateavg
python
def last_heart_rate(self): """Return avg heart rate for last session.""" try: rates = self.intervals[1]['timeseries']['heartRate'] except KeyError: return None tmp = 0 num_rates = len(rates) if num_rates == 0: return None for rate in rates: tmp += rate[1] rateavg = tmp/num_rates return rateavg
[ "def", "last_heart_rate", "(", "self", ")", ":", "try", ":", "rates", "=", "self", ".", "intervals", "[", "1", "]", "[", "'timeseries'", "]", "[", "'heartRate'", "]", "except", "KeyError", ":", "return", "None", "tmp", "=", "0", "num_rates", "=", "len"...
Return avg heart rate for last session.
[ "Return", "avg", "heart", "rate", "for", "last", "session", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L419-L434
train
51,648
mezz64/pyEight
pyeight/user.py
EightUser.last_values
def last_values(self): """Return a dict of all the 'last' parameters.""" last_dict = { 'date': self.last_session_date, 'score': self.last_sleep_score, 'breakdown': self.last_sleep_breakdown, 'tnt': self.last_tnt, 'bed_temp': self.last_bed_temp, 'room_temp': self.last_room_temp, 'resp_rate': self.last_resp_rate, 'heart_rate': self.last_heart_rate, 'processing': self.last_session_processing, } return last_dict
python
def last_values(self): """Return a dict of all the 'last' parameters.""" last_dict = { 'date': self.last_session_date, 'score': self.last_sleep_score, 'breakdown': self.last_sleep_breakdown, 'tnt': self.last_tnt, 'bed_temp': self.last_bed_temp, 'room_temp': self.last_room_temp, 'resp_rate': self.last_resp_rate, 'heart_rate': self.last_heart_rate, 'processing': self.last_session_processing, } return last_dict
[ "def", "last_values", "(", "self", ")", ":", "last_dict", "=", "{", "'date'", ":", "self", ".", "last_session_date", ",", "'score'", ":", "self", ".", "last_sleep_score", ",", "'breakdown'", ":", "self", ".", "last_sleep_breakdown", ",", "'tnt'", ":", "self"...
Return a dict of all the 'last' parameters.
[ "Return", "a", "dict", "of", "all", "the", "last", "parameters", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L437-L450
train
51,649
mezz64/pyEight
pyeight/user.py
EightUser.heating_stats
def heating_stats(self): """Calculate some heating data stats.""" local_5 = [] local_10 = [] for i in range(0, 10): level = self.past_heating_level(i) if level == 0: _LOGGER.debug('Cant calculate stats yet...') return if i < 5: local_5.append(level) local_10.append(level) _LOGGER.debug('%s Heating History: %s', self.side, local_10) try: # Average of 5min on the history dict. fiveminavg = statistics.mean(local_5) tenminavg = statistics.mean(local_10) _LOGGER.debug('%s Heating 5 min avg: %s', self.side, fiveminavg) _LOGGER.debug('%s Heating 10 min avg: %s', self.side, tenminavg) # Standard deviation fivestdev = statistics.stdev(local_5) tenstdev = statistics.stdev(local_10) _LOGGER.debug('%s Heating 5 min stdev: %s', self.side, fivestdev) _LOGGER.debug('%s Heating 10 min stdev: %s', self.side, tenstdev) # Variance fivevar = statistics.variance(local_5) tenvar = statistics.variance(local_10) _LOGGER.debug('%s Heating 5 min variance: %s', self.side, fivevar) _LOGGER.debug('%s Heating 10 min variance: %s', self.side, tenvar) except: _LOGGER.debug('Cant calculate stats yet...')
python
def heating_stats(self): """Calculate some heating data stats.""" local_5 = [] local_10 = [] for i in range(0, 10): level = self.past_heating_level(i) if level == 0: _LOGGER.debug('Cant calculate stats yet...') return if i < 5: local_5.append(level) local_10.append(level) _LOGGER.debug('%s Heating History: %s', self.side, local_10) try: # Average of 5min on the history dict. fiveminavg = statistics.mean(local_5) tenminavg = statistics.mean(local_10) _LOGGER.debug('%s Heating 5 min avg: %s', self.side, fiveminavg) _LOGGER.debug('%s Heating 10 min avg: %s', self.side, tenminavg) # Standard deviation fivestdev = statistics.stdev(local_5) tenstdev = statistics.stdev(local_10) _LOGGER.debug('%s Heating 5 min stdev: %s', self.side, fivestdev) _LOGGER.debug('%s Heating 10 min stdev: %s', self.side, tenstdev) # Variance fivevar = statistics.variance(local_5) tenvar = statistics.variance(local_10) _LOGGER.debug('%s Heating 5 min variance: %s', self.side, fivevar) _LOGGER.debug('%s Heating 10 min variance: %s', self.side, tenvar) except: _LOGGER.debug('Cant calculate stats yet...')
[ "def", "heating_stats", "(", "self", ")", ":", "local_5", "=", "[", "]", "local_10", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "10", ")", ":", "level", "=", "self", ".", "past_heating_level", "(", "i", ")", "if", "level", "==", "0"...
Calculate some heating data stats.
[ "Calculate", "some", "heating", "data", "stats", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L452-L487
train
51,650
mezz64/pyEight
pyeight/user.py
EightUser.dynamic_presence
def dynamic_presence(self): """ Determine presence based on bed heating level and end presence time reported by the api. Idea originated from Alex Lee Yuk Cheung SmartThings Code. """ # self.heating_stats() if not self.presence: if self.heating_level > 50: # Can likely make this better if not self.now_heating: self.presence = True elif self.heating_level - self.target_heating_level >= 8: self.presence = True elif self.heating_level > 25: # Catch rising edge if self.past_heating_level(0) - self.past_heating_level(1) >= 2 \ and self.past_heating_level(1) - self.past_heating_level(2) >= 2 \ and self.past_heating_level(2) - self.past_heating_level(3) >= 2: # Values are increasing so we are likely in bed if not self.now_heating: self.presence = True elif self.heating_level - self.target_heating_level >= 8: self.presence = True elif self.presence: if self.heating_level <= 15: # Failsafe, very slow self.presence = False elif self.heating_level < 50: if self.past_heating_level(0) - self.past_heating_level(1) < 0 \ and self.past_heating_level(1) - self.past_heating_level(2) < 0 \ and self.past_heating_level(2) - self.past_heating_level(3) < 0: # Values are decreasing so we are likely out of bed self.presence = False # Last seen can lag real-time by up to 35min so this is # mostly a backup to using the heat values. # seen_delta = datetime.fromtimestamp(time.time()) \ # - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S') # _LOGGER.debug('%s Last seen time delta: %s', self.side, # seen_delta.total_seconds()) # if self.presence and seen_delta.total_seconds() > 2100: # self.presence = False _LOGGER.debug('%s Presence Results: %s', self.side, self.presence)
python
def dynamic_presence(self): """ Determine presence based on bed heating level and end presence time reported by the api. Idea originated from Alex Lee Yuk Cheung SmartThings Code. """ # self.heating_stats() if not self.presence: if self.heating_level > 50: # Can likely make this better if not self.now_heating: self.presence = True elif self.heating_level - self.target_heating_level >= 8: self.presence = True elif self.heating_level > 25: # Catch rising edge if self.past_heating_level(0) - self.past_heating_level(1) >= 2 \ and self.past_heating_level(1) - self.past_heating_level(2) >= 2 \ and self.past_heating_level(2) - self.past_heating_level(3) >= 2: # Values are increasing so we are likely in bed if not self.now_heating: self.presence = True elif self.heating_level - self.target_heating_level >= 8: self.presence = True elif self.presence: if self.heating_level <= 15: # Failsafe, very slow self.presence = False elif self.heating_level < 50: if self.past_heating_level(0) - self.past_heating_level(1) < 0 \ and self.past_heating_level(1) - self.past_heating_level(2) < 0 \ and self.past_heating_level(2) - self.past_heating_level(3) < 0: # Values are decreasing so we are likely out of bed self.presence = False # Last seen can lag real-time by up to 35min so this is # mostly a backup to using the heat values. # seen_delta = datetime.fromtimestamp(time.time()) \ # - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S') # _LOGGER.debug('%s Last seen time delta: %s', self.side, # seen_delta.total_seconds()) # if self.presence and seen_delta.total_seconds() > 2100: # self.presence = False _LOGGER.debug('%s Presence Results: %s', self.side, self.presence)
[ "def", "dynamic_presence", "(", "self", ")", ":", "# self.heating_stats()", "if", "not", "self", ".", "presence", ":", "if", "self", ".", "heating_level", ">", "50", ":", "# Can likely make this better", "if", "not", "self", ".", "now_heating", ":", "self", "....
Determine presence based on bed heating level and end presence time reported by the api. Idea originated from Alex Lee Yuk Cheung SmartThings Code.
[ "Determine", "presence", "based", "on", "bed", "heating", "level", "and", "end", "presence", "time", "reported", "by", "the", "api", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L494-L542
train
51,651
mezz64/pyEight
pyeight/user.py
EightUser.set_heating_level
async def set_heating_level(self, level, duration=0): """Update heating data json.""" url = '{}/devices/{}'.format(API_URL, self.device.deviceid) # Catch bad inputs level = 10 if level < 10 else level level = 100 if level > 100 else level if self.side == 'left': data = { 'leftHeatingDuration': duration, 'leftTargetHeatingLevel': level } elif self.side == 'right': data = { 'rightHeatingDuration': duration, 'rightTargetHeatingLevel': level } set_heat = await self.device.api_put(url, data) if set_heat is None: _LOGGER.error('Unable to set eight heating level.') else: # Standard device json is returned after setting self.device.handle_device_json(set_heat['device'])
python
async def set_heating_level(self, level, duration=0): """Update heating data json.""" url = '{}/devices/{}'.format(API_URL, self.device.deviceid) # Catch bad inputs level = 10 if level < 10 else level level = 100 if level > 100 else level if self.side == 'left': data = { 'leftHeatingDuration': duration, 'leftTargetHeatingLevel': level } elif self.side == 'right': data = { 'rightHeatingDuration': duration, 'rightTargetHeatingLevel': level } set_heat = await self.device.api_put(url, data) if set_heat is None: _LOGGER.error('Unable to set eight heating level.') else: # Standard device json is returned after setting self.device.handle_device_json(set_heat['device'])
[ "async", "def", "set_heating_level", "(", "self", ",", "level", ",", "duration", "=", "0", ")", ":", "url", "=", "'{}/devices/{}'", ".", "format", "(", "API_URL", ",", "self", ".", "device", ".", "deviceid", ")", "# Catch bad inputs", "level", "=", "10", ...
Update heating data json.
[ "Update", "heating", "data", "json", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L555-L579
train
51,652
mezz64/pyEight
pyeight/user.py
EightUser.update_trend_data
async def update_trend_data(self, startdate, enddate): """Update trends data json for specified time period.""" url = '{}/users/{}/trends'.format(API_URL, self.userid) params = { 'tz': self.device.tzone, 'from': startdate, 'to': enddate } trends = await self.device.api_get(url, params) if trends is None: _LOGGER.error('Unable to fetch eight trend data.') else: self.trends = trends['days']
python
async def update_trend_data(self, startdate, enddate): """Update trends data json for specified time period.""" url = '{}/users/{}/trends'.format(API_URL, self.userid) params = { 'tz': self.device.tzone, 'from': startdate, 'to': enddate } trends = await self.device.api_get(url, params) if trends is None: _LOGGER.error('Unable to fetch eight trend data.') else: self.trends = trends['days']
[ "async", "def", "update_trend_data", "(", "self", ",", "startdate", ",", "enddate", ")", ":", "url", "=", "'{}/users/{}/trends'", ".", "format", "(", "API_URL", ",", "self", ".", "userid", ")", "params", "=", "{", "'tz'", ":", "self", ".", "device", ".",...
Update trends data json for specified time period.
[ "Update", "trends", "data", "json", "for", "specified", "time", "period", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L581-L594
train
51,653
mezz64/pyEight
pyeight/user.py
EightUser.update_intervals_data
async def update_intervals_data(self): """Update intervals data json for specified time period.""" url = '{}/users/{}/intervals'.format(API_URL, self.userid) intervals = await self.device.api_get(url) if intervals is None: _LOGGER.error('Unable to fetch eight intervals data.') else: self.intervals = intervals['intervals']
python
async def update_intervals_data(self): """Update intervals data json for specified time period.""" url = '{}/users/{}/intervals'.format(API_URL, self.userid) intervals = await self.device.api_get(url) if intervals is None: _LOGGER.error('Unable to fetch eight intervals data.') else: self.intervals = intervals['intervals']
[ "async", "def", "update_intervals_data", "(", "self", ")", ":", "url", "=", "'{}/users/{}/intervals'", ".", "format", "(", "API_URL", ",", "self", ".", "userid", ")", "intervals", "=", "await", "self", ".", "device", ".", "api_get", "(", "url", ")", "if", ...
Update intervals data json for specified time period.
[ "Update", "intervals", "data", "json", "for", "specified", "time", "period", "." ]
e557e4e6876f490d0964298e9475d68b64222d4f
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L596-L604
train
51,654
erikvw/django-collect-offline
django_collect_offline/transaction/transaction_deserializer.py
save
def save(obj=None, m2m_data=None): """Saves a deserialized model object. Uses save_base to avoid running code in model.save() and to avoid triggering signals (if raw=True). """ m2m_data = {} if m2m_data is None else m2m_data obj.save_base(raw=True) for attr, values in m2m_data.items(): for value in values: getattr(obj, attr).add(value)
python
def save(obj=None, m2m_data=None): """Saves a deserialized model object. Uses save_base to avoid running code in model.save() and to avoid triggering signals (if raw=True). """ m2m_data = {} if m2m_data is None else m2m_data obj.save_base(raw=True) for attr, values in m2m_data.items(): for value in values: getattr(obj, attr).add(value)
[ "def", "save", "(", "obj", "=", "None", ",", "m2m_data", "=", "None", ")", ":", "m2m_data", "=", "{", "}", "if", "m2m_data", "is", "None", "else", "m2m_data", "obj", ".", "save_base", "(", "raw", "=", "True", ")", "for", "attr", ",", "values", "in"...
Saves a deserialized model object. Uses save_base to avoid running code in model.save() and to avoid triggering signals (if raw=True).
[ "Saves", "a", "deserialized", "model", "object", "." ]
3d5efd66c68e2db4b060a82b070ae490dc399ca7
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L16-L26
train
51,655
erikvw/django-collect-offline
django_collect_offline/transaction/transaction_deserializer.py
TransactionDeserializer.deserialize_transactions
def deserialize_transactions(self, transactions=None, deserialize_only=None): """Deserializes the encrypted serialized model instances, tx, in a queryset of transactions. Note: each transaction instance contains encrypted JSON text that represents just ONE model instance. """ if ( not self.allow_self and transactions.filter(producer=socket.gethostname()).exists() ): raise TransactionDeserializerError( f"Not deserializing own transactions. Got " f"allow_self=False, hostname={socket.gethostname()}" ) for transaction in transactions: json_text = self.aes_decrypt(cipher_text=transaction.tx) json_text = self.custom_parser(json_text) deserialized = next(self.deserialize(json_text=json_text)) if not deserialize_only: if transaction.action == DELETE: deserialized.object.delete() else: self.save(obj=deserialized.object, m2m_data=deserialized.m2m_data) transaction.is_consumed = True transaction.save()
python
def deserialize_transactions(self, transactions=None, deserialize_only=None): """Deserializes the encrypted serialized model instances, tx, in a queryset of transactions. Note: each transaction instance contains encrypted JSON text that represents just ONE model instance. """ if ( not self.allow_self and transactions.filter(producer=socket.gethostname()).exists() ): raise TransactionDeserializerError( f"Not deserializing own transactions. Got " f"allow_self=False, hostname={socket.gethostname()}" ) for transaction in transactions: json_text = self.aes_decrypt(cipher_text=transaction.tx) json_text = self.custom_parser(json_text) deserialized = next(self.deserialize(json_text=json_text)) if not deserialize_only: if transaction.action == DELETE: deserialized.object.delete() else: self.save(obj=deserialized.object, m2m_data=deserialized.m2m_data) transaction.is_consumed = True transaction.save()
[ "def", "deserialize_transactions", "(", "self", ",", "transactions", "=", "None", ",", "deserialize_only", "=", "None", ")", ":", "if", "(", "not", "self", ".", "allow_self", "and", "transactions", ".", "filter", "(", "producer", "=", "socket", ".", "gethost...
Deserializes the encrypted serialized model instances, tx, in a queryset of transactions. Note: each transaction instance contains encrypted JSON text that represents just ONE model instance.
[ "Deserializes", "the", "encrypted", "serialized", "model", "instances", "tx", "in", "a", "queryset", "of", "transactions", "." ]
3d5efd66c68e2db4b060a82b070ae490dc399ca7
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L49-L75
train
51,656
erikvw/django-collect-offline
django_collect_offline/transaction/transaction_deserializer.py
TransactionDeserializer.custom_parser
def custom_parser(self, json_text=None): """Runs json_text thru custom parsers. """ app_config = django_apps.get_app_config("django_collect_offline") for json_parser in app_config.custom_json_parsers: json_text = json_parser(json_text) return json_text
python
def custom_parser(self, json_text=None): """Runs json_text thru custom parsers. """ app_config = django_apps.get_app_config("django_collect_offline") for json_parser in app_config.custom_json_parsers: json_text = json_parser(json_text) return json_text
[ "def", "custom_parser", "(", "self", ",", "json_text", "=", "None", ")", ":", "app_config", "=", "django_apps", ".", "get_app_config", "(", "\"django_collect_offline\"", ")", "for", "json_parser", "in", "app_config", ".", "custom_json_parsers", ":", "json_text", "...
Runs json_text thru custom parsers.
[ "Runs", "json_text", "thru", "custom", "parsers", "." ]
3d5efd66c68e2db4b060a82b070ae490dc399ca7
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L77-L83
train
51,657
RedHatQE/python-stitches
stitches/connection.py
Connection.cli
def cli(self): """ cli lazy property """ client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(hostname=self.private_hostname, username=self.username, key_filename=self.key_filename, timeout=self.timeout, look_for_keys=self.look_for_keys) # set keepalive transport = client.get_transport() transport.set_keepalive(3) return client
python
def cli(self): """ cli lazy property """ client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(hostname=self.private_hostname, username=self.username, key_filename=self.key_filename, timeout=self.timeout, look_for_keys=self.look_for_keys) # set keepalive transport = client.get_transport() transport.set_keepalive(3) return client
[ "def", "cli", "(", "self", ")", ":", "client", "=", "paramiko", ".", "SSHClient", "(", ")", "client", ".", "set_missing_host_key_policy", "(", "paramiko", ".", "AutoAddPolicy", "(", ")", ")", "client", ".", "connect", "(", "hostname", "=", "self", ".", "...
cli lazy property
[ "cli", "lazy", "property" ]
957e9895e64ffd3b8157b38b9cce414969509288
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L111-L124
train
51,658
RedHatQE/python-stitches
stitches/connection.py
Connection.channel
def channel(self): """ channel lazy property """ # start shell, non-blocking channel chan = self.cli.invoke_shell(width=360, height=80) chan.setblocking(0) # set channel timeout chan.settimeout(10) # now waiting for shell prompt ('username@') result = "" count = 0 while count < 10: try: recv_part = chan.recv(16384).decode() result += recv_part except socket.timeout: # socket.timeout here means 'no more data' pass if result.find('%s@' % self.username) != -1: return chan time.sleep(1) count += 1 # failed to get shell prompt on channel :-( raise StitchesConnectionException("Failed to get shell prompt")
python
def channel(self): """ channel lazy property """ # start shell, non-blocking channel chan = self.cli.invoke_shell(width=360, height=80) chan.setblocking(0) # set channel timeout chan.settimeout(10) # now waiting for shell prompt ('username@') result = "" count = 0 while count < 10: try: recv_part = chan.recv(16384).decode() result += recv_part except socket.timeout: # socket.timeout here means 'no more data' pass if result.find('%s@' % self.username) != -1: return chan time.sleep(1) count += 1 # failed to get shell prompt on channel :-( raise StitchesConnectionException("Failed to get shell prompt")
[ "def", "channel", "(", "self", ")", ":", "# start shell, non-blocking channel", "chan", "=", "self", ".", "cli", ".", "invoke_shell", "(", "width", "=", "360", ",", "height", "=", "80", ")", "chan", ".", "setblocking", "(", "0", ")", "# set channel timeout",...
channel lazy property
[ "channel", "lazy", "property" ]
957e9895e64ffd3b8157b38b9cce414969509288
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L127-L150
train
51,659
RedHatQE/python-stitches
stitches/connection.py
Connection.pbm
def pbm(self): """ Plumbum lazy property """ if not self.disable_rpyc: from plumbum import SshMachine return SshMachine(host=self.private_hostname, user=self.username, keyfile=self.key_filename, ssh_opts=["-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no"]) else: return None
python
def pbm(self): """ Plumbum lazy property """ if not self.disable_rpyc: from plumbum import SshMachine return SshMachine(host=self.private_hostname, user=self.username, keyfile=self.key_filename, ssh_opts=["-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no"]) else: return None
[ "def", "pbm", "(", "self", ")", ":", "if", "not", "self", ".", "disable_rpyc", ":", "from", "plumbum", "import", "SshMachine", "return", "SshMachine", "(", "host", "=", "self", ".", "private_hostname", ",", "user", "=", "self", ".", "username", ",", "key...
Plumbum lazy property
[ "Plumbum", "lazy", "property" ]
957e9895e64ffd3b8157b38b9cce414969509288
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L158-L167
train
51,660
RedHatQE/python-stitches
stitches/connection.py
Connection.rpyc
def rpyc(self): """ RPyC lazy property """ if not self.disable_rpyc: try: import rpyc devnull_fd = open("/dev/null", "w") rpyc_dirname = os.path.dirname(rpyc.__file__) rnd_id = ''.join(random.choice(string.ascii_lowercase) for x in range(10)) pid_filename = "/tmp/%s.pid" % rnd_id pid_dest_filename = "/tmp/%s%s.pid" % (rnd_id, rnd_id) rnd_filename = "/tmp/" + rnd_id + ".tar.gz" rnd_dest_filename = "/tmp/" + rnd_id + rnd_id + ".tar.gz" subprocess.check_call(["tar", "-cz", "--exclude", "*.pyc", "--exclude", "*.pyo", "--transform", "s,%s,%s," % (rpyc_dirname[1:][:-5], rnd_id), rpyc_dirname, "-f", rnd_filename], stdout=devnull_fd, stderr=devnull_fd) devnull_fd.close() self.sftp.put(rnd_filename, rnd_dest_filename) os.remove(rnd_filename) self.recv_exit_status("tar -zxvf %s -C /tmp" % rnd_dest_filename, 10) server_script = r""" import os print os.environ from rpyc.utils.server import ThreadedServer from rpyc import SlaveService import sys t = ThreadedServer(SlaveService, hostname = 'localhost', port = 0, reuse_addr = True) fd = open('""" + pid_filename + r"""', 'w') fd.write(str(t.port)) fd.close() t.start() """ command = "echo \"%s\" | PYTHONPATH=\"/tmp/%s\" python " % (server_script, rnd_id) self.stdin_rpyc, self.stdout_rpyc, self.stderr_rpyc = self.exec_command(command, get_pty=True) self.recv_exit_status("while [ ! -f %s ]; do sleep 1; done" % (pid_filename), 10) self.sftp.get(pid_filename, pid_dest_filename) pid_fd = open(pid_dest_filename, 'r') port = int(pid_fd.read()) pid_fd.close() os.remove(pid_dest_filename) return rpyc.classic.ssh_connect(self.pbm, port) except Exception as err: self.logger.debug("Failed to setup rpyc: %s" % err) return None else: return None
python
def rpyc(self): """ RPyC lazy property """ if not self.disable_rpyc: try: import rpyc devnull_fd = open("/dev/null", "w") rpyc_dirname = os.path.dirname(rpyc.__file__) rnd_id = ''.join(random.choice(string.ascii_lowercase) for x in range(10)) pid_filename = "/tmp/%s.pid" % rnd_id pid_dest_filename = "/tmp/%s%s.pid" % (rnd_id, rnd_id) rnd_filename = "/tmp/" + rnd_id + ".tar.gz" rnd_dest_filename = "/tmp/" + rnd_id + rnd_id + ".tar.gz" subprocess.check_call(["tar", "-cz", "--exclude", "*.pyc", "--exclude", "*.pyo", "--transform", "s,%s,%s," % (rpyc_dirname[1:][:-5], rnd_id), rpyc_dirname, "-f", rnd_filename], stdout=devnull_fd, stderr=devnull_fd) devnull_fd.close() self.sftp.put(rnd_filename, rnd_dest_filename) os.remove(rnd_filename) self.recv_exit_status("tar -zxvf %s -C /tmp" % rnd_dest_filename, 10) server_script = r""" import os print os.environ from rpyc.utils.server import ThreadedServer from rpyc import SlaveService import sys t = ThreadedServer(SlaveService, hostname = 'localhost', port = 0, reuse_addr = True) fd = open('""" + pid_filename + r"""', 'w') fd.write(str(t.port)) fd.close() t.start() """ command = "echo \"%s\" | PYTHONPATH=\"/tmp/%s\" python " % (server_script, rnd_id) self.stdin_rpyc, self.stdout_rpyc, self.stderr_rpyc = self.exec_command(command, get_pty=True) self.recv_exit_status("while [ ! -f %s ]; do sleep 1; done" % (pid_filename), 10) self.sftp.get(pid_filename, pid_dest_filename) pid_fd = open(pid_dest_filename, 'r') port = int(pid_fd.read()) pid_fd.close() os.remove(pid_dest_filename) return rpyc.classic.ssh_connect(self.pbm, port) except Exception as err: self.logger.debug("Failed to setup rpyc: %s" % err) return None else: return None
[ "def", "rpyc", "(", "self", ")", ":", "if", "not", "self", ".", "disable_rpyc", ":", "try", ":", "import", "rpyc", "devnull_fd", "=", "open", "(", "\"/dev/null\"", ",", "\"w\"", ")", "rpyc_dirname", "=", "os", ".", "path", ".", "dirname", "(", "rpyc", ...
RPyC lazy property
[ "RPyC", "lazy", "property" ]
957e9895e64ffd3b8157b38b9cce414969509288
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L170-L219
train
51,661
RedHatQE/python-stitches
stitches/connection.py
Connection.exec_command
def exec_command(self, command, bufsize=-1, get_pty=False): """ Execute a command in the connection @param command: command to execute @type command: str @param bufsize: buffer size @type bufsize: int @param get_pty: get pty @type get_pty: bool @return: the stdin, stdout, and stderr of the executing command @rtype: tuple(L{paramiko.ChannelFile}, L{paramiko.ChannelFile}, L{paramiko.ChannelFile}) @raise SSHException: if the server fails to execute the command """ self.last_command = command return self.cli.exec_command(command, bufsize, get_pty=get_pty)
python
def exec_command(self, command, bufsize=-1, get_pty=False): """ Execute a command in the connection @param command: command to execute @type command: str @param bufsize: buffer size @type bufsize: int @param get_pty: get pty @type get_pty: bool @return: the stdin, stdout, and stderr of the executing command @rtype: tuple(L{paramiko.ChannelFile}, L{paramiko.ChannelFile}, L{paramiko.ChannelFile}) @raise SSHException: if the server fails to execute the command """ self.last_command = command return self.cli.exec_command(command, bufsize, get_pty=get_pty)
[ "def", "exec_command", "(", "self", ",", "command", ",", "bufsize", "=", "-", "1", ",", "get_pty", "=", "False", ")", ":", "self", ".", "last_command", "=", "command", "return", "self", ".", "cli", ".", "exec_command", "(", "command", ",", "bufsize", "...
Execute a command in the connection @param command: command to execute @type command: str @param bufsize: buffer size @type bufsize: int @param get_pty: get pty @type get_pty: bool @return: the stdin, stdout, and stderr of the executing command @rtype: tuple(L{paramiko.ChannelFile}, L{paramiko.ChannelFile}, L{paramiko.ChannelFile}) @raise SSHException: if the server fails to execute the command
[ "Execute", "a", "command", "in", "the", "connection" ]
957e9895e64ffd3b8157b38b9cce414969509288
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L252-L272
train
51,662
RedHatQE/python-stitches
stitches/connection.py
Connection.recv_exit_status
def recv_exit_status(self, command, timeout=10, get_pty=False): """ Execute a command and get its return value @param command: command to execute @type command: str @param timeout: command execution timeout @type timeout: int @param get_pty: get pty @type get_pty: bool @return: the exit code of the process or None in case of timeout @rtype: int or None """ status = None self.last_command = command stdin, stdout, stderr = self.cli.exec_command(command, get_pty=get_pty) if stdout and stderr and stdin: for _ in range(timeout): if stdout.channel.exit_status_ready(): status = stdout.channel.recv_exit_status() break time.sleep(1) self.last_stdout = stdout.read() self.last_stderr = stderr.read() stdin.close() stdout.close() stderr.close() return status
python
def recv_exit_status(self, command, timeout=10, get_pty=False): """ Execute a command and get its return value @param command: command to execute @type command: str @param timeout: command execution timeout @type timeout: int @param get_pty: get pty @type get_pty: bool @return: the exit code of the process or None in case of timeout @rtype: int or None """ status = None self.last_command = command stdin, stdout, stderr = self.cli.exec_command(command, get_pty=get_pty) if stdout and stderr and stdin: for _ in range(timeout): if stdout.channel.exit_status_ready(): status = stdout.channel.recv_exit_status() break time.sleep(1) self.last_stdout = stdout.read() self.last_stderr = stderr.read() stdin.close() stdout.close() stderr.close() return status
[ "def", "recv_exit_status", "(", "self", ",", "command", ",", "timeout", "=", "10", ",", "get_pty", "=", "False", ")", ":", "status", "=", "None", "self", ".", "last_command", "=", "command", "stdin", ",", "stdout", ",", "stderr", "=", "self", ".", "cli...
Execute a command and get its return value @param command: command to execute @type command: str @param timeout: command execution timeout @type timeout: int @param get_pty: get pty @type get_pty: bool @return: the exit code of the process or None in case of timeout @rtype: int or None
[ "Execute", "a", "command", "and", "get", "its", "return", "value" ]
957e9895e64ffd3b8157b38b9cce414969509288
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L274-L306
train
51,663
MonashBI/arcana
arcana/pipeline/provenance.py
Record.load
def load(cls, pipeline_name, frequency, subject_id, visit_id, from_study, path): """ Loads a saved provenance object from a JSON file Parameters ---------- path : str Path to the provenance file frequency : str The frequency of the record subject_id : str | None The subject ID of the provenance record visit_id : str | None The visit ID of the provenance record from_study : str Name of the study the derivatives were created for Returns ------- record : Record The loaded provenance record """ with open(path) as f: prov = json.load(f) return Record(pipeline_name, frequency, subject_id, visit_id, from_study, prov)
python
def load(cls, pipeline_name, frequency, subject_id, visit_id, from_study, path): """ Loads a saved provenance object from a JSON file Parameters ---------- path : str Path to the provenance file frequency : str The frequency of the record subject_id : str | None The subject ID of the provenance record visit_id : str | None The visit ID of the provenance record from_study : str Name of the study the derivatives were created for Returns ------- record : Record The loaded provenance record """ with open(path) as f: prov = json.load(f) return Record(pipeline_name, frequency, subject_id, visit_id, from_study, prov)
[ "def", "load", "(", "cls", ",", "pipeline_name", ",", "frequency", ",", "subject_id", ",", "visit_id", ",", "from_study", ",", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "prov", "=", "json", ".", "load", "(", "f", ")", "ret...
Loads a saved provenance object from a JSON file Parameters ---------- path : str Path to the provenance file frequency : str The frequency of the record subject_id : str | None The subject ID of the provenance record visit_id : str | None The visit ID of the provenance record from_study : str Name of the study the derivatives were created for Returns ------- record : Record The loaded provenance record
[ "Loads", "a", "saved", "provenance", "object", "from", "a", "JSON", "file" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/provenance.py#L143-L169
train
51,664
MonashBI/arcana
arcana/pipeline/provenance.py
Record.mismatches
def mismatches(self, other, include=None, exclude=None): """ Compares information stored within provenance objects with the exception of version information to see if they match. Matches are constrained to the paths passed to the 'include' kwarg, with the exception of sub-paths passed to the 'exclude' kwarg Parameters ---------- other : Provenance The provenance object to compare against include : list[list[str]] | None Paths in the provenance to include in the match. If None all are incluced exclude : list[list[str]] | None Paths in the provenance to exclude from the match. In None all are excluded """ if include is not None: include_res = [self._gen_prov_path_regex(p) for p in include] if exclude is not None: exclude_res = [self._gen_prov_path_regex(p) for p in exclude] diff = DeepDiff(self._prov, other._prov, ignore_order=True) # Create regular expresssions for the include and exclude paths in # the format that deepdiff uses for nested dictionary/lists def include_change(change): if include is None: included = True else: included = any(rx.match(change) for rx in include_res) if included and exclude is not None: included = not any(rx.match(change) for rx in exclude_res) return included filtered_diff = {} for change_type, changes in diff.items(): if isinstance(changes, dict): filtered = dict((k, v) for k, v in changes.items() if include_change(k)) else: filtered = [c for c in changes if include_change(c)] if filtered: filtered_diff[change_type] = filtered return filtered_diff
python
def mismatches(self, other, include=None, exclude=None): """ Compares information stored within provenance objects with the exception of version information to see if they match. Matches are constrained to the paths passed to the 'include' kwarg, with the exception of sub-paths passed to the 'exclude' kwarg Parameters ---------- other : Provenance The provenance object to compare against include : list[list[str]] | None Paths in the provenance to include in the match. If None all are incluced exclude : list[list[str]] | None Paths in the provenance to exclude from the match. In None all are excluded """ if include is not None: include_res = [self._gen_prov_path_regex(p) for p in include] if exclude is not None: exclude_res = [self._gen_prov_path_regex(p) for p in exclude] diff = DeepDiff(self._prov, other._prov, ignore_order=True) # Create regular expresssions for the include and exclude paths in # the format that deepdiff uses for nested dictionary/lists def include_change(change): if include is None: included = True else: included = any(rx.match(change) for rx in include_res) if included and exclude is not None: included = not any(rx.match(change) for rx in exclude_res) return included filtered_diff = {} for change_type, changes in diff.items(): if isinstance(changes, dict): filtered = dict((k, v) for k, v in changes.items() if include_change(k)) else: filtered = [c for c in changes if include_change(c)] if filtered: filtered_diff[change_type] = filtered return filtered_diff
[ "def", "mismatches", "(", "self", ",", "other", ",", "include", "=", "None", ",", "exclude", "=", "None", ")", ":", "if", "include", "is", "not", "None", ":", "include_res", "=", "[", "self", ".", "_gen_prov_path_regex", "(", "p", ")", "for", "p", "i...
Compares information stored within provenance objects with the exception of version information to see if they match. Matches are constrained to the paths passed to the 'include' kwarg, with the exception of sub-paths passed to the 'exclude' kwarg Parameters ---------- other : Provenance The provenance object to compare against include : list[list[str]] | None Paths in the provenance to include in the match. If None all are incluced exclude : list[list[str]] | None Paths in the provenance to exclude from the match. In None all are excluded
[ "Compares", "information", "stored", "within", "provenance", "objects", "with", "the", "exception", "of", "version", "information", "to", "see", "if", "they", "match", ".", "Matches", "are", "constrained", "to", "the", "paths", "passed", "to", "the", "include", ...
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/provenance.py#L171-L215
train
51,665
MonashBI/arcana
arcana/study/multi.py
MultiStudy.translate
def translate(cls, substudy_name, pipeline_getter, auto_added=False): """ A method for translating pipeline constructors from a sub-study to the namespace of a multi-study. Returns a new method that calls the sub-study pipeline constructor with appropriate keyword arguments Parameters ---------- substudy_name : str Name of the sub-study pipeline_getter : str Name of method used to construct the pipeline in the sub-study auto_added : bool Signify that a method was automatically added by the MultiStudyMetaClass. Used in checks when pickling Study objects """ assert isinstance(substudy_name, basestring) assert isinstance(pipeline_getter, basestring) def translated_getter(self, **name_maps): substudy_spec = self.substudy_spec(substudy_name) # Combine mapping of names of sub-study specs with return getattr(self.substudy(substudy_name), pipeline_getter)( prefix=substudy_name + '_', input_map=substudy_spec.name_map, output_map=substudy_spec.name_map, study=self, name_maps=name_maps) # Add reduce method to allow it to be pickled translated_getter.auto_added = auto_added return translated_getter
python
def translate(cls, substudy_name, pipeline_getter, auto_added=False): """ A method for translating pipeline constructors from a sub-study to the namespace of a multi-study. Returns a new method that calls the sub-study pipeline constructor with appropriate keyword arguments Parameters ---------- substudy_name : str Name of the sub-study pipeline_getter : str Name of method used to construct the pipeline in the sub-study auto_added : bool Signify that a method was automatically added by the MultiStudyMetaClass. Used in checks when pickling Study objects """ assert isinstance(substudy_name, basestring) assert isinstance(pipeline_getter, basestring) def translated_getter(self, **name_maps): substudy_spec = self.substudy_spec(substudy_name) # Combine mapping of names of sub-study specs with return getattr(self.substudy(substudy_name), pipeline_getter)( prefix=substudy_name + '_', input_map=substudy_spec.name_map, output_map=substudy_spec.name_map, study=self, name_maps=name_maps) # Add reduce method to allow it to be pickled translated_getter.auto_added = auto_added return translated_getter
[ "def", "translate", "(", "cls", ",", "substudy_name", ",", "pipeline_getter", ",", "auto_added", "=", "False", ")", ":", "assert", "isinstance", "(", "substudy_name", ",", "basestring", ")", "assert", "isinstance", "(", "pipeline_getter", ",", "basestring", ")",...
A method for translating pipeline constructors from a sub-study to the namespace of a multi-study. Returns a new method that calls the sub-study pipeline constructor with appropriate keyword arguments Parameters ---------- substudy_name : str Name of the sub-study pipeline_getter : str Name of method used to construct the pipeline in the sub-study auto_added : bool Signify that a method was automatically added by the MultiStudyMetaClass. Used in checks when pickling Study objects
[ "A", "method", "for", "translating", "pipeline", "constructors", "from", "a", "sub", "-", "study", "to", "the", "namespace", "of", "a", "multi", "-", "study", ".", "Returns", "a", "new", "method", "that", "calls", "the", "sub", "-", "study", "pipeline", ...
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/multi.py#L169-L199
train
51,666
MonashBI/arcana
arcana/study/multi.py
SubStudySpec.auto_data_specs
def auto_data_specs(self): """ Data specs in the sub-study class that are not explicitly provided in the name map """ for spec in self.study_class.data_specs(): if spec.name not in self._name_map: yield spec
python
def auto_data_specs(self): """ Data specs in the sub-study class that are not explicitly provided in the name map """ for spec in self.study_class.data_specs(): if spec.name not in self._name_map: yield spec
[ "def", "auto_data_specs", "(", "self", ")", ":", "for", "spec", "in", "self", ".", "study_class", ".", "data_specs", "(", ")", ":", "if", "spec", ".", "name", "not", "in", "self", ".", "_name_map", ":", "yield", "spec" ]
Data specs in the sub-study class that are not explicitly provided in the name map
[ "Data", "specs", "in", "the", "sub", "-", "study", "class", "that", "are", "not", "explicitly", "provided", "in", "the", "name", "map" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/multi.py#L264-L271
train
51,667
MonashBI/arcana
arcana/study/multi.py
SubStudySpec.auto_param_specs
def auto_param_specs(self): """ Parameter pecs in the sub-study class that are not explicitly provided in the name map """ for spec in self.study_class.parameter_specs(): if spec.name not in self._name_map: yield spec
python
def auto_param_specs(self): """ Parameter pecs in the sub-study class that are not explicitly provided in the name map """ for spec in self.study_class.parameter_specs(): if spec.name not in self._name_map: yield spec
[ "def", "auto_param_specs", "(", "self", ")", ":", "for", "spec", "in", "self", ".", "study_class", ".", "parameter_specs", "(", ")", ":", "if", "spec", ".", "name", "not", "in", "self", ".", "_name_map", ":", "yield", "spec" ]
Parameter pecs in the sub-study class that are not explicitly provided in the name map
[ "Parameter", "pecs", "in", "the", "sub", "-", "study", "class", "that", "are", "not", "explicitly", "provided", "in", "the", "name", "map" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/multi.py#L274-L281
train
51,668
MonashBI/arcana
arcana/environment/base.py
MapNode._make_nodes
def _make_nodes(self, cwd=None): """ Cast generated nodes to be Arcana nodes """ for i, node in NipypeMapNode._make_nodes(self, cwd=cwd): # "Cast" NiPype node to a Arcana Node and set Arcana Node # parameters node.__class__ = self.node_cls node._environment = self._environment node._versions = self._versions node._wall_time = self._wall_time node._annotations = self._annotations yield i, node
python
def _make_nodes(self, cwd=None): """ Cast generated nodes to be Arcana nodes """ for i, node in NipypeMapNode._make_nodes(self, cwd=cwd): # "Cast" NiPype node to a Arcana Node and set Arcana Node # parameters node.__class__ = self.node_cls node._environment = self._environment node._versions = self._versions node._wall_time = self._wall_time node._annotations = self._annotations yield i, node
[ "def", "_make_nodes", "(", "self", ",", "cwd", "=", "None", ")", ":", "for", "i", ",", "node", "in", "NipypeMapNode", ".", "_make_nodes", "(", "self", ",", "cwd", "=", "cwd", ")", ":", "# \"Cast\" NiPype node to a Arcana Node and set Arcana Node", "# parameters"...
Cast generated nodes to be Arcana nodes
[ "Cast", "generated", "nodes", "to", "be", "Arcana", "nodes" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/base.py#L107-L119
train
51,669
tym-xqo/nerium
nerium/query.py
get_query
def get_query(query_name): """Find file matching query_name, read and return query object """ query_file_match = list( filter(lambda i: query_name == i.stem, FLAT_QUERIES)) if not query_file_match: return None # TODO: Log warning if more than one match query_file = query_file_match[0] with open(query_file) as f: metadata, query_body = frontmatter.parse(f.read()) result_mod = query_file.suffix.strip('.') query_obj = SimpleNamespace( name=query_name, metadata=metadata, path=query_file, result_mod=result_mod, body=query_body, error=False, executed=datetime.utcnow().isoformat()) return query_obj
python
def get_query(query_name): """Find file matching query_name, read and return query object """ query_file_match = list( filter(lambda i: query_name == i.stem, FLAT_QUERIES)) if not query_file_match: return None # TODO: Log warning if more than one match query_file = query_file_match[0] with open(query_file) as f: metadata, query_body = frontmatter.parse(f.read()) result_mod = query_file.suffix.strip('.') query_obj = SimpleNamespace( name=query_name, metadata=metadata, path=query_file, result_mod=result_mod, body=query_body, error=False, executed=datetime.utcnow().isoformat()) return query_obj
[ "def", "get_query", "(", "query_name", ")", ":", "query_file_match", "=", "list", "(", "filter", "(", "lambda", "i", ":", "query_name", "==", "i", ".", "stem", ",", "FLAT_QUERIES", ")", ")", "if", "not", "query_file_match", ":", "return", "None", "# TODO: ...
Find file matching query_name, read and return query object
[ "Find", "file", "matching", "query_name", "read", "and", "return", "query", "object" ]
b234847d95f37c3a49dff15a189205fe5bbbc05f
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/query.py#L19-L39
train
51,670
tym-xqo/nerium
nerium/query.py
get_result_set
def get_result_set(query_name, **kwargs): """ Call get_query, then submit query from file to resultset module """ query = get_query(query_name) if not query: query = SimpleNamespace() query.error = f"No query found matching '{query_name}'" return query try: result_mod = import_module( f'nerium.contrib.resultset.{query.result_mod}') except ModuleNotFoundError: result_mod = import_module('nerium.resultset.sql') query.params = {**kwargs} query.body = process_template(sql=query.body, **query.params) result = result_mod.result(query, **query.params) # Dumping and reloading via json here gets us datetime and decimal # serialization handling courtesy of `tablib` query.result = json.loads(json.dumps(result, default=serialize_objects_handler)) try: if 'error' in query.result[0].keys(): query.error = query.result[0]['error'] except IndexError: pass return query
python
def get_result_set(query_name, **kwargs): """ Call get_query, then submit query from file to resultset module """ query = get_query(query_name) if not query: query = SimpleNamespace() query.error = f"No query found matching '{query_name}'" return query try: result_mod = import_module( f'nerium.contrib.resultset.{query.result_mod}') except ModuleNotFoundError: result_mod = import_module('nerium.resultset.sql') query.params = {**kwargs} query.body = process_template(sql=query.body, **query.params) result = result_mod.result(query, **query.params) # Dumping and reloading via json here gets us datetime and decimal # serialization handling courtesy of `tablib` query.result = json.loads(json.dumps(result, default=serialize_objects_handler)) try: if 'error' in query.result[0].keys(): query.error = query.result[0]['error'] except IndexError: pass return query
[ "def", "get_result_set", "(", "query_name", ",", "*", "*", "kwargs", ")", ":", "query", "=", "get_query", "(", "query_name", ")", "if", "not", "query", ":", "query", "=", "SimpleNamespace", "(", ")", "query", ".", "error", "=", "f\"No query found matching '{...
Call get_query, then submit query from file to resultset module
[ "Call", "get_query", "then", "submit", "query", "from", "file", "to", "resultset", "module" ]
b234847d95f37c3a49dff15a189205fe5bbbc05f
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/query.py#L49-L73
train
51,671
tym-xqo/nerium
nerium/query.py
results_to_csv
def results_to_csv(query_name, **kwargs): """ Generate CSV from result data """ query = get_result_set(query_name, **kwargs) result = query.result columns = list(result[0].keys()) data = [tuple(row.values()) for row in result] frame = tablib.Dataset() frame.headers = columns for row in data: frame.append(row) csvs = frame.export('csv') return csvs
python
def results_to_csv(query_name, **kwargs): """ Generate CSV from result data """ query = get_result_set(query_name, **kwargs) result = query.result columns = list(result[0].keys()) data = [tuple(row.values()) for row in result] frame = tablib.Dataset() frame.headers = columns for row in data: frame.append(row) csvs = frame.export('csv') return csvs
[ "def", "results_to_csv", "(", "query_name", ",", "*", "*", "kwargs", ")", ":", "query", "=", "get_result_set", "(", "query_name", ",", "*", "*", "kwargs", ")", "result", "=", "query", ".", "result", "columns", "=", "list", "(", "result", "[", "0", "]",...
Generate CSV from result data
[ "Generate", "CSV", "from", "result", "data" ]
b234847d95f37c3a49dff15a189205fe5bbbc05f
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/query.py#L76-L88
train
51,672
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.register
def register(self, models=None, wrapper_cls=None): """Registers with app_label.modelname, wrapper_cls. """ self.loaded = True for model in models: model = model.lower() if model not in self.registry: self.registry.update({model: wrapper_cls or self.wrapper_cls}) if self.register_historical: historical_model = ".historical".join(model.split(".")) self.registry.update( {historical_model: wrapper_cls or self.wrapper_cls} ) else: raise AlreadyRegistered(f"Model is already registered. Got {model}.")
python
def register(self, models=None, wrapper_cls=None): """Registers with app_label.modelname, wrapper_cls. """ self.loaded = True for model in models: model = model.lower() if model not in self.registry: self.registry.update({model: wrapper_cls or self.wrapper_cls}) if self.register_historical: historical_model = ".historical".join(model.split(".")) self.registry.update( {historical_model: wrapper_cls or self.wrapper_cls} ) else: raise AlreadyRegistered(f"Model is already registered. Got {model}.")
[ "def", "register", "(", "self", ",", "models", "=", "None", ",", "wrapper_cls", "=", "None", ")", ":", "self", ".", "loaded", "=", "True", "for", "model", "in", "models", ":", "model", "=", "model", ".", "lower", "(", ")", "if", "model", "not", "in...
Registers with app_label.modelname, wrapper_cls.
[ "Registers", "with", "app_label", ".", "modelname", "wrapper_cls", "." ]
3d5efd66c68e2db4b060a82b070ae490dc399ca7
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L28-L42
train
51,673
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.register_for_app
def register_for_app( self, app_label=None, exclude_models=None, exclude_model_classes=None ): """Registers all models for this app_label. """ models = [] exclude_models = exclude_models or [] app_config = django_apps.get_app_config(app_label) for model in app_config.get_models(): if model._meta.label_lower in exclude_models: pass elif exclude_model_classes and issubclass(model, exclude_model_classes): pass else: models.append(model._meta.label_lower) self.register(models)
python
def register_for_app( self, app_label=None, exclude_models=None, exclude_model_classes=None ): """Registers all models for this app_label. """ models = [] exclude_models = exclude_models or [] app_config = django_apps.get_app_config(app_label) for model in app_config.get_models(): if model._meta.label_lower in exclude_models: pass elif exclude_model_classes and issubclass(model, exclude_model_classes): pass else: models.append(model._meta.label_lower) self.register(models)
[ "def", "register_for_app", "(", "self", ",", "app_label", "=", "None", ",", "exclude_models", "=", "None", ",", "exclude_model_classes", "=", "None", ")", ":", "models", "=", "[", "]", "exclude_models", "=", "exclude_models", "or", "[", "]", "app_config", "=...
Registers all models for this app_label.
[ "Registers", "all", "models", "for", "this", "app_label", "." ]
3d5efd66c68e2db4b060a82b070ae490dc399ca7
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L44-L59
train
51,674
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.get_wrapped_instance
def get_wrapped_instance(self, instance=None): """Returns a wrapped model instance. """ if instance._meta.label_lower not in self.registry: raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.") wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls if wrapper_cls: return wrapper_cls(instance) return instance
python
def get_wrapped_instance(self, instance=None): """Returns a wrapped model instance. """ if instance._meta.label_lower not in self.registry: raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.") wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls if wrapper_cls: return wrapper_cls(instance) return instance
[ "def", "get_wrapped_instance", "(", "self", ",", "instance", "=", "None", ")", ":", "if", "instance", ".", "_meta", ".", "label_lower", "not", "in", "self", ".", "registry", ":", "raise", "ModelNotRegistered", "(", "f\"{repr(instance)} is not registered with {self}....
Returns a wrapped model instance.
[ "Returns", "a", "wrapped", "model", "instance", "." ]
3d5efd66c68e2db4b060a82b070ae490dc399ca7
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L61-L69
train
51,675
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.site_models
def site_models(self, app_label=None): """Returns a dictionary of registered models. """ site_models = {} app_configs = ( django_apps.get_app_configs() if app_label is None else [django_apps.get_app_config(app_label)] ) for app_config in app_configs: model_list = [ model for model in app_config.get_models() if model._meta.label_lower in self.registry ] if model_list: model_list.sort(key=lambda m: m._meta.verbose_name) site_models.update({app_config.name: model_list}) return site_models
python
def site_models(self, app_label=None): """Returns a dictionary of registered models. """ site_models = {} app_configs = ( django_apps.get_app_configs() if app_label is None else [django_apps.get_app_config(app_label)] ) for app_config in app_configs: model_list = [ model for model in app_config.get_models() if model._meta.label_lower in self.registry ] if model_list: model_list.sort(key=lambda m: m._meta.verbose_name) site_models.update({app_config.name: model_list}) return site_models
[ "def", "site_models", "(", "self", ",", "app_label", "=", "None", ")", ":", "site_models", "=", "{", "}", "app_configs", "=", "(", "django_apps", ".", "get_app_configs", "(", ")", "if", "app_label", "is", "None", "else", "[", "django_apps", ".", "get_app_c...
Returns a dictionary of registered models.
[ "Returns", "a", "dictionary", "of", "registered", "models", "." ]
3d5efd66c68e2db4b060a82b070ae490dc399ca7
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L71-L89
train
51,676
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.get_fileset
def get_fileset(self, fileset): """ Set the path of the fileset from the repository """ # Don't need to cache fileset as it is already local as long # as the path is set if fileset._path is None: primary_path = self.fileset_path(fileset) aux_files = fileset.format.default_aux_file_paths(primary_path) if not op.exists(primary_path): raise ArcanaMissingDataException( "{} does not exist in {}" .format(fileset, self)) for aux_name, aux_path in aux_files.items(): if not op.exists(aux_path): raise ArcanaMissingDataException( "{} is missing '{}' side car in {}" .format(fileset, aux_name, self)) else: primary_path = fileset.path aux_files = fileset.aux_files return primary_path, aux_files
python
def get_fileset(self, fileset): """ Set the path of the fileset from the repository """ # Don't need to cache fileset as it is already local as long # as the path is set if fileset._path is None: primary_path = self.fileset_path(fileset) aux_files = fileset.format.default_aux_file_paths(primary_path) if not op.exists(primary_path): raise ArcanaMissingDataException( "{} does not exist in {}" .format(fileset, self)) for aux_name, aux_path in aux_files.items(): if not op.exists(aux_path): raise ArcanaMissingDataException( "{} is missing '{}' side car in {}" .format(fileset, aux_name, self)) else: primary_path = fileset.path aux_files = fileset.aux_files return primary_path, aux_files
[ "def", "get_fileset", "(", "self", ",", "fileset", ")", ":", "# Don't need to cache fileset as it is already local as long", "# as the path is set", "if", "fileset", ".", "_path", "is", "None", ":", "primary_path", "=", "self", ".", "fileset_path", "(", "fileset", ")"...
Set the path of the fileset from the repository
[ "Set", "the", "path", "of", "the", "fileset", "from", "the", "repository" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L94-L115
train
51,677
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.get_field
def get_field(self, field): """ Update the value of the field from the repository """ # Load fields JSON, locking to prevent read/write conflicts # Would be better if only checked if locked to allow # concurrent reads but not possible with multi-process # locks (in my understanding at least). fpath = self.fields_json_path(field) try: with InterProcessLock(fpath + self.LOCK_SUFFIX, logger=logger), open(fpath, 'r') as f: dct = json.load(f) val = dct[field.name] if field.array: val = [field.dtype(v) for v in val] else: val = field.dtype(val) except (KeyError, IOError) as e: try: # Check to see if the IOError wasn't just because of a # missing file if e.errno != errno.ENOENT: raise except AttributeError: pass raise ArcanaMissingDataException( "{} does not exist in the local repository {}" .format(field.name, self)) return val
python
def get_field(self, field): """ Update the value of the field from the repository """ # Load fields JSON, locking to prevent read/write conflicts # Would be better if only checked if locked to allow # concurrent reads but not possible with multi-process # locks (in my understanding at least). fpath = self.fields_json_path(field) try: with InterProcessLock(fpath + self.LOCK_SUFFIX, logger=logger), open(fpath, 'r') as f: dct = json.load(f) val = dct[field.name] if field.array: val = [field.dtype(v) for v in val] else: val = field.dtype(val) except (KeyError, IOError) as e: try: # Check to see if the IOError wasn't just because of a # missing file if e.errno != errno.ENOENT: raise except AttributeError: pass raise ArcanaMissingDataException( "{} does not exist in the local repository {}" .format(field.name, self)) return val
[ "def", "get_field", "(", "self", ",", "field", ")", ":", "# Load fields JSON, locking to prevent read/write conflicts", "# Would be better if only checked if locked to allow", "# concurrent reads but not possible with multi-process", "# locks (in my understanding at least).", "fpath", "=",...
Update the value of the field from the repository
[ "Update", "the", "value", "of", "the", "field", "from", "the", "repository" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L117-L146
train
51,678
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.put_fileset
def put_fileset(self, fileset): """ Inserts or updates a fileset in the repository """ target_path = self.fileset_path(fileset) if op.isfile(fileset.path): shutil.copyfile(fileset.path, target_path) # Copy side car files into repository for aux_name, aux_path in fileset.format.default_aux_file_paths( target_path).items(): shutil.copyfile(self.aux_file[aux_name], aux_path) elif op.isdir(fileset.path): if op.exists(target_path): shutil.rmtree(target_path) shutil.copytree(fileset.path, target_path) else: assert False
python
def put_fileset(self, fileset): """ Inserts or updates a fileset in the repository """ target_path = self.fileset_path(fileset) if op.isfile(fileset.path): shutil.copyfile(fileset.path, target_path) # Copy side car files into repository for aux_name, aux_path in fileset.format.default_aux_file_paths( target_path).items(): shutil.copyfile(self.aux_file[aux_name], aux_path) elif op.isdir(fileset.path): if op.exists(target_path): shutil.rmtree(target_path) shutil.copytree(fileset.path, target_path) else: assert False
[ "def", "put_fileset", "(", "self", ",", "fileset", ")", ":", "target_path", "=", "self", ".", "fileset_path", "(", "fileset", ")", "if", "op", ".", "isfile", "(", "fileset", ".", "path", ")", ":", "shutil", ".", "copyfile", "(", "fileset", ".", "path",...
Inserts or updates a fileset in the repository
[ "Inserts", "or", "updates", "a", "fileset", "in", "the", "repository" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L148-L164
train
51,679
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.put_field
def put_field(self, field): """ Inserts or updates a field in the repository """ fpath = self.fields_json_path(field) # Open fields JSON, locking to prevent other processes # reading or writing with InterProcessLock(fpath + self.LOCK_SUFFIX, logger=logger): try: with open(fpath, 'r') as f: dct = json.load(f) except IOError as e: if e.errno == errno.ENOENT: dct = {} else: raise if field.array: dct[field.name] = list(field.value) else: dct[field.name] = field.value with open(fpath, 'w') as f: json.dump(dct, f, indent=2)
python
def put_field(self, field): """ Inserts or updates a field in the repository """ fpath = self.fields_json_path(field) # Open fields JSON, locking to prevent other processes # reading or writing with InterProcessLock(fpath + self.LOCK_SUFFIX, logger=logger): try: with open(fpath, 'r') as f: dct = json.load(f) except IOError as e: if e.errno == errno.ENOENT: dct = {} else: raise if field.array: dct[field.name] = list(field.value) else: dct[field.name] = field.value with open(fpath, 'w') as f: json.dump(dct, f, indent=2)
[ "def", "put_field", "(", "self", ",", "field", ")", ":", "fpath", "=", "self", ".", "fields_json_path", "(", "field", ")", "# Open fields JSON, locking to prevent other processes", "# reading or writing", "with", "InterProcessLock", "(", "fpath", "+", "self", ".", "...
Inserts or updates a field in the repository
[ "Inserts", "or", "updates", "a", "field", "in", "the", "repository" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L166-L187
train
51,680
MonashBI/arcana
arcana/data/collection.py
BaseCollection.bind
def bind(self, study, **kwargs): # @UnusedVariable """ Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study. """ if self.frequency == 'per_subject': tree_subject_ids = list(study.tree.subject_ids) subject_ids = list(self._collection.keys()) if tree_subject_ids != subject_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(subject_ids), "', '".join(tree_subject_ids))) elif self.frequency == 'per_visit': tree_visit_ids = list(study.tree.visit_ids) visit_ids = list(self._collection.keys()) if tree_visit_ids != visit_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(visit_ids), "', '".join(tree_visit_ids))) elif self.frequency == 'per_session': for subject in study.tree.subjects: if subject.id not in self._collection: raise ArcanaUsageError( "Study subject ID '{}' was not found in colleciton " "provided to '{}' (found '{}')".format( subject.id, self.name, "', '".join(self._collection.keys()))) for session in subject.sessions: if session.visit_id not in self._collection[subject.id]: raise ArcanaUsageError( "Study visit ID '{}' for subject '{}' was not " "found in colleciton provided to '{}' (found '{}')" .format(subject.id, self.name, "', '".join( self._collection[subject.id].keys())))
python
def bind(self, study, **kwargs): # @UnusedVariable """ Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study. """ if self.frequency == 'per_subject': tree_subject_ids = list(study.tree.subject_ids) subject_ids = list(self._collection.keys()) if tree_subject_ids != subject_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(subject_ids), "', '".join(tree_subject_ids))) elif self.frequency == 'per_visit': tree_visit_ids = list(study.tree.visit_ids) visit_ids = list(self._collection.keys()) if tree_visit_ids != visit_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(visit_ids), "', '".join(tree_visit_ids))) elif self.frequency == 'per_session': for subject in study.tree.subjects: if subject.id not in self._collection: raise ArcanaUsageError( "Study subject ID '{}' was not found in colleciton " "provided to '{}' (found '{}')".format( subject.id, self.name, "', '".join(self._collection.keys()))) for session in subject.sessions: if session.visit_id not in self._collection[subject.id]: raise ArcanaUsageError( "Study visit ID '{}' for subject '{}' was not " "found in colleciton provided to '{}' (found '{}')" .format(subject.id, self.name, "', '".join( self._collection[subject.id].keys())))
[ "def", "bind", "(", "self", ",", "study", ",", "*", "*", "kwargs", ")", ":", "# @UnusedVariable", "if", "self", ".", "frequency", "==", "'per_subject'", ":", "tree_subject_ids", "=", "list", "(", "study", ".", "tree", ".", "subject_ids", ")", "subject_ids"...
Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study.
[ "Used", "for", "duck", "typing", "Collection", "objects", "with", "Spec", "and", "Match", "in", "source", "and", "sink", "initiation", ".", "Checks", "IDs", "match", "sessions", "in", "study", "." ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/collection.py#L167-L205
train
51,681
MonashBI/arcana
arcana/repository/tree.py
TreeNode.fileset
def fileset(self, name, from_study=None, format=None): # @ReservedAssignment @IgnorePep8 """ Gets the fileset named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | FilesetSpec The name of the fileset or a spec matching the given name from_study : str | None Name of the study that produced the fileset if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. format : FileFormat | str | None Either the format of the fileset to return or the name of the format. If None and only a single fileset is found for the given name and study then that is returned otherwise an exception is raised """ if isinstance(name, BaseFileset): if from_study is None and name.derived: from_study = name.study.name name = name.name try: format_dct = self._filesets[(name, from_study)] except KeyError: available = [ ('{}(format={})'.format(f.name, f._resource_name) if f._resource_name is not None else f.name) for f in self.filesets if f.from_study == from_study] other_studies = [ (f.from_study if f.from_study is not None else '<root>') for f in self.filesets if f.name == name] if other_studies: msg = (". NB: matching fileset(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a fileset named '{}'{} " "(available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg))) else: if format is None: all_formats = list(format_dct.values()) if len(all_formats) > 1: raise ArcanaNameError( "Multiple filesets found for '{}'{} in {} with formats" " {}. 
Need to specify a format" .format(name, ("in '{}'".format(from_study) if from_study is not None else ''), self, "', '".join(format_dct.keys()))) fileset = all_formats[0] else: try: if isinstance(format, str): fileset = format_dct[format] else: try: fileset = format_dct[format.ext] except KeyError: fileset = None for rname, rfileset in format_dct.items(): if rname in format.resource_names( self.tree.repository.type): fileset = rfileset break if fileset is None: raise except KeyError: raise ArcanaNameError( format, ("{} doesn't have a fileset named '{}'{} with " "format '{}' (available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), format, "', '".join(format_dct.keys()), msg))) return fileset
python
def fileset(self, name, from_study=None, format=None): # @ReservedAssignment @IgnorePep8 """ Gets the fileset named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | FilesetSpec The name of the fileset or a spec matching the given name from_study : str | None Name of the study that produced the fileset if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. format : FileFormat | str | None Either the format of the fileset to return or the name of the format. If None and only a single fileset is found for the given name and study then that is returned otherwise an exception is raised """ if isinstance(name, BaseFileset): if from_study is None and name.derived: from_study = name.study.name name = name.name try: format_dct = self._filesets[(name, from_study)] except KeyError: available = [ ('{}(format={})'.format(f.name, f._resource_name) if f._resource_name is not None else f.name) for f in self.filesets if f.from_study == from_study] other_studies = [ (f.from_study if f.from_study is not None else '<root>') for f in self.filesets if f.name == name] if other_studies: msg = (". NB: matching fileset(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a fileset named '{}'{} " "(available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg))) else: if format is None: all_formats = list(format_dct.values()) if len(all_formats) > 1: raise ArcanaNameError( "Multiple filesets found for '{}'{} in {} with formats" " {}. 
Need to specify a format" .format(name, ("in '{}'".format(from_study) if from_study is not None else ''), self, "', '".join(format_dct.keys()))) fileset = all_formats[0] else: try: if isinstance(format, str): fileset = format_dct[format] else: try: fileset = format_dct[format.ext] except KeyError: fileset = None for rname, rfileset in format_dct.items(): if rname in format.resource_names( self.tree.repository.type): fileset = rfileset break if fileset is None: raise except KeyError: raise ArcanaNameError( format, ("{} doesn't have a fileset named '{}'{} with " "format '{}' (available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), format, "', '".join(format_dct.keys()), msg))) return fileset
[ "def", "fileset", "(", "self", ",", "name", ",", "from_study", "=", "None", ",", "format", "=", "None", ")", ":", "# @ReservedAssignment @IgnorePep8", "if", "isinstance", "(", "name", ",", "BaseFileset", ")", ":", "if", "from_study", "is", "None", "and", "...
Gets the fileset named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | FilesetSpec The name of the fileset or a spec matching the given name from_study : str | None Name of the study that produced the fileset if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. format : FileFormat | str | None Either the format of the fileset to return or the name of the format. If None and only a single fileset is found for the given name and study then that is returned otherwise an exception is raised
[ "Gets", "the", "fileset", "named", "name", "produced", "by", "the", "Study", "named", "study", "if", "provided", ".", "If", "a", "spec", "is", "passed", "instead", "of", "a", "str", "to", "the", "name", "argument", "then", "the", "study", "will", "be", ...
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L114-L199
train
51,682
MonashBI/arcana
arcana/repository/tree.py
TreeNode.field
def field(self, name, from_study=None): """ Gets the field named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | BaseField The name of the field or a spec matching the given name study : str | None Name of the study that produced the field if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. """ if isinstance(name, BaseField): if from_study is None and name.derived: from_study = name.study.name name = name.name try: return self._fields[(name, from_study)] except KeyError: available = [d.name for d in self.fields if d.from_study == from_study] other_studies = [(d.from_study if d.from_study is not None else '<root>') for d in self.fields if d.name == name] if other_studies: msg = (". NB: matching field(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a field named '{}'{} " "(available '{}')" .format( self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg)))
python
def field(self, name, from_study=None): """ Gets the field named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | BaseField The name of the field or a spec matching the given name study : str | None Name of the study that produced the field if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. """ if isinstance(name, BaseField): if from_study is None and name.derived: from_study = name.study.name name = name.name try: return self._fields[(name, from_study)] except KeyError: available = [d.name for d in self.fields if d.from_study == from_study] other_studies = [(d.from_study if d.from_study is not None else '<root>') for d in self.fields if d.name == name] if other_studies: msg = (". NB: matching field(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a field named '{}'{} " "(available '{}')" .format( self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg)))
[ "def", "field", "(", "self", ",", "name", ",", "from_study", "=", "None", ")", ":", "if", "isinstance", "(", "name", ",", "BaseField", ")", ":", "if", "from_study", "is", "None", "and", "name", ".", "derived", ":", "from_study", "=", "name", ".", "st...
Gets the field named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | BaseField The name of the field or a spec matching the given name study : str | None Name of the study that produced the field if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead.
[ "Gets", "the", "field", "named", "name", "produced", "by", "the", "Study", "named", "study", "if", "provided", ".", "If", "a", "spec", "is", "passed", "instead", "of", "a", "str", "to", "the", "name", "argument", "then", "the", "study", "will", "be", "...
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L201-L241
train
51,683
MonashBI/arcana
arcana/repository/tree.py
TreeNode.record
def record(self, pipeline_name, from_study): """ Returns the provenance record for a given pipeline Parameters ---------- pipeline_name : str The name of the pipeline that generated the record from_study : str The name of the study that the pipeline was generated from Returns ------- record : arcana.provenance.Record The provenance record generated by the specified pipeline """ try: return self._records[(pipeline_name, from_study)] except KeyError: found = [] for sname, pnames in groupby(sorted(self._records, key=itemgetter(1)), key=itemgetter(1)): found.append( "'{}' for '{}'".format("', '".join(p for p, _ in pnames), sname)) raise ArcanaNameError( (pipeline_name, from_study), ("{} doesn't have a provenance record for pipeline '{}' " "for '{}' study (found {})".format( self, pipeline_name, from_study, '; '.join(found))))
python
def record(self, pipeline_name, from_study): """ Returns the provenance record for a given pipeline Parameters ---------- pipeline_name : str The name of the pipeline that generated the record from_study : str The name of the study that the pipeline was generated from Returns ------- record : arcana.provenance.Record The provenance record generated by the specified pipeline """ try: return self._records[(pipeline_name, from_study)] except KeyError: found = [] for sname, pnames in groupby(sorted(self._records, key=itemgetter(1)), key=itemgetter(1)): found.append( "'{}' for '{}'".format("', '".join(p for p, _ in pnames), sname)) raise ArcanaNameError( (pipeline_name, from_study), ("{} doesn't have a provenance record for pipeline '{}' " "for '{}' study (found {})".format( self, pipeline_name, from_study, '; '.join(found))))
[ "def", "record", "(", "self", ",", "pipeline_name", ",", "from_study", ")", ":", "try", ":", "return", "self", ".", "_records", "[", "(", "pipeline_name", ",", "from_study", ")", "]", "except", "KeyError", ":", "found", "=", "[", "]", "for", "sname", "...
Returns the provenance record for a given pipeline Parameters ---------- pipeline_name : str The name of the pipeline that generated the record from_study : str The name of the study that the pipeline was generated from Returns ------- record : arcana.provenance.Record The provenance record generated by the specified pipeline
[ "Returns", "the", "provenance", "record", "for", "a", "given", "pipeline" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L243-L274
train
51,684
MonashBI/arcana
arcana/repository/tree.py
TreeNode.find_mismatch
def find_mismatch(self, other, indent=''): """ Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string """ if self != other: mismatch = "\n{}{}".format(indent, type(self).__name__) else: mismatch = '' sub_indent = indent + ' ' if len(list(self.filesets)) != len(list(other.filesets)): mismatch += ('\n{indent}mismatching summary fileset lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.filesets)), len(list(other.filesets)), list(self.filesets), list(other.filesets), indent=sub_indent)) else: for s, o in zip(self.filesets, other.filesets): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.fields)) != len(list(other.fields)): mismatch += ('\n{indent}mismatching summary field lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.fields)), len(list(other.fields)), list(self.fields), list(other.fields), indent=sub_indent)) else: for s, o in zip(self.fields, other.fields): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
python
def find_mismatch(self, other, indent=''): """ Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string """ if self != other: mismatch = "\n{}{}".format(indent, type(self).__name__) else: mismatch = '' sub_indent = indent + ' ' if len(list(self.filesets)) != len(list(other.filesets)): mismatch += ('\n{indent}mismatching summary fileset lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.filesets)), len(list(other.filesets)), list(self.filesets), list(other.filesets), indent=sub_indent)) else: for s, o in zip(self.filesets, other.filesets): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.fields)) != len(list(other.fields)): mismatch += ('\n{indent}mismatching summary field lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.fields)), len(list(other.fields)), list(self.fields), list(other.fields), indent=sub_indent)) else: for s, o in zip(self.fields, other.fields): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
[ "def", "find_mismatch", "(", "self", ",", "other", ",", "indent", "=", "''", ")", ":", "if", "self", "!=", "other", ":", "mismatch", "=", "\"\\n{}{}\"", ".", "format", "(", "indent", ",", "type", "(", "self", ")", ".", "__name__", ")", "else", ":", ...
Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string
[ "Highlights", "where", "two", "nodes", "differ", "in", "a", "human", "-", "readable", "form" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L283-L328
train
51,685
MonashBI/arcana
arcana/repository/tree.py
Tree.nodes
def nodes(self, frequency=None): """ Returns an iterator over all nodes in the tree for the specified frequency. If no frequency is specified then all nodes are returned Parameters ---------- frequency : str | None The frequency of the nodes to iterate over. If None all frequencies are returned Returns ------- nodes : iterable[TreeNode] """ if frequency is None: nodes = chain(*(self._nodes(f) for f in ('per_study', 'per_subject', 'per_visit', 'per_session'))) else: nodes = self._nodes(frequency=frequency) return nodes
python
def nodes(self, frequency=None): """ Returns an iterator over all nodes in the tree for the specified frequency. If no frequency is specified then all nodes are returned Parameters ---------- frequency : str | None The frequency of the nodes to iterate over. If None all frequencies are returned Returns ------- nodes : iterable[TreeNode] """ if frequency is None: nodes = chain(*(self._nodes(f) for f in ('per_study', 'per_subject', 'per_visit', 'per_session'))) else: nodes = self._nodes(frequency=frequency) return nodes
[ "def", "nodes", "(", "self", ",", "frequency", "=", "None", ")", ":", "if", "frequency", "is", "None", ":", "nodes", "=", "chain", "(", "*", "(", "self", ".", "_nodes", "(", "f", ")", "for", "f", "in", "(", "'per_study'", ",", "'per_subject'", ",",...
Returns an iterator over all nodes in the tree for the specified frequency. If no frequency is specified then all nodes are returned Parameters ---------- frequency : str | None The frequency of the nodes to iterate over. If None all frequencies are returned Returns ------- nodes : iterable[TreeNode]
[ "Returns", "an", "iterator", "over", "all", "nodes", "in", "the", "tree", "for", "the", "specified", "frequency", ".", "If", "no", "frequency", "is", "specified", "then", "all", "nodes", "are", "returned" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L488-L509
train
51,686
MonashBI/arcana
arcana/repository/tree.py
Tree.find_mismatch
def find_mismatch(self, other, indent=''): """ Used in debugging unittests """ mismatch = super(Tree, self).find_mismatch(other, indent) sub_indent = indent + ' ' if len(list(self.subjects)) != len(list(other.subjects)): mismatch += ('\n{indent}mismatching subject lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.subjects)), len(list(other.subjects)), list(self.subjects), list(other.subjects), indent=sub_indent)) else: for s, o in zip(self.subjects, other.subjects): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.visits)) != len(list(other.visits)): mismatch += ('\n{indent}mismatching visit lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.visits)), len(list(other.visits)), list(self.visits), list(other.visits), indent=sub_indent)) else: for s, o in zip(self.visits, other.visits): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
python
def find_mismatch(self, other, indent=''): """ Used in debugging unittests """ mismatch = super(Tree, self).find_mismatch(other, indent) sub_indent = indent + ' ' if len(list(self.subjects)) != len(list(other.subjects)): mismatch += ('\n{indent}mismatching subject lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.subjects)), len(list(other.subjects)), list(self.subjects), list(other.subjects), indent=sub_indent)) else: for s, o in zip(self.subjects, other.subjects): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.visits)) != len(list(other.visits)): mismatch += ('\n{indent}mismatching visit lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.visits)), len(list(other.visits)), list(self.visits), list(other.visits), indent=sub_indent)) else: for s, o in zip(self.visits, other.visits): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
[ "def", "find_mismatch", "(", "self", ",", "other", ",", "indent", "=", "''", ")", ":", "mismatch", "=", "super", "(", "Tree", ",", "self", ")", ".", "find_mismatch", "(", "other", ",", "indent", ")", "sub_indent", "=", "indent", "+", "' '", "if", "l...
Used in debugging unittests
[ "Used", "in", "debugging", "unittests" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L524-L554
train
51,687
MonashBI/arcana
arcana/repository/tree.py
Tree.construct
def construct(cls, repository, filesets=(), fields=(), records=(), file_formats=(), **kwargs): """ Return the hierarchical tree of the filesets and fields stored in a repository Parameters ---------- respository : Repository The repository that the tree comes from filesets : list[Fileset] List of all filesets in the tree fields : list[Field] List of all fields in the tree records : list[Record] List of all records in the tree Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository """ # Sort the data by subject and visit ID filesets_dict = defaultdict(list) for fset in filesets: if file_formats: fset.set_format(file_formats) filesets_dict[(fset.subject_id, fset.visit_id)].append(fset) fields_dict = defaultdict(list) for field in fields: fields_dict[(field.subject_id, field.visit_id)].append(field) records_dict = defaultdict(list) for record in records: records_dict[(record.subject_id, record.visit_id)].append(record) # Create all sessions subj_sessions = defaultdict(list) visit_sessions = defaultdict(list) for sess_id in set(chain(filesets_dict, fields_dict, records_dict)): if None in sess_id: continue # Save summaries for later subj_id, visit_id = sess_id session = Session( subject_id=subj_id, visit_id=visit_id, filesets=filesets_dict[sess_id], fields=fields_dict[sess_id], records=records_dict[sess_id]) subj_sessions[subj_id].append(session) visit_sessions[visit_id].append(session) subjects = [] for subj_id in subj_sessions: subjects.append(Subject( subj_id, sorted(subj_sessions[subj_id]), filesets_dict[(subj_id, None)], fields_dict[(subj_id, None)], records_dict[(subj_id, None)])) visits = [] for visit_id in visit_sessions: visits.append(Visit( visit_id, sorted(visit_sessions[visit_id]), filesets_dict[(None, visit_id)], fields_dict[(None, visit_id)], records_dict[(None, visit_id)])) return Tree(sorted(subjects), sorted(visits), repository, filesets_dict[(None, None)], fields_dict[(None, 
None)], records_dict[(None, None)], **kwargs)
python
def construct(cls, repository, filesets=(), fields=(), records=(), file_formats=(), **kwargs): """ Return the hierarchical tree of the filesets and fields stored in a repository Parameters ---------- respository : Repository The repository that the tree comes from filesets : list[Fileset] List of all filesets in the tree fields : list[Field] List of all fields in the tree records : list[Record] List of all records in the tree Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository """ # Sort the data by subject and visit ID filesets_dict = defaultdict(list) for fset in filesets: if file_formats: fset.set_format(file_formats) filesets_dict[(fset.subject_id, fset.visit_id)].append(fset) fields_dict = defaultdict(list) for field in fields: fields_dict[(field.subject_id, field.visit_id)].append(field) records_dict = defaultdict(list) for record in records: records_dict[(record.subject_id, record.visit_id)].append(record) # Create all sessions subj_sessions = defaultdict(list) visit_sessions = defaultdict(list) for sess_id in set(chain(filesets_dict, fields_dict, records_dict)): if None in sess_id: continue # Save summaries for later subj_id, visit_id = sess_id session = Session( subject_id=subj_id, visit_id=visit_id, filesets=filesets_dict[sess_id], fields=fields_dict[sess_id], records=records_dict[sess_id]) subj_sessions[subj_id].append(session) visit_sessions[visit_id].append(session) subjects = [] for subj_id in subj_sessions: subjects.append(Subject( subj_id, sorted(subj_sessions[subj_id]), filesets_dict[(subj_id, None)], fields_dict[(subj_id, None)], records_dict[(subj_id, None)])) visits = [] for visit_id in visit_sessions: visits.append(Visit( visit_id, sorted(visit_sessions[visit_id]), filesets_dict[(None, visit_id)], fields_dict[(None, visit_id)], records_dict[(None, visit_id)])) return Tree(sorted(subjects), sorted(visits), repository, filesets_dict[(None, None)], fields_dict[(None, 
None)], records_dict[(None, None)], **kwargs)
[ "def", "construct", "(", "cls", ",", "repository", ",", "filesets", "=", "(", ")", ",", "fields", "=", "(", ")", ",", "records", "=", "(", ")", ",", "file_formats", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "# Sort the data by subject and visit...
Return the hierarchical tree of the filesets and fields stored in a repository Parameters ---------- respository : Repository The repository that the tree comes from filesets : list[Fileset] List of all filesets in the tree fields : list[Field] List of all fields in the tree records : list[Record] List of all records in the tree Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository
[ "Return", "the", "hierarchical", "tree", "of", "the", "filesets", "and", "fields", "stored", "in", "a", "repository" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L592-L664
train
51,688
MonashBI/arcana
arcana/repository/tree.py
Session.nodes
def nodes(self, frequency=None): """ Returns all nodes of the specified frequency that are related to the given Session Parameters ---------- frequency : str | None The frequency of the nodes to return Returns ------- nodes : iterable[TreeNode] All nodes related to the Session for the specified frequency """ if frequency is None: [] elif frequency == 'per_session': return [self] elif frequency in ('per_visit', 'per_subject'): return [self.parent] elif frequency == 'per_study': return [self.parent.parent]
python
def nodes(self, frequency=None): """ Returns all nodes of the specified frequency that are related to the given Session Parameters ---------- frequency : str | None The frequency of the nodes to return Returns ------- nodes : iterable[TreeNode] All nodes related to the Session for the specified frequency """ if frequency is None: [] elif frequency == 'per_session': return [self] elif frequency in ('per_visit', 'per_subject'): return [self.parent] elif frequency == 'per_study': return [self.parent.parent]
[ "def", "nodes", "(", "self", ",", "frequency", "=", "None", ")", ":", "if", "frequency", "is", "None", ":", "[", "]", "elif", "frequency", "==", "'per_session'", ":", "return", "[", "self", "]", "elif", "frequency", "in", "(", "'per_visit'", ",", "'per...
Returns all nodes of the specified frequency that are related to the given Session Parameters ---------- frequency : str | None The frequency of the nodes to return Returns ------- nodes : iterable[TreeNode] All nodes related to the Session for the specified frequency
[ "Returns", "all", "nodes", "of", "the", "specified", "frequency", "that", "are", "related", "to", "the", "given", "Session" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L1015-L1037
train
51,689
gwww/elkm1
elkm1_lib/proto.py
Connection.write_data
def write_data(self, data, response_required=None, timeout=5.0, raw=False): """Write data on the asyncio Protocol""" if self._transport is None: return if self._paused: return if self._waiting_for_response: LOG.debug("queueing write %s", data) self._queued_writes.append((data, response_required, timeout)) return if response_required: self._waiting_for_response = response_required if timeout > 0: self._timeout_task = self.loop.call_later( timeout, self._response_required_timeout) if not raw: cksum = 256 - reduce(lambda x, y: x+y, map(ord, data)) % 256 data = data + '{:02X}'.format(cksum) if int(data[0:2], 16) != len(data)-2: LOG.debug("message length wrong: %s", data) LOG.debug("write_data '%s'", data) self._transport.write((data + '\r\n').encode())
python
def write_data(self, data, response_required=None, timeout=5.0, raw=False): """Write data on the asyncio Protocol""" if self._transport is None: return if self._paused: return if self._waiting_for_response: LOG.debug("queueing write %s", data) self._queued_writes.append((data, response_required, timeout)) return if response_required: self._waiting_for_response = response_required if timeout > 0: self._timeout_task = self.loop.call_later( timeout, self._response_required_timeout) if not raw: cksum = 256 - reduce(lambda x, y: x+y, map(ord, data)) % 256 data = data + '{:02X}'.format(cksum) if int(data[0:2], 16) != len(data)-2: LOG.debug("message length wrong: %s", data) LOG.debug("write_data '%s'", data) self._transport.write((data + '\r\n').encode())
[ "def", "write_data", "(", "self", ",", "data", ",", "response_required", "=", "None", ",", "timeout", "=", "5.0", ",", "raw", "=", "False", ")", ":", "if", "self", ".", "_transport", "is", "None", ":", "return", "if", "self", ".", "_paused", ":", "re...
Write data on the asyncio Protocol
[ "Write", "data", "on", "the", "asyncio", "Protocol" ]
078d0de30840c3fab46f1f8534d98df557931e91
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/proto.py#L82-L108
train
51,690
tym-xqo/nerium
nerium/utils.py
unwrap_querystring_lists
def unwrap_querystring_lists(obj): """Convert responder querystring params, pulling values out of list if there's only one. """ new_dict = { key: (obj[key][0] if len(obj[key]) == 1 else obj[key]) for key in obj.keys() } return new_dict
python
def unwrap_querystring_lists(obj): """Convert responder querystring params, pulling values out of list if there's only one. """ new_dict = { key: (obj[key][0] if len(obj[key]) == 1 else obj[key]) for key in obj.keys() } return new_dict
[ "def", "unwrap_querystring_lists", "(", "obj", ")", ":", "new_dict", "=", "{", "key", ":", "(", "obj", "[", "key", "]", "[", "0", "]", "if", "len", "(", "obj", "[", "key", "]", ")", "==", "1", "else", "obj", "[", "key", "]", ")", "for", "key", ...
Convert responder querystring params, pulling values out of list if there's only one.
[ "Convert", "responder", "querystring", "params", "pulling", "values", "out", "of", "list", "if", "there", "s", "only", "one", "." ]
b234847d95f37c3a49dff15a189205fe5bbbc05f
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/utils.py#L1-L10
train
51,691
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.prerequisites
def prerequisites(self): """ Iterates through the inputs of the pipelinen and determines the all prerequisite pipelines """ # Loop through the inputs to the pipeline and add the instancemethods # for the pipelines to generate each of the processed inputs prereqs = defaultdict(set) for input in self.inputs: # @ReservedAssignment spec = self._study.spec(input) # Could be an input to the study or optional acquired spec if spec.is_spec and spec.derived: prereqs[spec.pipeline_getter].add(input.name) return prereqs
python
def prerequisites(self): """ Iterates through the inputs of the pipelinen and determines the all prerequisite pipelines """ # Loop through the inputs to the pipeline and add the instancemethods # for the pipelines to generate each of the processed inputs prereqs = defaultdict(set) for input in self.inputs: # @ReservedAssignment spec = self._study.spec(input) # Could be an input to the study or optional acquired spec if spec.is_spec and spec.derived: prereqs[spec.pipeline_getter].add(input.name) return prereqs
[ "def", "prerequisites", "(", "self", ")", ":", "# Loop through the inputs to the pipeline and add the instancemethods", "# for the pipelines to generate each of the processed inputs", "prereqs", "=", "defaultdict", "(", "set", ")", "for", "input", "in", "self", ".", "inputs", ...
Iterates through the inputs of the pipelinen and determines the all prerequisite pipelines
[ "Iterates", "through", "the", "inputs", "of", "the", "pipelinen", "and", "determines", "the", "all", "prerequisite", "pipelines" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L147-L160
train
51,692
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.add
def add(self, name, interface, inputs=None, outputs=None, requirements=None, wall_time=None, annotations=None, **kwargs): """ Adds a processing Node to the pipeline Parameters ---------- name : str Name for the node interface : nipype.Interface The interface to use for the node inputs : dict[str, (str, FileFormat) | (Node, str)] Connections from inputs of the pipeline and outputs of other nodes to inputs of node. The keys of the dictionary are the field names and the values are 2-tuple containing either the name of the data spec and the data format it is expected in for pipeline inputs or the sending Node and the the name of an output of the sending Node. Note that pipeline inputs can be specified outside this method using the 'connect_input' method and connections between nodes with the the 'connect' method. outputs : dict[str, (str, FileFormat)] Connections to outputs of the pipeline from fields of the interface. The keys of the dictionary are the names of the data specs that will be written to and the values are the interface field name and the data format it is produced in. Note that output connections can also be specified using the 'connect_output' method. requirements : list(Requirement) List of required packages need for the node to run (default: []) wall_time : float Time required to execute the node in minutes (default: 1) mem_gb : int Required memory for the node in GB n_procs : int Preferred number of threads to run the node on (default: 1) annotations : dict[str, *] Additional annotations to add to the node, which may be used by the Processor node to optimise execution (e.g. 'gpu': True) iterfield : str Name of field to be passed an iterable to iterator over. If present, a MapNode will be created instead of a regular node joinsource : str Name of iterator field to join. Typically one of the implicit iterators (i.e. 
Study.SUBJECT_ID or Study.VISIT_ID) to join over the subjects and/or visits joinfield : str Name of field to pass the joined list when creating a JoinNode Returns ------- node : Node The Node object that has been added to the pipeline """ if annotations is None: annotations = {} if requirements is None: requirements = [] if wall_time is None: wall_time = self.study.processor.default_wall_time if 'mem_gb' not in kwargs or kwargs['mem_gb'] is None: kwargs['mem_gb'] = self.study.processor.default_mem_gb if 'iterfield' in kwargs: if 'joinfield' in kwargs or 'joinsource' in kwargs: raise ArcanaDesignError( "Cannot provide both joinsource and iterfield to when " "attempting to add '{}' node to {}" .foramt(name, self._error_msg_loc)) node_cls = self.study.environment.node_types['map'] elif 'joinsource' in kwargs or 'joinfield' in kwargs: if not ('joinfield' in kwargs and 'joinsource' in kwargs): raise ArcanaDesignError( "Both joinsource and joinfield kwargs are required to " "create a JoinNode (see {})".format(name, self._error_msg_loc)) joinsource = kwargs['joinsource'] if joinsource in self.study.ITERFIELDS: self._iterator_joins.add(joinsource) node_cls = self.study.environment.node_types['join'] # Prepend name of pipeline of joinsource to match name of nodes kwargs['joinsource'] = '{}_{}'.format(self.name, joinsource) else: node_cls = self.study.environment.node_types['base'] # Create node node = node_cls(self.study.environment, interface, name="{}_{}".format(self._name, name), requirements=requirements, wall_time=wall_time, annotations=annotations, **kwargs) # Ensure node is added to workflow self._workflow.add_nodes([node]) # Connect inputs, outputs and internal connections if inputs is not None: assert isinstance(inputs, dict) for node_input, connect_from in inputs.items(): if isinstance(connect_from[0], basestring): input_spec, input_format = connect_from self.connect_input(input_spec, node, node_input, input_format) else: conn_node, conn_field = connect_from 
self.connect(conn_node, conn_field, node, node_input) if outputs is not None: assert isinstance(outputs, dict) for output_spec, (node_output, output_format) in outputs.items(): self.connect_output(output_spec, node, node_output, output_format) return node
python
def add(self, name, interface, inputs=None, outputs=None, requirements=None, wall_time=None, annotations=None, **kwargs): """ Adds a processing Node to the pipeline Parameters ---------- name : str Name for the node interface : nipype.Interface The interface to use for the node inputs : dict[str, (str, FileFormat) | (Node, str)] Connections from inputs of the pipeline and outputs of other nodes to inputs of node. The keys of the dictionary are the field names and the values are 2-tuple containing either the name of the data spec and the data format it is expected in for pipeline inputs or the sending Node and the the name of an output of the sending Node. Note that pipeline inputs can be specified outside this method using the 'connect_input' method and connections between nodes with the the 'connect' method. outputs : dict[str, (str, FileFormat)] Connections to outputs of the pipeline from fields of the interface. The keys of the dictionary are the names of the data specs that will be written to and the values are the interface field name and the data format it is produced in. Note that output connections can also be specified using the 'connect_output' method. requirements : list(Requirement) List of required packages need for the node to run (default: []) wall_time : float Time required to execute the node in minutes (default: 1) mem_gb : int Required memory for the node in GB n_procs : int Preferred number of threads to run the node on (default: 1) annotations : dict[str, *] Additional annotations to add to the node, which may be used by the Processor node to optimise execution (e.g. 'gpu': True) iterfield : str Name of field to be passed an iterable to iterator over. If present, a MapNode will be created instead of a regular node joinsource : str Name of iterator field to join. Typically one of the implicit iterators (i.e. 
Study.SUBJECT_ID or Study.VISIT_ID) to join over the subjects and/or visits joinfield : str Name of field to pass the joined list when creating a JoinNode Returns ------- node : Node The Node object that has been added to the pipeline """ if annotations is None: annotations = {} if requirements is None: requirements = [] if wall_time is None: wall_time = self.study.processor.default_wall_time if 'mem_gb' not in kwargs or kwargs['mem_gb'] is None: kwargs['mem_gb'] = self.study.processor.default_mem_gb if 'iterfield' in kwargs: if 'joinfield' in kwargs or 'joinsource' in kwargs: raise ArcanaDesignError( "Cannot provide both joinsource and iterfield to when " "attempting to add '{}' node to {}" .foramt(name, self._error_msg_loc)) node_cls = self.study.environment.node_types['map'] elif 'joinsource' in kwargs or 'joinfield' in kwargs: if not ('joinfield' in kwargs and 'joinsource' in kwargs): raise ArcanaDesignError( "Both joinsource and joinfield kwargs are required to " "create a JoinNode (see {})".format(name, self._error_msg_loc)) joinsource = kwargs['joinsource'] if joinsource in self.study.ITERFIELDS: self._iterator_joins.add(joinsource) node_cls = self.study.environment.node_types['join'] # Prepend name of pipeline of joinsource to match name of nodes kwargs['joinsource'] = '{}_{}'.format(self.name, joinsource) else: node_cls = self.study.environment.node_types['base'] # Create node node = node_cls(self.study.environment, interface, name="{}_{}".format(self._name, name), requirements=requirements, wall_time=wall_time, annotations=annotations, **kwargs) # Ensure node is added to workflow self._workflow.add_nodes([node]) # Connect inputs, outputs and internal connections if inputs is not None: assert isinstance(inputs, dict) for node_input, connect_from in inputs.items(): if isinstance(connect_from[0], basestring): input_spec, input_format = connect_from self.connect_input(input_spec, node, node_input, input_format) else: conn_node, conn_field = connect_from 
self.connect(conn_node, conn_field, node, node_input) if outputs is not None: assert isinstance(outputs, dict) for output_spec, (node_output, output_format) in outputs.items(): self.connect_output(output_spec, node, node_output, output_format) return node
[ "def", "add", "(", "self", ",", "name", ",", "interface", ",", "inputs", "=", "None", ",", "outputs", "=", "None", ",", "requirements", "=", "None", ",", "wall_time", "=", "None", ",", "annotations", "=", "None", ",", "*", "*", "kwargs", ")", ":", ...
Adds a processing Node to the pipeline Parameters ---------- name : str Name for the node interface : nipype.Interface The interface to use for the node inputs : dict[str, (str, FileFormat) | (Node, str)] Connections from inputs of the pipeline and outputs of other nodes to inputs of node. The keys of the dictionary are the field names and the values are 2-tuple containing either the name of the data spec and the data format it is expected in for pipeline inputs or the sending Node and the the name of an output of the sending Node. Note that pipeline inputs can be specified outside this method using the 'connect_input' method and connections between nodes with the the 'connect' method. outputs : dict[str, (str, FileFormat)] Connections to outputs of the pipeline from fields of the interface. The keys of the dictionary are the names of the data specs that will be written to and the values are the interface field name and the data format it is produced in. Note that output connections can also be specified using the 'connect_output' method. requirements : list(Requirement) List of required packages need for the node to run (default: []) wall_time : float Time required to execute the node in minutes (default: 1) mem_gb : int Required memory for the node in GB n_procs : int Preferred number of threads to run the node on (default: 1) annotations : dict[str, *] Additional annotations to add to the node, which may be used by the Processor node to optimise execution (e.g. 'gpu': True) iterfield : str Name of field to be passed an iterable to iterator over. If present, a MapNode will be created instead of a regular node joinsource : str Name of iterator field to join. Typically one of the implicit iterators (i.e. Study.SUBJECT_ID or Study.VISIT_ID) to join over the subjects and/or visits joinfield : str Name of field to pass the joined list when creating a JoinNode Returns ------- node : Node The Node object that has been added to the pipeline
[ "Adds", "a", "processing", "Node", "to", "the", "pipeline" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L173-L281
train
51,693
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.connect_input
def connect_input(self, spec_name, node, node_input, format=None, **kwargs): # @ReservedAssignment @IgnorePep8 """ Connects a study fileset_spec as an input to the provided node Parameters ---------- spec_name : str Name of the study data spec (or one of the IDs from the iterator nodes, 'subject_id' or 'visit_id') to connect to the node node : arcana.Node The node to connect the input to node_input : str Name of the input on the node to connect the fileset spec to format : FileFormat | None The file format the input is expected in. If it differs from the format in data spec or of study input then an implicit conversion is performed. If None the file format in the data spec is assumed """ if spec_name in self.study.ITERFIELDS: self._iterator_conns[spec_name].append((node, node_input, format)) else: name = self._map_name(spec_name, self._input_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed input '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) self._input_conns[name].append((node, node_input, format, kwargs))
python
def connect_input(self, spec_name, node, node_input, format=None, **kwargs): # @ReservedAssignment @IgnorePep8 """ Connects a study fileset_spec as an input to the provided node Parameters ---------- spec_name : str Name of the study data spec (or one of the IDs from the iterator nodes, 'subject_id' or 'visit_id') to connect to the node node : arcana.Node The node to connect the input to node_input : str Name of the input on the node to connect the fileset spec to format : FileFormat | None The file format the input is expected in. If it differs from the format in data spec or of study input then an implicit conversion is performed. If None the file format in the data spec is assumed """ if spec_name in self.study.ITERFIELDS: self._iterator_conns[spec_name].append((node, node_input, format)) else: name = self._map_name(spec_name, self._input_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed input '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) self._input_conns[name].append((node, node_input, format, kwargs))
[ "def", "connect_input", "(", "self", ",", "spec_name", ",", "node", ",", "node_input", ",", "format", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# @ReservedAssignment @IgnorePep8", "if", "spec_name", "in", "self", ".", "study", ".", "ITERFIELDS", ":", ...
Connects a study fileset_spec as an input to the provided node Parameters ---------- spec_name : str Name of the study data spec (or one of the IDs from the iterator nodes, 'subject_id' or 'visit_id') to connect to the node node : arcana.Node The node to connect the input to node_input : str Name of the input on the node to connect the fileset spec to format : FileFormat | None The file format the input is expected in. If it differs from the format in data spec or of study input then an implicit conversion is performed. If None the file format in the data spec is assumed
[ "Connects", "a", "study", "fileset_spec", "as", "an", "input", "to", "the", "provided", "node" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L283-L311
train
51,694
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.connect_output
def connect_output(self, spec_name, node, node_output, format=None, # @ReservedAssignment @IgnorePep8 **kwargs): """ Connects an output to a study fileset spec Parameters ---------- spec_name : str Name of the study fileset spec to connect to node : arcana.Node The node to connect the output from node_output : str Name of the output on the node to connect to the fileset format : FileFormat | None The file format the output is returned in. If it differs from the format in data spec then an implicit conversion is performed. If None the it is assumed to be returned in the file format of the entry the data spec """ name = self._map_name(spec_name, self._output_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed output '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) if name in self._output_conns: prev_node, prev_node_output, _, _ = self._output_conns[name] logger.info( "Reassigning '{}' output from {}:{} to {}:{} in {}" .format(name, prev_node.name, prev_node_output, node.name, node_output, self._error_msg_loc)) self._output_conns[name] = (node, node_output, format, kwargs)
python
def connect_output(self, spec_name, node, node_output, format=None, # @ReservedAssignment @IgnorePep8 **kwargs): """ Connects an output to a study fileset spec Parameters ---------- spec_name : str Name of the study fileset spec to connect to node : arcana.Node The node to connect the output from node_output : str Name of the output on the node to connect to the fileset format : FileFormat | None The file format the output is returned in. If it differs from the format in data spec then an implicit conversion is performed. If None the it is assumed to be returned in the file format of the entry the data spec """ name = self._map_name(spec_name, self._output_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed output '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) if name in self._output_conns: prev_node, prev_node_output, _, _ = self._output_conns[name] logger.info( "Reassigning '{}' output from {}:{} to {}:{} in {}" .format(name, prev_node.name, prev_node_output, node.name, node_output, self._error_msg_loc)) self._output_conns[name] = (node, node_output, format, kwargs)
[ "def", "connect_output", "(", "self", ",", "spec_name", ",", "node", ",", "node_output", ",", "format", "=", "None", ",", "# @ReservedAssignment @IgnorePep8", "*", "*", "kwargs", ")", ":", "name", "=", "self", ".", "_map_name", "(", "spec_name", ",", "self",...
Connects an output to a study fileset spec Parameters ---------- spec_name : str Name of the study fileset spec to connect to node : arcana.Node The node to connect the output from node_output : str Name of the output on the node to connect to the fileset format : FileFormat | None The file format the output is returned in. If it differs from the format in data spec then an implicit conversion is performed. If None the it is assumed to be returned in the file format of the entry the data spec
[ "Connects", "an", "output", "to", "a", "study", "fileset", "spec" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L313-L344
train
51,695
MonashBI/arcana
arcana/pipeline/base.py
Pipeline._map_name
def _map_name(self, name, mapper): """ Maps a spec name to a new value based on the provided mapper """ if mapper is not None: if isinstance(mapper, basestring): name = mapper + name try: name = mapper[name] except KeyError: pass return name
python
def _map_name(self, name, mapper): """ Maps a spec name to a new value based on the provided mapper """ if mapper is not None: if isinstance(mapper, basestring): name = mapper + name try: name = mapper[name] except KeyError: pass return name
[ "def", "_map_name", "(", "self", ",", "name", ",", "mapper", ")", ":", "if", "mapper", "is", "not", "None", ":", "if", "isinstance", "(", "mapper", ",", "basestring", ")", ":", "name", "=", "mapper", "+", "name", "try", ":", "name", "=", "mapper", ...
Maps a spec name to a new value based on the provided mapper
[ "Maps", "a", "spec", "name", "to", "a", "new", "value", "based", "on", "the", "provided", "mapper" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L346-L357
train
51,696
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.requires_conversion
def requires_conversion(cls, fileset, file_format): """Checks whether the fileset matches the requested file format""" if file_format is None: return False try: filset_format = fileset.format except AttributeError: return False # Field input else: return (file_format != filset_format)
python
def requires_conversion(cls, fileset, file_format): """Checks whether the fileset matches the requested file format""" if file_format is None: return False try: filset_format = fileset.format except AttributeError: return False # Field input else: return (file_format != filset_format)
[ "def", "requires_conversion", "(", "cls", ",", "fileset", ",", "file_format", ")", ":", "if", "file_format", "is", "None", ":", "return", "False", "try", ":", "filset_format", "=", "fileset", ".", "format", "except", "AttributeError", ":", "return", "False", ...
Checks whether the fileset matches the requested file format
[ "Checks", "whether", "the", "fileset", "matches", "the", "requested", "file", "format" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L449-L458
train
51,697
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.save_graph
def save_graph(self, fname, style='flat', format='png', **kwargs): # @ReservedAssignment @IgnorePep8 """ Saves a graph of the pipeline to file Parameters ---------- fname : str The filename for the saved graph style : str The style of the graph, can be one of can be one of 'orig', 'flat', 'exec', 'hierarchical' plot : bool Whether to load and plot the graph after it has been written """ fname = os.path.expanduser(fname) if not fname.endswith('.png'): fname += '.png' orig_dir = os.getcwd() tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) workflow = self._workflow workflow.write_graph(graph2use=style, format=format, **kwargs) os.chdir(orig_dir) try: shutil.move(os.path.join(tmpdir, 'graph_detailed.{}' .format(format)), fname) except IOError as e: if e.errno == errno.ENOENT: shutil.move(os.path.join(tmpdir, 'graph.{}'.format(format)), fname) else: raise shutil.rmtree(tmpdir)
python
def save_graph(self, fname, style='flat', format='png', **kwargs): # @ReservedAssignment @IgnorePep8 """ Saves a graph of the pipeline to file Parameters ---------- fname : str The filename for the saved graph style : str The style of the graph, can be one of can be one of 'orig', 'flat', 'exec', 'hierarchical' plot : bool Whether to load and plot the graph after it has been written """ fname = os.path.expanduser(fname) if not fname.endswith('.png'): fname += '.png' orig_dir = os.getcwd() tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) workflow = self._workflow workflow.write_graph(graph2use=style, format=format, **kwargs) os.chdir(orig_dir) try: shutil.move(os.path.join(tmpdir, 'graph_detailed.{}' .format(format)), fname) except IOError as e: if e.errno == errno.ENOENT: shutil.move(os.path.join(tmpdir, 'graph.{}'.format(format)), fname) else: raise shutil.rmtree(tmpdir)
[ "def", "save_graph", "(", "self", ",", "fname", ",", "style", "=", "'flat'", ",", "format", "=", "'png'", ",", "*", "*", "kwargs", ")", ":", "# @ReservedAssignment @IgnorePep8", "fname", "=", "os", ".", "path", ".", "expanduser", "(", "fname", ")", "if",...
Saves a graph of the pipeline to file Parameters ---------- fname : str The filename for the saved graph style : str The style of the graph, can be one of can be one of 'orig', 'flat', 'exec', 'hierarchical' plot : bool Whether to load and plot the graph after it has been written
[ "Saves", "a", "graph", "of", "the", "pipeline", "to", "file" ]
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L473-L505
train
51,698
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.cap
def cap(self): """ "Caps" the construction of the pipeline, signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance. """ to_cap = (self._inputnodes, self._outputnodes, self._prov) if to_cap == (None, None, None): self._inputnodes = { f: self._make_inputnode(f) for f in self.input_frequencies} self._outputnodes = { f: self._make_outputnode(f) for f in self.output_frequencies} self._prov = self._gen_prov() elif None in to_cap: raise ArcanaError( "If one of _inputnodes, _outputnodes or _prov is not None then" " they all should be in {}".format(self))
python
def cap(self): """ "Caps" the construction of the pipeline, signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance. """ to_cap = (self._inputnodes, self._outputnodes, self._prov) if to_cap == (None, None, None): self._inputnodes = { f: self._make_inputnode(f) for f in self.input_frequencies} self._outputnodes = { f: self._make_outputnode(f) for f in self.output_frequencies} self._prov = self._gen_prov() elif None in to_cap: raise ArcanaError( "If one of _inputnodes, _outputnodes or _prov is not None then" " they all should be in {}".format(self))
[ "def", "cap", "(", "self", ")", ":", "to_cap", "=", "(", "self", ".", "_inputnodes", ",", "self", ".", "_outputnodes", ",", "self", ".", "_prov", ")", "if", "to_cap", "==", "(", "None", ",", "None", ",", "None", ")", ":", "self", ".", "_inputnodes"...
"Caps" the construction of the pipeline, signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance.
[ "Caps", "the", "construction", "of", "the", "pipeline", "signifying", "that", "no", "more", "inputs", "and", "outputs", "are", "expected", "to", "be", "added", "and", "therefore", "the", "input", "and", "output", "nodes", "can", "be", "created", "along", "wi...
d6271a29d13733d00422d11417af8d200be62acc
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L694-L710
train
51,699